/* Code for range operators.
   Copyright (C) 2017-2023 Free Software Foundation, Inc.
   Contributed by Andrew MacLeod <amacleod@redhat.com>
   and Aldy Hernandez <aldyh@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "insn-codes.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "cfganal.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
43 #include "tree-eh.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "wide-int.h"
47 #include "value-relation.h"
48 #include "range-op.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"

// Instantiate the operators which apply to multiple types here.

operator_equal op_equal;
operator_not_equal op_not_equal;
operator_lt op_lt;
operator_le op_le;
operator_gt op_gt;
operator_ge op_ge;
operator_identity op_ident;
operator_cst op_cst;
operator_cast op_cast;
operator_plus op_plus;
operator_abs op_abs;
operator_minus op_minus;
operator_negate op_negate;
operator_mult op_mult;
operator_addr_expr op_addr;
operator_bitwise_not op_bitwise_not;
operator_bitwise_xor op_bitwise_xor;
operator_bitwise_and op_bitwise_and;
operator_bitwise_or op_bitwise_or;
operator_min op_min;
operator_max op_max;

// Instantiate a range operator table.
range_op_table operator_table;

// Invoke the initialization routines for each class of range.

range_op_table::range_op_table ()
{
  initialize_integral_ops ();
  initialize_pointer_ops ();
  initialize_float_ops ();

  set (EQ_EXPR, op_equal);
  set (NE_EXPR, op_not_equal);
  set (LT_EXPR, op_lt);
  set (LE_EXPR, op_le);
  set (GT_EXPR, op_gt);
  set (GE_EXPR, op_ge);
  set (SSA_NAME, op_ident);
  set (PAREN_EXPR, op_ident);
  set (OBJ_TYPE_REF, op_ident);
  set (REAL_CST, op_cst);
  set (INTEGER_CST, op_cst);
  set (NOP_EXPR, op_cast);
  set (CONVERT_EXPR, op_cast);
  set (PLUS_EXPR, op_plus);
  set (ABS_EXPR, op_abs);
  set (MINUS_EXPR, op_minus);
  set (NEGATE_EXPR, op_negate);
  set (MULT_EXPR, op_mult);

  // These occur in both integer and pointer tables, but currently share
  // the integral implementation.
  set (ADDR_EXPR, op_addr);
  set (BIT_NOT_EXPR, op_bitwise_not);
  set (BIT_XOR_EXPR, op_bitwise_xor);

  // These are in both integer and pointer tables, but pointer has a different
  // implementation.
  // If commented out, there is a hybrid version in range-op-ptr.cc which
  // is used until there is a pointer range class.  Then we can simply
  // uncomment the operator here and use the unified version.

  // set (BIT_AND_EXPR, op_bitwise_and);
  // set (BIT_IOR_EXPR, op_bitwise_or);
  // set (MIN_EXPR, op_min);
  // set (MAX_EXPR, op_max);
}

// Instantiate a default range operator for opcodes with no entry.

range_operator default_operator;

// Create a default range_op_handler.

range_op_handler::range_op_handler ()
{
  m_operator = &default_operator;
}

// Create a range_op_handler for CODE.  Use a default operator if CODE
// does not have an entry.

range_op_handler::range_op_handler (unsigned code)
{
  m_operator = operator_table[code];
  if (!m_operator)
    m_operator = &default_operator;
}

// Return TRUE if this handler has a non-default operator.

range_op_handler::operator bool () const
{
  return m_operator != &default_operator;
}

// Return a pointer to the range operator associated with this handler.
// If it is a default operator, return NULL.
// This is the equivalent of indexing the range table.

range_operator *
range_op_handler::range_op () const
{
  if (m_operator != &default_operator)
    return m_operator;
  return NULL;
}
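
// A minimal usage sketch (illustrative only; the names below are
// placeholders, not declarations from this file): look up the handler for
// an opcode and fold two integer ranges through it.
//
//   range_op_handler handler (PLUS_EXPR);
//   if (handler)
//     {
//       int_range_max res;
//       handler.fold_range (res, type, op1_range, op2_range);
//     }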

// Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
// This is used to produce a unique value for each dispatch pattern.  Shift
// values are based on the size of the m_discriminator field in value_range.h.

constexpr unsigned
dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
{
  return ((lhs << 8) + (op1 << 4) + (op2));
}

// These are the supported dispatch patterns.  These map to the parameter list
// of the routines in range_operator.  Note the last 3 characters are
// shorthand for the LHS, OP1, and OP2 range discriminator class.

const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
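
// For example (a sketch; the actual discriminator values come from
// value_range.h): RO_IFF names the pattern "irange = frange OP frange",
// i.e. the encoding
//   (VR_IRANGE << 8) + (VR_FRANGE << 4) + VR_FRANGE,
// which is what a float comparison producing a boolean irange result
// dispatches on in the switches below.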

// Return a dispatch value for parameter types LHS, OP1 and OP2.

unsigned
range_op_handler::dispatch_kind (const vrange &lhs, const vrange &op1,
				 const vrange& op2) const
{
  return dispatch_trio (lhs.m_discriminator, op1.m_discriminator,
			op2.m_discriminator);
}

// Dispatch a call to fold_range based on the types of R, LH and RH.

bool
range_op_handler::fold_range (vrange &r, tree type,
			      const vrange &lh,
			      const vrange &rh,
			      relation_trio rel) const
{
  gcc_checking_assert (m_operator);
  switch (dispatch_kind (r, lh, rh))
    {
    case RO_III:
      return m_operator->fold_range (as_a <irange> (r), type,
				     as_a <irange> (lh),
				     as_a <irange> (rh), rel);
    case RO_IFI:
      return m_operator->fold_range (as_a <irange> (r), type,
				     as_a <frange> (lh),
				     as_a <irange> (rh), rel);
    case RO_IFF:
      return m_operator->fold_range (as_a <irange> (r), type,
				     as_a <frange> (lh),
				     as_a <frange> (rh), rel);
    case RO_FFF:
      return m_operator->fold_range (as_a <frange> (r), type,
				     as_a <frange> (lh),
				     as_a <frange> (rh), rel);
    case RO_FII:
      return m_operator->fold_range (as_a <frange> (r), type,
				     as_a <irange> (lh),
				     as_a <irange> (rh), rel);
    default:
      return false;
    }
}

// Dispatch a call to op1_range based on the types of R, LHS and OP2.

bool
range_op_handler::op1_range (vrange &r, tree type,
			     const vrange &lhs,
			     const vrange &op2,
			     relation_trio rel) const
{
  gcc_checking_assert (m_operator);

  if (lhs.undefined_p ())
    return false;
  switch (dispatch_kind (r, lhs, op2))
    {
    case RO_III:
      return m_operator->op1_range (as_a <irange> (r), type,
				    as_a <irange> (lhs),
				    as_a <irange> (op2), rel);
    case RO_FIF:
      return m_operator->op1_range (as_a <frange> (r), type,
				    as_a <irange> (lhs),
				    as_a <frange> (op2), rel);
    case RO_FFF:
      return m_operator->op1_range (as_a <frange> (r), type,
				    as_a <frange> (lhs),
				    as_a <frange> (op2), rel);
    default:
      return false;
    }
}

// Dispatch a call to op2_range based on the types of R, LHS and OP1.

bool
range_op_handler::op2_range (vrange &r, tree type,
			     const vrange &lhs,
			     const vrange &op1,
			     relation_trio rel) const
{
  gcc_checking_assert (m_operator);
  if (lhs.undefined_p ())
    return false;

  switch (dispatch_kind (r, lhs, op1))
    {
    case RO_III:
      return m_operator->op2_range (as_a <irange> (r), type,
				    as_a <irange> (lhs),
				    as_a <irange> (op1), rel);
    case RO_FIF:
      return m_operator->op2_range (as_a <frange> (r), type,
				    as_a <irange> (lhs),
				    as_a <frange> (op1), rel);
    case RO_FFF:
      return m_operator->op2_range (as_a <frange> (r), type,
				    as_a <frange> (lhs),
				    as_a <frange> (op1), rel);
    default:
      return false;
    }
}

// Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.

relation_kind
range_op_handler::lhs_op1_relation (const vrange &lhs,
				    const vrange &op1,
				    const vrange &op2,
				    relation_kind rel) const
{
  gcc_checking_assert (m_operator);

  switch (dispatch_kind (lhs, op1, op2))
    {
    case RO_III:
      return m_operator->lhs_op1_relation (as_a <irange> (lhs),
					   as_a <irange> (op1),
					   as_a <irange> (op2), rel);
    case RO_IFF:
      return m_operator->lhs_op1_relation (as_a <irange> (lhs),
					   as_a <frange> (op1),
					   as_a <frange> (op2), rel);
    case RO_FFF:
      return m_operator->lhs_op1_relation (as_a <frange> (lhs),
					   as_a <frange> (op1),
					   as_a <frange> (op2), rel);
    default:
      return VREL_VARYING;
    }
}

// Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.

relation_kind
range_op_handler::lhs_op2_relation (const vrange &lhs,
				    const vrange &op1,
				    const vrange &op2,
				    relation_kind rel) const
{
  gcc_checking_assert (m_operator);
  switch (dispatch_kind (lhs, op1, op2))
    {
    case RO_III:
      return m_operator->lhs_op2_relation (as_a <irange> (lhs),
					   as_a <irange> (op1),
					   as_a <irange> (op2), rel);
    case RO_IFF:
      return m_operator->lhs_op2_relation (as_a <irange> (lhs),
					   as_a <frange> (op1),
					   as_a <frange> (op2), rel);
    case RO_FFF:
      return m_operator->lhs_op2_relation (as_a <frange> (lhs),
					   as_a <frange> (op1),
					   as_a <frange> (op2), rel);
    default:
      return VREL_VARYING;
    }
}

// Dispatch a call to op1_op2_relation based on the type of LHS.

relation_kind
range_op_handler::op1_op2_relation (const vrange &lhs,
				    const vrange &op1,
				    const vrange &op2) const
{
  gcc_checking_assert (m_operator);
  switch (dispatch_kind (lhs, op1, op2))
    {
    case RO_III:
      return m_operator->op1_op2_relation (as_a <irange> (lhs),
					   as_a <irange> (op1),
					   as_a <irange> (op2));

    case RO_IFF:
      return m_operator->op1_op2_relation (as_a <irange> (lhs),
					   as_a <frange> (op1),
					   as_a <frange> (op2));

    case RO_FFF:
      return m_operator->op1_op2_relation (as_a <frange> (lhs),
					   as_a <frange> (op1),
					   as_a <frange> (op2));

    default:
      return VREL_VARYING;
    }
}

bool
range_op_handler::overflow_free_p (const vrange &lh,
				   const vrange &rh,
				   relation_trio rel) const
{
  gcc_checking_assert (m_operator);
  switch (dispatch_kind (lh, lh, rh))
    {
    case RO_III:
      return m_operator->overflow_free_p (as_a <irange> (lh),
					  as_a <irange> (rh),
					  rel);
    default:
      return false;
    }
}

// Update the known bitmasks in R when applying the operation CODE to
// LH and RH.

void
update_known_bitmask (irange &r, tree_code code,
		      const irange &lh, const irange &rh)
{
  if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ()
      || r.singleton_p ())
    return;

  widest_int widest_value, widest_mask;
  tree type = r.type ();
  signop sign = TYPE_SIGN (type);
  int prec = TYPE_PRECISION (type);
  irange_bitmask lh_bits = lh.get_bitmask ();
  irange_bitmask rh_bits = rh.get_bitmask ();

  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      bit_value_unop (code, sign, prec, &widest_value, &widest_mask,
		      TYPE_SIGN (lh.type ()),
		      TYPE_PRECISION (lh.type ()),
		      widest_int::from (lh_bits.value (), sign),
		      widest_int::from (lh_bits.mask (), sign));
      break;
    case GIMPLE_BINARY_RHS:
      bit_value_binop (code, sign, prec, &widest_value, &widest_mask,
		       TYPE_SIGN (lh.type ()),
		       TYPE_PRECISION (lh.type ()),
		       widest_int::from (lh_bits.value (), sign),
		       widest_int::from (lh_bits.mask (), sign),
		       TYPE_SIGN (rh.type ()),
		       TYPE_PRECISION (rh.type ()),
		       widest_int::from (rh_bits.value (), sign),
		       widest_int::from (rh_bits.mask (), sign));
      break;
    default:
      gcc_unreachable ();
    }

  wide_int mask = wide_int::from (widest_mask, prec, sign);
  wide_int value = wide_int::from (widest_value, prec, sign);
  // Bitmasks must have the unknown value bits cleared.
  value &= ~mask;
  irange_bitmask bm (value, mask);
  r.update_bitmask (bm);
}
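
// As a worked illustration (numbers invented for this comment): an 8-bit
// value known to be a multiple of 4 carries the bitmask pair value 0x00,
// mask 0xFC -- the two low bits are known to be zero and the rest are
// unknown.  bit_value_binop combines two such pairs for the opcode, so
// adding two multiples of 4 keeps the low two bits known zero in R.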

// Return the upper limit for a type.

static inline wide_int
max_limit (const_tree type)
{
  return irange_val_max (type);
}

// Return the lower limit for a type.

static inline wide_int
min_limit (const_tree type)
{
  return irange_val_min (type);
}

// Return false if shifting by OP is undefined behavior.  Otherwise, return
// true and the range it is to be shifted by.  This allows trimming out of
// undefined ranges, leaving only valid ranges if there are any.

static inline bool
get_shift_range (irange &r, tree type, const irange &op)
{
  if (op.undefined_p ())
    return false;

  // Build the valid range and intersect it with the shift range.
  r = value_range (op.type (),
		   wi::shwi (0, TYPE_PRECISION (op.type ())),
		   wi::shwi (TYPE_PRECISION (type) - 1,
			     TYPE_PRECISION (op.type ())));
  r.intersect (op);

  // If there are no valid ranges in the shift range, return false.
  if (r.undefined_p ())
    return false;
  return true;
}
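
// For instance (illustrative numbers only): shifting a 32-bit value by an
// operand range of [-5, 40] intersects with the valid shift range [0, 31]
// and yields [0, 31]; an operand range of [35, 40] intersects to UNDEFINED
// and the routine returns false, so the caller can treat the shift as
// invoking undefined behavior.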

// Default wide_int fold operation returns [MIN, MAX].

void
range_operator::wi_fold (irange &r, tree type,
			 const wide_int &lh_lb ATTRIBUTE_UNUSED,
			 const wide_int &lh_ub ATTRIBUTE_UNUSED,
			 const wide_int &rh_lb ATTRIBUTE_UNUSED,
			 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
{
  gcc_checking_assert (r.supports_type_p (type));
  r.set_varying (type);
}

// Call wi_fold when both op1 and op2 are equivalent.  Further split small
// subranges into constants, which can provide better precision.
// For x + y when x == y with a range of [0,4], this produces
// [0,0][2,2][4,4][6,6][8,8] instead of [0,8].
// LIMIT is the maximum number of elements in a range allowed before we
// stop processing them individually.

void
range_operator::wi_fold_in_parts_equiv (irange &r, tree type,
					const wide_int &lh_lb,
					const wide_int &lh_ub,
					unsigned limit) const
{
  int_range_max tmp;
  widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
				 widest_int::from (lh_lb, TYPE_SIGN (type)));
  // If there are 1 to LIMIT values in the LH range, split them up.
  r.set_undefined ();
  if (lh_range >= 0 && lh_range < limit)
    {
      for (unsigned x = 0; x <= lh_range; x++)
	{
	  wide_int val = lh_lb + x;
	  wi_fold (tmp, type, val, val, val, val);
	  r.union_ (tmp);
	}
    }
  // Otherwise just call wi_fold.
  else
    wi_fold (r, type, lh_lb, lh_ub, lh_lb, lh_ub);
}

// Call wi_fold, except further split small subranges into constants.
// This can provide better precision.  For something like 8 << [0,1],
// we produce [8,8][16,16] instead of [8,16].

void
range_operator::wi_fold_in_parts (irange &r, tree type,
				  const wide_int &lh_lb,
				  const wide_int &lh_ub,
				  const wide_int &rh_lb,
				  const wide_int &rh_ub) const
{
  int_range_max tmp;
  widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
				 widest_int::from (rh_lb, TYPE_SIGN (type)));
  widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
				 widest_int::from (lh_lb, TYPE_SIGN (type)));
  // If there are 2, 3, or 4 values in the RH range, do them separately.
  // Call wi_fold_in_parts to check the RH side.
  if (rh_range > 0 && rh_range < 4)
    {
      wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
      if (rh_range > 1)
	{
	  wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
	  r.union_ (tmp);
	  if (rh_range == 3)
	    {
	      wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
	      r.union_ (tmp);
	    }
	}
      wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
      r.union_ (tmp);
    }
  // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
  // The RH side has been checked, so no recursion is needed.
  else if (lh_range > 0 && lh_range < 4)
    {
      wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
      if (lh_range > 1)
	{
	  wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
	  r.union_ (tmp);
	  if (lh_range == 3)
	    {
	      wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
	      r.union_ (tmp);
	    }
	}
      wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
      r.union_ (tmp);
    }
  // Otherwise just call wi_fold.
  else
    wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
}

// The default for fold is to break all ranges into sub-ranges and
// invoke the wi_fold method on each sub-range pair.

bool
range_operator::fold_range (irange &r, tree type,
			    const irange &lh,
			    const irange &rh,
			    relation_trio trio) const
{
  gcc_checking_assert (r.supports_type_p (type));
  if (empty_range_varying (r, type, lh, rh))
    return true;

  relation_kind rel = trio.op1_op2 ();
  unsigned num_lh = lh.num_pairs ();
  unsigned num_rh = rh.num_pairs ();

  // If op1 and op2 are equivalences, then we don't need a complete cross
  // product, just pairs of matching elements.
  if (relation_equiv_p (rel) && lh == rh)
    {
      int_range_max tmp;
      r.set_undefined ();
      for (unsigned x = 0; x < num_lh; ++x)
	{
	  // If the number of subranges is too high, limit subrange creation.
	  unsigned limit = (r.num_pairs () > 32) ? 0 : 8;
	  wide_int lh_lb = lh.lower_bound (x);
	  wide_int lh_ub = lh.upper_bound (x);
	  wi_fold_in_parts_equiv (tmp, type, lh_lb, lh_ub, limit);
	  r.union_ (tmp);
	  if (r.varying_p ())
	    break;
	}
      op1_op2_relation_effect (r, type, lh, rh, rel);
      update_bitmask (r, lh, rh);
      return true;
    }

  // If both ranges are single pairs, fold directly into the result range.
  // If the number of subranges grows too high, produce a summary result as
  // the loop becomes exponential with little benefit.  See PR 103821.
  if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
    {
      wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
			rh.lower_bound (), rh.upper_bound ());
      op1_op2_relation_effect (r, type, lh, rh, rel);
      update_bitmask (r, lh, rh);
      return true;
    }

  int_range_max tmp;
  r.set_undefined ();
  for (unsigned x = 0; x < num_lh; ++x)
    for (unsigned y = 0; y < num_rh; ++y)
      {
	wide_int lh_lb = lh.lower_bound (x);
	wide_int lh_ub = lh.upper_bound (x);
	wide_int rh_lb = rh.lower_bound (y);
	wide_int rh_ub = rh.upper_bound (y);
	wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
	r.union_ (tmp);
	if (r.varying_p ())
	  {
	    op1_op2_relation_effect (r, type, lh, rh, rel);
	    update_bitmask (r, lh, rh);
	    return true;
	  }
      }
  op1_op2_relation_effect (r, type, lh, rh, rel);
  update_bitmask (r, lh, rh);
  return true;
}
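
// As a concrete illustration (numbers chosen for this comment only):
// folding [1,2][8,9] + [10,10] walks the 2x1 sub-range pairs, folding
// [1,2]+[10,10] = [11,12] and [8,9]+[10,10] = [18,19], and unions them
// into [11,12][18,19] rather than the cruder single pair [11,19].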

// The default for op1_range is to return false.

bool
range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
			   tree type ATTRIBUTE_UNUSED,
			   const irange &lhs ATTRIBUTE_UNUSED,
			   const irange &op2 ATTRIBUTE_UNUSED,
			   relation_trio) const
{
  return false;
}

// The default for op2_range is to return false.

bool
range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
			   tree type ATTRIBUTE_UNUSED,
			   const irange &lhs ATTRIBUTE_UNUSED,
			   const irange &op1 ATTRIBUTE_UNUSED,
			   relation_trio) const
{
  return false;
}

// The default relation routines return VREL_VARYING.

relation_kind
range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
				  const irange &op1 ATTRIBUTE_UNUSED,
				  const irange &op2 ATTRIBUTE_UNUSED,
				  relation_kind rel ATTRIBUTE_UNUSED) const
{
  return VREL_VARYING;
}

relation_kind
range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
				  const irange &op1 ATTRIBUTE_UNUSED,
				  const irange &op2 ATTRIBUTE_UNUSED,
				  relation_kind rel ATTRIBUTE_UNUSED) const
{
  return VREL_VARYING;
}

relation_kind
range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
				  const irange &op1 ATTRIBUTE_UNUSED,
				  const irange &op2 ATTRIBUTE_UNUSED) const
{
  return VREL_VARYING;
}

// Default is no relation affects the LHS.

bool
range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
					 tree type ATTRIBUTE_UNUSED,
					 const irange &op1_range ATTRIBUTE_UNUSED,
					 const irange &op2_range ATTRIBUTE_UNUSED,
					 relation_kind rel ATTRIBUTE_UNUSED) const
{
  return false;
}

bool
range_operator::overflow_free_p (const irange &, const irange &,
				 relation_trio) const
{
  return false;
}

// Apply any known bitmask updates based on this operator.

void
range_operator::update_bitmask (irange &, const irange &,
				const irange &) const
{
}

// Create and return a range from a pair of wide-ints that are known
// to have overflowed (or underflowed).

static void
value_range_from_overflowed_bounds (irange &r, tree type,
				    const wide_int &wmin,
				    const wide_int &wmax)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  wide_int tmin = wide_int::from (wmin, prec, sgn);
  wide_int tmax = wide_int::from (wmax, prec, sgn);

  bool covers = false;
  wide_int tem = tmin;
  tmin = tmax + 1;
  if (wi::cmp (tmin, tmax, sgn) < 0)
    covers = true;
  tmax = tem - 1;
  if (wi::cmp (tmax, tem, sgn) > 0)
    covers = true;

  // If the anti-range would cover nothing, drop to varying.
  // Likewise if the anti-range bounds are outside of the type's values.
  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
    r.set_varying (type);
  else
    r.set (type, tmin, tmax, VR_ANTI_RANGE);
}
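
// A worked example (numbers invented for this comment): adding the signed
// 8-bit ranges [100, 120] and [10, 20] gives bounds 110 and 140, where only
// the upper bound overflowed.  Truncating 140 yields -116, and the routine
// builds the anti-range ~[-115, 109], i.e. [-128, -116][110, 127], which is
// exactly the set of values the wrapped addition can produce.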

// Create and return a range from a pair of wide-ints.  MIN_OVF and
// MAX_OVF describe any overflow that might have occurred while
// calculating WMIN and WMAX respectively.

static void
value_range_with_overflow (irange &r, tree type,
			   const wide_int &wmin, const wide_int &wmax,
			   wi::overflow_type min_ovf = wi::OVF_NONE,
			   wi::overflow_type max_ovf = wi::OVF_NONE)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);
  const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);

  // For one bit precision, if max != min, then the range covers all
  // values.
  if (prec == 1 && wi::ne_p (wmax, wmin))
    {
      r.set_varying (type);
      return;
    }

  if (overflow_wraps)
    {
      // If overflow wraps, truncate the values and adjust the range,
      // kind, and bounds appropriately.
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
	{
	  wide_int tmin = wide_int::from (wmin, prec, sgn);
	  wide_int tmax = wide_int::from (wmax, prec, sgn);
	  // If the limits are swapped, we wrapped around and cover
	  // the entire range.
	  if (wi::gt_p (tmin, tmax, sgn))
	    r.set_varying (type);
	  else
	    // No overflow, or both overflow or underflow.  The range
	    // kind stays normal.
	    r.set (type, tmin, tmax);
	  return;
	}

      if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
	  || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
	value_range_from_overflowed_bounds (r, type, wmin, wmax);
      else
	// Other underflow and/or overflow, drop to VR_VARYING.
	r.set_varying (type);
    }
  else
    {
      // If both bounds either underflowed or overflowed, then the result
      // is undefined.
      if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
	  || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
	{
	  r.set_undefined ();
	  return;
	}

      // If overflow does not wrap, saturate to [MIN, MAX].
      wide_int new_lb, new_ub;
      if (min_ovf == wi::OVF_UNDERFLOW)
	new_lb = wi::min_value (prec, sgn);
      else if (min_ovf == wi::OVF_OVERFLOW)
	new_lb = wi::max_value (prec, sgn);
      else
	new_lb = wmin;

      if (max_ovf == wi::OVF_UNDERFLOW)
	new_ub = wi::min_value (prec, sgn);
      else if (max_ovf == wi::OVF_OVERFLOW)
	new_ub = wi::max_value (prec, sgn);
      else
	new_ub = wmax;

      r.set (type, new_lb, new_ub);
    }
}

// Create and return a range from a pair of wide-ints.  Canonicalize
// the case where the bounds are swapped, in which case we transform
// [10,5] into [MIN,5][10,MAX].

static inline void
create_possibly_reversed_range (irange &r, tree type,
				const wide_int &new_lb, const wide_int &new_ub)
{
  signop s = TYPE_SIGN (type);
  // If the bounds are swapped, treat the result as if an overflow occurred.
  if (wi::gt_p (new_lb, new_ub, s))
    value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
  else
    // Otherwise it's just a normal range.
    r.set (type, new_lb, new_ub);
}

// Return the summary information about boolean range LHS.  If EMPTY/FULL,
// return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.

bool_range_state
get_bool_state (vrange &r, const vrange &lhs, tree val_type)
{
  // If there is no result, then this is unexecutable.
  if (lhs.undefined_p ())
    {
      r.set_undefined ();
      return BRS_EMPTY;
    }

  if (lhs.zero_p ())
    return BRS_FALSE;

  // For TRUE, we can't just test for [1,1] because Ada can have
  // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
  if (lhs.contains_p (build_zero_cst (lhs.type ())))
    {
      r.set_varying (val_type);
      return BRS_FULL;
    }

  return BRS_TRUE;
}
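
// For example (illustrative only): an LHS of [0,0] summarizes to BRS_FALSE,
// [1,1] or ~[0] to BRS_TRUE, while a boolean LHS of [0,1] -- which says
// nothing about the comparison -- sets R to varying and returns BRS_FULL,
// so callers fall back to the full domain for the operand being solved.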

// ------------------------------------------------------------------------

void
operator_equal::update_bitmask (irange &r, const irange &lh,
				const irange &rh) const
{
  update_known_bitmask (r, EQ_EXPR, lh, rh);
}

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
operator_equal::op1_op2_relation (const irange &lhs, const irange &,
				  const irange &) const
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 == op2 indicates NE_EXPR.
  if (lhs.zero_p ())
    return VREL_NE;

  // TRUE = op1 == op2 indicates EQ_EXPR.
  if (!contains_zero_p (lhs))
    return VREL_EQ;
  return VREL_VARYING;
}

bool
operator_equal::fold_range (irange &r, tree type,
			    const irange &op1,
			    const irange &op2,
			    relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
    return true;

  // We can be sure the values are always equal or not if both ranges
  // consist of a single value, and then compare them.
  if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
      && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
    {
      if (wi::eq_p (op1.lower_bound (), op2.upper_bound ()))
	r = range_true (type);
      else
	r = range_false (type);
    }
  else
    {
      // If ranges do not intersect, we know the range is not equal,
      // otherwise we don't know anything for sure.
      int_range_max tmp = op1;
      tmp.intersect (op2);
      if (tmp.undefined_p ())
	r = range_false (type);
      else
	r = range_true_and_false (type);
    }
  return true;
}

bool
operator_equal::op1_range (irange &r, tree type,
			   const irange &lhs,
			   const irange &op2,
			   relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      // If it's true, the result is the same as OP2.
      r = op2;
      break;

    case BRS_FALSE:
      // If the result is false, the only time we know anything is
      // if OP2 is a constant.
      if (!op2.undefined_p ()
	  && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
	{
	  r = op2;
	  r.invert ();
	}
      else
	r.set_varying (type);
      break;

    default:
      break;
    }
  return true;
}

bool
operator_equal::op2_range (irange &r, tree type,
			   const irange &lhs,
			   const irange &op1,
			   relation_trio rel) const
{
  return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}

// -------------------------------------------------------------------------

void
operator_not_equal::update_bitmask (irange &r, const irange &lh,
				    const irange &rh) const
{
  update_known_bitmask (r, NE_EXPR, lh, rh);
}

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
operator_not_equal::op1_op2_relation (const irange &lhs, const irange &,
				      const irange &) const
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 != op2 indicates EQ_EXPR.
  if (lhs.zero_p ())
    return VREL_EQ;

  // TRUE = op1 != op2 indicates NE_EXPR.
  if (!contains_zero_p (lhs))
    return VREL_NE;
  return VREL_VARYING;
}

bool
operator_not_equal::fold_range (irange &r, tree type,
				const irange &op1,
				const irange &op2,
				relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
    return true;

  // We can be sure the values are always equal or not if both ranges
  // consist of a single value, and then compare them.
  if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
      && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
    {
      if (wi::ne_p (op1.lower_bound (), op2.upper_bound ()))
	r = range_true (type);
      else
	r = range_false (type);
    }
  else
    {
      // If ranges do not intersect, we know the range is not equal,
      // otherwise we don't know anything for sure.
      int_range_max tmp = op1;
      tmp.intersect (op2);
      if (tmp.undefined_p ())
	r = range_true (type);
      else
	r = range_true_and_false (type);
    }
  return true;
}

bool
operator_not_equal::op1_range (irange &r, tree type,
			       const irange &lhs,
			       const irange &op2,
			       relation_trio) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      // If the result is true, the only time we know anything is if
      // OP2 is a constant.
      if (!op2.undefined_p ()
	  && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
	{
	  r = op2;
	  r.invert ();
	}
      else
	r.set_varying (type);
      break;

    case BRS_FALSE:
      // If it's false, the result is the same as OP2.
      r = op2;
      break;

    default:
      break;
    }
  return true;
}

bool
operator_not_equal::op2_range (irange &r, tree type,
			       const irange &lhs,
			       const irange &op1,
			       relation_trio rel) const
{
  return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}

// (X < VAL) produces the range of [MIN, VAL - 1].

static void
build_lt (irange &r, tree type, const wide_int &val)
{
  wi::overflow_type ov;
  wide_int lim;
  signop sgn = TYPE_SIGN (type);

  // Signed 1 bit cannot represent 1 for subtraction.
  if (sgn == SIGNED)
    lim = wi::add (val, -1, sgn, &ov);
  else
    lim = wi::sub (val, 1, sgn, &ov);

  // If val - 1 underflows, this is X < MIN, which is an empty range.
  if (ov)
    r.set_undefined ();
  else
    r = int_range<1> (type, min_limit (type), lim);
}

// (X <= VAL) produces the range of [MIN, VAL].

static void
build_le (irange &r, tree type, const wide_int &val)
{
  r = int_range<1> (type, min_limit (type), val);
}

// (X > VAL) produces the range of [VAL + 1, MAX].

static void
build_gt (irange &r, tree type, const wide_int &val)
{
  wi::overflow_type ov;
  wide_int lim;
  signop sgn = TYPE_SIGN (type);

  // Signed 1 bit cannot represent 1 for addition.
  if (sgn == SIGNED)
    lim = wi::sub (val, -1, sgn, &ov);
  else
    lim = wi::add (val, 1, sgn, &ov);
  // If val + 1 overflows, this is X > MAX, which is an empty range.
  if (ov)
    r.set_undefined ();
  else
    r = int_range<1> (type, lim, max_limit (type));
}

// (X >= VAL) produces the range of [VAL, MAX].

static void
build_ge (irange &r, tree type, const wide_int &val)
{
  r = int_range<1> (type, val, max_limit (type));
}
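
// A quick illustration (example values only): for unsigned char,
// build_lt with VAL = 0 produces UNDEFINED (nothing is below the minimum),
// build_le with VAL = 0 produces [0, 0], build_gt with VAL = 250 produces
// [251, 255], and build_ge with VAL = 250 produces [250, 255].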

void
operator_lt::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, LT_EXPR, lh, rh);
}

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
operator_lt::op1_op2_relation (const irange &lhs, const irange &,
			       const irange &) const
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 < op2 indicates GE_EXPR.
  if (lhs.zero_p ())
    return VREL_GE;

  // TRUE = op1 < op2 indicates LT_EXPR.
  if (!contains_zero_p (lhs))
    return VREL_LT;
  return VREL_VARYING;
}

bool
operator_lt::fold_range (irange &r, tree type,
			 const irange &op1,
			 const irange &op2,
			 relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_true (type);
  else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_false (type);
  // Use nonzero bits to determine if < 0 is false.
  else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}

bool
operator_lt::op1_range (irange &r, tree type,
			const irange &lhs,
			const irange &op2,
			relation_trio) const
{
  if (op2.undefined_p ())
    return false;

  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_lt (r, type, op2.upper_bound ());
      break;

    case BRS_FALSE:
      build_ge (r, type, op2.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_lt::op2_range (irange &r, tree type,
			const irange &lhs,
			const irange &op1,
			relation_trio) const
{
  if (op1.undefined_p ())
    return false;

  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_gt (r, type, op1.lower_bound ());
      break;

    case BRS_FALSE:
      build_le (r, type, op1.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}

void
operator_le::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, LE_EXPR, lh, rh);
}

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
operator_le::op1_op2_relation (const irange &lhs, const irange &,
			       const irange &) const
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 <= op2 indicates GT_EXPR.
  if (lhs.zero_p ())
    return VREL_GT;

  // TRUE = op1 <= op2 indicates LE_EXPR.
  if (!contains_zero_p (lhs))
    return VREL_LE;
  return VREL_VARYING;
}

bool
operator_le::fold_range (irange &r, tree type,
			 const irange &op1,
			 const irange &op2,
			 relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_true (type);
  else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}

bool
operator_le::op1_range (irange &r, tree type,
			const irange &lhs,
			const irange &op2,
			relation_trio) const
{
  if (op2.undefined_p ())
    return false;

  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_le (r, type, op2.upper_bound ());
      break;

    case BRS_FALSE:
      build_gt (r, type, op2.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_le::op2_range (irange &r, tree type,
			const irange &lhs,
			const irange &op1,
			relation_trio) const
{
  if (op1.undefined_p ())
    return false;

  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_ge (r, type, op1.lower_bound ());
      break;

    case BRS_FALSE:
      build_lt (r, type, op1.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}

void
operator_gt::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, GT_EXPR, lh, rh);
}

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
operator_gt::op1_op2_relation (const irange &lhs, const irange &,
			       const irange &) const
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 > op2 indicates LE_EXPR.
  if (lhs.zero_p ())
    return VREL_LE;

  // TRUE = op1 > op2 indicates GT_EXPR.
  if (!contains_zero_p (lhs))
    return VREL_GT;
  return VREL_VARYING;
}

bool
operator_gt::fold_range (irange &r, tree type,
			 const irange &op1, const irange &op2,
			 relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_true (type);
  else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}

bool
operator_gt::op1_range (irange &r, tree type,
			const irange &lhs, const irange &op2,
			relation_trio) const
{
  if (op2.undefined_p ())
    return false;

  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_gt (r, type, op2.lower_bound ());
      break;

    case BRS_FALSE:
      build_le (r, type, op2.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_gt::op2_range (irange &r, tree type,
			const irange &lhs,
			const irange &op1,
			relation_trio) const
{
  if (op1.undefined_p ())
    return false;

  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_lt (r, type, op1.upper_bound ());
      break;

    case BRS_FALSE:
      build_ge (r, type, op1.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}

void
operator_ge::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, GE_EXPR, lh, rh);
}

// Check if the LHS range indicates a relation between OP1 and OP2.

relation_kind
operator_ge::op1_op2_relation (const irange &lhs, const irange &,
			       const irange &) const
{
  if (lhs.undefined_p ())
    return VREL_UNDEFINED;

  // FALSE = op1 >= op2 indicates LT_EXPR.
  if (lhs.zero_p ())
    return VREL_LT;

  // TRUE = op1 >= op2 indicates GE_EXPR.
  if (!contains_zero_p (lhs))
    return VREL_GE;
  return VREL_VARYING;
}

bool
operator_ge::fold_range (irange &r, tree type,
			 const irange &op1,
			 const irange &op2,
			 relation_trio rel) const
{
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_true (type);
  else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}

bool
operator_ge::op1_range (irange &r, tree type,
			const irange &lhs,
			const irange &op2,
			relation_trio) const
{
  if (op2.undefined_p ())
    return false;

  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_ge (r, type, op2.lower_bound ());
      break;

    case BRS_FALSE:
      build_lt (r, type, op2.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_ge::op2_range (irange &r, tree type,
			const irange &lhs,
			const irange &op1,
			relation_trio) const
{
  if (op1.undefined_p ())
    return false;

  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_le (r, type, op1.upper_bound ());
      break;

    case BRS_FALSE:
      build_gt (r, type, op1.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}

void
operator_plus::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, PLUS_EXPR, lh, rh);
}

// Check to see if the range of OP2 indicates anything about the relation
// between LHS and OP1.

relation_kind
operator_plus::lhs_op1_relation (const irange &lhs,
				 const irange &op1,
				 const irange &op2,
				 relation_kind) const
{
  if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
    return VREL_VARYING;

  tree type = lhs.type ();
  unsigned prec = TYPE_PRECISION (type);
  wi::overflow_type ovf1, ovf2;
  signop sign = TYPE_SIGN (type);

  // LHS = OP1 + 0 indicates LHS == OP1.
  if (op2.zero_p ())
    return VREL_EQ;

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
      wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
    }
  else
    ovf1 = ovf2 = wi::OVF_NONE;

  // Never wrapping additions.
  if (!ovf1 && !ovf2)
    {
      // Positive op2 means lhs > op1.
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GE;

      // Negative op2 means lhs < op1.
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LE;
    }
  // Always wrapping additions.
  else if (ovf1 && ovf1 == ovf2)
    {
      // Positive op2 means lhs < op1.
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LE;

      // Negative op2 means lhs > op1.
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GE;
    }

  // If op2 does not contain 0, then LHS and OP1 can never be equal.
  if (!range_includes_zero_p (&op2))
    return VREL_NE;

  return VREL_VARYING;
}

// PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
// operands.

relation_kind
operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
				 const irange &op2, relation_kind rel) const
{
  return lhs_op1_relation (lhs, op2, op1, rel);
}

void
operator_plus::wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
  wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
  value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
}

// Given addition or subtraction, determine the possible NORMAL ranges and
// OVERFLOW ranges given an OFFSET range.  ADD_P is true for addition.
// Return the relation that exists between the LHS and OP1 in order for the
// NORMAL range to apply.
// A return value of VREL_VARYING means no ranges were applicable.

static relation_kind
plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
		   bool add_p)
{
  relation_kind kind = VREL_VARYING;
  // For now, only deal with constant adds.  This could be extended to ranges
  // when someone is so motivated.
  if (!offset.singleton_p () || offset.zero_p ())
    return kind;

  // Always work with a positive offset, i.e. a + -2 becomes a - 2,
  // and a - -2 becomes a + 2.
  wide_int off = offset.lower_bound ();
  if (wi::neg_p (off, SIGNED))
    {
      add_p = !add_p;
      off = wi::neg (off);
    }

  wi::overflow_type ov;
  tree type = offset.type ();
  unsigned prec = TYPE_PRECISION (type);
  wide_int ub;
  wide_int lb;
  // Calculate the normal range and relation for the operation.
  if (add_p)
    {
      // [ 0 , INF - OFF]
      lb = wi::zero (prec);
      ub = wi::sub (irange_val_max (type), off, UNSIGNED, &ov);
      kind = VREL_GT;
    }
  else
    {
      // [ OFF, INF ]
      lb = off;
      ub = irange_val_max (type);
      kind = VREL_LT;
    }
  int_range<2> normal_range (type, lb, ub);
  int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);

  r_ov = ov_range;
  r_normal = normal_range;
  return kind;
}
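
// For example (unsigned char, values picked for this comment): for x + 10,
// the non-wrapping results require x <= 245, so the NORMAL range for the
// operand is [0, 245] with relation VREL_GT (lhs > op1), and the OVERFLOW
// range is the complement [246, 255], where the addition wraps and the
// lhs ends up below op1.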

// Once op1 has been calculated by operator_plus or operator_minus, check
// to see if the relation passed causes any part of the calculation to
// be not possible.  I.e.
// a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
// and that further refines a_2 to [0, 0].
// R is the value of op1, OP2 is the offset being added/subtracted, REL is
// the relation between LHS and OP1, and ADD_P is true for PLUS, false for
// MINUS.  If any adjustment can be made, R will reflect it.

static void
adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
			 bool add_p)
{
  if (r.undefined_p ())
    return;
  tree type = r.type ();
  // Check for unsigned overflow and calculate the overflow part.
  signop s = TYPE_SIGN (type);
  if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
    return;

  // Only work with <, <=, >, >= relations.
  if (!relation_lt_le_gt_ge_p (rel))
    return;

  // Get the ranges for this offset.
  int_range_max normal, overflow;
  relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);

  // VREL_VARYING means there are no adjustments.
  if (k == VREL_VARYING)
    return;

  // If the relations match use the normal range, otherwise use overflow range.
  if (relation_intersect (k, rel) == k)
    r.intersect (normal);
  else
    r.intersect (overflow);
  return;
}

bool
operator_plus::op1_range (irange &r, tree type,
			  const irange &lhs,
			  const irange &op2,
			  relation_trio trio) const
{
  if (lhs.undefined_p ())
    return false;
  // Start with the default operation.
  range_op_handler minus (MINUS_EXPR);
  if (!minus)
    return false;
  bool res = minus.fold_range (r, type, lhs, op2);
  relation_kind rel = trio.lhs_op1 ();
  // Check for a relation refinement.
  if (res)
    adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
  return res;
}

bool
operator_plus::op2_range (irange &r, tree type,
			  const irange &lhs,
			  const irange &op1,
			  relation_trio rel) const
{
  return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}

class operator_widen_plus_signed : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_signed;

void
operator_widen_plus_signed::wi_fold (irange &r, tree type,
				     const wide_int &lh_lb,
				     const wide_int &lh_ub,
				     const wide_int &rh_lb,
				     const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);

  wide_int lh_wlb
    = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
  wide_int lh_wub
    = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
  wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);

  r = int_range<2> (type, new_lb, new_ub);
}

class operator_widen_plus_unsigned : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_unsigned;

void
operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
				       const wide_int &lh_lb,
				       const wide_int &lh_ub,
				       const wide_int &rh_lb,
				       const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);

  wide_int lh_wlb
    = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
  wide_int lh_wub
    = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
  wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);

  r = int_range<2> (type, new_lb, new_ub);
}

void
operator_minus::update_bitmask (irange &r, const irange &lh,
				const irange &rh) const
{
  update_known_bitmask (r, MINUS_EXPR, lh, rh);
}

void
operator_minus::wi_fold (irange &r, tree type,
			 const wide_int &lh_lb, const wide_int &lh_ub,
			 const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
  wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
  value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
}

// Return the relation between LHS and OP1 based on the relation between
// OP1 and OP2.

relation_kind
operator_minus::lhs_op1_relation (const irange &, const irange &op1,
				  const irange &, relation_kind rel) const
{
  if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
    switch (rel)
      {
      case VREL_GT:
      case VREL_GE:
	return VREL_LE;
      default:
	break;
      }
  return VREL_VARYING;
}

// Check to see if the relation REL between OP1 and OP2 has any effect on the
// LHS of the expression.  If so, apply it to LHS_RANGE.  This is a helper
// function for both MINUS_EXPR and POINTER_DIFF_EXPR.

bool
minus_op1_op2_relation_effect (irange &lhs_range, tree type,
			       const irange &op1_range ATTRIBUTE_UNUSED,
			       const irange &op2_range ATTRIBUTE_UNUSED,
			       relation_kind rel)
{
  if (rel == VREL_VARYING)
    return false;

  int_range<2> rel_range;
  unsigned prec = TYPE_PRECISION (type);
  signop sgn = TYPE_SIGN (type);

  // == and != produce [0,0] and ~[0,0] regardless of wrapping.
  if (rel == VREL_EQ)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
  else if (rel == VREL_NE)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
			      VR_ANTI_RANGE);
  else if (TYPE_OVERFLOW_WRAPS (type))
    {
      switch (rel)
	{
	// For wrapping signed values and unsigned, if op1 > op2 or
	// op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
	case VREL_GT:
	case VREL_LT:
	  rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
				    VR_ANTI_RANGE);
	  break;
	default:
	  return false;
	}
    }
  else
    {
      switch (rel)
	{
	// op1 > op2, op1 - op2 can be restricted to [1, +INF]
	case VREL_GT:
	  rel_range = int_range<2> (type, wi::one (prec),
				    wi::max_value (prec, sgn));
	  break;
	// op1 >= op2, op1 - op2 can be restricted to [0, +INF]
	case VREL_GE:
	  rel_range = int_range<2> (type, wi::zero (prec),
				    wi::max_value (prec, sgn));
	  break;
	// op1 < op2, op1 - op2 can be restricted to [-INF, -1]
	case VREL_LT:
	  rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				    wi::minus_one (prec));
	  break;
	// op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
	case VREL_LE:
	  rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				    wi::zero (prec));
	  break;
	default:
	  return false;
	}
    }
  lhs_range.intersect (rel_range);
  return true;
}

bool
operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
					 const irange &op1_range,
					 const irange &op2_range,
					 relation_kind rel) const
{
  return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
					rel);
}

bool
operator_minus::op1_range (irange &r, tree type,
			   const irange &lhs,
			   const irange &op2,
			   relation_trio trio) const
{
  if (lhs.undefined_p ())
    return false;
  // Start with the default operation.
  range_op_handler minus (PLUS_EXPR);
  if (!minus)
    return false;
  bool res = minus.fold_range (r, type, lhs, op2);
  relation_kind rel = trio.lhs_op1 ();
  if (res)
    adjust_op1_for_overflow (r, op2, rel, false /* MINUS_EXPR */);
  return res;
}

bool
operator_minus::op2_range (irange &r, tree type,
			   const irange &lhs,
			   const irange &op1,
			   relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  return fold_range (r, type, op1, lhs);
}

void
operator_min::update_bitmask (irange &r, const irange &lh,
			      const irange &rh) const
{
  update_known_bitmask (r, MIN_EXPR, lh, rh);
}

void
operator_min::wi_fold (irange &r, tree type,
		       const wide_int &lh_lb, const wide_int &lh_ub,
		       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::min (lh_lb, rh_lb, s);
  wide_int new_ub = wi::min (lh_ub, rh_ub, s);
  value_range_with_overflow (r, type, new_lb, new_ub);
}

void
operator_max::update_bitmask (irange &r, const irange &lh,
			      const irange &rh) const
{
  update_known_bitmask (r, MAX_EXPR, lh, rh);
}

void
operator_max::wi_fold (irange &r, tree type,
		       const wide_int &lh_lb, const wide_int &lh_ub,
		       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::max (lh_lb, rh_lb, s);
  wide_int new_ub = wi::max (lh_ub, rh_ub, s);
  value_range_with_overflow (r, type, new_lb, new_ub);
}

// Calculate the cross product of two sets of ranges and return it.
//
// Multiplications, divisions and shifts are a bit tricky to handle;
// depending on the mix of signs we have in the two ranges, we need to
// operate on different values to get the minimum and maximum values
// for the new range.  One approach is to figure out all the
// variations of range combinations and do the operations.
//
// However, this involves several calls to compare_values and it is
// pretty convoluted.  It's simpler to do the 4 operations (MIN0 OP
// MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
// figure the smallest and largest values to form the new range.

void
cross_product_operator::wi_cross_product (irange &r, tree type,
					  const wide_int &lh_lb,
					  const wide_int &lh_ub,
					  const wide_int &rh_lb,
					  const wide_int &rh_ub) const
{
  wide_int cp1, cp2, cp3, cp4;
  // Default to varying.
  r.set_varying (type);

  // Compute the 4 cross operations, bailing if we get an overflow we
  // can't handle.
  if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp3 = cp1;
  else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
    return;
  if (wi::eq_p (rh_lb, rh_ub))
    cp2 = cp1;
  else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp4 = cp2;
  else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
    return;

  // Order pairs.
  signop sign = TYPE_SIGN (type);
  if (wi::gt_p (cp1, cp2, sign))
    std::swap (cp1, cp2);
  if (wi::gt_p (cp3, cp4, sign))
    std::swap (cp3, cp4);

  // Choose min and max from the ordered pairs.
  wide_int res_lb = wi::min (cp1, cp3, sign);
  wide_int res_ub = wi::max (cp2, cp4, sign);
  value_range_with_overflow (r, type, res_lb, res_ub);
}
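
// A worked signed example (numbers chosen for this comment): multiplying
// [-2, 3] by [5, 10] computes the four corner products -10, -20, 15 and 30;
// after ordering the pairs, the minimum -20 and maximum 30 form the result
// [-20, 30].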

void
operator_mult::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, MULT_EXPR, lh, rh);
}

bool
operator_mult::op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio) const
{
  if (lhs.undefined_p ())
    return false;

  // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
  // For example: for 0 = OP1 * 2, OP1 could be 0 or 128, whereas
  // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit).
  if (TYPE_OVERFLOW_WRAPS (type))
    return false;

  wide_int offset;
  if (op2.singleton_p (offset) && offset != 0)
    return range_op_handler (TRUNC_DIV_EXPR).fold_range (r, type, lhs, op2);
  return false;
}

bool
operator_mult::op2_range (irange &r, tree type,
			  const irange &lhs, const irange &op1,
			  relation_trio rel) const
{
  return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
}
2115 bool
2116 operator_mult::wi_op_overflows (wide_int &res, tree type,
2117 const wide_int &w0, const wide_int &w1) const
2119 wi::overflow_type overflow = wi::OVF_NONE;
2120 signop sign = TYPE_SIGN (type);
2121 res = wi::mul (w0, w1, sign, &overflow);
2122 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2124 // For multiplication, the sign of the overflow is given
2125 // by the comparison of the signs of the operands.
2126 if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
2127 res = wi::max_value (w0.get_precision (), sign);
2128 else
2129 res = wi::min_value (w0.get_precision (), sign);
2130 return false;
2132 return overflow;
2135 void
2136 operator_mult::wi_fold (irange &r, tree type,
2137 const wide_int &lh_lb, const wide_int &lh_ub,
2138 const wide_int &rh_lb, const wide_int &rh_ub) const
2140 if (TYPE_OVERFLOW_UNDEFINED (type))
2142 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2143 return;
2146 // Multiply the ranges when overflow wraps. This is basically fancy
2147 // code so we don't drop to varying with an unsigned
2148 // [-3,-1]*[-3,-1].
2150 // This test requires 2*prec bits if both operands are signed and
2151 // 2*prec + 2 bits if either is not. Therefore, extend the values
2152 // using the sign of the result to PREC2. From here on out,
2153 // everything is just signed math no matter what the input types
2154 // were.
2156 signop sign = TYPE_SIGN (type);
2157 unsigned prec = TYPE_PRECISION (type);
2158 widest2_int min0 = widest2_int::from (lh_lb, sign);
2159 widest2_int max0 = widest2_int::from (lh_ub, sign);
2160 widest2_int min1 = widest2_int::from (rh_lb, sign);
2161 widest2_int max1 = widest2_int::from (rh_ub, sign);
2162 widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
2163 widest2_int size = sizem1 + 1;
2165 // Canonicalize the intervals.
2166 if (sign == UNSIGNED)
2168 if (wi::ltu_p (size, min0 + max0))
2170 min0 -= size;
2171 max0 -= size;
2173 if (wi::ltu_p (size, min1 + max1))
2175 min1 -= size;
2176 max1 -= size;
2180 // Sort the 4 products so that min is in prod0 and max is in
2181 // prod3.
2182 widest2_int prod0 = min0 * min1;
2183 widest2_int prod1 = min0 * max1;
2184 widest2_int prod2 = max0 * min1;
2185 widest2_int prod3 = max0 * max1;
2187 // min0min1 > max0max1
2188 if (prod0 > prod3)
2189 std::swap (prod0, prod3);
2191 // min0max1 > max0min1
2192 if (prod1 > prod2)
2193 std::swap (prod1, prod2);
2195 if (prod0 > prod1)
2196 std::swap (prod0, prod1);
2198 if (prod2 > prod3)
2199 std::swap (prod2, prod3);
2201 // diff = max - min
2202 prod2 = prod3 - prod0;
2203 if (wi::geu_p (prod2, sizem1))
2205 // Multiplying by X, where X is a power of 2, is [0,0][X,+INF].
2206 if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
2207 && wi::exact_log2 (rh_lb) != -1 && prec > 1)
2209 r.set (type, rh_lb, wi::max_value (prec, sign));
2210 int_range<2> zero;
2211 zero.set_zero (type);
2212 r.union_ (zero);
2214 else
2215 // The range covers all values.
2216 r.set_varying (type);
2218 else
2220 wide_int new_lb = wide_int::from (prod0, prec, sign);
2221 wide_int new_ub = wide_int::from (prod3, prec, sign);
2222 create_possibly_reversed_range (r, type, new_lb, new_ub);
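// A standalone illustration, not part of range-op.cc, of the double-precision
// trick above for 8-bit unsigned operands: compute the four products in a
// wider type; if their spread is below 2^prec, the wrapped results still form
// one contiguous range modulo 2^prec.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main ()
{
  const int prec = 8;
  const int64_t size = int64_t (1) << prec;     // 256
  // Unsigned [253, 255] * [253, 255], i.e. [-3, -1] * [-3, -1].
  int64_t min0 = 253, max0 = 255, min1 = 253, max1 = 255;
  // Canonicalize: treat an interval whose bounds sum past the modulus as negative.
  if (min0 + max0 > size) { min0 -= size; max0 -= size; }
  if (min1 + max1 > size) { min1 -= size; max1 -= size; }
  int64_t p[4] = { min0 * min1, min0 * max1, max0 * min1, max0 * max1 };
  int64_t lo = *std::min_element (p, p + 4);
  int64_t hi = *std::max_element (p, p + 4);
  if (hi - lo >= size)
    std::printf ("varying\n");
  else
    // Reduce back modulo 2^prec; this prints [1, 9].
    std::printf ("[%lld, %lld]\n", (long long) (lo & (size - 1)),
                 (long long) (hi & (size - 1)));
  return 0;
}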
2226 class operator_widen_mult_signed : public range_operator
2228 public:
2229 virtual void wi_fold (irange &r, tree type,
2230 const wide_int &lh_lb,
2231 const wide_int &lh_ub,
2232 const wide_int &rh_lb,
2233 const wide_int &rh_ub)
2234 const;
2235 } op_widen_mult_signed;
2237 void
2238 operator_widen_mult_signed::wi_fold (irange &r, tree type,
2239 const wide_int &lh_lb,
2240 const wide_int &lh_ub,
2241 const wide_int &rh_lb,
2242 const wide_int &rh_ub) const
2244 signop s = TYPE_SIGN (type);
2246 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
2247 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
2248 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2249 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2251 /* We don't expect a widening multiplication to be able to overflow, but range
2252 calculations for multiplications are complicated. After widening the
2253 operands, let's call the base class. */
2254 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2258 class operator_widen_mult_unsigned : public range_operator
2260 public:
2261 virtual void wi_fold (irange &r, tree type,
2262 const wide_int &lh_lb,
2263 const wide_int &lh_ub,
2264 const wide_int &rh_lb,
2265 const wide_int &rh_ub)
2266 const;
2267 } op_widen_mult_unsigned;
2269 void
2270 operator_widen_mult_unsigned::wi_fold (irange &r, tree type,
2271 const wide_int &lh_lb,
2272 const wide_int &lh_ub,
2273 const wide_int &rh_lb,
2274 const wide_int &rh_ub) const
2276 signop s = TYPE_SIGN (type);
2278 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
2279 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
2280 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2281 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2283 /* We don't expect a widening multiplication to be able to overflow, but range
2284 calculations for multiplications are complicated. After widening the
2285 operands, let's call the base class. */
2286 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2289 class operator_div : public cross_product_operator
2291 public:
2292 operator_div (tree_code div_kind) { m_code = div_kind; }
2293 virtual void wi_fold (irange &r, tree type,
2294 const wide_int &lh_lb,
2295 const wide_int &lh_ub,
2296 const wide_int &rh_lb,
2297 const wide_int &rh_ub) const final override;
2298 virtual bool wi_op_overflows (wide_int &res, tree type,
2299 const wide_int &, const wide_int &)
2300 const final override;
2301 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
2302 { update_known_bitmask (r, m_code, lh, rh); }
2303 protected:
2304 tree_code m_code;
2307 static operator_div op_trunc_div (TRUNC_DIV_EXPR);
2308 static operator_div op_floor_div (FLOOR_DIV_EXPR);
2309 static operator_div op_round_div (ROUND_DIV_EXPR);
2310 static operator_div op_ceil_div (CEIL_DIV_EXPR);
2312 bool
2313 operator_div::wi_op_overflows (wide_int &res, tree type,
2314 const wide_int &w0, const wide_int &w1) const
2316 if (w1 == 0)
2317 return true;
2319 wi::overflow_type overflow = wi::OVF_NONE;
2320 signop sign = TYPE_SIGN (type);
2322 switch (m_code)
2324 case EXACT_DIV_EXPR:
2325 case TRUNC_DIV_EXPR:
2326 res = wi::div_trunc (w0, w1, sign, &overflow);
2327 break;
2328 case FLOOR_DIV_EXPR:
2329 res = wi::div_floor (w0, w1, sign, &overflow);
2330 break;
2331 case ROUND_DIV_EXPR:
2332 res = wi::div_round (w0, w1, sign, &overflow);
2333 break;
2334 case CEIL_DIV_EXPR:
2335 res = wi::div_ceil (w0, w1, sign, &overflow);
2336 break;
2337 default:
2338 gcc_unreachable ();
2341 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2343 // For division, the only case is -INF / -1 = +INF.
2344 res = wi::max_value (w0.get_precision (), sign);
2345 return false;
2347 return overflow;
2350 void
2351 operator_div::wi_fold (irange &r, tree type,
2352 const wide_int &lh_lb, const wide_int &lh_ub,
2353 const wide_int &rh_lb, const wide_int &rh_ub) const
2355 const wide_int dividend_min = lh_lb;
2356 const wide_int dividend_max = lh_ub;
2357 const wide_int divisor_min = rh_lb;
2358 const wide_int divisor_max = rh_ub;
2359 signop sign = TYPE_SIGN (type);
2360 unsigned prec = TYPE_PRECISION (type);
2361 wide_int extra_min, extra_max;
2363 // If we know we won't divide by zero, just do the division.
2364 if (!wi_includes_zero_p (type, divisor_min, divisor_max))
2366 wi_cross_product (r, type, dividend_min, dividend_max,
2367 divisor_min, divisor_max);
2368 return;
2371 // If we're definitely dividing by zero, there's nothing to do.
2372 if (wi_zero_p (type, divisor_min, divisor_max))
2374 r.set_undefined ();
2375 return;
2378 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2379 // skip any division by zero.
2381 // First divide by the negative numbers, if any.
2382 if (wi::neg_p (divisor_min, sign))
2383 wi_cross_product (r, type, dividend_min, dividend_max,
2384 divisor_min, wi::minus_one (prec));
2385 else
2386 r.set_undefined ();
2388 // Then divide by the non-zero positive numbers, if any.
2389 if (wi::gt_p (divisor_max, wi::zero (prec), sign))
2391 int_range_max tmp;
2392 wi_cross_product (tmp, type, dividend_min, dividend_max,
2393 wi::one (prec), divisor_max);
2394 r.union_ (tmp);
2396 // We shouldn't still have undefined here.
2397 gcc_checking_assert (!r.undefined_p ());
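// A standalone illustration, not part of range-op.cc, of splitting a divisor
// range that straddles zero into [lb, -1] and [1, ub].  Plain int64_t, no
// -INF / -1 overflow, and the two partial results are merged into their hull
// rather than kept as separate subranges.
#include <algorithm>
#include <cstdint>
#include <cstdio>

struct rng { int64_t lb, ub; };

static rng
corners_div (rng num, rng den)
{
  int64_t c[4] = { num.lb / den.lb, num.lb / den.ub,
                   num.ub / den.lb, num.ub / den.ub };
  return { *std::min_element (c, c + 4), *std::max_element (c, c + 4) };
}

int main ()
{
  rng num = { 10, 20 }, den = { -2, 3 };        // divisor range contains zero
  rng neg = corners_div (num, { den.lb, -1 });  // divide by [-2, -1] -> [-20, -5]
  rng pos = corners_div (num, { 1, den.ub });   // divide by [1, 3]   -> [3, 20]
  rng res = { std::min (neg.lb, pos.lb), std::max (neg.ub, pos.ub) };
  std::printf ("[%lld, %lld]\n", (long long) res.lb, (long long) res.ub);
  return 0;
}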
2401 class operator_exact_divide : public operator_div
2403 using range_operator::op1_range;
2404 public:
2405 operator_exact_divide () : operator_div (EXACT_DIV_EXPR) { }
2406 virtual bool op1_range (irange &r, tree type,
2407 const irange &lhs,
2408 const irange &op2,
2409 relation_trio) const;
2411 } op_exact_div;
2413 bool
2414 operator_exact_divide::op1_range (irange &r, tree type,
2415 const irange &lhs,
2416 const irange &op2,
2417 relation_trio) const
2419 if (lhs.undefined_p ())
2420 return false;
2421 wide_int offset;
2422 // [2, 4] = op1 / [3,3] since it's an exact divide, no need to worry about
2423 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2424 // We won't bother trying to enumerate all the in-between stuff :-P
2425 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2426 // the time, however.
2427 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2428 if (op2.singleton_p (offset) && offset != 0)
2429 return range_op_handler (MULT_EXPR).fold_range (r, type, lhs, op2);
2430 return false;
2434 class operator_lshift : public cross_product_operator
2436 using range_operator::fold_range;
2437 using range_operator::op1_range;
2438 public:
2439 virtual bool op1_range (irange &r, tree type, const irange &lhs,
2440 const irange &op2, relation_trio rel = TRIO_VARYING)
2441 const final override;
2442 virtual bool fold_range (irange &r, tree type, const irange &op1,
2443 const irange &op2, relation_trio rel = TRIO_VARYING)
2444 const final override;
2446 virtual void wi_fold (irange &r, tree type,
2447 const wide_int &lh_lb, const wide_int &lh_ub,
2448 const wide_int &rh_lb,
2449 const wide_int &rh_ub) const final override;
2450 virtual bool wi_op_overflows (wide_int &res,
2451 tree type,
2452 const wide_int &,
2453 const wide_int &) const final override;
2454 void update_bitmask (irange &r, const irange &lh,
2455 const irange &rh) const final override
2456 { update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
2457 } op_lshift;
2459 class operator_rshift : public cross_product_operator
2461 using range_operator::fold_range;
2462 using range_operator::op1_range;
2463 using range_operator::lhs_op1_relation;
2464 public:
2465 virtual bool fold_range (irange &r, tree type, const irange &op1,
2466 const irange &op2, relation_trio rel = TRIO_VARYING)
2467 const final override;
2468 virtual void wi_fold (irange &r, tree type,
2469 const wide_int &lh_lb,
2470 const wide_int &lh_ub,
2471 const wide_int &rh_lb,
2472 const wide_int &rh_ub) const final override;
2473 virtual bool wi_op_overflows (wide_int &res,
2474 tree type,
2475 const wide_int &w0,
2476 const wide_int &w1) const final override;
2477 virtual bool op1_range (irange &, tree type, const irange &lhs,
2478 const irange &op2, relation_trio rel = TRIO_VARYING)
2479 const final override;
2480 virtual relation_kind lhs_op1_relation (const irange &lhs, const irange &op1,
2481 const irange &op2, relation_kind rel)
2482 const final override;
2483 void update_bitmask (irange &r, const irange &lh,
2484 const irange &rh) const final override
2485 { update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
2486 } op_rshift;
2489 relation_kind
2490 operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
2491 const irange &op1,
2492 const irange &op2,
2493 relation_kind) const
2495 // If both operands' ranges are >= 0, then the LHS <= op1.
2496 if (!op1.undefined_p () && !op2.undefined_p ()
2497 && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
2498 && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
2499 return VREL_LE;
2500 return VREL_VARYING;
2503 bool
2504 operator_lshift::fold_range (irange &r, tree type,
2505 const irange &op1,
2506 const irange &op2,
2507 relation_trio rel) const
2509 int_range_max shift_range;
2510 if (!get_shift_range (shift_range, type, op2))
2512 if (op2.undefined_p ())
2513 r.set_undefined ();
2514 else
2515 r.set_zero (type);
2516 return true;
2519 // Transform left shifts by constants into multiplies.
2520 if (shift_range.singleton_p ())
2522 unsigned shift = shift_range.lower_bound ().to_uhwi ();
2523 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
2524 int_range<1> mult (type, tmp, tmp);
2526 // Force wrapping multiplication.
2527 bool saved_flag_wrapv = flag_wrapv;
2528 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
2529 flag_wrapv = 1;
2530 flag_wrapv_pointer = 1;
2531 bool b = op_mult.fold_range (r, type, op1, mult);
2532 flag_wrapv = saved_flag_wrapv;
2533 flag_wrapv_pointer = saved_flag_wrapv_pointer;
2534 return b;
2536 else
2537 // Otherwise, invoke the generic fold routine.
2538 return range_operator::fold_range (r, type, op1, shift_range, rel);
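// A standalone check, not part of range-op.cc, that a left shift by a constant
// K behaves exactly like a wrapping multiply by 1 << K, shown here on uint8_t.
#include <cassert>
#include <cstdint>

int main ()
{
  const unsigned k = 3;
  for (unsigned v = 0; v < 256; v++)
    {
      uint8_t by_shift = static_cast<uint8_t> (v << k);
      uint8_t by_mult = static_cast<uint8_t> (v * (1u << k));
      assert (by_shift == by_mult);     // identical with wrapping semantics
    }
  return 0;
}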
2541 void
2542 operator_lshift::wi_fold (irange &r, tree type,
2543 const wide_int &lh_lb, const wide_int &lh_ub,
2544 const wide_int &rh_lb, const wide_int &rh_ub) const
2546 signop sign = TYPE_SIGN (type);
2547 unsigned prec = TYPE_PRECISION (type);
2548 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
2549 int bound_shift = overflow_pos - rh_ub.to_shwi ();
2550 // If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
2551 // overflow. However, for that to happen, rh.max needs to be zero,
2552 // which means rh is a singleton range of zero, which means we simply return
2553 // [lh_lb, lh_ub] as the range.
2554 if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
2556 r = int_range<2> (type, lh_lb, lh_ub);
2557 return;
2560 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2561 wide_int complement = ~(bound - 1);
2562 wide_int low_bound, high_bound;
2563 bool in_bounds = false;
2565 if (sign == UNSIGNED)
2567 low_bound = bound;
2568 high_bound = complement;
2569 if (wi::ltu_p (lh_ub, low_bound))
2571 // [5, 6] << [1, 2] == [10, 24].
2572 // We're shifting out only zeroes, the value increases
2573 // monotonically.
2574 in_bounds = true;
2576 else if (wi::ltu_p (high_bound, lh_lb))
2578 // [0xffffff00, 0xffffffff] << [1, 2]
2579 // == [0xfffffc00, 0xfffffffe].
2580 // We're shifting out only ones, the value decreases
2581 // monotonically.
2582 in_bounds = true;
2585 else
2587 // [-1, 1] << [1, 2] == [-4, 4]
2588 low_bound = complement;
2589 high_bound = bound;
2590 if (wi::lts_p (lh_ub, high_bound)
2591 && wi::lts_p (low_bound, lh_lb))
2593 // For non-negative numbers, we're shifting out only zeroes,
2594 // the value increases monotonically. For negative numbers,
2595 // we're shifting out only ones, the value decreases
2596 // monotonically.
2597 in_bounds = true;
2601 if (in_bounds)
2602 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2603 else
2604 r.set_varying (type);
2607 bool
2608 operator_lshift::wi_op_overflows (wide_int &res, tree type,
2609 const wide_int &w0, const wide_int &w1) const
2611 signop sign = TYPE_SIGN (type);
2612 if (wi::neg_p (w1))
2614 // It's unclear from the C standard whether shifts can overflow.
2615 // The following code ignores overflow; perhaps a C standard
2616 // interpretation ruling is needed.
2617 res = wi::rshift (w0, -w1, sign);
2619 else
2620 res = wi::lshift (w0, w1);
2621 return false;
2624 bool
2625 operator_lshift::op1_range (irange &r,
2626 tree type,
2627 const irange &lhs,
2628 const irange &op2,
2629 relation_trio) const
2631 if (lhs.undefined_p ())
2632 return false;
2634 if (!contains_zero_p (lhs))
2635 r.set_nonzero (type);
2636 else
2637 r.set_varying (type);
2639 wide_int shift;
2640 if (op2.singleton_p (shift))
2642 if (wi::lt_p (shift, 0, SIGNED))
2643 return false;
2644 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
2645 TYPE_PRECISION (op2.type ())),
2646 UNSIGNED))
2647 return false;
2648 if (shift == 0)
2650 r.intersect (lhs);
2651 return true;
2654 // Work completely in unsigned mode to start.
2655 tree utype = type;
2656 int_range_max tmp_range;
2657 if (TYPE_SIGN (type) == SIGNED)
2659 int_range_max tmp = lhs;
2660 utype = unsigned_type_for (type);
2661 range_cast (tmp, utype);
2662 op_rshift.fold_range (tmp_range, utype, tmp, op2);
2664 else
2665 op_rshift.fold_range (tmp_range, utype, lhs, op2);
2667 // Start with ranges which can produce the LHS by right shifting the
2668 // result by the shift amount.
2669 // ie [0x08, 0xF0] = op1 << 2 will start with
2670 // [00001000, 11110000] = op1 << 2
2671 // [0x02, 0x3C] aka [00000010, 00111100]
2673 // Then create a range from the LB with the least significant upper bit
2674 // set, to the upper bound with all the bits set.
2675 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2677 // Ideally we do this for each subrange, but just lump them all for now.
2678 unsigned low_bits = TYPE_PRECISION (utype) - shift.to_uhwi ();
2679 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
2680 wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
2681 wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
2682 int_range<2> fill_range (utype, new_lb, new_ub);
2683 tmp_range.union_ (fill_range);
2685 if (utype != type)
2686 range_cast (tmp_range, type);
2688 r.intersect (tmp_range);
2689 return true;
2692 return !r.varying_p ();
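// A standalone illustration, not part of range-op.cc, of the "fill" step in
// op1_range above, on 8-bit values: right-shift the LHS bounds to get the low
// candidate range, then union in the range whose shifted-out high bits may be
// anything.
#include <cstdint>
#include <cstdio>

int main ()
{
  const unsigned prec = 8, shift = 2;
  const uint8_t lhs_lb = 0x08, lhs_ub = 0xF0;   // LHS of op1 << 2
  uint8_t lb = lhs_lb >> shift;                 // 0x02
  uint8_t ub = lhs_ub >> shift;                 // 0x3C
  unsigned low_bits = prec - shift;             // 6
  uint8_t up_mask = static_cast<uint8_t> (~((1u << low_bits) - 1));   // 0xC0
  uint8_t new_lb = static_cast<uint8_t> (lb | (1u << low_bits));      // 0x42
  uint8_t new_ub = static_cast<uint8_t> (ub | up_mask);               // 0xFC
  std::printf ("[%#x, %#x] U [%#x, %#x]\n", lb, ub, new_lb, new_ub);
  return 0;
}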
2695 bool
2696 operator_rshift::op1_range (irange &r,
2697 tree type,
2698 const irange &lhs,
2699 const irange &op2,
2700 relation_trio) const
2702 if (lhs.undefined_p ())
2703 return false;
2704 wide_int shift;
2705 if (op2.singleton_p (shift))
2707 // Ignore nonsensical shifts.
2708 unsigned prec = TYPE_PRECISION (type);
2709 if (wi::ge_p (shift,
2710 wi::uhwi (prec, TYPE_PRECISION (op2.type ())),
2711 UNSIGNED))
2712 return false;
2713 if (shift == 0)
2715 r = lhs;
2716 return true;
2719 // Folding the original operation may discard some impossible
2720 // ranges from the LHS.
2721 int_range_max lhs_refined;
2722 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
2723 lhs_refined.intersect (lhs);
2724 if (lhs_refined.undefined_p ())
2726 r.set_undefined ();
2727 return true;
2729 int_range_max shift_range (op2.type (), shift, shift);
2730 int_range_max lb, ub;
2731 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
2732 // LHS
2733 // 0000 0111 = OP1 >> 3
2735 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2736 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2737 // right hand side (0x07).
2738 wide_int mask = wi::bit_not (wi::lshift (wi::minus_one (prec), shift));
2739 int_range_max mask_range (type,
2740 wi::zero (TYPE_PRECISION (type)),
2741 mask);
2742 op_plus.fold_range (ub, type, lb, mask_range);
2743 r = lb;
2744 r.union_ (ub);
2745 if (!contains_zero_p (lhs_refined))
2747 mask_range.invert ();
2748 r.intersect (mask_range);
2750 return true;
2752 return false;
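// A standalone illustration, not part of range-op.cc, of reconstructing OP1
// from OP1 >> SHIFT on 8-bit values: shift the LHS back up and widen it by the
// mask of the bits that were shifted out.
#include <cstdint>
#include <cstdio>

int main ()
{
  const unsigned shift = 3;
  const uint8_t lhs = 0x07;                             // OP1 >> 3 == 0000 0111
  uint8_t lb = static_cast<uint8_t> (lhs << shift);         // 0x38
  uint8_t mask = static_cast<uint8_t> ((1u << shift) - 1);  // 0x07
  uint8_t ub = static_cast<uint8_t> (lb + mask);            // 0x3F
  std::printf ("op1 in [%#x, %#x]\n", lb, ub);          // [0x38, 0x3f]
  return 0;
}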
2755 bool
2756 operator_rshift::wi_op_overflows (wide_int &res,
2757 tree type,
2758 const wide_int &w0,
2759 const wide_int &w1) const
2761 signop sign = TYPE_SIGN (type);
2762 if (wi::neg_p (w1))
2763 res = wi::lshift (w0, -w1);
2764 else
2766 // It's unclear from the C standard whether shifts can overflow.
2767 // The following code ignores overflow; perhaps a C standard
2768 // interpretation ruling is needed.
2769 res = wi::rshift (w0, w1, sign);
2771 return false;
2774 bool
2775 operator_rshift::fold_range (irange &r, tree type,
2776 const irange &op1,
2777 const irange &op2,
2778 relation_trio rel) const
2780 int_range_max shift;
2781 if (!get_shift_range (shift, type, op2))
2783 if (op2.undefined_p ())
2784 r.set_undefined ();
2785 else
2786 r.set_zero (type);
2787 return true;
2790 return range_operator::fold_range (r, type, op1, shift, rel);
2793 void
2794 operator_rshift::wi_fold (irange &r, tree type,
2795 const wide_int &lh_lb, const wide_int &lh_ub,
2796 const wide_int &rh_lb, const wide_int &rh_ub) const
2798 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2802 // Add a partial equivalence between the LHS and op1 for casts.
2804 relation_kind
2805 operator_cast::lhs_op1_relation (const irange &lhs,
2806 const irange &op1,
2807 const irange &op2 ATTRIBUTE_UNUSED,
2808 relation_kind) const
2810 if (lhs.undefined_p () || op1.undefined_p ())
2811 return VREL_VARYING;
2812 unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
2813 unsigned op1_prec = TYPE_PRECISION (op1.type ());
2814 // If the result gets sign extended into a larger type check first if this
2815 // qualifies as a partial equivalence.
2816 if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
2818 // If the result is sign extended, and the LHS is larger than op1,
2819 // check if op1's range can be negative as the sign extension will
2820 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2821 int_range<3> negs = range_negatives (op1.type ());
2822 negs.intersect (op1);
2823 if (!negs.undefined_p ())
2824 return VREL_VARYING;
2827 unsigned prec = MIN (lhs_prec, op1_prec);
2828 return bits_to_pe (prec);
2831 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2833 inline bool
2834 operator_cast::truncating_cast_p (const irange &inner,
2835 const irange &outer) const
2837 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
2840 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2842 bool
2843 operator_cast::inside_domain_p (const wide_int &min,
2844 const wide_int &max,
2845 const irange &range) const
2847 wide_int domain_min = irange_val_min (range.type ());
2848 wide_int domain_max = irange_val_max (range.type ());
2849 signop domain_sign = TYPE_SIGN (range.type ());
2850 return (wi::le_p (min, domain_max, domain_sign)
2851 && wi::le_p (max, domain_max, domain_sign)
2852 && wi::ge_p (min, domain_min, domain_sign)
2853 && wi::ge_p (max, domain_min, domain_sign));
2859 // Helper for fold_range which works on one pair at a time.
2859 void
2860 operator_cast::fold_pair (irange &r, unsigned index,
2861 const irange &inner,
2862 const irange &outer) const
2864 tree inner_type = inner.type ();
2865 tree outer_type = outer.type ();
2866 signop inner_sign = TYPE_SIGN (inner_type);
2867 unsigned outer_prec = TYPE_PRECISION (outer_type);
2869 // Check to see if casting from INNER to OUTER is a conversion that
2870 // fits in the resulting OUTER type.
2871 wide_int inner_lb = inner.lower_bound (index);
2872 wide_int inner_ub = inner.upper_bound (index);
2873 if (truncating_cast_p (inner, outer))
2875 // We may be able to accommodate a truncating cast if the
2876 // resulting range can be represented in the target type...
2877 if (wi::rshift (wi::sub (inner_ub, inner_lb),
2878 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
2879 inner_sign) != 0)
2881 r.set_varying (outer_type);
2882 return;
2885 // ...but we must still verify that the final range fits in the
2886 // domain. This catches -fstrict-enum restrictions where the domain
2887 // range is smaller than what fits in the underlying type.
2888 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
2889 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
2890 if (inside_domain_p (min, max, outer))
2891 create_possibly_reversed_range (r, outer_type, min, max);
2892 else
2893 r.set_varying (outer_type);
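// A standalone illustration, not part of range-op.cc, of the truncating-cast
// test in fold_pair: casting [0xF0, 0x10F] down to 8 bits is representable
// because the spread (0x1F) fits in 8 bits, even though the truncated bounds
// wrap around and come out "reversed".
#include <cstdint>
#include <cstdio>

int main ()
{
  const uint32_t lb = 0xF0, ub = 0x10F;
  const unsigned outer_prec = 8;
  if (((ub - lb) >> outer_prec) != 0)
    std::printf ("varying\n");
  else
    {
      uint8_t new_lb = static_cast<uint8_t> (lb);       // 0xF0
      uint8_t new_ub = static_cast<uint8_t> (ub);       // 0x0F, a reversed pair,
      // which the real code turns into [0, 0x0F] U [0xF0, 0xFF].
      std::printf ("[%#x, %#x]\n", new_lb, new_ub);
    }
  return 0;
}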
2897 bool
2898 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
2899 const irange &inner,
2900 const irange &outer,
2901 relation_trio) const
2903 if (empty_range_varying (r, type, inner, outer))
2904 return true;
2906 gcc_checking_assert (outer.varying_p ());
2907 gcc_checking_assert (inner.num_pairs () > 0);
2909 // Avoid a temporary by folding the first pair directly into the result.
2910 fold_pair (r, 0, inner, outer);
2912 // Then process any additional pairs by unioning with their results.
2913 for (unsigned x = 1; x < inner.num_pairs (); ++x)
2915 int_range_max tmp;
2916 fold_pair (tmp, x, inner, outer);
2917 r.union_ (tmp);
2918 if (r.varying_p ())
2919 return true;
2922 update_bitmask (r, inner, outer);
2923 return true;
2926 void
2927 operator_cast::update_bitmask (irange &r, const irange &lh,
2928 const irange &rh) const
2930 update_known_bitmask (r, CONVERT_EXPR, lh, rh);
2933 bool
2934 operator_cast::op1_range (irange &r, tree type,
2935 const irange &lhs,
2936 const irange &op2,
2937 relation_trio) const
2939 if (lhs.undefined_p ())
2940 return false;
2941 tree lhs_type = lhs.type ();
2942 gcc_checking_assert (types_compatible_p (op2.type (), type));
2944 // If we are calculating a pointer, shortcut to what we really care about.
2945 if (POINTER_TYPE_P (type))
2947 // Conversions from other pointers or a constant (including 0/NULL)
2948 // are straightforward.
2949 if (POINTER_TYPE_P (lhs.type ())
2950 || (lhs.singleton_p ()
2951 && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type)))
2953 r = lhs;
2954 range_cast (r, type);
2956 else
2958 // If the LHS is not a pointer nor a singleton, then it is
2959 // either VARYING or non-zero.
2960 if (!lhs.undefined_p () && !contains_zero_p (lhs))
2961 r.set_nonzero (type);
2962 else
2963 r.set_varying (type);
2965 r.intersect (op2);
2966 return true;
2969 if (truncating_cast_p (op2, lhs))
2971 if (lhs.varying_p ())
2972 r.set_varying (type);
2973 else
2975 // We want to insert the LHS as an unsigned value since it
2976 // would not trigger the signed bit of the larger type.
2977 int_range_max converted_lhs = lhs;
2978 range_cast (converted_lhs, unsigned_type_for (lhs_type));
2979 range_cast (converted_lhs, type);
2980 // Start by building the positive signed outer range for the type.
2981 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
2982 TYPE_PRECISION (type));
2983 create_possibly_reversed_range (r, type, lim,
2984 wi::max_value (TYPE_PRECISION (type),
2985 SIGNED));
2986 // For the signed part, we need to simply union the 2 ranges now.
2987 r.union_ (converted_lhs);
2989 // Create maximal negative number outside of LHS bits.
2990 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
2991 TYPE_PRECISION (type));
2992 // Add this to the unsigned LHS range(s).
2993 int_range_max lim_range (type, lim, lim);
2994 int_range_max lhs_neg;
2995 range_op_handler (PLUS_EXPR).fold_range (lhs_neg, type,
2996 converted_lhs, lim_range);
2997 // lhs_neg now has all the negative versions of the LHS.
2998 // Now union in all the values from SIGNED MIN (0x80000) to
2999 // lim-1 in order to fill in all the ranges with the upper
3000 // bits set.
3002 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
3003 // we don't need to create a range from min to lim-1, since
3004 // calculating the neg range would trap trying to create [lim, lim - 1].
3005 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
3006 if (lim != min_val)
3008 int_range_max neg (type,
3009 wi::min_value (TYPE_PRECISION (type),
3010 SIGNED),
3011 lim - 1);
3012 lhs_neg.union_ (neg);
3014 // And finally, munge the signed and unsigned portions.
3015 r.union_ (lhs_neg);
3017 // And intersect with any known value passed in the extra operand.
3018 r.intersect (op2);
3019 return true;
3022 int_range_max tmp;
3023 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
3024 tmp = lhs;
3025 else
3027 // The cast is not truncating, and the range is restricted to
3028 // the range of the RHS by this assignment.
3030 // Cast the range of the RHS to the type of the LHS.
3031 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
3032 // Intersecting this with the LHS range will produce the range,
3033 // which will be cast to the RHS type before returning.
3034 tmp.intersect (lhs);
3037 // Cast the calculated range to the type of the RHS.
3038 fold_range (r, type, tmp, int_range<1> (type));
3039 return true;
3043 class operator_logical_and : public range_operator
3045 using range_operator::fold_range;
3046 using range_operator::op1_range;
3047 using range_operator::op2_range;
3048 public:
3049 virtual bool fold_range (irange &r, tree type,
3050 const irange &lh,
3051 const irange &rh,
3052 relation_trio rel = TRIO_VARYING) const;
3053 virtual bool op1_range (irange &r, tree type,
3054 const irange &lhs,
3055 const irange &op2,
3056 relation_trio rel = TRIO_VARYING) const;
3057 virtual bool op2_range (irange &r, tree type,
3058 const irange &lhs,
3059 const irange &op1,
3060 relation_trio rel = TRIO_VARYING) const;
3061 } op_logical_and;
3064 bool
3065 operator_logical_and::fold_range (irange &r, tree type,
3066 const irange &lh,
3067 const irange &rh,
3068 relation_trio) const
3070 if (empty_range_varying (r, type, lh, rh))
3071 return true;
3073 // 0 && anything is 0.
3074 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
3075 || (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
3076 r = range_false (type);
3077 else if (contains_zero_p (lh) || contains_zero_p (rh))
3078 // To reach this point, there must be a logical 1 on each side, and
3079 // the only remaining question is whether there is a zero or not.
3080 r = range_true_and_false (type);
3081 else
3082 r = range_true (type);
3083 return true;
3086 bool
3087 operator_logical_and::op1_range (irange &r, tree type,
3088 const irange &lhs,
3089 const irange &op2 ATTRIBUTE_UNUSED,
3090 relation_trio) const
3092 switch (get_bool_state (r, lhs, type))
3094 case BRS_TRUE:
3095 // A true result means both sides of the AND must be true.
3096 r = range_true (type);
3097 break;
3098 default:
3099 // Any other result means only one side has to be false, the
3100 // other side can be anything. So we cannot be sure of any
3101 // result here.
3102 r = range_true_and_false (type);
3103 break;
3105 return true;
3108 bool
3109 operator_logical_and::op2_range (irange &r, tree type,
3110 const irange &lhs,
3111 const irange &op1,
3112 relation_trio) const
3114 return operator_logical_and::op1_range (r, type, lhs, op1);
3118 void
3119 operator_bitwise_and::update_bitmask (irange &r, const irange &lh,
3120 const irange &rh) const
3122 update_known_bitmask (r, BIT_AND_EXPR, lh, rh);
3125 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3126 // by considering the number of leading redundant sign bit copies.
3127 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3128 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
3129 static bool
3130 wi_optimize_signed_bitwise_op (irange &r, tree type,
3131 const wide_int &lh_lb, const wide_int &lh_ub,
3132 const wide_int &rh_lb, const wide_int &rh_ub)
3134 int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
3135 int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
3136 int new_clrsb = MIN (lh_clrsb, rh_clrsb);
3137 if (new_clrsb == 0)
3138 return false;
3139 int type_prec = TYPE_PRECISION (type);
3140 int rprec = (type_prec - new_clrsb) - 1;
3141 value_range_with_overflow (r, type,
3142 wi::mask (rprec, true, type_prec),
3143 wi::mask (rprec, false, type_prec));
3144 return true;
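// A standalone illustration, not part of range-op.cc, of the sign-bit-copies
// bound: if every value in both operand ranges has at least N redundant sign
// bits, so does X op Y, bounding the result to [-2^(31-N), 2^(31-N) - 1] for
// int32_t.  clrsb32 is a hand-rolled stand-in for __builtin_clrsb.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static int
clrsb32 (int32_t x)
{
  uint32_t u = static_cast<uint32_t> (x < 0 ? ~x : x);
  int n = 0;
  for (int bit = 30; bit >= 0 && !((u >> bit) & 1); bit--)
    n++;
  return n;
}

int main ()
{
  // [-1, 0] op [-1, 0]: every bound has 31 redundant sign bits.
  int n = std::min (std::min (clrsb32 (-1), clrsb32 (0)),
                    std::min (clrsb32 (-1), clrsb32 (0)));      // 31
  int rprec = (32 - n) - 1;                                     // 0
  int64_t lb = -(int64_t (1) << rprec);                         // -1
  int64_t ub = (int64_t (1) << rprec) - 1;                      // 0
  std::printf ("[%lld, %lld]\n", (long long) lb, (long long) ub);
  return 0;
}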
3147 // An AND of 8, 16, 32 or 64 bits can produce a partial equivalence between
3148 // the LHS and op1.
3150 relation_kind
3151 operator_bitwise_and::lhs_op1_relation (const irange &lhs,
3152 const irange &op1,
3153 const irange &op2,
3154 relation_kind) const
3156 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
3157 return VREL_VARYING;
3158 if (!op2.singleton_p ())
3159 return VREL_VARYING;
3160 // If val == 0xff, 0xffff, 0xffffffff or 0xffffffffffffffff, return a partial equivalence.
3161 int prec1 = TYPE_PRECISION (op1.type ());
3162 int prec2 = TYPE_PRECISION (op2.type ());
3163 int mask_prec = 0;
3164 wide_int mask = op2.lower_bound ();
3165 if (wi::eq_p (mask, wi::mask (8, false, prec2)))
3166 mask_prec = 8;
3167 else if (wi::eq_p (mask, wi::mask (16, false, prec2)))
3168 mask_prec = 16;
3169 else if (wi::eq_p (mask, wi::mask (32, false, prec2)))
3170 mask_prec = 32;
3171 else if (wi::eq_p (mask, wi::mask (64, false, prec2)))
3172 mask_prec = 64;
3173 return bits_to_pe (MIN (prec1, mask_prec));
3176 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3177 // possible. Basically, see if we can optimize:
3179 // [LB, UB] op Z
3180 // into:
3181 // [LB op Z, UB op Z]
3183 // If the optimization was successful, accumulate the range in R and
3184 // return TRUE.
3186 static bool
3187 wi_optimize_and_or (irange &r,
3188 enum tree_code code,
3189 tree type,
3190 const wide_int &lh_lb, const wide_int &lh_ub,
3191 const wide_int &rh_lb, const wide_int &rh_ub)
3193 // Calculate the singleton mask among the ranges, if any.
3194 wide_int lower_bound, upper_bound, mask;
3195 if (wi::eq_p (rh_lb, rh_ub))
3197 mask = rh_lb;
3198 lower_bound = lh_lb;
3199 upper_bound = lh_ub;
3201 else if (wi::eq_p (lh_lb, lh_ub))
3203 mask = lh_lb;
3204 lower_bound = rh_lb;
3205 upper_bound = rh_ub;
3207 else
3208 return false;
3210 // If Z is a constant which (for op | its bitwise not) has n
3211 // consecutive least significant bits cleared followed by m
3212 // consecutive 1 bits set immediately above them, and either
3213 // m + n == precision or (x >> (m + n)) == (y >> (m + n)), then:
3215 // The least significant n bits of all the values in the range are
3216 // cleared or set, the m bits above it are preserved and any bits
3217 // above these are required to be the same for all values in the
3218 // range.
3219 wide_int w = mask;
3220 int m = 0, n = 0;
3221 if (code == BIT_IOR_EXPR)
3222 w = ~w;
3223 if (wi::eq_p (w, 0))
3224 n = w.get_precision ();
3225 else
3227 n = wi::ctz (w);
3228 w = ~(w | wi::mask (n, false, w.get_precision ()));
3229 if (wi::eq_p (w, 0))
3230 m = w.get_precision () - n;
3231 else
3232 m = wi::ctz (w) - n;
3234 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
3235 if ((new_mask & lower_bound) != (new_mask & upper_bound))
3236 return false;
3238 wide_int res_lb, res_ub;
3239 if (code == BIT_AND_EXPR)
3241 res_lb = wi::bit_and (lower_bound, mask);
3242 res_ub = wi::bit_and (upper_bound, mask);
3244 else if (code == BIT_IOR_EXPR)
3246 res_lb = wi::bit_or (lower_bound, mask);
3247 res_ub = wi::bit_or (upper_bound, mask);
3249 else
3250 gcc_unreachable ();
3251 value_range_with_overflow (r, type, res_lb, res_ub);
3253 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3254 if (code == BIT_IOR_EXPR && wi::ne_p (mask, 0))
3256 int_range<2> tmp;
3257 tmp.set_nonzero (type);
3258 r.intersect (tmp);
3260 return true;
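// A standalone illustration, not part of range-op.cc, of the mask optimization
// above on 8-bit values: for a mask with n cleared low bits and m ones right
// above them, [lb, ub] & mask is just [lb & mask, ub & mask] whenever lb and
// ub agree on all bits above m + n.
#include <cstdint>
#include <cstdio>

int main ()
{
  const uint8_t lb = 0x12, ub = 0x1A, mask = 0xF8;
  int n = __builtin_ctz (mask);                                 // 3 cleared low bits
  uint8_t w = static_cast<uint8_t> (~(mask | ((1u << n) - 1)));
  int m = w == 0 ? 8 - n : __builtin_ctz (w) - n;               // 5 set bits above
  uint8_t high = static_cast<uint8_t> (~((1u << (m + n)) - 1)); // bits above m + n
  if ((lb & high) == (ub & high))
    std::printf ("[%#x, %#x]\n", lb & mask, ub & mask);         // [0x10, 0x18]
  return 0;
}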
3263 // For range [LB, UB] compute two wide_int bit masks.
3265 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3266 // for all numbers in the range the bit is 0, otherwise it might be 0
3267 // or 1.
3269 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3270 // for all numbers in the range the bit is 1, otherwise it might be 0
3271 // or 1.
3273 void
3274 wi_set_zero_nonzero_bits (tree type,
3275 const wide_int &lb, const wide_int &ub,
3276 wide_int &maybe_nonzero,
3277 wide_int &mustbe_nonzero)
3279 signop sign = TYPE_SIGN (type);
3281 if (wi::eq_p (lb, ub))
3282 maybe_nonzero = mustbe_nonzero = lb;
3283 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
3285 wide_int xor_mask = lb ^ ub;
3286 maybe_nonzero = lb | ub;
3287 mustbe_nonzero = lb & ub;
3288 if (xor_mask != 0)
3290 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
3291 maybe_nonzero.get_precision ());
3292 maybe_nonzero = maybe_nonzero | mask;
3293 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
3296 else
3298 maybe_nonzero = wi::minus_one (lb.get_precision ());
3299 mustbe_nonzero = wi::zero (lb.get_precision ());
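// A standalone illustration, not part of range-op.cc, of the maybe/mustbe
// nonzero-bit masks for an 8-bit range that does not cross zero: bits below
// the highest bit in which the bounds differ may vary, everything above is
// fixed.
#include <cstdint>
#include <cstdio>

int main ()
{
  const uint8_t lb = 0x14, ub = 0x17;   // [0001 0100, 0001 0111]
  uint8_t xor_mask = lb ^ ub;           // 0000 0011: bits that differ
  uint8_t maybe = lb | ub;
  uint8_t mustbe = lb & ub;
  if (xor_mask)
    {
      int fl = 7;
      while (!((xor_mask >> fl) & 1))   // highest differing bit
        fl--;
      uint8_t m = static_cast<uint8_t> ((1u << fl) - 1);
      maybe |= m;                       // anything below it may be set
      mustbe &= static_cast<uint8_t> (~m);
    }
  std::printf ("maybe %#x mustbe %#x\n", maybe, mustbe);  // 0x17 and 0x14
  return 0;
}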
3303 void
3304 operator_bitwise_and::wi_fold (irange &r, tree type,
3305 const wide_int &lh_lb,
3306 const wide_int &lh_ub,
3307 const wide_int &rh_lb,
3308 const wide_int &rh_ub) const
3310 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3311 return;
3313 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3314 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3315 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3316 maybe_nonzero_lh, mustbe_nonzero_lh);
3317 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3318 maybe_nonzero_rh, mustbe_nonzero_rh);
3320 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
3321 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
3322 signop sign = TYPE_SIGN (type);
3323 unsigned prec = TYPE_PRECISION (type);
3324 // If both input ranges contain only negative values, we can
3325 // truncate the result range maximum to the minimum of the
3326 // input range maxima.
3327 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
3329 new_ub = wi::min (new_ub, lh_ub, sign);
3330 new_ub = wi::min (new_ub, rh_ub, sign);
3332 // If either input range contains only non-negative values
3333 // we can truncate the result range maximum to the respective
3334 // maximum of the input range.
3335 if (wi::ge_p (lh_lb, 0, sign))
3336 new_ub = wi::min (new_ub, lh_ub, sign);
3337 if (wi::ge_p (rh_lb, 0, sign))
3338 new_ub = wi::min (new_ub, rh_ub, sign);
3339 // PR68217: In case of signed & sign-bit-CST should
3340 // result in [-INF, 0] instead of [-INF, INF].
3341 if (wi::gt_p (new_lb, new_ub, sign))
3343 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
3344 if (sign == SIGNED
3345 && ((wi::eq_p (lh_lb, lh_ub)
3346 && !wi::cmps (lh_lb, sign_bit))
3347 || (wi::eq_p (rh_lb, rh_ub)
3348 && !wi::cmps (rh_lb, sign_bit))))
3350 new_lb = wi::min_value (prec, sign);
3351 new_ub = wi::zero (prec);
3354 // If the limits got swapped around, return varying.
3355 if (wi::gt_p (new_lb, new_ub, sign))
3357 if (sign == SIGNED
3358 && wi_optimize_signed_bitwise_op (r, type,
3359 lh_lb, lh_ub,
3360 rh_lb, rh_ub))
3361 return;
3362 r.set_varying (type);
3364 else
3365 value_range_with_overflow (r, type, new_lb, new_ub);
3368 static void
3369 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
3371 if (lhs.undefined_p () || contains_zero_p (lhs))
3372 r.set_varying (type);
3373 else
3374 r.set_nonzero (type);
3377 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
3378 (otherwise return VAL). VAL and MASK must be zero-extended for
3379 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3380 (to transform signed values into unsigned) and at the end xor
3381 SGNBIT back. */
3383 wide_int
3384 masked_increment (const wide_int &val_in, const wide_int &mask,
3385 const wide_int &sgnbit, unsigned int prec)
3387 wide_int bit = wi::one (prec), res;
3388 unsigned int i;
3390 wide_int val = val_in ^ sgnbit;
3391 for (i = 0; i < prec; i++, bit += bit)
3393 res = mask;
3394 if ((res & bit) == 0)
3395 continue;
3396 res = bit - 1;
3397 res = wi::bit_and_not (val + bit, res);
3398 res &= mask;
3399 if (wi::gtu_p (res, val))
3400 return res ^ sgnbit;
3402 return val ^ sgnbit;
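// A standalone illustration, not part of range-op.cc, of masked_increment on
// 8-bit unsigned values (SGNBIT == 0): find the smallest RES > VAL with
// (RES & MASK) == RES.
#include <cstdio>

static unsigned
masked_increment8 (unsigned val, unsigned mask)
{
  for (unsigned bit = 1; bit < 256; bit <<= 1)
    {
      if (!(mask & bit))
        continue;
      // Round VAL up past this bit, clear everything below it, keep only MASK bits.
      unsigned res = ((val + bit) & ~(bit - 1)) & mask;
      if (res > val)
        return res;
    }
  return val;   // no such value exists
}

int main ()
{
  // With MASK == 0xF0 the only values X with (X & MASK) == X are multiples of
  // 16; the smallest one above 0x37 is 0x40.
  std::printf ("%#x\n", masked_increment8 (0x37, 0xF0));
  return 0;
}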
3405 // This was shamelessly stolen from register_edge_assert_for_2 and
3406 // adjusted to work with iranges.
3408 void
3409 operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
3410 const irange &lhs,
3411 const irange &op2) const
3413 if (!op2.singleton_p ())
3415 set_nonzero_range_from_mask (r, type, lhs);
3416 return;
3418 unsigned int nprec = TYPE_PRECISION (type);
3419 wide_int cst2v = op2.lower_bound ();
3420 bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
3421 wide_int sgnbit;
3422 if (cst2n)
3423 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3424 else
3425 sgnbit = wi::zero (nprec);
3427 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3429 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3430 // maximum unsigned value is ~0. For signed comparison, if CST2
3431 // doesn't have the most significant bit set, handle it similarly. If
3432 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3433 wide_int valv = lhs.lower_bound ();
3434 wide_int minv = valv & cst2v, maxv;
3435 bool we_know_nothing = false;
3436 if (minv != valv)
3438 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3439 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3440 if (minv == valv)
3442 // If we can't determine anything on this bound, fall
3443 // through and conservatively solve for the other end point.
3444 we_know_nothing = true;
3447 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3448 if (we_know_nothing)
3449 r.set_varying (type);
3450 else
3451 create_possibly_reversed_range (r, type, minv, maxv);
3453 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3455 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3456 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3457 // VAL2 where
3458 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3459 // as maximum.
3460 // For signed comparison, if CST2 doesn't have most significant bit
3461 // set, handle it similarly. If CST2 has MSB set, the maximum is
3462 // the same and minimum is INT_MIN.
3463 valv = lhs.upper_bound ();
3464 minv = valv & cst2v;
3465 if (minv == valv)
3466 maxv = valv;
3467 else
3469 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3470 if (maxv == valv)
3472 // If we couldn't determine anything on either bound, return
3473 // undefined.
3474 if (we_know_nothing)
3475 r.set_undefined ();
3476 return;
3478 maxv -= 1;
3480 maxv |= ~cst2v;
3481 minv = sgnbit;
3482 int_range<2> upper_bits;
3483 create_possibly_reversed_range (upper_bits, type, minv, maxv);
3484 r.intersect (upper_bits);
3487 bool
3488 operator_bitwise_and::op1_range (irange &r, tree type,
3489 const irange &lhs,
3490 const irange &op2,
3491 relation_trio) const
3493 if (lhs.undefined_p ())
3494 return false;
3495 if (types_compatible_p (type, boolean_type_node))
3496 return op_logical_and.op1_range (r, type, lhs, op2);
3498 r.set_undefined ();
3499 for (unsigned i = 0; i < lhs.num_pairs (); ++i)
3501 int_range_max chunk (lhs.type (),
3502 lhs.lower_bound (i),
3503 lhs.upper_bound (i));
3504 int_range_max res;
3505 simple_op1_range_solver (res, type, chunk, op2);
3506 r.union_ (res);
3508 if (r.undefined_p ())
3509 set_nonzero_range_from_mask (r, type, lhs);
3511 // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
3512 wide_int mask;
3513 if (lhs == op2 && lhs.singleton_p (mask))
3515 r.update_bitmask (irange_bitmask (mask, ~mask));
3516 return true;
3519 // For 0 = op1 & MASK, op1 is ~MASK.
3520 if (lhs.zero_p () && op2.singleton_p ())
3522 wide_int nz = wi::bit_not (op2.get_nonzero_bits ());
3523 int_range<2> tmp (type);
3524 tmp.set_nonzero_bits (nz);
3525 r.intersect (tmp);
3527 return true;
3530 bool
3531 operator_bitwise_and::op2_range (irange &r, tree type,
3532 const irange &lhs,
3533 const irange &op1,
3534 relation_trio) const
3536 return operator_bitwise_and::op1_range (r, type, lhs, op1);
3540 class operator_logical_or : public range_operator
3542 using range_operator::fold_range;
3543 using range_operator::op1_range;
3544 using range_operator::op2_range;
3545 public:
3546 virtual bool fold_range (irange &r, tree type,
3547 const irange &lh,
3548 const irange &rh,
3549 relation_trio rel = TRIO_VARYING) const;
3550 virtual bool op1_range (irange &r, tree type,
3551 const irange &lhs,
3552 const irange &op2,
3553 relation_trio rel = TRIO_VARYING) const;
3554 virtual bool op2_range (irange &r, tree type,
3555 const irange &lhs,
3556 const irange &op1,
3557 relation_trio rel = TRIO_VARYING) const;
3558 } op_logical_or;
3560 bool
3561 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3562 const irange &lh,
3563 const irange &rh,
3564 relation_trio) const
3566 if (empty_range_varying (r, type, lh, rh))
3567 return true;
3569 r = lh;
3570 r.union_ (rh);
3571 return true;
3574 bool
3575 operator_logical_or::op1_range (irange &r, tree type,
3576 const irange &lhs,
3577 const irange &op2 ATTRIBUTE_UNUSED,
3578 relation_trio) const
3580 switch (get_bool_state (r, lhs, type))
3582 case BRS_FALSE:
3583 // A false result means both sides of the OR must be false.
3584 r = range_false (type);
3585 break;
3586 default:
3587 // Any other result means only one side has to be true, the
3588 // other side can be anything. so we can't be sure of any result
3589 // here.
3590 r = range_true_and_false (type);
3591 break;
3593 return true;
3596 bool
3597 operator_logical_or::op2_range (irange &r, tree type,
3598 const irange &lhs,
3599 const irange &op1,
3600 relation_trio) const
3602 return operator_logical_or::op1_range (r, type, lhs, op1);
3606 void
3607 operator_bitwise_or::update_bitmask (irange &r, const irange &lh,
3608 const irange &rh) const
3610 update_known_bitmask (r, BIT_IOR_EXPR, lh, rh);
3613 void
3614 operator_bitwise_or::wi_fold (irange &r, tree type,
3615 const wide_int &lh_lb,
3616 const wide_int &lh_ub,
3617 const wide_int &rh_lb,
3618 const wide_int &rh_ub) const
3620 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3621 return;
3623 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3624 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3625 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3626 maybe_nonzero_lh, mustbe_nonzero_lh);
3627 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3628 maybe_nonzero_rh, mustbe_nonzero_rh);
3629 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
3630 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
3631 signop sign = TYPE_SIGN (type);
3632 // If the input ranges contain only positive values we can
3633 // truncate the minimum of the result range to the maximum
3634 // of the input range minima.
3635 if (wi::ge_p (lh_lb, 0, sign)
3636 && wi::ge_p (rh_lb, 0, sign))
3638 new_lb = wi::max (new_lb, lh_lb, sign);
3639 new_lb = wi::max (new_lb, rh_lb, sign);
3641 // If either input range contains only negative values,
3642 // we can truncate the minimum of the result range to the
3643 // respective input range minimum.
3644 if (wi::lt_p (lh_ub, 0, sign))
3645 new_lb = wi::max (new_lb, lh_lb, sign);
3646 if (wi::lt_p (rh_ub, 0, sign))
3647 new_lb = wi::max (new_lb, rh_lb, sign);
3648 // If the limits got swapped around, return a conservative range.
3649 if (wi::gt_p (new_lb, new_ub, sign))
3651 // Make sure that nonzero|X is nonzero.
3652 if (wi::gt_p (lh_lb, 0, sign)
3653 || wi::gt_p (rh_lb, 0, sign)
3654 || wi::lt_p (lh_ub, 0, sign)
3655 || wi::lt_p (rh_ub, 0, sign))
3656 r.set_nonzero (type);
3657 else if (sign == SIGNED
3658 && wi_optimize_signed_bitwise_op (r, type,
3659 lh_lb, lh_ub,
3660 rh_lb, rh_ub))
3661 return;
3662 else
3663 r.set_varying (type);
3664 return;
3666 value_range_with_overflow (r, type, new_lb, new_ub);
3669 bool
3670 operator_bitwise_or::op1_range (irange &r, tree type,
3671 const irange &lhs,
3672 const irange &op2,
3673 relation_trio) const
3675 if (lhs.undefined_p ())
3676 return false;
3677 // If this is really a logical wi_fold, call that.
3678 if (types_compatible_p (type, boolean_type_node))
3679 return op_logical_or.op1_range (r, type, lhs, op2);
3681 if (lhs.zero_p ())
3683 r.set_zero (type);
3684 return true;
3686 r.set_varying (type);
3687 return true;
3690 bool
3691 operator_bitwise_or::op2_range (irange &r, tree type,
3692 const irange &lhs,
3693 const irange &op1,
3694 relation_trio) const
3696 return operator_bitwise_or::op1_range (r, type, lhs, op1);
3699 void
3700 operator_bitwise_xor::update_bitmask (irange &r, const irange &lh,
3701 const irange &rh) const
3703 update_known_bitmask (r, BIT_XOR_EXPR, lh, rh);
3706 void
3707 operator_bitwise_xor::wi_fold (irange &r, tree type,
3708 const wide_int &lh_lb,
3709 const wide_int &lh_ub,
3710 const wide_int &rh_lb,
3711 const wide_int &rh_ub) const
3713 signop sign = TYPE_SIGN (type);
3714 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3715 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3716 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3717 maybe_nonzero_lh, mustbe_nonzero_lh);
3718 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3719 maybe_nonzero_rh, mustbe_nonzero_rh);
3721 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
3722 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
3723 wide_int result_one_bits
3724 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
3725 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
3726 wide_int new_ub = ~result_zero_bits;
3727 wide_int new_lb = result_one_bits;
3729 // If the range has all positive or all negative values, the result
3730 // is better than VARYING.
3731 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
3732 value_range_with_overflow (r, type, new_lb, new_ub);
3733 else if (sign == SIGNED
3734 && wi_optimize_signed_bitwise_op (r, type,
3735 lh_lb, lh_ub,
3736 rh_lb, rh_ub))
3737 ; /* Do nothing. */
3738 else
3739 r.set_varying (type);
3741 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3742 if (wi::lt_p (lh_ub, rh_lb, sign)
3743 || wi::lt_p (rh_ub, lh_lb, sign)
3744 || wi::ne_p (result_one_bits, 0))
3746 int_range<2> tmp;
3747 tmp.set_nonzero (type);
3748 r.intersect (tmp);
3752 bool
3753 operator_bitwise_xor::op1_op2_relation_effect (irange &lhs_range,
3754 tree type,
3755 const irange &,
3756 const irange &,
3757 relation_kind rel) const
3759 if (rel == VREL_VARYING)
3760 return false;
3762 int_range<2> rel_range;
3764 switch (rel)
3766 case VREL_EQ:
3767 rel_range.set_zero (type);
3768 break;
3769 case VREL_NE:
3770 rel_range.set_nonzero (type);
3771 break;
3772 default:
3773 return false;
3776 lhs_range.intersect (rel_range);
3777 return true;
3780 bool
3781 operator_bitwise_xor::op1_range (irange &r, tree type,
3782 const irange &lhs,
3783 const irange &op2,
3784 relation_trio) const
3786 if (lhs.undefined_p () || lhs.varying_p ())
3788 r = lhs;
3789 return true;
3791 if (types_compatible_p (type, boolean_type_node))
3793 switch (get_bool_state (r, lhs, type))
3795 case BRS_TRUE:
3796 if (op2.varying_p ())
3797 r.set_varying (type);
3798 else if (op2.zero_p ())
3799 r = range_true (type);
3800 // See get_bool_state for the rationale
3801 else if (op2.undefined_p () || contains_zero_p (op2))
3802 r = range_true_and_false (type);
3803 else
3804 r = range_false (type);
3805 break;
3806 case BRS_FALSE:
3807 r = op2;
3808 break;
3809 default:
3810 break;
3812 return true;
3814 r.set_varying (type);
3815 return true;
3818 bool
3819 operator_bitwise_xor::op2_range (irange &r, tree type,
3820 const irange &lhs,
3821 const irange &op1,
3822 relation_trio) const
3824 return operator_bitwise_xor::op1_range (r, type, lhs, op1);
3827 class operator_trunc_mod : public range_operator
3829 using range_operator::op1_range;
3830 using range_operator::op2_range;
3831 public:
3832 virtual void wi_fold (irange &r, tree type,
3833 const wide_int &lh_lb,
3834 const wide_int &lh_ub,
3835 const wide_int &rh_lb,
3836 const wide_int &rh_ub) const;
3837 virtual bool op1_range (irange &r, tree type,
3838 const irange &lhs,
3839 const irange &op2,
3840 relation_trio) const;
3841 virtual bool op2_range (irange &r, tree type,
3842 const irange &lhs,
3843 const irange &op1,
3844 relation_trio) const;
3845 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
3846 { update_known_bitmask (r, TRUNC_MOD_EXPR, lh, rh); }
3847 } op_trunc_mod;
3849 void
3850 operator_trunc_mod::wi_fold (irange &r, tree type,
3851 const wide_int &lh_lb,
3852 const wide_int &lh_ub,
3853 const wide_int &rh_lb,
3854 const wide_int &rh_ub) const
3856 wide_int new_lb, new_ub, tmp;
3857 signop sign = TYPE_SIGN (type);
3858 unsigned prec = TYPE_PRECISION (type);
3860 // Mod 0 is undefined.
3861 if (wi_zero_p (type, rh_lb, rh_ub))
3863 r.set_undefined ();
3864 return;
3867 // Check for constant and try to fold.
3868 if (lh_lb == lh_ub && rh_lb == rh_ub)
3870 wi::overflow_type ov = wi::OVF_NONE;
3871 tmp = wi::mod_trunc (lh_lb, rh_lb, sign, &ov);
3872 if (ov == wi::OVF_NONE)
3874 r = int_range<2> (type, tmp, tmp);
3875 return;
3879 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
3880 new_ub = rh_ub - 1;
3881 if (sign == SIGNED)
3883 tmp = -1 - rh_lb;
3884 new_ub = wi::smax (new_ub, tmp);
3887 if (sign == UNSIGNED)
3888 new_lb = wi::zero (prec);
3889 else
3891 new_lb = -new_ub;
3892 tmp = lh_lb;
3893 if (wi::gts_p (tmp, 0))
3894 tmp = wi::zero (prec);
3895 new_lb = wi::smax (new_lb, tmp);
3897 tmp = lh_ub;
3898 if (sign == SIGNED && wi::neg_p (tmp))
3899 tmp = wi::zero (prec);
3900 new_ub = wi::min (new_ub, tmp, sign);
3902 value_range_with_overflow (r, type, new_lb, new_ub);
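// A standalone illustration, not part of range-op.cc, of the truncating-modulo
// bounds above in plain int64_t: the magnitude of a % b stays below the
// largest divisor magnitude, and the result is further clipped by the sign and
// bounds of the dividend.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main ()
{
  const int64_t lh_lb = -5, lh_ub = 100;        // dividend range
  const int64_t rh_lb = -8, rh_ub = 3;          // divisor range (zero skipped)
  int64_t new_ub = std::max (rh_ub - 1, -1 - rh_lb);            // 7
  int64_t new_lb = -new_ub;                                     // -7
  new_lb = std::max (new_lb, std::min (lh_lb, int64_t (0)));    // -5
  new_ub = std::min (new_ub, std::max (lh_ub, int64_t (0)));    // 7
  std::printf ("[%lld, %lld]\n", (long long) new_lb, (long long) new_ub);
  return 0;
}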
3905 bool
3906 operator_trunc_mod::op1_range (irange &r, tree type,
3907 const irange &lhs,
3908 const irange &,
3909 relation_trio) const
3911 if (lhs.undefined_p ())
3912 return false;
3913 // PR 91029.
3914 signop sign = TYPE_SIGN (type);
3915 unsigned prec = TYPE_PRECISION (type);
3916 // (a % b) >= x && x > 0 , then a >= x.
3917 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3919 r = value_range (type, lhs.lower_bound (), wi::max_value (prec, sign));
3920 return true;
3922 // (a % b) <= x && x < 0 , then a <= x.
3923 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3925 r = value_range (type, wi::min_value (prec, sign), lhs.upper_bound ());
3926 return true;
3928 return false;
3931 bool
3932 operator_trunc_mod::op2_range (irange &r, tree type,
3933 const irange &lhs,
3934 const irange &,
3935 relation_trio) const
3937 if (lhs.undefined_p ())
3938 return false;
3939 // PR 91029.
3940 signop sign = TYPE_SIGN (type);
3941 unsigned prec = TYPE_PRECISION (type);
3942 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
3943 // or b > x for unsigned.
3944 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3946 if (sign == SIGNED)
3947 r = value_range (type, wi::neg (lhs.lower_bound ()),
3948 lhs.lower_bound (), VR_ANTI_RANGE);
3949 else if (wi::lt_p (lhs.lower_bound (), wi::max_value (prec, sign),
3950 sign))
3951 r = value_range (type, lhs.lower_bound () + 1,
3952 wi::max_value (prec, sign));
3953 else
3954 return false;
3955 return true;
3957 // (a % b) <= x && x < 0 , then b is in ~[x, -x].
3958 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3960 if (wi::gt_p (lhs.upper_bound (), wi::min_value (prec, sign), sign))
3961 r = value_range (type, lhs.upper_bound (),
3962 wi::neg (lhs.upper_bound ()), VR_ANTI_RANGE);
3963 else
3964 return false;
3965 return true;
3967 return false;
3971 class operator_logical_not : public range_operator
3973 using range_operator::fold_range;
3974 using range_operator::op1_range;
3975 public:
3976 virtual bool fold_range (irange &r, tree type,
3977 const irange &lh,
3978 const irange &rh,
3979 relation_trio rel = TRIO_VARYING) const;
3980 virtual bool op1_range (irange &r, tree type,
3981 const irange &lhs,
3982 const irange &op2,
3983 relation_trio rel = TRIO_VARYING) const;
3984 } op_logical_not;
3986 // Folding a logical NOT, oddly enough, involves doing nothing on the
3987 // forward pass through. During the initial walk backwards, the
3988 // logical NOT reversed the desired outcome on the way back, so on the
3989 // way forward all we do is pass the range forward.
3991 // b_2 = x_1 < 20
3992 // b_3 = !b_2
3993 // if (b_3)
3994 // to determine the TRUE branch, walking backward
3995 // if (b_3) if ([1,1])
3996 // b_3 = !b_2 [1,1] = ![0,0]
3997 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
3998 // which is the result we are looking for.. so.. pass it through.
4000 bool
4001 operator_logical_not::fold_range (irange &r, tree type,
4002 const irange &lh,
4003 const irange &rh ATTRIBUTE_UNUSED,
4004 relation_trio) const
4006 if (empty_range_varying (r, type, lh, rh))
4007 return true;
4009 r = lh;
4010 if (!lh.varying_p () && !lh.undefined_p ())
4011 r.invert ();
4013 return true;
4016 bool
4017 operator_logical_not::op1_range (irange &r,
4018 tree type,
4019 const irange &lhs,
4020 const irange &op2,
4021 relation_trio) const
4023 // Logical NOT is involutory...do it again.
4024 return fold_range (r, type, lhs, op2);
4028 bool
4029 operator_bitwise_not::fold_range (irange &r, tree type,
4030 const irange &lh,
4031 const irange &rh,
4032 relation_trio) const
4034 if (empty_range_varying (r, type, lh, rh))
4035 return true;
4037 if (types_compatible_p (type, boolean_type_node))
4038 return op_logical_not.fold_range (r, type, lh, rh);
4040 // ~X is simply -1 - X.
4041 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
4042 wi::minus_one (TYPE_PRECISION (type)));
4043 return range_op_handler (MINUS_EXPR).fold_range (r, type, minusone, lh);
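// A standalone check, not part of range-op.cc, of the identity used above:
// ~X == -1 - X, verified here over every uint8_t value.
#include <cassert>
#include <cstdint>

int main ()
{
  for (unsigned v = 0; v < 256; v++)
    assert (static_cast<uint8_t> (~v) == static_cast<uint8_t> (-1 - v));
  return 0;
}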
4046 bool
4047 operator_bitwise_not::op1_range (irange &r, tree type,
4048 const irange &lhs,
4049 const irange &op2,
4050 relation_trio) const
4052 if (lhs.undefined_p ())
4053 return false;
4054 if (types_compatible_p (type, boolean_type_node))
4055 return op_logical_not.op1_range (r, type, lhs, op2);
4057 // ~X is -1 - X and since bitwise NOT is involutory...do it again.
4058 return fold_range (r, type, lhs, op2);
4061 void
4062 operator_bitwise_not::update_bitmask (irange &r, const irange &lh,
4063 const irange &rh) const
4065 update_known_bitmask (r, BIT_NOT_EXPR, lh, rh);
4069 bool
4070 operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4071 const irange &lh,
4072 const irange &rh ATTRIBUTE_UNUSED,
4073 relation_trio) const
4075 r = lh;
4076 return true;
4080 // Determine if there is a relationship between LHS and OP1.
4082 relation_kind
4083 operator_identity::lhs_op1_relation (const irange &lhs,
4084 const irange &op1 ATTRIBUTE_UNUSED,
4085 const irange &op2 ATTRIBUTE_UNUSED,
4086 relation_kind) const
4088 if (lhs.undefined_p ())
4089 return VREL_VARYING;
4090 // Simply a copy, so they are equivalent.
4091 return VREL_EQ;
4094 bool
4095 operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4096 const irange &lh,
4097 const irange &rh ATTRIBUTE_UNUSED,
4098 relation_trio) const
4100 r = lh;
4101 return true;
4104 bool
4105 operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
4106 const irange &lhs,
4107 const irange &op2 ATTRIBUTE_UNUSED,
4108 relation_trio) const
4110 r = lhs;
4111 return true;
4115 class operator_unknown : public range_operator
4117 using range_operator::fold_range;
4118 public:
4119 virtual bool fold_range (irange &r, tree type,
4120 const irange &op1,
4121 const irange &op2,
4122 relation_trio rel = TRIO_VARYING) const;
4123 } op_unknown;
4125 bool
4126 operator_unknown::fold_range (irange &r, tree type,
4127 const irange &lh ATTRIBUTE_UNUSED,
4128 const irange &rh ATTRIBUTE_UNUSED,
4129 relation_trio) const
4131 r.set_varying (type);
4132 return true;
4136 void
4137 operator_abs::wi_fold (irange &r, tree type,
4138 const wide_int &lh_lb, const wide_int &lh_ub,
4139 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4140 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4142 wide_int min, max;
4143 signop sign = TYPE_SIGN (type);
4144 unsigned prec = TYPE_PRECISION (type);
4146 // Pass through LH for the easy cases.
4147 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
4149 r = int_range<1> (type, lh_lb, lh_ub);
4150 return;
4153 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4154 // a useful range.
4155 wide_int min_value = wi::min_value (prec, sign);
4156 wide_int max_value = wi::max_value (prec, sign);
4157 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
4159 r.set_varying (type);
4160 return;
4163 // ABS_EXPR may flip the range around, if the original range
4164 // included negative values.
4165 if (wi::eq_p (lh_lb, min_value))
4167 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4168 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4169 if (wi::eq_p (lh_ub, min_value))
4171 r = int_range<1> (type, min_value, min_value);
4172 return;
4174 min = max_value;
4176 else
4177 min = wi::abs (lh_lb);
4179 if (wi::eq_p (lh_ub, min_value))
4180 max = max_value;
4181 else
4182 max = wi::abs (lh_ub);
4184 // If the range contains zero then we know that the minimum value in the
4185 // range will be zero.
4186 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
4188 if (wi::gt_p (min, max, sign))
4189 max = min;
4190 min = wi::zero (prec);
4192 else
4194 // If the range was reversed, swap MIN and MAX.
4195 if (wi::gt_p (min, max, sign))
4196 std::swap (min, max);
4199 // If the new range has its limits swapped around (MIN > MAX), then
4200 // the operation caused one of them to wrap around. The only thing
4201 // we know is that the result is positive.
4202 if (wi::gt_p (min, max, sign))
4204 min = wi::zero (prec);
4205 max = max_value;
4207 r = int_range<1> (type, min, max);
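// Illustrative sketch, not part of the original source: a worked example
// of the folding above.  For a signed 32-bit [-7, 5] the bounds map to
// |-7| = 7 and |5| = 5; the input straddles zero, so the minimum drops
// to 0 and the maximum becomes 7.
//
//   int_range<1> x (integer_type_node,
//                   wi::shwi (-7, TYPE_PRECISION (integer_type_node)),
//                   wi::shwi (5, TYPE_PRECISION (integer_type_node)));
//   int_range_max r;
//   op_abs.fold_range (r, integer_type_node, x,
//                      int_range<1> (integer_type_node), TRIO_VARYING);
//   // r is now [0, 7].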
4210 bool
4211 operator_abs::op1_range (irange &r, tree type,
4212 const irange &lhs,
4213 const irange &op2,
4214 relation_trio) const
4216 if (empty_range_varying (r, type, lhs, op2))
4217 return true;
4218 if (TYPE_UNSIGNED (type))
4220 r = lhs;
4221 return true;
4223 // Start with the positives because negatives are an impossible result.
4224 int_range_max positives = range_positives (type);
4225 positives.intersect (lhs);
4226 r = positives;
4227 // Then add the negative of each pair:
4228 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4229 for (unsigned i = 0; i < positives.num_pairs (); ++i)
4230 r.union_ (int_range<1> (type,
4231 -positives.upper_bound (i),
4232 -positives.lower_bound (i)));
4233 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4234 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4235 wide_int min_value = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4236 wide_int lb = lhs.lower_bound ();
4237 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lb, min_value))
4238 r.union_ (int_range<2> (type, lb, lb));
4239 return true;
4242 void
4243 operator_abs::update_bitmask (irange &r, const irange &lh,
4244 const irange &rh) const
4246 update_known_bitmask (r, ABS_EXPR, lh, rh);
4249 class operator_absu : public range_operator
4251 public:
4252 virtual void wi_fold (irange &r, tree type,
4253 const wide_int &lh_lb, const wide_int &lh_ub,
4254 const wide_int &rh_lb, const wide_int &rh_ub) const;
4255 virtual void update_bitmask (irange &r, const irange &lh,
4256 const irange &rh) const final override;
4257 } op_absu;
4259 void
4260 operator_absu::wi_fold (irange &r, tree type,
4261 const wide_int &lh_lb, const wide_int &lh_ub,
4262 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4263 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4265 wide_int new_lb, new_ub;
4267 // Pass through VR0 for the easy cases.
4268 if (wi::ges_p (lh_lb, 0))
4270 new_lb = lh_lb;
4271 new_ub = lh_ub;
4273 else
4275 new_lb = wi::abs (lh_lb);
4276 new_ub = wi::abs (lh_ub);
4278 // If the range contains zero then we know that the minimum
4279 // value in the range will be zero.
4280 if (wi::ges_p (lh_ub, 0))
4282 if (wi::gtu_p (new_lb, new_ub))
4283 new_ub = new_lb;
4284 new_lb = wi::zero (TYPE_PRECISION (type));
4286 else
4287 std::swap (new_lb, new_ub);
4290 gcc_checking_assert (TYPE_UNSIGNED (type));
4291 r = int_range<1> (type, new_lb, new_ub);
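// Illustrative sketch, not part of the original source: ABSU_EXPR takes
// a signed operand but produces an unsigned result, so wi_fold is given
// the unsigned result type.  For signed char [-5, 3]:
//
//   int_range<1> r;
//   op_absu.wi_fold (r, unsigned_char_type_node,
//                    wi::shwi (-5, TYPE_PRECISION (signed_char_type_node)),
//                    wi::shwi (3, TYPE_PRECISION (signed_char_type_node)),
//                    wi::uhwi (0, TYPE_PRECISION (unsigned_char_type_node)),
//                    wi::uhwi (0, TYPE_PRECISION (unsigned_char_type_node)));
//   // r is now [0, 5] in unsigned char: |-5| = 5 and the input straddles
//   // zero, so the lower bound is 0.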
4294 void
4295 operator_absu::update_bitmask (irange &r, const irange &lh,
4296 const irange &rh) const
4298 update_known_bitmask (r, ABSU_EXPR, lh, rh);
4302 bool
4303 operator_negate::fold_range (irange &r, tree type,
4304 const irange &lh,
4305 const irange &rh,
4306 relation_trio) const
4308 if (empty_range_varying (r, type, lh, rh))
4309 return true;
4310 // -X is simply 0 - X.
4311 return range_op_handler (MINUS_EXPR).fold_range (r, type,
4312 range_zero (type), lh);
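// Illustrative sketch, not part of the original source: negation through
// the 0 - X identity; op1_range below simply folds again since negation
// is an involution.
//
//   int_range<1> x (integer_type_node,
//                   wi::shwi (3, TYPE_PRECISION (integer_type_node)),
//                   wi::shwi (7, TYPE_PRECISION (integer_type_node)));
//   int_range_max r;
//   op_negate.fold_range (r, integer_type_node, x,
//                         int_range<1> (integer_type_node), TRIO_VARYING);
//   // r is now [-7, -3]; folding r again yields [3, 7].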
4315 bool
4316 operator_negate::op1_range (irange &r, tree type,
4317 const irange &lhs,
4318 const irange &op2,
4319 relation_trio) const
4321 // NEGATE is involutory.
4322 return fold_range (r, type, lhs, op2);
4326 bool
4327 operator_addr_expr::fold_range (irange &r, tree type,
4328 const irange &lh,
4329 const irange &rh,
4330 relation_trio) const
4332 if (empty_range_varying (r, type, lh, rh))
4333 return true;
4335 // Return a non-null pointer of the LHS type (passed in op2).
4336 if (lh.zero_p ())
4337 r = range_zero (type);
4338 else if (lh.undefined_p () || contains_zero_p (lh))
4339 r.set_varying (type);
4340 else
4341 r.set_nonzero (type);
4342 return true;
4345 bool
4346 operator_addr_expr::op1_range (irange &r, tree type,
4347 const irange &lhs,
4348 const irange &op2,
4349 relation_trio) const
4351 if (empty_range_varying (r, type, lhs, op2))
4352 return true;
4354 // Return a non-null pointer of the LHS type (passed in op2), but only
4355 // if we can't overflow; otherwise a non-zero offset could wrap to zero.
4356 // See PR 111009.
4357 if (!lhs.undefined_p () && !contains_zero_p (lhs) && TYPE_OVERFLOW_UNDEFINED (type))
4358 r.set_nonzero (type);
4359 else
4360 r.set_varying (type);
4361 return true;
4364 // Initialize any integral operators into the primary table.
4366 void
4367 range_op_table::initialize_integral_ops ()
4369 set (TRUNC_DIV_EXPR, op_trunc_div);
4370 set (FLOOR_DIV_EXPR, op_floor_div);
4371 set (ROUND_DIV_EXPR, op_round_div);
4372 set (CEIL_DIV_EXPR, op_ceil_div);
4373 set (EXACT_DIV_EXPR, op_exact_div);
4374 set (LSHIFT_EXPR, op_lshift);
4375 set (RSHIFT_EXPR, op_rshift);
4376 set (TRUTH_AND_EXPR, op_logical_and);
4377 set (TRUTH_OR_EXPR, op_logical_or);
4378 set (TRUNC_MOD_EXPR, op_trunc_mod);
4379 set (TRUTH_NOT_EXPR, op_logical_not);
4380 set (IMAGPART_EXPR, op_unknown);
4381 set (REALPART_EXPR, op_unknown);
4382 set (ABSU_EXPR, op_absu);
4383 set (OP_WIDEN_MULT_SIGNED, op_widen_mult_signed);
4384 set (OP_WIDEN_MULT_UNSIGNED, op_widen_mult_unsigned);
4385 set (OP_WIDEN_PLUS_SIGNED, op_widen_plus_signed);
4386 set (OP_WIDEN_PLUS_UNSIGNED, op_widen_plus_unsigned);
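// Illustrative sketch, not part of the original source: once the table
// is populated, clients normally dispatch through range_op_handler
// rather than naming the operator objects directly.  A hypothetical
// fold of [10, 20] % [3, 3]:
//
//   range_op_handler h (TRUNC_MOD_EXPR);
//   int_range<1> a (integer_type_node,
//                   wi::shwi (10, TYPE_PRECISION (integer_type_node)),
//                   wi::shwi (20, TYPE_PRECISION (integer_type_node)));
//   int_range<1> b (integer_type_node,
//                   wi::shwi (3, TYPE_PRECISION (integer_type_node)),
//                   wi::shwi (3, TYPE_PRECISION (integer_type_node)));
//   int_range_max r;
//   if (h)
//     h.fold_range (r, integer_type_node, a, b);
//   // r should be [0, 2]: the remainder of a positive dividend by 3.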
4390 bool
4391 operator_plus::overflow_free_p (const irange &lh, const irange &rh,
4392 relation_trio) const
4394 if (lh.undefined_p () || rh.undefined_p ())
4395 return false;
4397 tree type = lh.type ();
4398 if (TYPE_OVERFLOW_UNDEFINED (type))
4399 return true;
4401 wi::overflow_type ovf;
4402 signop sgn = TYPE_SIGN (type);
4403 wide_int wmax0 = lh.upper_bound ();
4404 wide_int wmax1 = rh.upper_bound ();
4405 wi::add (wmax0, wmax1, sgn, &ovf);
4406 if (ovf != wi::OVF_NONE)
4407 return false;
4409 if (TYPE_UNSIGNED (type))
4410 return true;
4412 wide_int wmin0 = lh.lower_bound ();
4413 wide_int wmin1 = rh.lower_bound ();
4414 wi::add (wmin0, wmin1, sgn, &ovf);
4415 if (ovf != wi::OVF_NONE)
4416 return false;
4418 return true;
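// Illustrative sketch, not part of the original source: the check only
// has teeth when overflow wraps (e.g. unsigned types); the operand
// bounds are summed to see whether any combination can wrap.  Using the
// UCHAR helper defined in the selftest section below:
//
//   int_range<1> small (unsigned_char_type_node, UCHAR (10), UCHAR (20));
//   int_range<1> big (unsigned_char_type_node, UCHAR (200), UCHAR (250));
//   op_plus.overflow_free_p (small, small, TRIO_VARYING); // true: 20 + 20 fits.
//   op_plus.overflow_free_p (small, big, TRIO_VARYING);   // false: 20 + 250 wraps.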
4421 bool
4422 operator_minus::overflow_free_p (const irange &lh, const irange &rh,
4423 relation_trio) const
4425 if (lh.undefined_p () || rh.undefined_p ())
4426 return false;
4428 tree type = lh.type ();
4429 if (TYPE_OVERFLOW_UNDEFINED (type))
4430 return true;
4432 wi::overflow_type ovf;
4433 signop sgn = TYPE_SIGN (type);
4434 wide_int wmin0 = lh.lower_bound ();
4435 wide_int wmax1 = rh.upper_bound ();
4436 wi::sub (wmin0, wmax1, sgn, &ovf);
4437 if (ovf != wi::OVF_NONE)
4438 return false;
4440 if (TYPE_UNSIGNED (type))
4441 return true;
4443 wide_int wmax0 = lh.upper_bound ();
4444 wide_int wmin1 = rh.lower_bound ();
4445 wi::sub (wmax0, wmin1, sgn, &ovf);
4446 if (ovf != wi::OVF_NONE)
4447 return false;
4449 return true;
4452 bool
4453 operator_mult::overflow_free_p (const irange &lh, const irange &rh,
4454 relation_trio) const
4456 if (lh.undefined_p () || rh.undefined_p ())
4457 return false;
4459 tree type = lh.type ();
4460 if (TYPE_OVERFLOW_UNDEFINED (type))
4461 return true;
4463 wi::overflow_type ovf;
4464 signop sgn = TYPE_SIGN (type);
4465 wide_int wmax0 = lh.upper_bound ();
4466 wide_int wmax1 = rh.upper_bound ();
4467 wi::mul (wmax0, wmax1, sgn, &ovf);
4468 if (ovf != wi::OVF_NONE)
4469 return false;
4471 if (TYPE_UNSIGNED (type))
4472 return true;
4474 wide_int wmin0 = lh.lower_bound ();
4475 wide_int wmin1 = rh.lower_bound ();
4476 wi::mul (wmin0, wmin1, sgn, &ovf);
4477 if (ovf != wi::OVF_NONE)
4478 return false;
4480 wi::mul (wmin0, wmax1, sgn, &ovf);
4481 if (ovf != wi::OVF_NONE)
4482 return false;
4484 wi::mul (wmax0, wmin1, sgn, &ovf);
4485 if (ovf != wi::OVF_NONE)
4486 return false;
4488 return true;
4491 #if CHECKING_P
4492 #include "selftest.h"
4494 namespace selftest
4496 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4497 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4498 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4499 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4500 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4501 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4503 static void
4504 range_op_cast_tests ()
4506 int_range<2> r0, r1, r2, rold;
4507 r0.set_varying (integer_type_node);
4508 wide_int maxint = r0.upper_bound ();
4510 // If a range is in any way outside of the range of the type being
4511 // converted to, default to the full range of the new type.
4512 r0.set_varying (short_integer_type_node);
4513 wide_int minshort = r0.lower_bound ();
4514 wide_int maxshort = r0.upper_bound ();
4515 if (TYPE_PRECISION (integer_type_node)
4516 > TYPE_PRECISION (short_integer_type_node))
4518 r1 = int_range<1> (integer_type_node,
4519 wi::zero (TYPE_PRECISION (integer_type_node)),
4520 maxint);
4521 range_cast (r1, short_integer_type_node);
4522 ASSERT_TRUE (r1.lower_bound () == minshort
4523 && r1.upper_bound() == maxshort);
4526 // (unsigned char)[-5,-1] => [251,255].
4527 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (-1));
4528 range_cast (r0, unsigned_char_type_node);
4529 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node,
4530 UCHAR (251), UCHAR (255)));
4531 range_cast (r0, signed_char_type_node);
4532 ASSERT_TRUE (r0 == rold);
4534 // (signed char)[15, 150] => [-128,-106][15,127].
4535 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (15), UCHAR (150));
4536 range_cast (r0, signed_char_type_node);
4537 r1 = int_range<1> (signed_char_type_node, SCHAR (15), SCHAR (127));
4538 r2 = int_range<1> (signed_char_type_node, SCHAR (-128), SCHAR (-106));
4539 r1.union_ (r2);
4540 ASSERT_TRUE (r1 == r0);
4541 range_cast (r0, unsigned_char_type_node);
4542 ASSERT_TRUE (r0 == rold);
4544 // (unsigned char)[-5, 5] => [0,5][251,255].
4545 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (5));
4546 range_cast (r0, unsigned_char_type_node);
4547 r1 = int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255));
4548 r2 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4549 r1.union_ (r2);
4550 ASSERT_TRUE (r0 == r1);
4551 range_cast (r0, signed_char_type_node);
4552 ASSERT_TRUE (r0 == rold);
4554 // (unsigned char)[-5,5] => [0,5][251,255].
4555 r0 = int_range<1> (integer_type_node, INT (-5), INT (5));
4556 range_cast (r0, unsigned_char_type_node);
4557 r1 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4558 r1.union_ (int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255)));
4559 ASSERT_TRUE (r0 == r1);
4561 // (unsigned char)[5U,1974U] => [0,255].
4562 r0 = int_range<1> (unsigned_type_node, UINT (5), UINT (1974));
4563 range_cast (r0, unsigned_char_type_node);
4564 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (255)));
4565 range_cast (r0, integer_type_node);
4566 // Going to a wider range should not sign extend.
4567 ASSERT_TRUE (r0 == int_range<1> (integer_type_node, INT (0), INT (255)));
4569 // (unsigned char)[-350,15] => [0,255].
4570 r0 = int_range<1> (integer_type_node, INT (-350), INT (15));
4571 range_cast (r0, unsigned_char_type_node);
4572 ASSERT_TRUE (r0 == (int_range<1>
4573 (unsigned_char_type_node,
4574 min_limit (unsigned_char_type_node),
4575 max_limit (unsigned_char_type_node))));
4577 // Casting [-120,20] from signed char to unsigned short.
4578 // => [0, 20][0xff88, 0xffff].
4579 r0 = int_range<1> (signed_char_type_node, SCHAR (-120), SCHAR (20));
4580 range_cast (r0, short_unsigned_type_node);
4581 r1 = int_range<1> (short_unsigned_type_node, UINT16 (0), UINT16 (20));
4582 r2 = int_range<1> (short_unsigned_type_node,
4583 UINT16 (0xff88), UINT16 (0xffff));
4584 r1.union_ (r2);
4585 ASSERT_TRUE (r0 == r1);
4586 // A truncating cast back to signed char will work because [-120, 20]
4587 // is representable in signed char.
4588 range_cast (r0, signed_char_type_node);
4589 ASSERT_TRUE (r0 == int_range<1> (signed_char_type_node,
4590 SCHAR (-120), SCHAR (20)));
4592 // unsigned char -> signed short
4593 // (signed short)[(unsigned char)25, (unsigned char)250]
4594 // => [(signed short)25, (signed short)250]
4595 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (25), UCHAR (250));
4596 range_cast (r0, short_integer_type_node);
4597 r1 = int_range<1> (short_integer_type_node, INT16 (25), INT16 (250));
4598 ASSERT_TRUE (r0 == r1);
4599 range_cast (r0, unsigned_char_type_node);
4600 ASSERT_TRUE (r0 == rold);
4602 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4603 r0 = int_range<1> (long_long_integer_type_node,
4604 min_limit (long_long_integer_type_node),
4605 max_limit (long_long_integer_type_node));
4606 range_cast (r0, short_unsigned_type_node);
4607 r1 = int_range<1> (short_unsigned_type_node,
4608 min_limit (short_unsigned_type_node),
4609 max_limit (short_unsigned_type_node));
4610 ASSERT_TRUE (r0 == r1);
4612 // Casting NONZERO to a narrower type will wrap/overflow so
4613 // it's just the entire range for the narrower type.
4615 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4616 // is outside of the range of a smaller range, return the full
4617 // smaller range.
4618 if (TYPE_PRECISION (integer_type_node)
4619 > TYPE_PRECISION (short_integer_type_node))
4621 r0 = range_nonzero (integer_type_node);
4622 range_cast (r0, short_integer_type_node);
4623 r1 = int_range<1> (short_integer_type_node,
4624 min_limit (short_integer_type_node),
4625 max_limit (short_integer_type_node));
4626 ASSERT_TRUE (r0 == r1);
4629 // Casting NONZERO from a narrower signed to a wider signed.
4631 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4632 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4633 r0 = range_nonzero (short_integer_type_node);
4634 range_cast (r0, integer_type_node);
4635 r1 = int_range<1> (integer_type_node, INT (-32768), INT (-1));
4636 r2 = int_range<1> (integer_type_node, INT (1), INT (32767));
4637 r1.union_ (r2);
4638 ASSERT_TRUE (r0 == r1);
4641 static void
4642 range_op_lshift_tests ()
4644 // Test that 0x808.... & 0x8.... still contains 0x8....
4645 // for a large set of numbers.
4647 int_range_max res;
4648 tree big_type = long_long_unsigned_type_node;
4649 unsigned big_prec = TYPE_PRECISION (big_type);
4650 // big_num = 0x808,0000,0000,0000
4651 wide_int big_num = wi::lshift (wi::uhwi (0x808, big_prec),
4652 wi::uhwi (48, big_prec));
4653 op_bitwise_and.fold_range (res, big_type,
4654 int_range <1> (big_type),
4655 int_range <1> (big_type, big_num, big_num));
4656 // val = 0x8,0000,0000,0000
4657 wide_int val = wi::lshift (wi::uhwi (8, big_prec),
4658 wi::uhwi (48, big_prec));
4659 ASSERT_TRUE (res.contains_p (val));
4662 if (TYPE_PRECISION (unsigned_type_node) > 31)
4664 // unsigned VARYING = op1 << 1 should be VARYING.
4665 int_range<2> lhs (unsigned_type_node);
4666 int_range<2> shift (unsigned_type_node, INT (1), INT (1));
4667 int_range_max op1;
4668 op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
4669 ASSERT_TRUE (op1.varying_p ());
4671 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4672 int_range<2> zero (unsigned_type_node, UINT (0), UINT (0));
4673 op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
4674 ASSERT_TRUE (op1.num_pairs () == 2);
4675 // Remove the [0,0] range.
4676 op1.intersect (zero);
4677 ASSERT_TRUE (op1.num_pairs () == 1);
4678 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4679 // which should result in [0,0].
4680 int_range_max result;
4681 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4682 ASSERT_TRUE (result == zero);
4684 // signed VARYING = op1 << 1 should be VARYING.
4685 if (TYPE_PRECISION (integer_type_node) > 31)
4687 // signed VARYING = op1 << 1 should be VARYING.
4688 int_range<2> lhs (integer_type_node);
4689 int_range<2> shift (integer_type_node, INT (1), INT (1));
4690 int_range_max op1;
4691 op_lshift.op1_range (op1, integer_type_node, lhs, shift);
4692 ASSERT_TRUE (op1.varying_p ());
4694 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4695 int_range<2> zero (integer_type_node, INT (0), INT (0));
4696 op_lshift.op1_range (op1, integer_type_node, zero, shift);
4697 ASSERT_TRUE (op1.num_pairs () == 2);
4698 // Remove the [0,0] range.
4699 op1.intersect (zero);
4700 ASSERT_TRUE (op1.num_pairs () == 1);
4701 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4702 // which should result in [0,0].
4703 int_range_max result;
4704 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4705 ASSERT_TRUE (result == zero);
4709 static void
4710 range_op_rshift_tests ()
4712 // unsigned: [3, MAX] = OP1 >> 1
4714 int_range_max lhs (unsigned_type_node,
4715 UINT (3), max_limit (unsigned_type_node));
4716 int_range_max one (unsigned_type_node,
4717 wi::one (TYPE_PRECISION (unsigned_type_node)),
4718 wi::one (TYPE_PRECISION (unsigned_type_node)));
4719 int_range_max op1;
4720 op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
4721 ASSERT_FALSE (op1.contains_p (UINT (3)));
4724 // signed: [3, MAX] = OP1 >> 1
4726 int_range_max lhs (integer_type_node,
4727 INT (3), max_limit (integer_type_node));
4728 int_range_max one (integer_type_node, INT (1), INT (1));
4729 int_range_max op1;
4730 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4731 ASSERT_FALSE (op1.contains_p (INT (-2)));
4734 // This is impossible, so OP1 should be [].
4735 // signed: [MIN, MIN] = OP1 >> 1
4737 int_range_max lhs (integer_type_node,
4738 min_limit (integer_type_node),
4739 min_limit (integer_type_node));
4740 int_range_max one (integer_type_node, INT (1), INT (1));
4741 int_range_max op1;
4742 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4743 ASSERT_TRUE (op1.undefined_p ());
4746 // signed: ~[-1] = OP1 >> 31
4747 if (TYPE_PRECISION (integer_type_node) > 31)
4749 int_range_max lhs (integer_type_node, INT (-1), INT (-1), VR_ANTI_RANGE);
4750 int_range_max shift (integer_type_node, INT (31), INT (31));
4751 int_range_max op1;
4752 op_rshift.op1_range (op1, integer_type_node, lhs, shift);
4753 int_range_max negatives = range_negatives (integer_type_node);
4754 negatives.intersect (op1);
4755 ASSERT_TRUE (negatives.undefined_p ());
4759 static void
4760 range_op_bitwise_and_tests ()
4762 int_range_max res;
4763 wide_int min = min_limit (integer_type_node);
4764 wide_int max = max_limit (integer_type_node);
4765 wide_int tiny = wi::add (min, wi::one (TYPE_PRECISION (integer_type_node)));
4766 int_range_max i1 (integer_type_node, tiny, max);
4767 int_range_max i2 (integer_type_node, INT (255), INT (255));
4769 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4770 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4771 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4773 // VARYING = OP1 & 255: OP1 is VARYING
4774 i1 = int_range<1> (integer_type_node);
4775 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4776 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4778 // For 0 = x & MASK, x is ~MASK.
4780 int_range<2> zero (integer_type_node, INT (0), INT (0));
4781 int_range<2> mask = int_range<2> (integer_type_node, INT (7), INT (7));
4782 op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
4783 wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
4784 ASSERT_TRUE (res.get_nonzero_bits () == inv);
4787 // (NONZERO | X) is nonzero.
4788 i1.set_nonzero (integer_type_node);
4789 i2.set_varying (integer_type_node);
4790 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4791 ASSERT_TRUE (res.nonzero_p ());
4793 // (NEGATIVE | X) is nonzero.
4794 i1 = int_range<1> (integer_type_node, INT (-5), INT (-3));
4795 i2.set_varying (integer_type_node);
4796 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4797 ASSERT_FALSE (res.contains_p (INT (0)));
4800 static void
4801 range_relational_tests ()
4803 int_range<2> lhs (unsigned_char_type_node);
4804 int_range<2> op1 (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4805 int_range<2> op2 (unsigned_char_type_node, UCHAR (20), UCHAR (20));
4807 // Never wrapping additions mean LHS > OP1.
4808 relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4809 ASSERT_TRUE (code == VREL_GT);
4811 // Most wrapping additions mean nothing...
4812 op1 = int_range<2> (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4813 op2 = int_range<2> (unsigned_char_type_node, UCHAR (0), UCHAR (255));
4814 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4815 ASSERT_TRUE (code == VREL_VARYING);
4817 // However, always wrapping additions mean LHS < OP1.
4818 op1 = int_range<2> (unsigned_char_type_node, UCHAR (1), UCHAR (255));
4819 op2 = int_range<2> (unsigned_char_type_node, UCHAR (255), UCHAR (255));
4820 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4821 ASSERT_TRUE (code == VREL_LT);
4824 void
4825 range_op_tests ()
4827 range_op_rshift_tests ();
4828 range_op_lshift_tests ();
4829 range_op_bitwise_and_tests ();
4830 range_op_cast_tests ();
4831 range_relational_tests ();
4833 extern void range_op_float_tests ();
4834 range_op_float_tests ();
4837 } // namespace selftest
4839 #endif // CHECKING_P