xfail scan-tree-dump-not throw in g++.dg/pr99966.C on hppa*64*-*-*
[official-gcc.git] / gcc / range-op.cc
blob9a1a3c8bcdbb65b3b794df5f455c00bb39d27533
1 /* Code for range operators.
2 Copyright (C) 2017-2024 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "insn-codes.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "cfganal.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
43 #include "tree-eh.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "wide-int.h"
47 #include "value-relation.h"
48 #include "range-op.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
52 // Instantiate the operators which apply to multiple types here.
54 operator_equal op_equal;
55 operator_not_equal op_not_equal;
56 operator_lt op_lt;
57 operator_le op_le;
58 operator_gt op_gt;
59 operator_ge op_ge;
60 operator_identity op_ident;
61 operator_cst op_cst;
62 operator_cast op_cast;
63 operator_plus op_plus;
64 operator_abs op_abs;
65 operator_minus op_minus;
66 operator_negate op_negate;
67 operator_mult op_mult;
68 operator_addr_expr op_addr;
69 operator_bitwise_not op_bitwise_not;
70 operator_bitwise_xor op_bitwise_xor;
71 operator_bitwise_and op_bitwise_and;
72 operator_bitwise_or op_bitwise_or;
73 operator_min op_min;
74 operator_max op_max;
76 // Instantaite a range operator table.
77 range_op_table operator_table;
79 // Invoke the initialization routines for each class of range.
81 range_op_table::range_op_table ()
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
87 set (EQ_EXPR, op_equal);
88 set (NE_EXPR, op_not_equal);
89 set (LT_EXPR, op_lt);
90 set (LE_EXPR, op_le);
91 set (GT_EXPR, op_gt);
92 set (GE_EXPR, op_ge);
93 set (SSA_NAME, op_ident);
94 set (PAREN_EXPR, op_ident);
95 set (OBJ_TYPE_REF, op_ident);
96 set (REAL_CST, op_cst);
97 set (INTEGER_CST, op_cst);
98 set (NOP_EXPR, op_cast);
99 set (CONVERT_EXPR, op_cast);
100 set (PLUS_EXPR, op_plus);
101 set (ABS_EXPR, op_abs);
102 set (MINUS_EXPR, op_minus);
103 set (NEGATE_EXPR, op_negate);
104 set (MULT_EXPR, op_mult);
106 // Occur in both integer and pointer tables, but currently share
107 // integral implementation.
108 set (ADDR_EXPR, op_addr);
109 set (BIT_NOT_EXPR, op_bitwise_not);
110 set (BIT_XOR_EXPR, op_bitwise_xor);
112 // These are in both integer and pointer tables, but pointer has a different
113 // implementation.
114 // If commented out, there is a hybrid version in range-op-ptr.cc which
115 // is used until there is a pointer range class. Then we can simply
116 // uncomment the operator here and use the unified version.
118 // set (BIT_AND_EXPR, op_bitwise_and);
119 // set (BIT_IOR_EXPR, op_bitwise_or);
120 // set (MIN_EXPR, op_min);
121 // set (MAX_EXPR, op_max);
124 // Instantiate a default range operator for opcodes with no entry.
126 range_operator default_operator;
128 // Create a default range_op_handler.
130 range_op_handler::range_op_handler ()
132 m_operator = &default_operator;
135 // Create a range_op_handler for CODE. Use a default operatoer if CODE
136 // does not have an entry.
138 range_op_handler::range_op_handler (unsigned code)
140 m_operator = operator_table[code];
141 if (!m_operator)
142 m_operator = &default_operator;
145 // Return TRUE if this handler has a non-default operator.
147 range_op_handler::operator bool () const
149 return m_operator != &default_operator;
152 // Return a pointer to the range operator assocaited with this handler.
153 // If it is a default operator, return NULL.
154 // This is the equivalent of indexing the range table.
156 range_operator *
157 range_op_handler::range_op () const
159 if (m_operator != &default_operator)
160 return m_operator;
161 return NULL;
// Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
// This is used to produce a unique value for each dispatch pattern.  Shift
// values are based on the size of the m_discriminator field in value_range.h.

constexpr unsigned
dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
{
  // Pack the three discriminators into disjoint 4-bit nibbles.
  return ((lhs << 8) + (op1 << 4) + (op2));
}
174 // These are the supported dispatch patterns. These map to the parameter list
175 // of the routines in range_operator. Note the last 3 characters are
176 // shorthand for the LHS, OP1, and OP2 range discriminator class.
178 const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
179 const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
180 const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
181 const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
182 const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
183 const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
185 // Return a dispatch value for parameter types LHS, OP1 and OP2.
187 unsigned
188 range_op_handler::dispatch_kind (const vrange &lhs, const vrange &op1,
189 const vrange& op2) const
191 return dispatch_trio (lhs.m_discriminator, op1.m_discriminator,
192 op2.m_discriminator);
195 // Dispatch a call to fold_range based on the types of R, LH and RH.
197 bool
198 range_op_handler::fold_range (vrange &r, tree type,
199 const vrange &lh,
200 const vrange &rh,
201 relation_trio rel) const
203 gcc_checking_assert (m_operator);
204 #if CHECKING_P
205 if (!lh.undefined_p () && !rh.undefined_p ())
206 gcc_assert (m_operator->operand_check_p (type, lh.type (), rh.type ()));
207 #endif
208 switch (dispatch_kind (r, lh, rh))
210 case RO_III:
211 return m_operator->fold_range (as_a <irange> (r), type,
212 as_a <irange> (lh),
213 as_a <irange> (rh), rel);
214 case RO_IFI:
215 return m_operator->fold_range (as_a <irange> (r), type,
216 as_a <frange> (lh),
217 as_a <irange> (rh), rel);
218 case RO_IFF:
219 return m_operator->fold_range (as_a <irange> (r), type,
220 as_a <frange> (lh),
221 as_a <frange> (rh), rel);
222 case RO_FFF:
223 return m_operator->fold_range (as_a <frange> (r), type,
224 as_a <frange> (lh),
225 as_a <frange> (rh), rel);
226 case RO_FII:
227 return m_operator->fold_range (as_a <frange> (r), type,
228 as_a <irange> (lh),
229 as_a <irange> (rh), rel);
230 default:
231 return false;
235 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
237 bool
238 range_op_handler::op1_range (vrange &r, tree type,
239 const vrange &lhs,
240 const vrange &op2,
241 relation_trio rel) const
243 gcc_checking_assert (m_operator);
244 if (lhs.undefined_p ())
245 return false;
246 #if CHECKING_P
247 if (!op2.undefined_p ())
248 gcc_assert (m_operator->operand_check_p (lhs.type (), type, op2.type ()));
249 #endif
250 switch (dispatch_kind (r, lhs, op2))
252 case RO_III:
253 return m_operator->op1_range (as_a <irange> (r), type,
254 as_a <irange> (lhs),
255 as_a <irange> (op2), rel);
256 case RO_FIF:
257 return m_operator->op1_range (as_a <frange> (r), type,
258 as_a <irange> (lhs),
259 as_a <frange> (op2), rel);
260 case RO_FFF:
261 return m_operator->op1_range (as_a <frange> (r), type,
262 as_a <frange> (lhs),
263 as_a <frange> (op2), rel);
264 default:
265 return false;
269 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
271 bool
272 range_op_handler::op2_range (vrange &r, tree type,
273 const vrange &lhs,
274 const vrange &op1,
275 relation_trio rel) const
277 gcc_checking_assert (m_operator);
278 if (lhs.undefined_p ())
279 return false;
280 #if CHECKING_P
281 if (!op1.undefined_p ())
282 gcc_assert (m_operator->operand_check_p (lhs.type (), op1.type (), type));
283 #endif
284 switch (dispatch_kind (r, lhs, op1))
286 case RO_III:
287 return m_operator->op2_range (as_a <irange> (r), type,
288 as_a <irange> (lhs),
289 as_a <irange> (op1), rel);
290 case RO_FIF:
291 return m_operator->op2_range (as_a <frange> (r), type,
292 as_a <irange> (lhs),
293 as_a <frange> (op1), rel);
294 case RO_FFF:
295 return m_operator->op2_range (as_a <frange> (r), type,
296 as_a <frange> (lhs),
297 as_a <frange> (op1), rel);
298 default:
299 return false;
303 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
305 relation_kind
306 range_op_handler::lhs_op1_relation (const vrange &lhs,
307 const vrange &op1,
308 const vrange &op2,
309 relation_kind rel) const
311 gcc_checking_assert (m_operator);
313 switch (dispatch_kind (lhs, op1, op2))
315 case RO_III:
316 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
317 as_a <irange> (op1),
318 as_a <irange> (op2), rel);
319 case RO_IFF:
320 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
321 as_a <frange> (op1),
322 as_a <frange> (op2), rel);
323 case RO_FFF:
324 return m_operator->lhs_op1_relation (as_a <frange> (lhs),
325 as_a <frange> (op1),
326 as_a <frange> (op2), rel);
327 default:
328 return VREL_VARYING;
332 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
334 relation_kind
335 range_op_handler::lhs_op2_relation (const vrange &lhs,
336 const vrange &op1,
337 const vrange &op2,
338 relation_kind rel) const
340 gcc_checking_assert (m_operator);
341 switch (dispatch_kind (lhs, op1, op2))
343 case RO_III:
344 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
345 as_a <irange> (op1),
346 as_a <irange> (op2), rel);
347 case RO_IFF:
348 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
349 as_a <frange> (op1),
350 as_a <frange> (op2), rel);
351 case RO_FFF:
352 return m_operator->lhs_op2_relation (as_a <frange> (lhs),
353 as_a <frange> (op1),
354 as_a <frange> (op2), rel);
355 default:
356 return VREL_VARYING;
360 // Dispatch a call to op1_op2_relation based on the type of LHS.
362 relation_kind
363 range_op_handler::op1_op2_relation (const vrange &lhs,
364 const vrange &op1,
365 const vrange &op2) const
367 gcc_checking_assert (m_operator);
368 switch (dispatch_kind (lhs, op1, op2))
370 case RO_III:
371 return m_operator->op1_op2_relation (as_a <irange> (lhs),
372 as_a <irange> (op1),
373 as_a <irange> (op2));
375 case RO_IFF:
376 return m_operator->op1_op2_relation (as_a <irange> (lhs),
377 as_a <frange> (op1),
378 as_a <frange> (op2));
380 case RO_FFF:
381 return m_operator->op1_op2_relation (as_a <frange> (lhs),
382 as_a <frange> (op1),
383 as_a <frange> (op2));
385 default:
386 return VREL_VARYING;
390 bool
391 range_op_handler::overflow_free_p (const vrange &lh,
392 const vrange &rh,
393 relation_trio rel) const
395 gcc_checking_assert (m_operator);
396 switch (dispatch_kind (lh, lh, rh))
398 case RO_III:
399 return m_operator->overflow_free_p(as_a <irange> (lh),
400 as_a <irange> (rh),
401 rel);
402 default:
403 return false;
407 bool
408 range_op_handler::operand_check_p (tree t1, tree t2, tree t3) const
410 gcc_checking_assert (m_operator);
411 return m_operator->operand_check_p (t1, t2, t3);
414 // Update the known bitmasks in R when applying the operation CODE to
415 // LH and RH.
417 void
418 update_known_bitmask (irange &r, tree_code code,
419 const irange &lh, const irange &rh)
421 if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ()
422 || r.singleton_p ())
423 return;
425 widest_int widest_value, widest_mask;
426 tree type = r.type ();
427 signop sign = TYPE_SIGN (type);
428 int prec = TYPE_PRECISION (type);
429 irange_bitmask lh_bits = lh.get_bitmask ();
430 irange_bitmask rh_bits = rh.get_bitmask ();
432 switch (get_gimple_rhs_class (code))
434 case GIMPLE_UNARY_RHS:
435 bit_value_unop (code, sign, prec, &widest_value, &widest_mask,
436 TYPE_SIGN (lh.type ()),
437 TYPE_PRECISION (lh.type ()),
438 widest_int::from (lh_bits.value (), sign),
439 widest_int::from (lh_bits.mask (), sign));
440 break;
441 case GIMPLE_BINARY_RHS:
442 bit_value_binop (code, sign, prec, &widest_value, &widest_mask,
443 TYPE_SIGN (lh.type ()),
444 TYPE_PRECISION (lh.type ()),
445 widest_int::from (lh_bits.value (), sign),
446 widest_int::from (lh_bits.mask (), sign),
447 TYPE_SIGN (rh.type ()),
448 TYPE_PRECISION (rh.type ()),
449 widest_int::from (rh_bits.value (), sign),
450 widest_int::from (rh_bits.mask (), sign));
451 break;
452 default:
453 gcc_unreachable ();
456 wide_int mask = wide_int::from (widest_mask, prec, sign);
457 wide_int value = wide_int::from (widest_value, prec, sign);
458 // Bitmasks must have the unknown value bits cleared.
459 value &= ~mask;
460 irange_bitmask bm (value, mask);
461 r.update_bitmask (bm);
464 // Return the upper limit for a type.
466 static inline wide_int
467 max_limit (const_tree type)
469 return irange_val_max (type);
472 // Return the lower limit for a type.
474 static inline wide_int
475 min_limit (const_tree type)
477 return irange_val_min (type);
480 // Return false if shifting by OP is undefined behavior. Otherwise, return
481 // true and the range it is to be shifted by. This allows trimming out of
482 // undefined ranges, leaving only valid ranges if there are any.
484 static inline bool
485 get_shift_range (irange &r, tree type, const irange &op)
487 if (op.undefined_p ())
488 return false;
490 // Build valid range and intersect it with the shift range.
491 r = value_range (op.type (),
492 wi::shwi (0, TYPE_PRECISION (op.type ())),
493 wi::shwi (TYPE_PRECISION (type) - 1, TYPE_PRECISION (op.type ())));
494 r.intersect (op);
496 // If there are no valid ranges in the shift range, returned false.
497 if (r.undefined_p ())
498 return false;
499 return true;
502 // Default wide_int fold operation returns [MIN, MAX].
504 void
505 range_operator::wi_fold (irange &r, tree type,
506 const wide_int &lh_lb ATTRIBUTE_UNUSED,
507 const wide_int &lh_ub ATTRIBUTE_UNUSED,
508 const wide_int &rh_lb ATTRIBUTE_UNUSED,
509 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
511 gcc_checking_assert (r.supports_type_p (type));
512 r.set_varying (type);
515 // Call wi_fold when both op1 and op2 are equivalent. Further split small
516 // subranges into constants. This can provide better precision.
517 // For x + y, when x == y with a range of [0,4] instead of [0, 8] produce
518 // [0,0][2, 2][4,4][6, 6][8, 8]
519 // LIMIT is the maximum number of elements in range allowed before we
520 // do not process them individually.
522 void
523 range_operator::wi_fold_in_parts_equiv (irange &r, tree type,
524 const wide_int &lh_lb,
525 const wide_int &lh_ub,
526 unsigned limit) const
528 int_range_max tmp;
529 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
530 widest_int::from (lh_lb, TYPE_SIGN (type)));
531 // if there are 1 to 8 values in the LH range, split them up.
532 r.set_undefined ();
533 if (lh_range >= 0 && lh_range < limit)
535 for (unsigned x = 0; x <= lh_range; x++)
537 wide_int val = lh_lb + x;
538 wi_fold (tmp, type, val, val, val, val);
539 r.union_ (tmp);
542 // Otherwise just call wi_fold.
543 else
544 wi_fold (r, type, lh_lb, lh_ub, lh_lb, lh_ub);
547 // Call wi_fold, except further split small subranges into constants.
548 // This can provide better precision. For something 8 >> [0,1]
549 // Instead of [8, 16], we will produce [8,8][16,16]
551 void
552 range_operator::wi_fold_in_parts (irange &r, tree type,
553 const wide_int &lh_lb,
554 const wide_int &lh_ub,
555 const wide_int &rh_lb,
556 const wide_int &rh_ub) const
558 int_range_max tmp;
559 widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
560 widest_int::from (rh_lb, TYPE_SIGN (type)));
561 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
562 widest_int::from (lh_lb, TYPE_SIGN (type)));
563 // If there are 2, 3, or 4 values in the RH range, do them separately.
564 // Call wi_fold_in_parts to check the RH side.
565 if (rh_range > 0 && rh_range < 4)
567 wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
568 if (rh_range > 1)
570 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
571 r.union_ (tmp);
572 if (rh_range == 3)
574 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
575 r.union_ (tmp);
578 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
579 r.union_ (tmp);
581 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
582 // The RH side has been checked, so no recursion needed.
583 else if (lh_range > 0 && lh_range < 4)
585 wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
586 if (lh_range > 1)
588 wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
589 r.union_ (tmp);
590 if (lh_range == 3)
592 wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
593 r.union_ (tmp);
596 wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
597 r.union_ (tmp);
599 // Otherwise just call wi_fold.
600 else
601 wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
604 // The default for fold is to break all ranges into sub-ranges and
605 // invoke the wi_fold method on each sub-range pair.
607 bool
608 range_operator::fold_range (irange &r, tree type,
609 const irange &lh,
610 const irange &rh,
611 relation_trio trio) const
613 gcc_checking_assert (r.supports_type_p (type));
614 if (empty_range_varying (r, type, lh, rh))
615 return true;
617 relation_kind rel = trio.op1_op2 ();
618 unsigned num_lh = lh.num_pairs ();
619 unsigned num_rh = rh.num_pairs ();
621 // If op1 and op2 are equivalences, then we don't need a complete cross
622 // product, just pairs of matching elements.
623 if (relation_equiv_p (rel) && lh == rh)
625 int_range_max tmp;
626 r.set_undefined ();
627 for (unsigned x = 0; x < num_lh; ++x)
629 // If the number of subranges is too high, limit subrange creation.
630 unsigned limit = (r.num_pairs () > 32) ? 0 : 8;
631 wide_int lh_lb = lh.lower_bound (x);
632 wide_int lh_ub = lh.upper_bound (x);
633 wi_fold_in_parts_equiv (tmp, type, lh_lb, lh_ub, limit);
634 r.union_ (tmp);
635 if (r.varying_p ())
636 break;
638 op1_op2_relation_effect (r, type, lh, rh, rel);
639 update_bitmask (r, lh, rh);
640 return true;
643 // If both ranges are single pairs, fold directly into the result range.
644 // If the number of subranges grows too high, produce a summary result as the
645 // loop becomes exponential with little benefit. See PR 103821.
646 if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
648 wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
649 rh.lower_bound (), rh.upper_bound ());
650 op1_op2_relation_effect (r, type, lh, rh, rel);
651 update_bitmask (r, lh, rh);
652 return true;
655 int_range_max tmp;
656 r.set_undefined ();
657 for (unsigned x = 0; x < num_lh; ++x)
658 for (unsigned y = 0; y < num_rh; ++y)
660 wide_int lh_lb = lh.lower_bound (x);
661 wide_int lh_ub = lh.upper_bound (x);
662 wide_int rh_lb = rh.lower_bound (y);
663 wide_int rh_ub = rh.upper_bound (y);
664 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
665 r.union_ (tmp);
666 if (r.varying_p ())
668 op1_op2_relation_effect (r, type, lh, rh, rel);
669 update_bitmask (r, lh, rh);
670 return true;
673 op1_op2_relation_effect (r, type, lh, rh, rel);
674 update_bitmask (r, lh, rh);
675 return true;
678 // The default for op1_range is to return false.
680 bool
681 range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
682 tree type ATTRIBUTE_UNUSED,
683 const irange &lhs ATTRIBUTE_UNUSED,
684 const irange &op2 ATTRIBUTE_UNUSED,
685 relation_trio) const
687 return false;
690 // The default for op2_range is to return false.
692 bool
693 range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
694 tree type ATTRIBUTE_UNUSED,
695 const irange &lhs ATTRIBUTE_UNUSED,
696 const irange &op1 ATTRIBUTE_UNUSED,
697 relation_trio) const
699 return false;
702 // The default relation routines return VREL_VARYING.
704 relation_kind
705 range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
706 const irange &op1 ATTRIBUTE_UNUSED,
707 const irange &op2 ATTRIBUTE_UNUSED,
708 relation_kind rel ATTRIBUTE_UNUSED) const
710 return VREL_VARYING;
713 relation_kind
714 range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
715 const irange &op1 ATTRIBUTE_UNUSED,
716 const irange &op2 ATTRIBUTE_UNUSED,
717 relation_kind rel ATTRIBUTE_UNUSED) const
719 return VREL_VARYING;
722 relation_kind
723 range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
724 const irange &op1 ATTRIBUTE_UNUSED,
725 const irange &op2 ATTRIBUTE_UNUSED) const
727 return VREL_VARYING;
730 // Default is no relation affects the LHS.
732 bool
733 range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
734 tree type ATTRIBUTE_UNUSED,
735 const irange &op1_range ATTRIBUTE_UNUSED,
736 const irange &op2_range ATTRIBUTE_UNUSED,
737 relation_kind rel ATTRIBUTE_UNUSED) const
739 return false;
742 bool
743 range_operator::overflow_free_p (const irange &, const irange &,
744 relation_trio) const
746 return false;
749 // Apply any known bitmask updates based on this operator.
751 void
752 range_operator::update_bitmask (irange &, const irange &,
753 const irange &) const
757 // Check that operand types are OK. Default to always OK.
759 bool
760 range_operator::operand_check_p (tree, tree, tree) const
762 return true;
765 // Create and return a range from a pair of wide-ints that are known
766 // to have overflowed (or underflowed).
768 static void
769 value_range_from_overflowed_bounds (irange &r, tree type,
770 const wide_int &wmin,
771 const wide_int &wmax)
773 const signop sgn = TYPE_SIGN (type);
774 const unsigned int prec = TYPE_PRECISION (type);
776 wide_int tmin = wide_int::from (wmin, prec, sgn);
777 wide_int tmax = wide_int::from (wmax, prec, sgn);
779 bool covers = false;
780 wide_int tem = tmin;
781 tmin = tmax + 1;
782 if (wi::cmp (tmin, tmax, sgn) < 0)
783 covers = true;
784 tmax = tem - 1;
785 if (wi::cmp (tmax, tem, sgn) > 0)
786 covers = true;
788 // If the anti-range would cover nothing, drop to varying.
789 // Likewise if the anti-range bounds are outside of the types
790 // values.
791 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
792 r.set_varying (type);
793 else
794 r.set (type, tmin, tmax, VR_ANTI_RANGE);
797 // Create and return a range from a pair of wide-ints. MIN_OVF and
798 // MAX_OVF describe any overflow that might have occurred while
799 // calculating WMIN and WMAX respectively.
801 static void
802 value_range_with_overflow (irange &r, tree type,
803 const wide_int &wmin, const wide_int &wmax,
804 wi::overflow_type min_ovf = wi::OVF_NONE,
805 wi::overflow_type max_ovf = wi::OVF_NONE)
807 const signop sgn = TYPE_SIGN (type);
808 const unsigned int prec = TYPE_PRECISION (type);
809 const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
811 // For one bit precision if max != min, then the range covers all
812 // values.
813 if (prec == 1 && wi::ne_p (wmax, wmin))
815 r.set_varying (type);
816 return;
819 if (overflow_wraps)
821 // If overflow wraps, truncate the values and adjust the range,
822 // kind, and bounds appropriately.
823 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
825 wide_int tmin = wide_int::from (wmin, prec, sgn);
826 wide_int tmax = wide_int::from (wmax, prec, sgn);
827 // If the limits are swapped, we wrapped around and cover
828 // the entire range.
829 if (wi::gt_p (tmin, tmax, sgn))
830 r.set_varying (type);
831 else
832 // No overflow or both overflow or underflow. The range
833 // kind stays normal.
834 r.set (type, tmin, tmax);
835 return;
838 if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
839 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
840 value_range_from_overflowed_bounds (r, type, wmin, wmax);
841 else
842 // Other underflow and/or overflow, drop to VR_VARYING.
843 r.set_varying (type);
845 else
847 // If both bounds either underflowed or overflowed, then the result
848 // is undefined.
849 if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
850 || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
852 r.set_undefined ();
853 return;
856 // If overflow does not wrap, saturate to [MIN, MAX].
857 wide_int new_lb, new_ub;
858 if (min_ovf == wi::OVF_UNDERFLOW)
859 new_lb = wi::min_value (prec, sgn);
860 else if (min_ovf == wi::OVF_OVERFLOW)
861 new_lb = wi::max_value (prec, sgn);
862 else
863 new_lb = wmin;
865 if (max_ovf == wi::OVF_UNDERFLOW)
866 new_ub = wi::min_value (prec, sgn);
867 else if (max_ovf == wi::OVF_OVERFLOW)
868 new_ub = wi::max_value (prec, sgn);
869 else
870 new_ub = wmax;
872 r.set (type, new_lb, new_ub);
876 // Create and return a range from a pair of wide-ints. Canonicalize
877 // the case where the bounds are swapped. In which case, we transform
878 // [10,5] into [MIN,5][10,MAX].
880 static inline void
881 create_possibly_reversed_range (irange &r, tree type,
882 const wide_int &new_lb, const wide_int &new_ub)
884 signop s = TYPE_SIGN (type);
885 // If the bounds are swapped, treat the result as if an overflow occurred.
886 if (wi::gt_p (new_lb, new_ub, s))
887 value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
888 else
889 // Otherwise it's just a normal range.
890 r.set (type, new_lb, new_ub);
893 // Return the summary information about boolean range LHS. If EMPTY/FULL,
894 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
896 bool_range_state
897 get_bool_state (vrange &r, const vrange &lhs, tree val_type)
899 // If there is no result, then this is unexecutable.
900 if (lhs.undefined_p ())
902 r.set_undefined ();
903 return BRS_EMPTY;
906 if (lhs.zero_p ())
907 return BRS_FALSE;
909 // For TRUE, we can't just test for [1,1] because Ada can have
910 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
911 if (lhs.contains_p (build_zero_cst (lhs.type ())))
913 r.set_varying (val_type);
914 return BRS_FULL;
917 return BRS_TRUE;
920 // ------------------------------------------------------------------------
922 void
923 operator_equal::update_bitmask (irange &r, const irange &lh,
924 const irange &rh) const
926 update_known_bitmask (r, EQ_EXPR, lh, rh);
929 // Check if the LHS range indicates a relation between OP1 and OP2.
931 relation_kind
932 operator_equal::op1_op2_relation (const irange &lhs, const irange &,
933 const irange &) const
935 if (lhs.undefined_p ())
936 return VREL_UNDEFINED;
938 // FALSE = op1 == op2 indicates NE_EXPR.
939 if (lhs.zero_p ())
940 return VREL_NE;
942 // TRUE = op1 == op2 indicates EQ_EXPR.
943 if (!contains_zero_p (lhs))
944 return VREL_EQ;
945 return VREL_VARYING;
948 bool
949 operator_equal::fold_range (irange &r, tree type,
950 const irange &op1,
951 const irange &op2,
952 relation_trio rel) const
954 if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
955 return true;
957 // We can be sure the values are always equal or not if both ranges
958 // consist of a single value, and then compare them.
959 bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
960 bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
961 if (op1_const && op2_const)
963 if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
964 r = range_true (type);
965 else
966 r = range_false (type);
968 else
970 // If ranges do not intersect, we know the range is not equal,
971 // otherwise we don't know anything for sure.
972 int_range_max tmp = op1;
973 tmp.intersect (op2);
974 if (tmp.undefined_p ())
975 r = range_false (type);
976 // Check if a constant cannot satisfy the bitmask requirements.
977 else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
978 r = range_false (type);
979 else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
980 r = range_false (type);
981 else
982 r = range_true_and_false (type);
984 return true;
987 bool
988 operator_equal::op1_range (irange &r, tree type,
989 const irange &lhs,
990 const irange &op2,
991 relation_trio) const
993 switch (get_bool_state (r, lhs, type))
995 case BRS_TRUE:
996 // If it's true, the result is the same as OP2.
997 r = op2;
998 break;
1000 case BRS_FALSE:
1001 // If the result is false, the only time we know anything is
1002 // if OP2 is a constant.
1003 if (!op2.undefined_p ()
1004 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1006 r = op2;
1007 r.invert ();
1009 else
1010 r.set_varying (type);
1011 break;
1013 default:
1014 break;
1016 return true;
1019 bool
1020 operator_equal::op2_range (irange &r, tree type,
1021 const irange &lhs,
1022 const irange &op1,
1023 relation_trio rel) const
1025 return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1028 // -------------------------------------------------------------------------
1030 void
1031 operator_not_equal::update_bitmask (irange &r, const irange &lh,
1032 const irange &rh) const
1034 update_known_bitmask (r, NE_EXPR, lh, rh);
1037 // Check if the LHS range indicates a relation between OP1 and OP2.
1039 relation_kind
1040 operator_not_equal::op1_op2_relation (const irange &lhs, const irange &,
1041 const irange &) const
1043 if (lhs.undefined_p ())
1044 return VREL_UNDEFINED;
1046 // FALSE = op1 != op2 indicates EQ_EXPR.
1047 if (lhs.zero_p ())
1048 return VREL_EQ;
1050 // TRUE = op1 != op2 indicates NE_EXPR.
1051 if (!contains_zero_p (lhs))
1052 return VREL_NE;
1053 return VREL_VARYING;
1056 bool
1057 operator_not_equal::fold_range (irange &r, tree type,
1058 const irange &op1,
1059 const irange &op2,
1060 relation_trio rel) const
1062 if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
1063 return true;
1065 // We can be sure the values are always equal or not if both ranges
1066 // consist of a single value, and then compare them.
1067 bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
1068 bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
1069 if (op1_const && op2_const)
1071 if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
1072 r = range_true (type);
1073 else
1074 r = range_false (type);
1076 else
1078 // If ranges do not intersect, we know the range is not equal,
1079 // otherwise we don't know anything for sure.
1080 int_range_max tmp = op1;
1081 tmp.intersect (op2);
1082 if (tmp.undefined_p ())
1083 r = range_true (type);
1084 // Check if a constant cannot satisfy the bitmask requirements.
1085 else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
1086 r = range_true (type);
1087 else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
1088 r = range_true (type);
1089 else
1090 r = range_true_and_false (type);
1092 return true;
1095 bool
1096 operator_not_equal::op1_range (irange &r, tree type,
1097 const irange &lhs,
1098 const irange &op2,
1099 relation_trio) const
1101 switch (get_bool_state (r, lhs, type))
1103 case BRS_TRUE:
1104 // If the result is true, the only time we know anything is if
1105 // OP2 is a constant.
1106 if (!op2.undefined_p ()
1107 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1109 r = op2;
1110 r.invert ();
1112 else
1113 r.set_varying (type);
1114 break;
1116 case BRS_FALSE:
1117 // If it's false, the result is the same as OP2.
1118 r = op2;
1119 break;
1121 default:
1122 break;
1124 return true;
1128 bool
1129 operator_not_equal::op2_range (irange &r, tree type,
1130 const irange &lhs,
1131 const irange &op1,
1132 relation_trio rel) const
1134 return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
// (X < VAL) produces the range of [MIN, VAL - 1].

static void
build_lt (irange &r, tree type, const wide_int &val)
{
  wi::overflow_type ov;
  wide_int lim;
  signop sgn = TYPE_SIGN (type);

  // Signed 1 bit cannot represent 1 for subtraction, so add -1
  // instead (same result, representable operand).
  if (sgn == SIGNED)
    lim = wi::add (val, -1, sgn, &ov);
  else
    lim = wi::sub (val, 1, sgn, &ov);

  // If val - 1 underflows, check if X < MIN, which is an empty range.
  if (ov)
    r.set_undefined ();
  else
    r = int_range<1> (type, min_limit (type), lim);
}
// (X <= VAL) produces the range of [MIN, VAL].

static void
build_le (irange &r, tree type, const wide_int &val)
{
  // VAL itself is included, so no boundary adjustment is needed.
  r = int_range<1> (type, min_limit (type), val);
}
// (X > VAL) produces the range of [VAL + 1, MAX].

static void
build_gt (irange &r, tree type, const wide_int &val)
{
  wi::overflow_type ov;
  wide_int lim;
  signop sgn = TYPE_SIGN (type);

  // Signed 1 bit cannot represent 1 for addition, so subtract -1
  // instead (same result, representable operand).
  if (sgn == SIGNED)
    lim = wi::sub (val, -1, sgn, &ov);
  else
    lim = wi::add (val, 1, sgn, &ov);
  // If val + 1 overflows, the check is for X > MAX, which is an empty range.
  if (ov)
    r.set_undefined ();
  else
    r = int_range<1> (type, lim, max_limit (type));
}
// (X >= VAL) produces the range of [VAL, MAX].

static void
build_ge (irange &r, tree type, const wide_int &val)
{
  // VAL itself is included, so no boundary adjustment is needed.
  r = int_range<1> (type, val, max_limit (type));
}
// Refine R's known bit values from the operand ranges of an LT_EXPR.

void
operator_lt::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, LT_EXPR, lh, rh);
}
1204 // Check if the LHS range indicates a relation between OP1 and OP2.
1206 relation_kind
1207 operator_lt::op1_op2_relation (const irange &lhs, const irange &,
1208 const irange &) const
1210 if (lhs.undefined_p ())
1211 return VREL_UNDEFINED;
1213 // FALSE = op1 < op2 indicates GE_EXPR.
1214 if (lhs.zero_p ())
1215 return VREL_GE;
1217 // TRUE = op1 < op2 indicates LT_EXPR.
1218 if (!contains_zero_p (lhs))
1219 return VREL_LT;
1220 return VREL_VARYING;
// Fold LHS = (OP1 < OP2) into R.

bool
operator_lt::fold_range (irange &r, tree type,
			 const irange &op1,
			 const irange &op2,
			 relation_trio rel) const
{
  // A known relation between the operands may settle the answer.
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  // Entire op1 range below op2's minimum: always true.
  if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_true (type);
  // op1's minimum not below op2's maximum: always false.
  else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_false (type);
  // Use nonzero bits to determine if < 0 is false: if the sign bit
  // cannot be set in op1, then op1 < 0 is impossible.
  else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}
1247 bool
1248 operator_lt::op1_range (irange &r, tree type,
1249 const irange &lhs,
1250 const irange &op2,
1251 relation_trio) const
1253 if (op2.undefined_p ())
1254 return false;
1256 switch (get_bool_state (r, lhs, type))
1258 case BRS_TRUE:
1259 build_lt (r, type, op2.upper_bound ());
1260 break;
1262 case BRS_FALSE:
1263 build_ge (r, type, op2.lower_bound ());
1264 break;
1266 default:
1267 break;
1269 return true;
1272 bool
1273 operator_lt::op2_range (irange &r, tree type,
1274 const irange &lhs,
1275 const irange &op1,
1276 relation_trio) const
1278 if (op1.undefined_p ())
1279 return false;
1281 switch (get_bool_state (r, lhs, type))
1283 case BRS_TRUE:
1284 build_gt (r, type, op1.lower_bound ());
1285 break;
1287 case BRS_FALSE:
1288 build_le (r, type, op1.upper_bound ());
1289 break;
1291 default:
1292 break;
1294 return true;
// Refine R's known bit values from the operand ranges of an LE_EXPR.

void
operator_le::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, LE_EXPR, lh, rh);
}
1305 // Check if the LHS range indicates a relation between OP1 and OP2.
1307 relation_kind
1308 operator_le::op1_op2_relation (const irange &lhs, const irange &,
1309 const irange &) const
1311 if (lhs.undefined_p ())
1312 return VREL_UNDEFINED;
1314 // FALSE = op1 <= op2 indicates GT_EXPR.
1315 if (lhs.zero_p ())
1316 return VREL_GT;
1318 // TRUE = op1 <= op2 indicates LE_EXPR.
1319 if (!contains_zero_p (lhs))
1320 return VREL_LE;
1321 return VREL_VARYING;
// Fold LHS = (OP1 <= OP2) into R.

bool
operator_le::fold_range (irange &r, tree type,
			 const irange &op1,
			 const irange &op2,
			 relation_trio rel) const
{
  // A known relation between the operands may settle the answer.
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  // Entire op1 range at or below op2's minimum: always true.
  if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_true (type);
  // op1's minimum above op2's maximum: always false.
  else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}
1345 bool
1346 operator_le::op1_range (irange &r, tree type,
1347 const irange &lhs,
1348 const irange &op2,
1349 relation_trio) const
1351 if (op2.undefined_p ())
1352 return false;
1354 switch (get_bool_state (r, lhs, type))
1356 case BRS_TRUE:
1357 build_le (r, type, op2.upper_bound ());
1358 break;
1360 case BRS_FALSE:
1361 build_gt (r, type, op2.lower_bound ());
1362 break;
1364 default:
1365 break;
1367 return true;
1370 bool
1371 operator_le::op2_range (irange &r, tree type,
1372 const irange &lhs,
1373 const irange &op1,
1374 relation_trio) const
1376 if (op1.undefined_p ())
1377 return false;
1379 switch (get_bool_state (r, lhs, type))
1381 case BRS_TRUE:
1382 build_ge (r, type, op1.lower_bound ());
1383 break;
1385 case BRS_FALSE:
1386 build_lt (r, type, op1.upper_bound ());
1387 break;
1389 default:
1390 break;
1392 return true;
// Refine R's known bit values from the operand ranges of a GT_EXPR.

void
operator_gt::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, GT_EXPR, lh, rh);
}
1403 // Check if the LHS range indicates a relation between OP1 and OP2.
1405 relation_kind
1406 operator_gt::op1_op2_relation (const irange &lhs, const irange &,
1407 const irange &) const
1409 if (lhs.undefined_p ())
1410 return VREL_UNDEFINED;
1412 // FALSE = op1 > op2 indicates LE_EXPR.
1413 if (lhs.zero_p ())
1414 return VREL_LE;
1416 // TRUE = op1 > op2 indicates GT_EXPR.
1417 if (!contains_zero_p (lhs))
1418 return VREL_GT;
1419 return VREL_VARYING;
// Fold LHS = (OP1 > OP2) into R.

bool
operator_gt::fold_range (irange &r, tree type,
			 const irange &op1, const irange &op2,
			 relation_trio rel) const
{
  // A known relation between the operands may settle the answer.
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  // Entire op1 range above op2's maximum: always true.
  if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_true (type);
  // op1's maximum not above op2's minimum: always false.
  else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}
1442 bool
1443 operator_gt::op1_range (irange &r, tree type,
1444 const irange &lhs, const irange &op2,
1445 relation_trio) const
1447 if (op2.undefined_p ())
1448 return false;
1450 switch (get_bool_state (r, lhs, type))
1452 case BRS_TRUE:
1453 build_gt (r, type, op2.lower_bound ());
1454 break;
1456 case BRS_FALSE:
1457 build_le (r, type, op2.upper_bound ());
1458 break;
1460 default:
1461 break;
1463 return true;
1466 bool
1467 operator_gt::op2_range (irange &r, tree type,
1468 const irange &lhs,
1469 const irange &op1,
1470 relation_trio) const
1472 if (op1.undefined_p ())
1473 return false;
1475 switch (get_bool_state (r, lhs, type))
1477 case BRS_TRUE:
1478 build_lt (r, type, op1.upper_bound ());
1479 break;
1481 case BRS_FALSE:
1482 build_ge (r, type, op1.lower_bound ());
1483 break;
1485 default:
1486 break;
1488 return true;
// Refine R's known bit values from the operand ranges of a GE_EXPR.

void
operator_ge::update_bitmask (irange &r, const irange &lh,
			     const irange &rh) const
{
  update_known_bitmask (r, GE_EXPR, lh, rh);
}
1499 // Check if the LHS range indicates a relation between OP1 and OP2.
1501 relation_kind
1502 operator_ge::op1_op2_relation (const irange &lhs, const irange &,
1503 const irange &) const
1505 if (lhs.undefined_p ())
1506 return VREL_UNDEFINED;
1508 // FALSE = op1 >= op2 indicates LT_EXPR.
1509 if (lhs.zero_p ())
1510 return VREL_LT;
1512 // TRUE = op1 >= op2 indicates GE_EXPR.
1513 if (!contains_zero_p (lhs))
1514 return VREL_GE;
1515 return VREL_VARYING;
// Fold LHS = (OP1 >= OP2) into R.

bool
operator_ge::fold_range (irange &r, tree type,
			 const irange &op1,
			 const irange &op2,
			 relation_trio rel) const
{
  // A known relation between the operands may settle the answer.
  if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  // Entire op1 range at or above op2's maximum: always true.
  if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_true (type);
  // op1's maximum below op2's minimum: always false.
  else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}
1539 bool
1540 operator_ge::op1_range (irange &r, tree type,
1541 const irange &lhs,
1542 const irange &op2,
1543 relation_trio) const
1545 if (op2.undefined_p ())
1546 return false;
1548 switch (get_bool_state (r, lhs, type))
1550 case BRS_TRUE:
1551 build_ge (r, type, op2.lower_bound ());
1552 break;
1554 case BRS_FALSE:
1555 build_lt (r, type, op2.upper_bound ());
1556 break;
1558 default:
1559 break;
1561 return true;
1564 bool
1565 operator_ge::op2_range (irange &r, tree type,
1566 const irange &lhs,
1567 const irange &op1,
1568 relation_trio) const
1570 if (op1.undefined_p ())
1571 return false;
1573 switch (get_bool_state (r, lhs, type))
1575 case BRS_TRUE:
1576 build_le (r, type, op1.upper_bound ());
1577 break;
1579 case BRS_FALSE:
1580 build_gt (r, type, op1.lower_bound ());
1581 break;
1583 default:
1584 break;
1586 return true;
// Refine R's known bit values from the operand ranges of a PLUS_EXPR.

void
operator_plus::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, PLUS_EXPR, lh, rh);
}
// Check to see if the range of OP2 indicates anything about the relation
// between LHS and OP1.

relation_kind
operator_plus::lhs_op1_relation (const irange &lhs,
				 const irange &op1,
				 const irange &op2,
				 relation_kind) const
{
  if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
    return VREL_VARYING;

  tree type = lhs.type ();
  unsigned prec = TYPE_PRECISION (type);
  wi::overflow_type ovf1, ovf2;
  signop sign = TYPE_SIGN (type);

  // LHS = OP1 + 0 indicates LHS == OP1.
  if (op2.zero_p ())
    return VREL_EQ;

  // For wrapping types, probe whether the addition overflows at both
  // extremes; only the overflow flags are needed, not the sums.
  if (TYPE_OVERFLOW_WRAPS (type))
    {
      wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
      wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
    }
  else
    ovf1 = ovf2 = wi::OVF_NONE;

  // Never wrapping additions.
  if (!ovf1 && !ovf2)
    {
      // Positive op2 means lhs > op1.
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_GE;

      // Negative op2 means lhs < op1.
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_LE;
    }
  // Always wrapping additions (both extremes overflow the same way).
  else if (ovf1 && ovf1 == ovf2)
    {
      // Positive op2 means lhs < op1.
      if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LT;
      if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
	return VREL_LE;

      // Negative op2 means lhs > op1.
      if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GT;
      if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
	return VREL_GE;
    }

  // If op2 does not contain 0, then LHS and OP1 can never be equal.
  if (!range_includes_zero_p (&op2))
    return VREL_NE;

  return VREL_VARYING;
}
// PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
// operands.

relation_kind
operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
				 const irange &op2, relation_kind rel) const
{
  return lhs_op1_relation (lhs, op2, op1, rel);
}
1674 void
1675 operator_plus::wi_fold (irange &r, tree type,
1676 const wide_int &lh_lb, const wide_int &lh_ub,
1677 const wide_int &rh_lb, const wide_int &rh_ub) const
1679 wi::overflow_type ov_lb, ov_ub;
1680 signop s = TYPE_SIGN (type);
1681 wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
1682 wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
1683 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
// Given addition or subtraction, determine the possible NORMAL ranges and
// OVERFLOW ranges given an OFFSET range.  ADD_P is true for addition.
// Return the relation that exists between the LHS and OP1 in order for the
// NORMAL range to apply.
// A return value of VREL_VARYING means no ranges were applicable.

static relation_kind
plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
		   bool add_p)
{
  relation_kind kind = VREL_VARYING;
  // For now, only deal with constant adds.  This could be extended to ranges
  // when someone is so motivated.
  if (!offset.singleton_p () || offset.zero_p ())
    return kind;

  // Always work with a positive offset.  ie a+ -2 -> a-2 and a- -2 > a+2.
  wide_int off = offset.lower_bound ();
  if (wi::neg_p (off, SIGNED))
    {
      add_p = !add_p;
      off = wi::neg (off);
    }

  wi::overflow_type ov;
  tree type = offset.type ();
  unsigned prec = TYPE_PRECISION (type);
  wide_int ub;
  wide_int lb;
  // Calculate the normal range and relation for the operation.
  if (add_p)
    {
      // [ 0 , INF - OFF ]
      lb = wi::zero (prec);
      // NOTE(review): the overflow result of this subtraction is
      // intentionally unused — MAX - OFF with positive OFF cannot
      // underflow as an unsigned operation.
      ub = wi::sub (irange_val_max (type), off, UNSIGNED, &ov);
      kind = VREL_GT;
    }
  else
    {
      // [ OFF, INF ]
      lb = off;
      ub = irange_val_max (type);
      kind = VREL_LT;
    }
  // The overflow range is everything outside the normal range.
  int_range<2> normal_range (type, lb, ub);
  int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);

  r_ov = ov_range;
  r_normal = normal_range;
  return kind;
}
// Once op1 has been calculated by operator_plus or operator_minus, check
// to see if the relation passed causes any part of the calculation to
// be not possible.  ie
// a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
// and that further refines a_2 to [0, 0].
// R is the value of op1, OP2 is the offset being added/subtracted, REL is the
// relation between LHS and OP1, and ADD_P is true for PLUS, false for
// MINUS.  If any adjustment can be made, R will reflect it.

static void
adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
			 bool add_p)
{
  if (r.undefined_p ())
    return;
  tree type = r.type ();
  // Check for unsigned overflow and calculate the overflow part.
  // Only wrapping unsigned types are handled here.
  signop s = TYPE_SIGN (type);
  if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
    return;

  // Only work with <, <=, >, >= relations.
  if (!relation_lt_le_gt_ge_p (rel))
    return;

  // Get the ranges for this offset.
  int_range_max normal, overflow;
  relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);

  // VREL_VARYING means there are no adjustments.
  if (k == VREL_VARYING)
    return;

  // If the relations match use the normal range, otherwise use overflow range.
  if (relation_intersect (k, rel) == k)
    r.intersect (normal);
  else
    r.intersect (overflow);
  return;
}
1779 bool
1780 operator_plus::op1_range (irange &r, tree type,
1781 const irange &lhs,
1782 const irange &op2,
1783 relation_trio trio) const
1785 if (lhs.undefined_p ())
1786 return false;
1787 // Start with the default operation.
1788 range_op_handler minus (MINUS_EXPR);
1789 if (!minus)
1790 return false;
1791 bool res = minus.fold_range (r, type, lhs, op2);
1792 relation_kind rel = trio.lhs_op1 ();
1793 // Check for a relation refinement.
1794 if (res)
1795 adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
1796 return res;
1799 bool
1800 operator_plus::op2_range (irange &r, tree type,
1801 const irange &lhs,
1802 const irange &op1,
1803 relation_trio rel) const
1805 return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
// Widening addition where the left-hand operand is sign-extended to
// double precision before the add (WIDEN_PLUS with a signed input).

class operator_widen_plus_signed : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_signed;
1818 void
1819 operator_widen_plus_signed::wi_fold (irange &r, tree type,
1820 const wide_int &lh_lb,
1821 const wide_int &lh_ub,
1822 const wide_int &rh_lb,
1823 const wide_int &rh_ub) const
1825 wi::overflow_type ov_lb, ov_ub;
1826 signop s = TYPE_SIGN (type);
1828 wide_int lh_wlb
1829 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
1830 wide_int lh_wub
1831 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
1832 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1833 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1835 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1836 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1838 r = int_range<2> (type, new_lb, new_ub);
// Widening addition where the left-hand operand is zero-extended to
// double precision before the add (WIDEN_PLUS with an unsigned input).

class operator_widen_plus_unsigned : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const;
} op_widen_plus_unsigned;
1851 void
1852 operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
1853 const wide_int &lh_lb,
1854 const wide_int &lh_ub,
1855 const wide_int &rh_lb,
1856 const wide_int &rh_ub) const
1858 wi::overflow_type ov_lb, ov_ub;
1859 signop s = TYPE_SIGN (type);
1861 wide_int lh_wlb
1862 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
1863 wide_int lh_wub
1864 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
1865 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1866 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1868 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1869 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1871 r = int_range<2> (type, new_lb, new_ub);
// Refine R's known bit values from the operand ranges of a MINUS_EXPR.

void
operator_minus::update_bitmask (irange &r, const irange &lh,
				const irange &rh) const
{
  update_known_bitmask (r, MINUS_EXPR, lh, rh);
}
1881 void
1882 operator_minus::wi_fold (irange &r, tree type,
1883 const wide_int &lh_lb, const wide_int &lh_ub,
1884 const wide_int &rh_lb, const wide_int &rh_ub) const
1886 wi::overflow_type ov_lb, ov_ub;
1887 signop s = TYPE_SIGN (type);
1888 wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
1889 wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
1890 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1894 // Return the relation between LHS and OP1 based on the relation between
1895 // OP1 and OP2.
1897 relation_kind
1898 operator_minus::lhs_op1_relation (const irange &, const irange &op1,
1899 const irange &, relation_kind rel) const
1901 if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
1902 switch (rel)
1904 case VREL_GT:
1905 case VREL_GE:
1906 return VREL_LE;
1907 default:
1908 break;
1910 return VREL_VARYING;
// Check to see if the relation REL between OP1 and OP2 has any effect on the
// LHS of the expression.  If so, apply it to LHS_RANGE.  This is a helper
// function for both MINUS_EXPR and POINTER_DIFF_EXPR.

bool
minus_op1_op2_relation_effect (irange &lhs_range, tree type,
			       const irange &op1_range ATTRIBUTE_UNUSED,
			       const irange &op2_range ATTRIBUTE_UNUSED,
			       relation_kind rel)
{
  if (rel == VREL_VARYING)
    return false;

  int_range<2> rel_range;
  unsigned prec = TYPE_PRECISION (type);
  signop sgn = TYPE_SIGN (type);

  // == and != produce [0,0] and ~[0,0] regardless of wrapping.
  if (rel == VREL_EQ)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
  else if (rel == VREL_NE)
    rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
			      VR_ANTI_RANGE);
  else if (TYPE_OVERFLOW_WRAPS (type))
    {
      switch (rel)
	{
	// For wrapping signed values and unsigned, if op1 > op2 or
	// op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
	case VREL_GT:
	case VREL_LT:
	  rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
				    VR_ANTI_RANGE);
	  break;
	default:
	  return false;
	}
    }
  else
    {
      // Without wrapping overflow, the sign of the result is known.
      switch (rel)
	{
	// op1 > op2, op1 - op2 can be restricted to [1, +INF].
	case VREL_GT:
	  rel_range = int_range<2> (type, wi::one (prec),
				    wi::max_value (prec, sgn));
	  break;
	// op1 >= op2, op1 - op2 can be restricted to [0, +INF].
	case VREL_GE:
	  rel_range = int_range<2> (type, wi::zero (prec),
				    wi::max_value (prec, sgn));
	  break;
	// op1 < op2, op1 - op2 can be restricted to [-INF, -1].
	case VREL_LT:
	  rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				    wi::minus_one (prec));
	  break;
	// op1 <= op2, op1 - op2 can be restricted to [-INF, 0].
	case VREL_LE:
	  rel_range = int_range<2> (type, wi::min_value (prec, sgn),
				    wi::zero (prec));
	  break;
	default:
	  return false;
	}
    }
  lhs_range.intersect (rel_range);
  return true;
}
// Apply the OP1/OP2 relation REL to LHS_RANGE via the shared
// MINUS_EXPR/POINTER_DIFF_EXPR helper.

bool
operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
					 const irange &op1_range,
					 const irange &op2_range,
					 relation_kind rel) const
{
  return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
					rel);
}
1993 bool
1994 operator_minus::op1_range (irange &r, tree type,
1995 const irange &lhs,
1996 const irange &op2,
1997 relation_trio trio) const
1999 if (lhs.undefined_p ())
2000 return false;
2001 // Start with the default operation.
2002 range_op_handler minus (PLUS_EXPR);
2003 if (!minus)
2004 return false;
2005 bool res = minus.fold_range (r, type, lhs, op2);
2006 relation_kind rel = trio.lhs_op1 ();
2007 if (res)
2008 adjust_op1_for_overflow (r, op2, rel, false /* PLUS_EXPR */);
2009 return res;
// Derive the range of OP2 from LHS = OP1 - OP2.

bool
operator_minus::op2_range (irange &r, tree type,
			   const irange &lhs,
			   const irange &op1,
			   relation_trio) const
{
  if (lhs.undefined_p ())
    return false;
  // LHS = OP1 - OP2 implies OP2 = OP1 - LHS.
  return fold_range (r, type, op1, lhs);
}
// Refine R's known bit values from the operand ranges of a MIN_EXPR.

void
operator_min::update_bitmask (irange &r, const irange &lh,
			      const irange &rh) const
{
  update_known_bitmask (r, MIN_EXPR, lh, rh);
}
2031 void
2032 operator_min::wi_fold (irange &r, tree type,
2033 const wide_int &lh_lb, const wide_int &lh_ub,
2034 const wide_int &rh_lb, const wide_int &rh_ub) const
2036 signop s = TYPE_SIGN (type);
2037 wide_int new_lb = wi::min (lh_lb, rh_lb, s);
2038 wide_int new_ub = wi::min (lh_ub, rh_ub, s);
2039 value_range_with_overflow (r, type, new_lb, new_ub);
// Refine R's known bit values from the operand ranges of a MAX_EXPR.

void
operator_max::update_bitmask (irange &r, const irange &lh,
			      const irange &rh) const
{
  update_known_bitmask (r, MAX_EXPR, lh, rh);
}
2050 void
2051 operator_max::wi_fold (irange &r, tree type,
2052 const wide_int &lh_lb, const wide_int &lh_ub,
2053 const wide_int &rh_lb, const wide_int &rh_ub) const
2055 signop s = TYPE_SIGN (type);
2056 wide_int new_lb = wi::max (lh_lb, rh_lb, s);
2057 wide_int new_ub = wi::max (lh_ub, rh_ub, s);
2058 value_range_with_overflow (r, type, new_lb, new_ub);
// Calculate the cross product of two sets of ranges and return it.
//
// Multiplications, divisions and shifts are a bit tricky to handle,
// depending on the mix of signs we have in the two ranges, we need to
// operate on different values to get the minimum and maximum values
// for the new range.  One approach is to figure out all the
// variations of range combinations and do the operations.
//
// However, this involves several calls to compare_values and it is
// pretty convoluted.  It's simpler to do the 4 operations (MIN0 OP
// MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
// figure the smallest and largest values to form the new range.

void
cross_product_operator::wi_cross_product (irange &r, tree type,
					  const wide_int &lh_lb,
					  const wide_int &lh_ub,
					  const wide_int &rh_lb,
					  const wide_int &rh_ub) const
{
  wide_int cp1, cp2, cp3, cp4;
  // Default to varying.
  r.set_varying (type);

  // Compute the 4 cross operations, bailing if we get an overflow we
  // can't handle.  Singleton bounds let us reuse an already-computed
  // product instead of recomputing it.
  if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp3 = cp1;
  else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
    return;
  if (wi::eq_p (rh_lb, rh_ub))
    cp2 = cp1;
  else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp4 = cp2;
  else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
    return;

  // Order pairs.
  signop sign = TYPE_SIGN (type);
  if (wi::gt_p (cp1, cp2, sign))
    std::swap (cp1, cp2);
  if (wi::gt_p (cp3, cp4, sign))
    std::swap (cp3, cp4);

  // Choose min and max from the ordered pairs.
  wide_int res_lb = wi::min (cp1, cp3, sign);
  wide_int res_ub = wi::max (cp2, cp4, sign);
  value_range_with_overflow (r, type, res_lb, res_ub);
}
// Refine R's known bit values from the operand ranges of a MULT_EXPR.

void
operator_mult::update_bitmask (irange &r, const irange &lh,
			       const irange &rh) const
{
  update_known_bitmask (r, MULT_EXPR, lh, rh);
}
// Derive the range of OP1 from LHS = OP1 * OP2 by dividing, when that
// is sound for the type.

bool
operator_mult::op1_range (irange &r, tree type,
			  const irange &lhs, const irange &op2,
			  relation_trio) const
{
  if (lhs.undefined_p ())
    return false;

  // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
  // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
  // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit).
  if (TYPE_OVERFLOW_WRAPS (type))
    return false;

  // Only invert when OP2 is a single nonzero value (division by zero
  // or a range of multipliers cannot be inverted here).
  wide_int offset;
  if (op2.singleton_p (offset) && offset != 0)
    return range_op_handler (TRUNC_DIV_EXPR).fold_range (r, type, lhs, op2);
  return false;
}
2144 bool
2145 operator_mult::op2_range (irange &r, tree type,
2146 const irange &lhs, const irange &op1,
2147 relation_trio rel) const
2149 return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
// Compute RES = W0 * W1 for TYPE.  Return true if the multiplication
// overflowed in a way the caller must treat as "can't handle"; with
// undefined overflow the result is instead saturated and false is
// returned.

bool
operator_mult::wi_op_overflows (wide_int &res, tree type,
				const wide_int &w0, const wide_int &w1) const
{
  wi::overflow_type overflow = wi::OVF_NONE;
  signop sign = TYPE_SIGN (type);
  res = wi::mul (w0, w1, sign, &overflow);
  if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
    {
      // For multiplication, the sign of the overflow is given
      // by the comparison of the signs of the operands.
      if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
	res = wi::max_value (w0.get_precision (), sign);
      else
	res = wi::min_value (w0.get_precision (), sign);
      return false;
    }
  return overflow;
}
// Fold [lh_lb, lh_ub] * [rh_lb, rh_ub] into R.

void
operator_mult::wi_fold (irange &r, tree type,
			const wide_int &lh_lb, const wide_int &lh_ub,
			const wide_int &rh_lb, const wide_int &rh_ub) const
{
  // With undefined overflow the cross product machinery suffices.
  if (TYPE_OVERFLOW_UNDEFINED (type))
    {
      wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
      return;
    }

  // Multiply the ranges when overflow wraps.  This is basically fancy
  // code so we don't drop to varying with an unsigned
  // [-3,-1]*[-3,-1].
  //
  // This test requires 2*prec bits if both operands are signed and
  // 2*prec + 2 bits if either is not.  Therefore, extend the values
  // using the sign of the result to PREC2.  From here on out,
  // everything is just signed math no matter what the input types
  // were.

  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  widest2_int min0 = widest2_int::from (lh_lb, sign);
  widest2_int max0 = widest2_int::from (lh_ub, sign);
  widest2_int min1 = widest2_int::from (rh_lb, sign);
  widest2_int max1 = widest2_int::from (rh_ub, sign);
  widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
  widest2_int size = sizem1 + 1;

  // Canonicalize the intervals: shift unsigned intervals whose
  // midpoint is above the modulus down by one modulus so the signed
  // math below sees them as (possibly negative) centered intervals.
  if (sign == UNSIGNED)
    {
      if (wi::ltu_p (size, min0 + max0))
	{
	  min0 -= size;
	  max0 -= size;
	}
      if (wi::ltu_p (size, min1 + max1))
	{
	  min1 -= size;
	  max1 -= size;
	}
    }

  // Sort the 4 products so that min is in prod0 and max is in
  // prod3.
  widest2_int prod0 = min0 * min1;
  widest2_int prod1 = min0 * max1;
  widest2_int prod2 = max0 * min1;
  widest2_int prod3 = max0 * max1;

  // min0min1 > max0max1
  if (prod0 > prod3)
    std::swap (prod0, prod3);

  // min0max1 > max0min1
  if (prod1 > prod2)
    std::swap (prod1, prod2);

  if (prod0 > prod1)
    std::swap (prod0, prod1);

  if (prod2 > prod3)
    std::swap (prod2, prod3);

  // diff = max - min
  prod2 = prod3 - prod0;
  if (wi::geu_p (prod2, sizem1))
    {
      // The products span at least the whole modulus, so the wrapped
      // range would cover everything -- except for one special case:
      // multiplying by X, where X is a power of 2 is [0,0][X,+INF].
      if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
	  && wi::exact_log2 (rh_lb) != -1 && prec > 1)
	{
	  r.set (type, rh_lb, wi::max_value (prec, sign));
	  int_range<2> zero;
	  zero.set_zero (type);
	  r.union_ (zero);
	}
      else
	// The range covers all values.
	r.set_varying (type);
    }
  else
    {
      // Truncate back to the result precision; the range may wrap,
      // which create_possibly_reversed_range turns into an anti-range.
      wide_int new_lb = wide_int::from (prod0, prec, sign);
      wide_int new_ub = wide_int::from (prod3, prec, sign);
      create_possibly_reversed_range (r, type, new_lb, new_ub);
    }
}
// Widening multiplication where the left-hand operand is sign-extended
// to double precision before the multiply (WIDEN_MULT, signed input).

class operator_widen_mult_signed : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub)
    const;
} op_widen_mult_signed;
// Fold a widening multiply: sign-extend the LH bounds, extend the RH
// bounds according to the result type's sign, then defer to the
// regular multiply fold at double precision.

void
operator_widen_mult_signed::wi_fold (irange &r, tree type,
				     const wide_int &lh_lb,
				     const wide_int &lh_ub,
				     const wide_int &rh_lb,
				     const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);

  wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
  wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  /* We don't expect a widening multiplication to be able to overflow but range
     calculations for multiplications are complicated.  After widening the
     operands lets call the base class.  */
  return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
}
// Widening multiplication where the left-hand operand is zero-extended
// to double precision before the multiply (WIDEN_MULT, unsigned input).

class operator_widen_mult_unsigned : public range_operator
{
public:
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub)
    const;
} op_widen_mult_unsigned;
// Fold a widening multiply: zero-extend the LH bounds, extend the RH
// bounds according to the result type's sign, then defer to the
// regular multiply fold at double precision.

void
operator_widen_mult_unsigned::wi_fold (irange &r, tree type,
				       const wide_int &lh_lb,
				       const wide_int &lh_ub,
				       const wide_int &rh_lb,
				       const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);

  wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
  wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
  wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
  wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);

  /* We don't expect a widening multiplication to be able to overflow but range
     calculations for multiplications are complicated.  After widening the
     operands lets call the base class.  */
  return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
}
// Shared range operator for the four division tree codes; the exact
// rounding behavior is selected by the tree code stored in m_code.

class operator_div : public cross_product_operator
{
public:
  operator_div (tree_code div_kind) { m_code = div_kind; }
  virtual void wi_fold (irange &r, tree type,
			const wide_int &lh_lb,
			const wide_int &lh_ub,
			const wide_int &rh_lb,
			const wide_int &rh_ub) const final override;
  virtual bool wi_op_overflows (wide_int &res, tree type,
				const wide_int &, const wide_int &)
    const final override;
  void update_bitmask (irange &r, const irange &lh, const irange &rh) const
    { update_known_bitmask (r, m_code, lh, rh); }
protected:
  // Which division flavor this instance folds (TRUNC/FLOOR/ROUND/CEIL).
  tree_code m_code;
};

// One shared instance per division rounding mode.
static operator_div op_trunc_div (TRUNC_DIV_EXPR);
static operator_div op_floor_div (FLOOR_DIV_EXPR);
static operator_div op_round_div (ROUND_DIV_EXPR);
static operator_div op_ceil_div (CEIL_DIV_EXPR);
// Compute W0 / W1 into RES using the rounding mode in m_code and
// return TRUE if the operation overflows.  Division by zero is
// reported as an overflow.  When TYPE has undefined overflow, the
// lone overflowing case is saturated to the maximum value and no
// overflow is reported.
2349 bool
2350 operator_div::wi_op_overflows (wide_int &res, tree type,
2351 const wide_int &w0, const wide_int &w1) const
2353 if (w1 == 0)
2354 return true;
2356 wi::overflow_type overflow = wi::OVF_NONE;
2357 signop sign = TYPE_SIGN (type);
// EXACT_DIV_EXPR shares the truncating division computation.
2359 switch (m_code)
2361 case EXACT_DIV_EXPR:
2362 case TRUNC_DIV_EXPR:
2363 res = wi::div_trunc (w0, w1, sign, &overflow);
2364 break;
2365 case FLOOR_DIV_EXPR:
2366 res = wi::div_floor (w0, w1, sign, &overflow);
2367 break;
2368 case ROUND_DIV_EXPR:
2369 res = wi::div_round (w0, w1, sign, &overflow);
2370 break;
2371 case CEIL_DIV_EXPR:
2372 res = wi::div_ceil (w0, w1, sign, &overflow);
2373 break;
2374 default:
2375 gcc_unreachable ();
2378 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2380 // For division, the only case is -INF / -1 = +INF.
2381 res = wi::max_value (w0.get_precision (), sign);
2382 return false;
2384 return overflow;
// Fold [LH_LB, LH_UB] / [RH_LB, RH_UB] into R.  If the divisor range
// may contain zero, the division is performed piecewise over the
// negative part [divisor_min, -1] and the positive part
// [1, divisor_max] so zero is never used as a divisor.
2387 void
2388 operator_div::wi_fold (irange &r, tree type,
2389 const wide_int &lh_lb, const wide_int &lh_ub,
2390 const wide_int &rh_lb, const wide_int &rh_ub) const
2392 const wide_int dividend_min = lh_lb;
2393 const wide_int dividend_max = lh_ub;
2394 const wide_int divisor_min = rh_lb;
2395 const wide_int divisor_max = rh_ub;
2396 signop sign = TYPE_SIGN (type);
2397 unsigned prec = TYPE_PRECISION (type);
2398 wide_int extra_min, extra_max;
2400 // If we know we won't divide by zero, just do the division.
2401 if (!wi_includes_zero_p (type, divisor_min, divisor_max))
2403 wi_cross_product (r, type, dividend_min, dividend_max,
2404 divisor_min, divisor_max);
2405 return;
2408 // If we're definitely dividing by zero, there's nothing to do.
2409 if (wi_zero_p (type, divisor_min, divisor_max))
2411 r.set_undefined ();
2412 return;
2415 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2416 // skip any division by zero.
2418 // First divide by the negative numbers, if any.
2419 if (wi::neg_p (divisor_min, sign))
2420 wi_cross_product (r, type, dividend_min, dividend_max,
2421 divisor_min, wi::minus_one (prec))
2422 else
2423 r.set_undefined ();
2425 // Then divide by the non-zero positive numbers, if any.
2426 if (wi::gt_p (divisor_max, wi::zero (prec), sign))
2428 int_range_max tmp;
2429 wi_cross_product (tmp, type, dividend_min, dividend_max,
2430 wi::one (prec), divisor_max);
2431 r.union_ (tmp);
2433 // We shouldn't still have undefined here.
2434 gcc_checking_assert (!r.undefined_p ());
// EXACT_DIV_EXPR operator.  Since the division is known to have no
// remainder, op1 can be recovered from the LHS by multiplication.
2438 class operator_exact_divide : public operator_div
2440 using range_operator::op1_range;
2441 public:
2442 operator_exact_divide () : operator_div (EXACT_DIV_EXPR) { }
2443 virtual bool op1_range (irange &r, tree type,
2444 const irange &lhs,
2445 const irange &op2,
2446 relation_trio) const;
2448 } op_exact_div;
// Compute op1 from LHS = op1 / OP2 when OP2 is a non-zero singleton:
// op1 = LHS * OP2.  Returns FALSE when nothing can be deduced.
2450 bool
2451 operator_exact_divide::op1_range (irange &r, tree type,
2452 const irange &lhs,
2453 const irange &op2,
2454 relation_trio) const
2456 if (lhs.undefined_p ())
2457 return false;
2458 wide_int offset;
2459 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
2460 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2461 // We wont bother trying to enumerate all the in between stuff :-P
2462 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2463 // the time however.
2464 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2465 if (op2.singleton_p (offset) && offset != 0)
2466 return range_op_handler (MULT_EXPR).fold_range (r, type, lhs, op2);
2467 return false;
// Range operator for LSHIFT_EXPR.  Folds constant shifts as a
// wrapping multiply and computes op1 ranges from the LHS; the shift
// count (op2) is allowed to differ in type from op1/LHS, hence
// operand_check_p only compares the LHS and op1 types.
2471 class operator_lshift : public cross_product_operator
2473 using range_operator::fold_range;
2474 using range_operator::op1_range;
2475 public:
2476 virtual bool op1_range (irange &r, tree type, const irange &lhs,
2477 const irange &op2, relation_trio rel = TRIO_VARYING)
2478 const final override;
2479 virtual bool fold_range (irange &r, tree type, const irange &op1,
2480 const irange &op2, relation_trio rel = TRIO_VARYING)
2481 const final override;
2483 virtual void wi_fold (irange &r, tree type,
2484 const wide_int &lh_lb, const wide_int &lh_ub,
2485 const wide_int &rh_lb,
2486 const wide_int &rh_ub) const final override;
2487 virtual bool wi_op_overflows (wide_int &res,
2488 tree type,
2489 const wide_int &,
2490 const wide_int &) const final override;
2491 void update_bitmask (irange &r, const irange &lh,
2492 const irange &rh) const final override
2493 { update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
2494 // Check compatibility of LHS and op1.
2495 bool operand_check_p (tree t1, tree t2, tree) const final override
2496 { return range_compatible_p (t1, t2); }
2497 } op_lshift;
// Range operator for RSHIFT_EXPR.  Like operator_lshift, only the
// LHS and op1 types must match; the shift count type is free.
2499 class operator_rshift : public cross_product_operator
2501 using range_operator::fold_range;
2502 using range_operator::op1_range;
2503 using range_operator::lhs_op1_relation;
2504 public:
2505 virtual bool fold_range (irange &r, tree type, const irange &op1,
2506 const irange &op2, relation_trio rel = TRIO_VARYING)
2507 const final override;
2508 virtual void wi_fold (irange &r, tree type,
2509 const wide_int &lh_lb,
2510 const wide_int &lh_ub,
2511 const wide_int &rh_lb,
2512 const wide_int &rh_ub) const final override;
2513 virtual bool wi_op_overflows (wide_int &res,
2514 tree type,
2515 const wide_int &w0,
2516 const wide_int &w1) const final override;
2517 virtual bool op1_range (irange &, tree type, const irange &lhs,
2518 const irange &op2, relation_trio rel = TRIO_VARYING)
2519 const final override;
2520 virtual relation_kind lhs_op1_relation (const irange &lhs, const irange &op1,
2521 const irange &op2, relation_kind rel)
2522 const final override;
2523 void update_bitmask (irange &r, const irange &lh,
2524 const irange &rh) const final override
2525 { update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
2526 // Check compatibility of LHS and op1.
2527 bool operand_check_p (tree t1, tree t2, tree) const final override
2528 { return range_compatible_p (t1, t2); }
2529 } op_rshift;
// Deduce a relation between the LHS and op1 of a right shift:
// shifting a non-negative value right by a non-negative count can
// only keep it equal or make it smaller.
2532 relation_kind
2533 operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
2534 const irange &op1,
2535 const irange &op2,
2536 relation_kind) const
2538 // If both operands range are >= 0, then the LHS <= op1.
2539 if (!op1.undefined_p () && !op2.undefined_p ()
2540 && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
2541 && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
2542 return VREL_LE;
2543 return VREL_VARYING;
// Fold OP1 << OP2 into R.  An out-of-range shift count yields
// UNDEFINED or zero; a singleton shift is rewritten as a wrapping
// multiply by 2^shift; anything else falls back to the generic
// cross-product fold over the sanitized shift range.
2546 bool
2547 operator_lshift::fold_range (irange &r, tree type,
2548 const irange &op1,
2549 const irange &op2,
2550 relation_trio rel) const
2552 int_range_max shift_range;
2553 if (!get_shift_range (shift_range, type, op2))
2555 if (op2.undefined_p ())
2556 r.set_undefined ();
2557 else
2558 r.set_zero (type);
2559 return true;
2562 // Transform left shifts by constants into multiplies.
2563 if (shift_range.singleton_p ())
2565 unsigned shift = shift_range.lower_bound ().to_uhwi ();
2566 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
2567 int_range<1> mult (type, tmp, tmp);
2569 // Force wrapping multiplication.
// Temporarily override the global overflow-wrapping flags so the
// multiply fold models the shift's wrapping semantics, then restore.
2570 bool saved_flag_wrapv = flag_wrapv;
2571 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
2572 flag_wrapv = 1;
2573 flag_wrapv_pointer = 1;
2574 bool b = op_mult.fold_range (r, type, op1, mult);
2575 flag_wrapv = saved_flag_wrapv;
2576 flag_wrapv_pointer = saved_flag_wrapv_pointer;
2577 return b;
2579 else
2580 // Otherwise, invoke the generic fold routine.
2581 return range_operator::fold_range (r, type, op1, shift_range, rel);
// Fold [LH_LB, LH_UB] << [RH_LB, RH_UB] into R.  The result is the
// cross product of the bounds only when no bits can be shifted out
// (value moves monotonically); otherwise R is set to varying.
2584 void
2585 operator_lshift::wi_fold (irange &r, tree type,
2586 const wide_int &lh_lb, const wide_int &lh_ub,
2587 const wide_int &rh_lb, const wide_int &rh_ub) const
2589 signop sign = TYPE_SIGN (type);
2590 unsigned prec = TYPE_PRECISION (type);
2591 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
2592 int bound_shift = overflow_pos - rh_ub.to_shwi ();
2593 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2594 // overflow. However, for that to happen, rh.max needs to be zero,
2595 // which means rh is a singleton range of zero, which means we simply return
2596 // [lh_lb, lh_ub] as the range.
2597 if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
2599 r = int_range<2> (type, lh_lb, lh_ub);
2600 return;
// BOUND is the first value whose maximal shift overflows;
// COMPLEMENT covers the symmetric negative/high side.
2603 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2604 wide_int complement = ~(bound - 1);
2605 wide_int low_bound, high_bound;
2606 bool in_bounds = false;
2608 if (sign == UNSIGNED)
2610 low_bound = bound;
2611 high_bound = complement;
2612 if (wi::ltu_p (lh_ub, low_bound))
2614 // [5, 6] << [1, 2] == [10, 24].
2615 // We're shifting out only zeroes, the value increases
2616 // monotonically.
2617 in_bounds = true;
2619 else if (wi::ltu_p (high_bound, lh_lb))
2621 // [0xffffff00, 0xffffffff] << [1, 2]
2622 // == [0xfffffc00, 0xfffffffe].
2623 // We're shifting out only ones, the value decreases
2624 // monotonically.
2625 in_bounds = true;
2628 else
2630 // [-1, 1] << [1, 2] == [-4, 4]
2631 low_bound = complement;
2632 high_bound = bound;
2633 if (wi::lts_p (lh_ub, high_bound)
2634 && wi::lts_p (low_bound, lh_lb))
2636 // For non-negative numbers, we're shifting out only zeroes,
2637 // the value increases monotonically. For negative numbers,
2638 // we're shifting out only ones, the value decreases
2639 // monotonically.
2640 in_bounds = true;
2644 if (in_bounds)
2645 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2646 else
2647 r.set_varying (type);
// Compute W0 << W1 into RES.  A negative shift count is treated as a
// right shift by the count's absolute value.  Always reports no
// overflow (returns FALSE).
2650 bool
2651 operator_lshift::wi_op_overflows (wide_int &res, tree type,
2652 const wide_int &w0, const wide_int &w1) const
2654 signop sign = TYPE_SIGN (type);
2655 if (wi::neg_p (w1))
2657 // It's unclear from the C standard whether shifts can overflow.
2658 // The following code ignores overflow; perhaps a C standard
2659 // interpretation ruling is needed.
2660 res = wi::rshift (w0, -w1, sign);
2662 else
2663 res = wi::lshift (w0, w1);
2664 return false;
// Compute the range of op1 from LHS = op1 << OP2.  Only handled when
// OP2 is a singleton shift; the candidate op1 values are recovered by
// right shifting the LHS (in unsigned arithmetic) and then re-adding
// all possible values of the bits that the left shift discarded.
2667 bool
2668 operator_lshift::op1_range (irange &r,
2669 tree type,
2670 const irange &lhs,
2671 const irange &op2,
2672 relation_trio) const
2674 if (lhs.undefined_p ())
2675 return false;
// A nonzero LHS implies op1 was nonzero; this also seeds R for the
// final intersection below.
2677 if (!contains_zero_p (lhs))
2678 r.set_nonzero (type);
2679 else
2680 r.set_varying (type);
2682 wide_int shift;
2683 if (op2.singleton_p (shift))
// Reject negative or precision-exceeding shift counts.
2685 if (wi::lt_p (shift, 0, SIGNED))
2686 return false;
2687 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
2688 TYPE_PRECISION (op2.type ())),
2689 UNSIGNED))
2690 return false;
2691 if (shift == 0)
2693 r.intersect (lhs);
2694 return true;
2697 // Work completely in unsigned mode to start.
2698 tree utype = type;
2699 int_range_max tmp_range;
2700 if (TYPE_SIGN (type) == SIGNED)
2702 int_range_max tmp = lhs;
2703 utype = unsigned_type_for (type);
2704 range_cast (tmp, utype);
2705 op_rshift.fold_range (tmp_range, utype, tmp, op2);
2707 else
2708 op_rshift.fold_range (tmp_range, utype, lhs, op2);
2710 // Start with ranges which can produce the LHS by right shifting the
2711 // result by the shift amount.
2712 // ie [0x08, 0xF0] = op1 << 2 will start with
2713 // [00001000, 11110000] = op1 << 2
2714 // [0x02, 0x4C] aka [00000010, 00111100]
2716 // Then create a range from the LB with the least significant upper bit
2717 // set, to the upper bound with all the bits set.
2718 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2720 // Ideally we do this for each subrange, but just lump them all for now.
2721 unsigned low_bits = TYPE_PRECISION (utype) - shift.to_uhwi ();
2722 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
2723 wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
2724 wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
2725 int_range<2> fill_range (utype, new_lb, new_ub);
2726 tmp_range.union_ (fill_range);
2728 if (utype != type)
2729 range_cast (tmp_range, type);
2731 r.intersect (tmp_range);
2732 return true;
// Without a singleton shift, only the nonzero/varying deduction
// above remains; report success only if it narrowed something.
2735 return !r.varying_p ();
// Compute the range of op1 from LHS = op1 >> OP2.  Only handled when
// OP2 is a singleton: op1 is LHS << shift plus any combination of the
// low SHIFT bits that the right shift discarded.
2738 bool
2739 operator_rshift::op1_range (irange &r,
2740 tree type,
2741 const irange &lhs,
2742 const irange &op2,
2743 relation_trio) const
2745 if (lhs.undefined_p ())
2746 return false;
2747 wide_int shift;
2748 if (op2.singleton_p (shift))
2750 // Ignore nonsensical shifts.
2751 unsigned prec = TYPE_PRECISION (type);
2752 if (wi::ge_p (shift,
2753 wi::uhwi (prec, TYPE_PRECISION (op2.type ())),
2754 UNSIGNED))
2755 return false;
2756 if (shift == 0)
2758 r = lhs;
2759 return true;
2762 // Folding the original operation may discard some impossible
2763 // ranges from the LHS.
2764 int_range_max lhs_refined;
2765 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
2766 lhs_refined.intersect (lhs);
2767 if (lhs_refined.undefined_p ())
2769 r.set_undefined ();
2770 return true;
2772 int_range_max shift_range (op2.type (), shift, shift);
2773 int_range_max lb, ub;
2774 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
2775 // LHS
2776 // 0000 0111 = OP1 >> 3
2778 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2779 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2780 // right hand side (0x07).
2781 wide_int mask = wi::bit_not (wi::lshift (wi::minus_one (prec), shift));
2782 int_range_max mask_range (type,
2783 wi::zero (TYPE_PRECISION (type)),
2784 mask);
2785 op_plus.fold_range (ub, type, lb, mask_range);
2786 r = lb;
2787 r.union_ (ub);
// If the LHS cannot be zero, op1 cannot consist solely of the
// discarded low bits; remove [0, mask] from the result.
2788 if (!contains_zero_p (lhs_refined))
2790 mask_range.invert ();
2791 r.intersect (mask_range);
2793 return true;
2795 return false;
// Compute W0 >> W1 into RES.  A negative shift count is treated as a
// left shift by the count's absolute value.  Always reports no
// overflow (returns FALSE).
2798 bool
2799 operator_rshift::wi_op_overflows (wide_int &res,
2800 tree type,
2801 const wide_int &w0,
2802 const wide_int &w1) const
2804 signop sign = TYPE_SIGN (type);
2805 if (wi::neg_p (w1))
2806 res = wi::lshift (w0, -w1);
2807 else
2809 // It's unclear from the C standard whether shifts can overflow.
2810 // The following code ignores overflow; perhaps a C standard
2811 // interpretation ruling is needed.
2812 res = wi::rshift (w0, w1, sign);
2814 return false;
// Fold OP1 >> OP2 into R.  An out-of-range shift count yields
// UNDEFINED or zero; otherwise defer to the generic fold over the
// sanitized shift range.
2817 bool
2818 operator_rshift::fold_range (irange &r, tree type,
2819 const irange &op1,
2820 const irange &op2,
2821 relation_trio rel) const
2823 int_range_max shift;
2824 if (!get_shift_range (shift, type, op2))
2826 if (op2.undefined_p ())
2827 r.set_undefined ();
2828 else
2829 r.set_zero (type);
2830 return true;
2833 return range_operator::fold_range (r, type, op1, shift, rel);
// Right shift bounds are handled directly by the cross product of
// the endpoint shifts.
2836 void
2837 operator_rshift::wi_fold (irange &r, tree type,
2838 const wide_int &lh_lb, const wide_int &lh_ub,
2839 const wide_int &rh_lb, const wide_int &rh_ub) const
2841 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2845 // Add a partial equivalence between the LHS and op1 for casts.
2847 relation_kind
2848 operator_cast::lhs_op1_relation (const irange &lhs,
2849 const irange &op1,
2850 const irange &op2 ATTRIBUTE_UNUSED,
2851 relation_kind) const
2853 if (lhs.undefined_p () || op1.undefined_p ())
2854 return VREL_VARYING;
2855 unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
2856 unsigned op1_prec = TYPE_PRECISION (op1.type ());
2857 // If the result gets sign extended into a larger type check first if this
2858 // qualifies as a partial equivalence.
2859 if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
2861 // If the result is sign extended, and the LHS is larger than op1,
2862 // check if op1's range can be negative as the sign extension will
2863 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2864 int_range<3> negs = range_negatives (op1.type ());
2865 negs.intersect (op1);
2866 if (!negs.undefined_p ())
2867 return VREL_VARYING;
// The partial equivalence covers the bits common to both types.
2870 unsigned prec = MIN (lhs_prec, op1_prec);
2871 return bits_to_pe (prec);
2874 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2876 inline bool
2877 operator_cast::truncating_cast_p (const irange &inner,
2878 const irange &outer) const
2880 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
2883 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2885 bool
2886 operator_cast::inside_domain_p (const wide_int &min,
2887 const wide_int &max,
2888 const irange &range) const
2890 wide_int domain_min = irange_val_min (range.type ());
2891 wide_int domain_max = irange_val_max (range.type ());
2892 signop domain_sign = TYPE_SIGN (range.type ());
// Both endpoints must lie within [domain_min, domain_max].
2893 return (wi::le_p (min, domain_max, domain_sign)
2894 && wi::le_p (max, domain_max, domain_sign)
2895 && wi::ge_p (min, domain_min, domain_sign)
2896 && wi::ge_p (max, domain_min, domain_sign));
2900 // Helper for fold_range which work on a pair at a time.
// Cast subrange INDEX of INNER into OUTER's type and store it in R;
// sets R to varying when the pair cannot be represented exactly.
2902 void
2903 operator_cast::fold_pair (irange &r, unsigned index,
2904 const irange &inner,
2905 const irange &outer) const
2907 tree inner_type = inner.type ();
2908 tree outer_type = outer.type ();
2909 signop inner_sign = TYPE_SIGN (inner_type);
2910 unsigned outer_prec = TYPE_PRECISION (outer_type);
2912 // check to see if casting from INNER to OUTER is a conversion that
2913 // fits in the resulting OUTER type.
2914 wide_int inner_lb = inner.lower_bound (index);
2915 wide_int inner_ub = inner.upper_bound (index);
2916 if (truncating_cast_p (inner, outer))
2918 // We may be able to accommodate a truncating cast if the
2919 // resulting range can be represented in the target type...
// If the span UB-LB needs more than OUTER_PREC bits, the truncated
// range wraps and nothing precise can be said.
2920 if (wi::rshift (wi::sub (inner_ub, inner_lb),
2921 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
2922 inner_sign) != 0)
2924 r.set_varying (outer_type);
2925 return;
2928 // ...but we must still verify that the final range fits in the
2929 // domain. This catches -fstrict-enum restrictions where the domain
2930 // range is smaller than what fits in the underlying type.
2931 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
2932 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
2933 if (inside_domain_p (min, max, outer))
2934 create_possibly_reversed_range (r, outer_type, min, max);
2935 else
2936 r.set_varying (outer_type);
// Fold a cast of INNER to OUTER's type into R by casting each
// subrange pair separately and unioning the results.
2940 bool
2941 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
2942 const irange &inner,
2943 const irange &outer,
2944 relation_trio) const
2946 if (empty_range_varying (r, type, inner, outer))
2947 return true;
2949 gcc_checking_assert (outer.varying_p ());
2950 gcc_checking_assert (inner.num_pairs () > 0);
2952 // Avoid a temporary by folding the first pair directly into the result.
2953 fold_pair (r, 0, inner, outer);
2955 // Then process any additional pairs by unioning with their results.
2956 for (unsigned x = 1; x < inner.num_pairs (); ++x)
2958 int_range_max tmp;
2959 fold_pair (tmp, x, inner, outer);
2960 r.union_ (tmp);
// Once varying, further pairs cannot change anything.
2961 if (r.varying_p ())
2962 return true;
2965 update_bitmask (r, inner, outer);
2966 return true;
// Refine R's bitmask using the known-bits machinery for conversions.
2969 void
2970 operator_cast::update_bitmask (irange &r, const irange &lh,
2971 const irange &rh) const
2973 update_known_bitmask (r, CONVERT_EXPR, lh, rh);
// Compute the range of op1 from LHS = (cast) op1.  OP2 carries op1's
// type and any range already known for it; the result is always
// intersected with OP2 before returning.
2976 bool
2977 operator_cast::op1_range (irange &r, tree type,
2978 const irange &lhs,
2979 const irange &op2,
2980 relation_trio) const
2982 if (lhs.undefined_p ())
2983 return false;
2984 tree lhs_type = lhs.type ();
2985 gcc_checking_assert (types_compatible_p (op2.type(), type));
2987 // If we are calculating a pointer, shortcut to what we really care about.
2988 if (POINTER_TYPE_P (type))
2990 // Conversion from other pointers or a constant (including 0/NULL)
2991 // are straightforward.
2992 if (POINTER_TYPE_P (lhs.type ())
2993 || (lhs.singleton_p ()
2994 && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type)))
2996 r = lhs;
2997 range_cast (r, type);
2999 else
3001 // If the LHS is not a pointer nor a singleton, then it is
3002 // either VARYING or non-zero.
3003 if (!lhs.undefined_p () && !contains_zero_p (lhs))
3004 r.set_nonzero (type);
3005 else
3006 r.set_varying (type);
3008 r.intersect (op2);
3009 return true;
// A truncating cast discarded op1's upper bits, so each LHS value
// corresponds to many op1 values; rebuild them by unioning the
// zero-extended LHS with every possible upper-bit pattern.
3012 if (truncating_cast_p (op2, lhs))
3014 if (lhs.varying_p ())
3015 r.set_varying (type);
3016 else
3018 // We want to insert the LHS as an unsigned value since it
3019 // would not trigger the signed bit of the larger type.
3020 int_range_max converted_lhs = lhs;
3021 range_cast (converted_lhs, unsigned_type_for (lhs_type));
3022 range_cast (converted_lhs, type);
3023 // Start by building the positive signed outer range for the type.
3024 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
3025 TYPE_PRECISION (type));
3026 create_possibly_reversed_range (r, type, lim,
3027 wi::max_value (TYPE_PRECISION (type),
3028 SIGNED));
3029 // For the signed part, we need to simply union the 2 ranges now.
3030 r.union_ (converted_lhs);
3032 // Create maximal negative number outside of LHS bits.
3033 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
3034 TYPE_PRECISION (type));
3035 // Add this to the unsigned LHS range(s).
3036 int_range_max lim_range (type, lim, lim);
3037 int_range_max lhs_neg;
3038 range_op_handler (PLUS_EXPR).fold_range (lhs_neg, type,
3039 converted_lhs, lim_range);
3040 // lhs_neg now has all the negative versions of the LHS.
3041 // Now union in all the values from SIGNED MIN (0x80000) to
3042 // lim-1 in order to fill in all the ranges with the upper
3043 // bits set.
3045 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
3046 // we don't need to create a range from min to lim-1
3047 // calculate neg range traps trying to create [lim, lim - 1].
3048 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
3049 if (lim != min_val)
3051 int_range_max neg (type,
3052 wi::min_value (TYPE_PRECISION (type),
3053 SIGNED),
3054 lim - 1);
3055 lhs_neg.union_ (neg);
3057 // And finally, munge the signed and unsigned portions.
3058 r.union_ (lhs_neg);
3060 // And intersect with any known value passed in the extra operand.
3061 r.intersect (op2);
3062 return true;
// Non-truncating cast: op1's range is the LHS constrained to what
// a cast of op1's full type could have produced.
3065 int_range_max tmp;
3066 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
3067 tmp = lhs;
3068 else
3070 // The cast is not truncating, and the range is restricted to
3071 // the range of the RHS by this assignment.
3073 // Cast the range of the RHS to the type of the LHS.
3074 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
3075 // Intersect this with the LHS range will produce the range,
3076 // which will be cast to the RHS type before returning.
3077 tmp.intersect (lhs);
3080 // Cast the calculated range to the type of the RHS.
3081 fold_range (r, type, tmp, int_range<1> (type));
3082 return true;
// Range operator for boolean logical AND (TRUTH_AND style semantics
// over boolean ranges): folds to false/true/[false,true] and derives
// operand ranges from the LHS.
3086 class operator_logical_and : public range_operator
3088 using range_operator::fold_range;
3089 using range_operator::op1_range;
3090 using range_operator::op2_range;
3091 public:
3092 virtual bool fold_range (irange &r, tree type,
3093 const irange &lh,
3094 const irange &rh,
3095 relation_trio rel = TRIO_VARYING) const;
3096 virtual bool op1_range (irange &r, tree type,
3097 const irange &lhs,
3098 const irange &op2,
3099 relation_trio rel = TRIO_VARYING) const;
3100 virtual bool op2_range (irange &r, tree type,
3101 const irange &lhs,
3102 const irange &op1,
3103 relation_trio rel = TRIO_VARYING) const;
3104 // Check compatibility of all operands.
3105 bool operand_check_p (tree t1, tree t2, tree t3) const final override
3106 { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
3107 } op_logical_and;
// Fold LH && RH into R: false if either side is known zero, true if
// neither side can be zero, otherwise [false, true].
3109 bool
3110 operator_logical_and::fold_range (irange &r, tree type,
3111 const irange &lh,
3112 const irange &rh,
3113 relation_trio) const
3115 if (empty_range_varying (r, type, lh, rh))
3116 return true;
3118 // Precision of LHS and both operands must match.
3119 if (TYPE_PRECISION (lh.type ()) != TYPE_PRECISION (type)
3120 || TYPE_PRECISION (type) != TYPE_PRECISION (rh.type ()))
3121 return false;
3123 // 0 && anything is 0.
3124 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
3125 || (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
3126 r = range_false (type);
3127 else if (contains_zero_p (lh) || contains_zero_p (rh))
3128 // To reach this point, there must be a logical 1 on each side, and
3129 // the only remaining question is whether there is a zero or not.
3130 r = range_true_and_false (type);
3131 else
3132 r = range_true (type);
3133 return true;
// Compute op1 from the LHS of an AND: a true result forces op1 true;
// anything else leaves op1 unconstrained.
3136 bool
3137 operator_logical_and::op1_range (irange &r, tree type,
3138 const irange &lhs,
3139 const irange &op2 ATTRIBUTE_UNUSED,
3140 relation_trio) const
3142 switch (get_bool_state (r, lhs, type))
3144 case BRS_TRUE:
3145 // A true result means both sides of the AND must be true.
3146 r = range_true (type);
3147 break;
3148 default:
3149 // Any other result means only one side has to be false, the
3150 // other side can be anything. So we cannot be sure of any
3151 // result here.
3152 r = range_true_and_false (type);
3153 break;
3155 return true;
// AND is symmetric, so op2 is derived exactly like op1.
3158 bool
3159 operator_logical_and::op2_range (irange &r, tree type,
3160 const irange &lhs,
3161 const irange &op1,
3162 relation_trio) const
3164 return operator_logical_and::op1_range (r, type, lhs, op1);
// Refine R's bitmask using the known-bits machinery for BIT_AND_EXPR.
3168 void
3169 operator_bitwise_and::update_bitmask (irange &r, const irange &lh,
3170 const irange &rh) const
3172 update_known_bitmask (r, BIT_AND_EXPR, lh, rh);
3175 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3176 // by considering the number of leading redundant sign bit copies.
3177 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3178 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
// Returns TRUE and sets R when at least one redundant sign bit copy
// survives the operation; returns FALSE otherwise.
3179 static bool
3180 wi_optimize_signed_bitwise_op (irange &r, tree type,
3181 const wide_int &lh_lb, const wide_int &lh_ub,
3182 const wide_int &rh_lb, const wide_int &rh_ub)
3184 int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
3185 int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
3186 int new_clrsb = MIN (lh_clrsb, rh_clrsb);
3187 if (new_clrsb == 0)
3188 return false;
3189 int type_prec = TYPE_PRECISION (type);
// RPREC bits may vary; everything above them is a sign copy, so the
// result lies in [1...1000..0, 0...0111..1].
3190 int rprec = (type_prec - new_clrsb) - 1;
3191 value_range_with_overflow (r, type,
3192 wi::mask (rprec, true, type_prec),
3193 wi::mask (rprec, false, type_prec));
3194 return true;
3197 // An AND of 8,16, 32 or 64 bits can produce a partial equivalence between
3198 // the LHS and op1.
3200 relation_kind
3201 operator_bitwise_and::lhs_op1_relation (const irange &lhs,
3202 const irange &op1,
3203 const irange &op2,
3204 relation_kind) const
3206 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
3207 return VREL_VARYING;
// Only a constant mask can establish a partial equivalence.
3208 if (!op2.singleton_p ())
3209 return VREL_VARYING;
3210 // if val == 0xff or 0xFFFF OR 0Xffffffff OR 0Xffffffffffffffff, return TRUE
3211 int prec1 = TYPE_PRECISION (op1.type ());
3212 int prec2 = TYPE_PRECISION (op2.type ());
3213 int mask_prec = 0;
3214 wide_int mask = op2.lower_bound ();
3215 if (wi::eq_p (mask, wi::mask (8, false, prec2)))
3216 mask_prec = 8;
3217 else if (wi::eq_p (mask, wi::mask (16, false, prec2)))
3218 mask_prec = 16;
3219 else if (wi::eq_p (mask, wi::mask (32, false, prec2)))
3220 mask_prec = 32;
3221 else if (wi::eq_p (mask, wi::mask (64, false, prec2)))
3222 mask_prec = 64;
// A non-matching mask leaves mask_prec at 0, which bits_to_pe maps
// to no partial equivalence.
3223 return bits_to_pe (MIN (prec1, mask_prec));
3226 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3227 // possible. Basically, see if we can optimize:
3229 // [LB, UB] op Z
3230 // into:
3231 // [LB op Z, UB op Z]
3233 // If the optimization was successful, accumulate the range in R and
3234 // return TRUE.
3236 static bool
3237 wi_optimize_and_or (irange &r,
3238 enum tree_code code,
3239 tree type,
3240 const wide_int &lh_lb, const wide_int &lh_ub,
3241 const wide_int &rh_lb, const wide_int &rh_ub)
3243 // Calculate the singleton mask among the ranges, if any.
3244 wide_int lower_bound, upper_bound, mask;
3245 if (wi::eq_p (rh_lb, rh_ub))
3247 mask = rh_lb;
3248 lower_bound = lh_lb;
3249 upper_bound = lh_ub;
3251 else if (wi::eq_p (lh_lb, lh_ub))
3253 mask = lh_lb;
3254 lower_bound = rh_lb;
3255 upper_bound = rh_ub;
3257 else
3258 return false;
3260 // If Z is a constant which (for op | its bitwise not) has n
3261 // consecutive least significant bits cleared followed by m 1
3262 // consecutive bits set immediately above it and either
3263 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
3265 // The least significant n bits of all the values in the range are
3266 // cleared or set, the m bits above it are preserved and any bits
3267 // above these are required to be the same for all values in the
3268 // range.
3269 wide_int w = mask;
3270 int m = 0, n = 0;
// For IOR the analysis is performed on the complement of the mask.
3271 if (code == BIT_IOR_EXPR)
3272 w = ~w;
3273 if (wi::eq_p (w, 0))
3274 n = w.get_precision ();
3275 else
3277 n = wi::ctz (w);
3278 w = ~(w | wi::mask (n, false, w.get_precision ()));
3279 if (wi::eq_p (w, 0))
3280 m = w.get_precision () - n;
3281 else
3282 m = wi::ctz (w) - n;
// All bits above m+n must agree between LB and UB for the bound-wise
// application of the mask to be exact.
3284 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
3285 if ((new_mask & lower_bound) != (new_mask & upper_bound))
3286 return false;
3288 wide_int res_lb, res_ub;
3289 if (code == BIT_AND_EXPR)
3291 res_lb = wi::bit_and (lower_bound, mask);
3292 res_ub = wi::bit_and (upper_bound, mask);
3294 else if (code == BIT_IOR_EXPR)
3296 res_lb = wi::bit_or (lower_bound, mask);
3297 res_ub = wi::bit_or (upper_bound, mask);
3299 else
3300 gcc_unreachable ();
3301 value_range_with_overflow (r, type, res_lb, res_ub);
3303 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3304 if (code == BIT_IOR_EXPR && wi::ne_p (mask, 0))
3306 int_range<2> tmp;
3307 tmp.set_nonzero (type);
3308 r.intersect (tmp);
3310 return true;
3313 // For range [LB, UB] compute two wide_int bit masks.
3315 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3316 // for all numbers in the range the bit is 0, otherwise it might be 0
3317 // or 1.
3319 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3320 // for all numbers in the range the bit is 1, otherwise it might be 0
3321 // or 1.
3323 void
3324 wi_set_zero_nonzero_bits (tree type,
3325 const wide_int &lb, const wide_int &ub,
3326 wide_int &maybe_nonzero,
3327 wide_int &mustbe_nonzero)
3329 signop sign = TYPE_SIGN (type);
// A singleton's bits are all fully known.
3331 if (wi::eq_p (lb, ub))
3332 maybe_nonzero = mustbe_nonzero = lb;
// Bits are only deducible when the range doesn't cross the sign
// boundary (entirely non-negative or entirely negative).
3333 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
3335 wide_int xor_mask = lb ^ ub;
3336 maybe_nonzero = lb | ub;
3337 mustbe_nonzero = lb & ub;
3338 if (xor_mask != 0)
// All bits at and below the highest differing bit are unknown.
3340 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
3341 maybe_nonzero.get_precision ());
3342 maybe_nonzero = maybe_nonzero | mask;
3343 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
3346 else
// Sign-crossing range: nothing is known about any bit.
3348 maybe_nonzero = wi::minus_one (lb.get_precision ());
3349 mustbe_nonzero = wi::zero (lb.get_precision ());
// Fold [LH_LB, LH_UB] & [RH_LB, RH_UB] into R using known zero and
// nonzero bits of both operands, with several refinements for
// same-sign operand ranges.
3353 void
3354 operator_bitwise_and::wi_fold (irange &r, tree type,
3355 const wide_int &lh_lb,
3356 const wide_int &lh_ub,
3357 const wide_int &rh_lb,
3358 const wide_int &rh_ub) const
3360 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3361 return;
3363 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3364 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3365 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3366 maybe_nonzero_lh, mustbe_nonzero_lh);
3367 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3368 maybe_nonzero_rh, mustbe_nonzero_rh);
// The result's bits lie between "both must be one" and "both may
// be one".
3370 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
3371 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
3372 signop sign = TYPE_SIGN (type);
3373 unsigned prec = TYPE_PRECISION (type);
3374 // If both input ranges contain only negative values, we can
3375 // truncate the result range maximum to the minimum of the
3376 // input range maxima.
3377 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
3379 new_ub = wi::min (new_ub, lh_ub, sign);
3380 new_ub = wi::min (new_ub, rh_ub, sign);
3382 // If either input range contains only non-negative values
3383 // we can truncate the result range maximum to the respective
3384 // maximum of the input range.
3385 if (wi::ge_p (lh_lb, 0, sign))
3386 new_ub = wi::min (new_ub, lh_ub, sign);
3387 if (wi::ge_p (rh_lb, 0, sign))
3388 new_ub = wi::min (new_ub, rh_ub, sign);
3389 // PR68217: In case of signed & sign-bit-CST should
3390 // result in [-INF, 0] instead of [-INF, INF].
3391 if (wi::gt_p (new_lb, new_ub, sign))
3393 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
3394 if (sign == SIGNED
3395 && ((wi::eq_p (lh_lb, lh_ub)
3396 && !wi::cmps (lh_lb, sign_bit))
3397 || (wi::eq_p (rh_lb, rh_ub)
3398 && !wi::cmps (rh_lb, sign_bit))))
3400 new_lb = wi::min_value (prec, sign);
3401 new_ub = wi::zero (prec);
3404 // If the limits got swapped around, return varying.
3405 if (wi::gt_p (new_lb, new_ub,sign))
// For signed types, try the sign-bit-copies fallback before giving
// up entirely.
3407 if (sign == SIGNED
3408 && wi_optimize_signed_bitwise_op (r, type,
3409 lh_lb, lh_ub,
3410 rh_lb, rh_ub))
3411 return;
3412 r.set_varying (type);
3414 else
3415 value_range_with_overflow (r, type, new_lb, new_ub);
3418 static void
3419 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
3421 if (lhs.undefined_p () || contains_zero_p (lhs))
3422 r.set_varying (type);
3423 else
3424 r.set_nonzero (type);
3427 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3428 (otherwise return VAL). VAL and MASK must be zero-extended for
3429 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3430 (to transform signed values into unsigned) and at the end xor
3431 SGNBIT back. */
3433 wide_int
3434 masked_increment (const wide_int &val_in, const wide_int &mask,
3435 const wide_int &sgnbit, unsigned int prec)
3437 wide_int bit = wi::one (prec), res;
3438 unsigned int i;
3440 wide_int val = val_in ^ sgnbit;
3441 for (i = 0; i < prec; i++, bit += bit)
3443 res = mask;
3444 if ((res & bit) == 0)
3445 continue;
3446 res = bit - 1;
3447 res = wi::bit_and_not (val + bit, res);
3448 res &= mask;
3449 if (wi::gtu_p (res, val))
3450 return res ^ sgnbit;
3452 return val ^ sgnbit;
// Given LHS = X & MASK with OP2 the (ideally singleton) MASK, compute
// in R the possible range of X over the single subrange LHS.  Both
// ends of LHS are solved independently and the results intersected.
3455 // This was shamelessly stolen from register_edge_assert_for_2 and
3456 // adjusted to work with iranges.
3458 void
3459 operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
3460 const irange &lhs,
3461 const irange &op2) const
// Without a singleton mask, the best we can do is a nonzero/varying
// guess from the LHS.
3463 if (!op2.singleton_p ())
3465 set_nonzero_range_from_mask (r, type, lhs);
3466 return;
3468 unsigned int nprec = TYPE_PRECISION (type);
// CST2V is the constant mask; SGNBIT maps signed values onto
// unsigned order when the mask is negative.
3469 wide_int cst2v = op2.lower_bound ();
3470 bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
3471 wide_int sgnbit;
3472 if (cst2n)
3473 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3474 else
3475 sgnbit = wi::zero (nprec);
3477 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3479 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3480 // maximum unsigned value is ~0. For signed comparison, if CST2
3481 // doesn't have the most significant bit set, handle it similarly. If
3482 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3483 wide_int valv = lhs.lower_bound ();
3484 wide_int minv = valv & cst2v, maxv;
3485 bool we_know_nothing = false;
3486 if (minv != valv)
3488 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3489 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3490 if (minv == valv)
3492 // If we can't determine anything on this bound, fall
3493 // through and conservatively solve for the other end point.
3494 we_know_nothing = true;
3497 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3498 if (we_know_nothing)
3499 r.set_varying (type);
3500 else
3501 create_possibly_reversed_range (r, type, minv, maxv);
3503 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3505 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3506 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3507 // VAL2 where
3508 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3509 // as maximum.
3510 // For signed comparison, if CST2 doesn't have most significant bit
3511 // set, handle it similarly. If CST2 has MSB set, the maximum is
3512 // the same and minimum is INT_MIN.
3513 valv = lhs.upper_bound ();
3514 minv = valv & cst2v;
3515 if (minv == valv)
3516 maxv = valv;
3517 else
3519 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3520 if (maxv == valv)
3522 // If we couldn't determine anything on either bound, return
3523 // undefined.
3524 if (we_know_nothing)
3525 r.set_undefined ();
3526 return;
3528 maxv -= 1;
3530 maxv |= ~cst2v;
3531 minv = sgnbit;
// Combine the upper-bound solution with the lower-bound solution
// already stored in R.
3532 int_range<2> upper_bits;
3533 create_possibly_reversed_range (upper_bits, type, minv, maxv);
3534 r.intersect (upper_bits);
3537 bool
3538 operator_bitwise_and::op1_range (irange &r, tree type,
3539 const irange &lhs,
3540 const irange &op2,
3541 relation_trio) const
3543 if (lhs.undefined_p ())
3544 return false;
3545 if (types_compatible_p (type, boolean_type_node))
3546 return op_logical_and.op1_range (r, type, lhs, op2);
3548 r.set_undefined ();
3549 for (unsigned i = 0; i < lhs.num_pairs (); ++i)
3551 int_range_max chunk (lhs.type (),
3552 lhs.lower_bound (i),
3553 lhs.upper_bound (i));
3554 int_range_max res;
3555 simple_op1_range_solver (res, type, chunk, op2);
3556 r.union_ (res);
3558 if (r.undefined_p ())
3559 set_nonzero_range_from_mask (r, type, lhs);
3561 // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
3562 wide_int mask;
3563 if (lhs == op2 && lhs.singleton_p (mask))
3565 r.update_bitmask (irange_bitmask (mask, ~mask));
3566 return true;
3569 // For 0 = op1 & MASK, op1 is ~MASK.
3570 if (lhs.zero_p () && op2.singleton_p ())
3572 wide_int nz = wi::bit_not (op2.get_nonzero_bits ());
3573 int_range<2> tmp (type);
3574 tmp.set_nonzero_bits (nz);
3575 r.intersect (tmp);
3577 return true;
3580 bool
3581 operator_bitwise_and::op2_range (irange &r, tree type,
3582 const irange &lhs,
3583 const irange &op1,
3584 relation_trio) const
3586 return operator_bitwise_and::op1_range (r, type, lhs, op1);
// Operator for boolean TRUTH_OR_EXPR.  Folding unions the operand
// ranges; the reverse operations only learn something from a false
// result (both operands must then be false).
3590 class operator_logical_or : public range_operator
3592 using range_operator::fold_range;
3593 using range_operator::op1_range;
3594 using range_operator::op2_range;
3595 public:
3596 virtual bool fold_range (irange &r, tree type,
3597 const irange &lh,
3598 const irange &rh,
3599 relation_trio rel = TRIO_VARYING) const;
3600 virtual bool op1_range (irange &r, tree type,
3601 const irange &lhs,
3602 const irange &op2,
3603 relation_trio rel = TRIO_VARYING) const;
3604 virtual bool op2_range (irange &r, tree type,
3605 const irange &lhs,
3606 const irange &op1,
3607 relation_trio rel = TRIO_VARYING) const;
3608 // Check compatibility of all operands.
3609 bool operand_check_p (tree t1, tree t2, tree t3) const final override
3610 { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
3611 } op_logical_or;
3613 bool
3614 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3615 const irange &lh,
3616 const irange &rh,
3617 relation_trio) const
3619 if (empty_range_varying (r, type, lh, rh))
3620 return true;
3622 r = lh;
3623 r.union_ (rh);
3624 return true;
3627 bool
3628 operator_logical_or::op1_range (irange &r, tree type,
3629 const irange &lhs,
3630 const irange &op2 ATTRIBUTE_UNUSED,
3631 relation_trio) const
3633 switch (get_bool_state (r, lhs, type))
3635 case BRS_FALSE:
3636 // A false result means both sides of the OR must be false.
3637 r = range_false (type);
3638 break;
3639 default:
3640 // Any other result means only one side has to be true, the
3641 // other side can be anything. so we can't be sure of any result
3642 // here.
3643 r = range_true_and_false (type);
3644 break;
3646 return true;
3649 bool
3650 operator_logical_or::op2_range (irange &r, tree type,
3651 const irange &lhs,
3652 const irange &op1,
3653 relation_trio) const
3655 return operator_logical_or::op1_range (r, type, lhs, op1);
3659 void
3660 operator_bitwise_or::update_bitmask (irange &r, const irange &lh,
3661 const irange &rh) const
3663 update_known_bitmask (r, BIT_IOR_EXPR, lh, rh);
3666 void
3667 operator_bitwise_or::wi_fold (irange &r, tree type,
3668 const wide_int &lh_lb,
3669 const wide_int &lh_ub,
3670 const wide_int &rh_lb,
3671 const wide_int &rh_ub) const
3673 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3674 return;
3676 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3677 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3678 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3679 maybe_nonzero_lh, mustbe_nonzero_lh);
3680 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3681 maybe_nonzero_rh, mustbe_nonzero_rh);
3682 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
3683 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
3684 signop sign = TYPE_SIGN (type);
3685 // If the input ranges contain only positive values we can
3686 // truncate the minimum of the result range to the maximum
3687 // of the input range minima.
3688 if (wi::ge_p (lh_lb, 0, sign)
3689 && wi::ge_p (rh_lb, 0, sign))
3691 new_lb = wi::max (new_lb, lh_lb, sign);
3692 new_lb = wi::max (new_lb, rh_lb, sign);
3694 // If either input range contains only negative values
3695 // we can truncate the minimum of the result range to the
3696 // respective minimum range.
3697 if (wi::lt_p (lh_ub, 0, sign))
3698 new_lb = wi::max (new_lb, lh_lb, sign);
3699 if (wi::lt_p (rh_ub, 0, sign))
3700 new_lb = wi::max (new_lb, rh_lb, sign);
3701 // If the limits got swapped around, return a conservative range.
3702 if (wi::gt_p (new_lb, new_ub, sign))
3704 // Make sure that nonzero|X is nonzero.
3705 if (wi::gt_p (lh_lb, 0, sign)
3706 || wi::gt_p (rh_lb, 0, sign)
3707 || wi::lt_p (lh_ub, 0, sign)
3708 || wi::lt_p (rh_ub, 0, sign))
3709 r.set_nonzero (type);
3710 else if (sign == SIGNED
3711 && wi_optimize_signed_bitwise_op (r, type,
3712 lh_lb, lh_ub,
3713 rh_lb, rh_ub))
3714 return;
3715 else
3716 r.set_varying (type);
3717 return;
3719 value_range_with_overflow (r, type, new_lb, new_ub);
3722 bool
3723 operator_bitwise_or::op1_range (irange &r, tree type,
3724 const irange &lhs,
3725 const irange &op2,
3726 relation_trio) const
3728 if (lhs.undefined_p ())
3729 return false;
3730 // If this is really a logical wi_fold, call that.
3731 if (types_compatible_p (type, boolean_type_node))
3732 return op_logical_or.op1_range (r, type, lhs, op2);
3734 if (lhs.zero_p ())
3736 r.set_zero (type);
3737 return true;
3739 r.set_varying (type);
3740 return true;
3743 bool
3744 operator_bitwise_or::op2_range (irange &r, tree type,
3745 const irange &lhs,
3746 const irange &op1,
3747 relation_trio) const
3749 return operator_bitwise_or::op1_range (r, type, lhs, op1);
3752 void
3753 operator_bitwise_xor::update_bitmask (irange &r, const irange &lh,
3754 const irange &rh) const
3756 update_known_bitmask (r, BIT_XOR_EXPR, lh, rh);
3759 void
3760 operator_bitwise_xor::wi_fold (irange &r, tree type,
3761 const wide_int &lh_lb,
3762 const wide_int &lh_ub,
3763 const wide_int &rh_lb,
3764 const wide_int &rh_ub) const
3766 signop sign = TYPE_SIGN (type);
3767 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3768 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3769 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3770 maybe_nonzero_lh, mustbe_nonzero_lh);
3771 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3772 maybe_nonzero_rh, mustbe_nonzero_rh);
3774 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
3775 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
3776 wide_int result_one_bits
3777 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
3778 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
3779 wide_int new_ub = ~result_zero_bits;
3780 wide_int new_lb = result_one_bits;
3782 // If the range has all positive or all negative values, the result
3783 // is better than VARYING.
3784 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
3785 value_range_with_overflow (r, type, new_lb, new_ub);
3786 else if (sign == SIGNED
3787 && wi_optimize_signed_bitwise_op (r, type,
3788 lh_lb, lh_ub,
3789 rh_lb, rh_ub))
3790 ; /* Do nothing. */
3791 else
3792 r.set_varying (type);
3794 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3795 if (wi::lt_p (lh_ub, rh_lb, sign)
3796 || wi::lt_p (rh_ub, lh_lb, sign)
3797 || wi::ne_p (result_one_bits, 0))
3799 int_range<2> tmp;
3800 tmp.set_nonzero (type);
3801 r.intersect (tmp);
3805 bool
3806 operator_bitwise_xor::op1_op2_relation_effect (irange &lhs_range,
3807 tree type,
3808 const irange &,
3809 const irange &,
3810 relation_kind rel) const
3812 if (rel == VREL_VARYING)
3813 return false;
3815 int_range<2> rel_range;
3817 switch (rel)
3819 case VREL_EQ:
3820 rel_range.set_zero (type);
3821 break;
3822 case VREL_NE:
3823 rel_range.set_nonzero (type);
3824 break;
3825 default:
3826 return false;
3829 lhs_range.intersect (rel_range);
3830 return true;
3833 bool
3834 operator_bitwise_xor::op1_range (irange &r, tree type,
3835 const irange &lhs,
3836 const irange &op2,
3837 relation_trio) const
3839 if (lhs.undefined_p () || lhs.varying_p ())
3841 r = lhs;
3842 return true;
3844 if (types_compatible_p (type, boolean_type_node))
3846 switch (get_bool_state (r, lhs, type))
3848 case BRS_TRUE:
3849 if (op2.varying_p ())
3850 r.set_varying (type);
3851 else if (op2.zero_p ())
3852 r = range_true (type);
3853 // See get_bool_state for the rationale
3854 else if (op2.undefined_p () || contains_zero_p (op2))
3855 r = range_true_and_false (type);
3856 else
3857 r = range_false (type);
3858 break;
3859 case BRS_FALSE:
3860 r = op2;
3861 break;
3862 default:
3863 break;
3865 return true;
3867 r.set_varying (type);
3868 return true;
3871 bool
3872 operator_bitwise_xor::op2_range (irange &r, tree type,
3873 const irange &lhs,
3874 const irange &op1,
3875 relation_trio) const
3877 return operator_bitwise_xor::op1_range (r, type, lhs, op1);
// Operator for TRUNC_MOD_EXPR.  Folding uses the fact that
// |A % B| < |B| and that the result has the sign of A; the reverse
// operations recover bounds on A and B from a remainder bounded away
// from zero (PR 91029).
3880 class operator_trunc_mod : public range_operator
3882 using range_operator::op1_range;
3883 using range_operator::op2_range;
3884 public:
3885 virtual void wi_fold (irange &r, tree type,
3886 const wide_int &lh_lb,
3887 const wide_int &lh_ub,
3888 const wide_int &rh_lb,
3889 const wide_int &rh_ub) const;
3890 virtual bool op1_range (irange &r, tree type,
3891 const irange &lhs,
3892 const irange &op2,
3893 relation_trio) const;
3894 virtual bool op2_range (irange &r, tree type,
3895 const irange &lhs,
3896 const irange &op1,
3897 relation_trio) const;
3898 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
3899 { update_known_bitmask (r, TRUNC_MOD_EXPR, lh, rh); }
3900 } op_trunc_mod;
// Fold [LH_LB, LH_UB] % [RH_LB, RH_UB] of TYPE into R.
3902 void
3903 operator_trunc_mod::wi_fold (irange &r, tree type,
3904 const wide_int &lh_lb,
3905 const wide_int &lh_ub,
3906 const wide_int &rh_lb,
3907 const wide_int &rh_ub) const
3909 wide_int new_lb, new_ub, tmp;
3910 signop sign = TYPE_SIGN (type);
3911 unsigned prec = TYPE_PRECISION (type);
3913 // Mod 0 is undefined.
3914 if (wi_zero_p (type, rh_lb, rh_ub))
3916 r.set_undefined ();
3917 return;
3920 // Check for constant and try to fold.
3921 if (lh_lb == lh_ub && rh_lb == rh_ub)
3923 wi::overflow_type ov = wi::OVF_NONE;
3924 tmp = wi::mod_trunc (lh_lb, rh_lb, sign, &ov);
3925 if (ov == wi::OVF_NONE)
3927 r = int_range<2> (type, tmp, tmp);
3928 return;
3932 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
3933 new_ub = rh_ub - 1;
3934 if (sign == SIGNED)
// A negative divisor B contributes |B| - 1 == -1 - B to the maximum
// possible remainder magnitude.
3936 tmp = -1 - rh_lb;
3937 new_ub = wi::smax (new_ub, tmp);
3940 if (sign == UNSIGNED)
3941 new_lb = wi::zero (prec);
3942 else
// Signed: the remainder can be as low as -(|B| - 1), but no lower
// than A's minimum (clamped at zero when A is entirely positive).
3944 new_lb = -new_ub;
3945 tmp = lh_lb;
3946 if (wi::gts_p (tmp, 0))
3947 tmp = wi::zero (prec);
3948 new_lb = wi::smax (new_lb, tmp);
// The remainder can be no larger than A's maximum (clamped at zero
// when A is entirely negative for signed types).
3950 tmp = lh_ub;
3951 if (sign == SIGNED && wi::neg_p (tmp))
3952 tmp = wi::zero (prec);
3953 new_ub = wi::min (new_ub, tmp, sign);
3955 value_range_with_overflow (r, type, new_lb, new_ub);
3958 bool
3959 operator_trunc_mod::op1_range (irange &r, tree type,
3960 const irange &lhs,
3961 const irange &,
3962 relation_trio) const
3964 if (lhs.undefined_p ())
3965 return false;
3966 // PR 91029.
3967 signop sign = TYPE_SIGN (type);
3968 unsigned prec = TYPE_PRECISION (type);
3969 // (a % b) >= x && x > 0 , then a >= x.
3970 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3972 r = value_range (type, lhs.lower_bound (), wi::max_value (prec, sign));
3973 return true;
3975 // (a % b) <= x && x < 0 , then a <= x.
3976 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3978 r = value_range (type, wi::min_value (prec, sign), lhs.upper_bound ());
3979 return true;
3981 return false;
// Compute the range of the divisor B from LHS = A % B.  Because
// |A % B| < |B|, a remainder bounded away from zero forces |B| to
// exceed that bound.
3984 bool
3985 operator_trunc_mod::op2_range (irange &r, tree type,
3986 const irange &lhs,
3987 const irange &,
3988 relation_trio) const
3990 if (lhs.undefined_p ())
3991 return false;
3992 // PR 91029.
3993 signop sign = TYPE_SIGN (type);
3994 unsigned prec = TYPE_PRECISION (type);
3995 // (a % b) >= x && x > 0 , then b is in ~[-x, x] for signed
3996 // or b > x for unsigned.
3997 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3999 if (sign == SIGNED)
4000 r = value_range (type, wi::neg (lhs.lower_bound ()),
4001 lhs.lower_bound (), VR_ANTI_RANGE)
wait
// Operator for TRUTH_NOT_EXPR: logical negation of a boolean range.
4024 class operator_logical_not : public range_operator
4026 using range_operator::fold_range;
4027 using range_operator::op1_range;
4028 public:
4029 virtual bool fold_range (irange &r, tree type,
4030 const irange &lh,
4031 const irange &rh,
4032 relation_trio rel = TRIO_VARYING) const;
4033 virtual bool op1_range (irange &r, tree type,
4034 const irange &lhs,
4035 const irange &op2,
4036 relation_trio rel = TRIO_VARYING) const;
4037 // Check compatibility of LHS and op1.
4038 bool operand_check_p (tree t1, tree t2, tree) const final override
4039 { return range_compatible_p (t1, t2); }
4040 } op_logical_not;
4042 // Folding a logical NOT, oddly enough, involves doing nothing on the
4043 // forward pass through. During the initial walk backwards, the
4044 // logical NOT reversed the desired outcome on the way back, so on the
4045 // way forward all we do is pass the range forward.
4047 // b_2 = x_1 < 20
4048 // b_3 = !b_2
4049 // if (b_3)
4050 // to determine the TRUE branch, walking backward
4051 // if (b_3) if ([1,1])
4052 // b_3 = !b_2 [1,1] = ![0,0]
4053 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
4054 // which is the result we are looking for.. so.. pass it through.
4056 bool
4057 operator_logical_not::fold_range (irange &r, tree type,
4058 const irange &lh,
4059 const irange &rh ATTRIBUTE_UNUSED,
4060 relation_trio) const
4062 if (empty_range_varying (r, type, lh, rh))
4063 return true;
4065 r = lh;
4066 if (!lh.varying_p () && !lh.undefined_p ())
4067 r.invert ();
4069 return true;
4072 bool
4073 operator_logical_not::op1_range (irange &r,
4074 tree type,
4075 const irange &lhs,
4076 const irange &op2,
4077 relation_trio) const
4079 // Logical NOT is involutary...do it again.
4080 return fold_range (r, type, lhs, op2);
4083 bool
4084 operator_bitwise_not::fold_range (irange &r, tree type,
4085 const irange &lh,
4086 const irange &rh,
4087 relation_trio) const
4089 if (empty_range_varying (r, type, lh, rh))
4090 return true;
4092 if (types_compatible_p (type, boolean_type_node))
4093 return op_logical_not.fold_range (r, type, lh, rh);
4095 // ~X is simply -1 - X.
4096 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
4097 wi::minus_one (TYPE_PRECISION (type)));
4098 return range_op_handler (MINUS_EXPR).fold_range (r, type, minusone, lh);
4101 bool
4102 operator_bitwise_not::op1_range (irange &r, tree type,
4103 const irange &lhs,
4104 const irange &op2,
4105 relation_trio) const
4107 if (lhs.undefined_p ())
4108 return false;
4109 if (types_compatible_p (type, boolean_type_node))
4110 return op_logical_not.op1_range (r, type, lhs, op2);
4112 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
4113 return fold_range (r, type, lhs, op2);
4116 void
4117 operator_bitwise_not::update_bitmask (irange &r, const irange &lh,
4118 const irange &rh) const
4120 update_known_bitmask (r, BIT_NOT_EXPR, lh, rh);
4124 bool
4125 operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4126 const irange &lh,
4127 const irange &rh ATTRIBUTE_UNUSED,
4128 relation_trio) const
4130 r = lh;
4131 return true;
4135 // Determine if there is a relationship between LHS and OP1.
4137 relation_kind
4138 operator_identity::lhs_op1_relation (const irange &lhs,
4139 const irange &op1 ATTRIBUTE_UNUSED,
4140 const irange &op2 ATTRIBUTE_UNUSED,
4141 relation_kind) const
4143 if (lhs.undefined_p ())
4144 return VREL_VARYING;
4145 // Simply a copy, so they are equivalent.
4146 return VREL_EQ;
4149 bool
4150 operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4151 const irange &lh,
4152 const irange &rh ATTRIBUTE_UNUSED,
4153 relation_trio) const
4155 r = lh;
4156 return true;
4159 bool
4160 operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
4161 const irange &lhs,
4162 const irange &op2 ATTRIBUTE_UNUSED,
4163 relation_trio) const
4165 r = lhs;
4166 return true;
// Fallback operator for tree codes with no useful range semantics;
// folding always produces VARYING.
4170 class operator_unknown : public range_operator
4172 using range_operator::fold_range;
4173 public:
4174 virtual bool fold_range (irange &r, tree type,
4175 const irange &op1,
4176 const irange &op2,
4177 relation_trio rel = TRIO_VARYING) const;
4178 } op_unknown;
4180 bool
4181 operator_unknown::fold_range (irange &r, tree type,
4182 const irange &lh ATTRIBUTE_UNUSED,
4183 const irange &rh ATTRIBUTE_UNUSED,
4184 relation_trio) const
4186 r.set_varying (type);
4187 return true;
// Fold ABS ([LH_LB, LH_UB]) of TYPE into R, carefully handling the
// unrepresentable ABS (TYPE_MIN_VALUE) corner for signed types.
4191 void
4192 operator_abs::wi_fold (irange &r, tree type,
4193 const wide_int &lh_lb, const wide_int &lh_ub,
4194 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4195 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4197 wide_int min, max;
4198 signop sign = TYPE_SIGN (type);
4199 unsigned prec = TYPE_PRECISION (type);
4201 // Pass through LH for the easy cases.
4202 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
4204 r = int_range<1> (type, lh_lb, lh_ub);
4205 return;
4208 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4209 // a useful range.
4210 wide_int min_value = wi::min_value (prec, sign);
4211 wide_int max_value = wi::max_value (prec, sign);
4212 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
4214 r.set_varying (type);
4215 return;
4218 // ABS_EXPR may flip the range around, if the original range
4219 // included negative values.
4220 if (wi::eq_p (lh_lb, min_value))
4222 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4223 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4224 if (wi::eq_p (lh_ub, min_value))
4226 r = int_range<1> (type, min_value, min_value);
4227 return;
4229 min = max_value;
4231 else
4232 min = wi::abs (lh_lb);
4234 if (wi::eq_p (lh_ub, min_value))
4235 max = max_value;
4236 else
4237 max = wi::abs (lh_ub);
4239 // If the range contains zero then we know that the minimum value in the
4240 // range will be zero.
4241 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
4243 if (wi::gt_p (min, max, sign))
4244 max = min;
4245 min = wi::zero (prec);
4247 else
4249 // If the range was reversed, swap MIN and MAX.
4250 if (wi::gt_p (min, max, sign))
4251 std::swap (min, max);
4254 // If the new range has its limits swapped around (MIN > MAX), then
4255 // the operation caused one of them to wrap around. The only thing
4256 // we know is that the result is positive.
4257 if (wi::gt_p (min, max, sign))
4259 min = wi::zero (prec);
4260 max = max_value;
4262 r = int_range<1> (type, min, max);
4265 bool
4266 operator_abs::op1_range (irange &r, tree type,
4267 const irange &lhs,
4268 const irange &op2,
4269 relation_trio) const
4271 if (empty_range_varying (r, type, lhs, op2))
4272 return true;
4273 if (TYPE_UNSIGNED (type))
4275 r = lhs;
4276 return true;
4278 // Start with the positives because negatives are an impossible result.
4279 int_range_max positives = range_positives (type);
4280 positives.intersect (lhs);
4281 r = positives;
4282 // Then add the negative of each pair:
4283 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4284 for (unsigned i = 0; i < positives.num_pairs (); ++i)
4285 r.union_ (int_range<1> (type,
4286 -positives.upper_bound (i),
4287 -positives.lower_bound (i)));
4288 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4289 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4290 wide_int min_value = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4291 wide_int lb = lhs.lower_bound ();
4292 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lb, min_value))
4293 r.union_ (int_range<2> (type, lb, lb));
4294 return true;
4297 void
4298 operator_abs::update_bitmask (irange &r, const irange &lh,
4299 const irange &rh) const
4301 update_known_bitmask (r, ABS_EXPR, lh, rh);
// Operator for ABSU_EXPR: absolute value of a signed operand
// producing a result of the corresponding unsigned type.
4304 class operator_absu : public range_operator
4306 public:
4307 virtual void wi_fold (irange &r, tree type,
4308 const wide_int &lh_lb, const wide_int &lh_ub,
4309 const wide_int &rh_lb, const wide_int &rh_ub) const;
4310 virtual void update_bitmask (irange &r, const irange &lh,
4311 const irange &rh) const final override;
4312 } op_absu;
4314 void
4315 operator_absu::wi_fold (irange &r, tree type,
4316 const wide_int &lh_lb, const wide_int &lh_ub,
4317 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4318 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4320 wide_int new_lb, new_ub;
4322 // Pass through VR0 the easy cases.
4323 if (wi::ges_p (lh_lb, 0))
4325 new_lb = lh_lb;
4326 new_ub = lh_ub;
4328 else
4330 new_lb = wi::abs (lh_lb);
4331 new_ub = wi::abs (lh_ub);
4333 // If the range contains zero then we know that the minimum
4334 // value in the range will be zero.
4335 if (wi::ges_p (lh_ub, 0))
4337 if (wi::gtu_p (new_lb, new_ub))
4338 new_ub = new_lb;
4339 new_lb = wi::zero (TYPE_PRECISION (type));
4341 else
4342 std::swap (new_lb, new_ub);
4345 gcc_checking_assert (TYPE_UNSIGNED (type));
4346 r = int_range<1> (type, new_lb, new_ub);
4349 void
4350 operator_absu::update_bitmask (irange &r, const irange &lh,
4351 const irange &rh) const
4353 update_known_bitmask (r, ABSU_EXPR, lh, rh);
4357 bool
4358 operator_negate::fold_range (irange &r, tree type,
4359 const irange &lh,
4360 const irange &rh,
4361 relation_trio) const
4363 if (empty_range_varying (r, type, lh, rh))
4364 return true;
4365 // -X is simply 0 - X.
4366 return range_op_handler (MINUS_EXPR).fold_range (r, type,
4367 range_zero (type), lh);
4370 bool
4371 operator_negate::op1_range (irange &r, tree type,
4372 const irange &lhs,
4373 const irange &op2,
4374 relation_trio) const
4376 // NEGATE is involutory.
4377 return fold_range (r, type, lhs, op2);
4381 bool
4382 operator_addr_expr::fold_range (irange &r, tree type,
4383 const irange &lh,
4384 const irange &rh,
4385 relation_trio) const
4387 if (empty_range_varying (r, type, lh, rh))
4388 return true;
4390 // Return a non-null pointer of the LHS type (passed in op2).
4391 if (lh.zero_p ())
4392 r = range_zero (type);
4393 else if (lh.undefined_p () || contains_zero_p (lh))
4394 r.set_varying (type);
4395 else
4396 r.set_nonzero (type);
4397 return true;
4400 bool
4401 operator_addr_expr::op1_range (irange &r, tree type,
4402 const irange &lhs,
4403 const irange &op2,
4404 relation_trio) const
4406 if (empty_range_varying (r, type, lhs, op2))
4407 return true;
4409 // Return a non-null pointer of the LHS type (passed in op2), but only
4410 // if we cant overflow, eitherwise a no-zero offset could wrap to zero.
4411 // See PR 111009.
4412 if (!lhs.undefined_p () && !contains_zero_p (lhs) && TYPE_OVERFLOW_UNDEFINED (type))
4413 r.set_nonzero (type);
4414 else
4415 r.set_varying (type);
4416 return true;
4419 // Initialize any integral operators to the primary table
4421 void
4422 range_op_table::initialize_integral_ops ()
4424 set (TRUNC_DIV_EXPR, op_trunc_div);
4425 set (FLOOR_DIV_EXPR, op_floor_div);
4426 set (ROUND_DIV_EXPR, op_round_div);
4427 set (CEIL_DIV_EXPR, op_ceil_div);
4428 set (EXACT_DIV_EXPR, op_exact_div);
4429 set (LSHIFT_EXPR, op_lshift);
4430 set (RSHIFT_EXPR, op_rshift);
4431 set (TRUTH_AND_EXPR, op_logical_and);
4432 set (TRUTH_OR_EXPR, op_logical_or);
4433 set (TRUNC_MOD_EXPR, op_trunc_mod);
4434 set (TRUTH_NOT_EXPR, op_logical_not);
4435 set (IMAGPART_EXPR, op_unknown);
4436 set (REALPART_EXPR, op_unknown);
4437 set (ABSU_EXPR, op_absu);
4438 set (OP_WIDEN_MULT_SIGNED, op_widen_mult_signed);
4439 set (OP_WIDEN_MULT_UNSIGNED, op_widen_mult_unsigned);
4440 set (OP_WIDEN_PLUS_SIGNED, op_widen_plus_signed);
4441 set (OP_WIDEN_PLUS_UNSIGNED, op_widen_plus_unsigned);
4445 bool
4446 operator_plus::overflow_free_p (const irange &lh, const irange &rh,
4447 relation_trio) const
4449 if (lh.undefined_p () || rh.undefined_p ())
4450 return false;
4452 tree type = lh.type ();
4453 if (TYPE_OVERFLOW_UNDEFINED (type))
4454 return true;
4456 wi::overflow_type ovf;
4457 signop sgn = TYPE_SIGN (type);
4458 wide_int wmax0 = lh.upper_bound ();
4459 wide_int wmax1 = rh.upper_bound ();
4460 wi::add (wmax0, wmax1, sgn, &ovf);
4461 if (ovf != wi::OVF_NONE)
4462 return false;
4464 if (TYPE_UNSIGNED (type))
4465 return true;
4467 wide_int wmin0 = lh.lower_bound ();
4468 wide_int wmin1 = rh.lower_bound ();
4469 wi::add (wmin0, wmin1, sgn, &ovf);
4470 if (ovf != wi::OVF_NONE)
4471 return false;
4473 return true;
4476 bool
4477 operator_minus::overflow_free_p (const irange &lh, const irange &rh,
4478 relation_trio) const
4480 if (lh.undefined_p () || rh.undefined_p ())
4481 return false;
4483 tree type = lh.type ();
4484 if (TYPE_OVERFLOW_UNDEFINED (type))
4485 return true;
4487 wi::overflow_type ovf;
4488 signop sgn = TYPE_SIGN (type);
4489 wide_int wmin0 = lh.lower_bound ();
4490 wide_int wmax1 = rh.upper_bound ();
4491 wi::sub (wmin0, wmax1, sgn, &ovf);
4492 if (ovf != wi::OVF_NONE)
4493 return false;
4495 if (TYPE_UNSIGNED (type))
4496 return true;
4498 wide_int wmax0 = lh.upper_bound ();
4499 wide_int wmin1 = rh.lower_bound ();
4500 wi::sub (wmax0, wmin1, sgn, &ovf);
4501 if (ovf != wi::OVF_NONE)
4502 return false;
4504 return true;
4507 bool
4508 operator_mult::overflow_free_p (const irange &lh, const irange &rh,
4509 relation_trio) const
4511 if (lh.undefined_p () || rh.undefined_p ())
4512 return false;
4514 tree type = lh.type ();
4515 if (TYPE_OVERFLOW_UNDEFINED (type))
4516 return true;
4518 wi::overflow_type ovf;
4519 signop sgn = TYPE_SIGN (type);
4520 wide_int wmax0 = lh.upper_bound ();
4521 wide_int wmax1 = rh.upper_bound ();
4522 wi::mul (wmax0, wmax1, sgn, &ovf);
4523 if (ovf != wi::OVF_NONE)
4524 return false;
4526 if (TYPE_UNSIGNED (type))
4527 return true;
4529 wide_int wmin0 = lh.lower_bound ();
4530 wide_int wmin1 = rh.lower_bound ();
4531 wi::mul (wmin0, wmin1, sgn, &ovf);
4532 if (ovf != wi::OVF_NONE)
4533 return false;
4535 wi::mul (wmin0, wmax1, sgn, &ovf);
4536 if (ovf != wi::OVF_NONE)
4537 return false;
4539 wi::mul (wmax0, wmin1, sgn, &ovf);
4540 if (ovf != wi::OVF_NONE)
4541 return false;
4543 return true;
4546 #if CHECKING_P
4547 #include "selftest.h"
4549 namespace selftest
4551 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4552 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4553 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4554 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4555 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4556 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4558 static void
4559 range_op_cast_tests ()
4561 int_range<2> r0, r1, r2, rold;
4562 r0.set_varying (integer_type_node);
4563 wide_int maxint = r0.upper_bound ();
4565 // If a range is in any way outside of the range for the converted
4566 // to range, default to the range for the new type.
4567 r0.set_varying (short_integer_type_node);
4568 wide_int minshort = r0.lower_bound ();
4569 wide_int maxshort = r0.upper_bound ();
4570 if (TYPE_PRECISION (integer_type_node)
4571 > TYPE_PRECISION (short_integer_type_node))
4573 r1 = int_range<1> (integer_type_node,
4574 wi::zero (TYPE_PRECISION (integer_type_node)),
4575 maxint);
4576 range_cast (r1, short_integer_type_node);
4577 ASSERT_TRUE (r1.lower_bound () == minshort
4578 && r1.upper_bound() == maxshort);
4581 // (unsigned char)[-5,-1] => [251,255].
4582 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (-1));
4583 range_cast (r0, unsigned_char_type_node);
4584 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node,
4585 UCHAR (251), UCHAR (255)));
4586 range_cast (r0, signed_char_type_node);
4587 ASSERT_TRUE (r0 == rold);
4589 // (signed char)[15, 150] => [-128,-106][15,127].
4590 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (15), UCHAR (150));
4591 range_cast (r0, signed_char_type_node);
4592 r1 = int_range<1> (signed_char_type_node, SCHAR (15), SCHAR (127));
4593 r2 = int_range<1> (signed_char_type_node, SCHAR (-128), SCHAR (-106));
4594 r1.union_ (r2);
4595 ASSERT_TRUE (r1 == r0);
4596 range_cast (r0, unsigned_char_type_node);
4597 ASSERT_TRUE (r0 == rold);
4599 // (unsigned char)[-5, 5] => [0,5][251,255].
4600 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (5));
4601 range_cast (r0, unsigned_char_type_node);
4602 r1 = int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255));
4603 r2 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4604 r1.union_ (r2);
4605 ASSERT_TRUE (r0 == r1);
4606 range_cast (r0, signed_char_type_node);
4607 ASSERT_TRUE (r0 == rold);
4609 // (unsigned char)[-5,5] => [0,5][251,255].
4610 r0 = int_range<1> (integer_type_node, INT (-5), INT (5));
4611 range_cast (r0, unsigned_char_type_node);
4612 r1 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4613 r1.union_ (int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255)));
4614 ASSERT_TRUE (r0 == r1);
4616 // (unsigned char)[5U,1974U] => [0,255].
4617 r0 = int_range<1> (unsigned_type_node, UINT (5), UINT (1974));
4618 range_cast (r0, unsigned_char_type_node);
4619 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (255)));
4620 range_cast (r0, integer_type_node);
4621 // Going to a wider range should not sign extend.
4622 ASSERT_TRUE (r0 == int_range<1> (integer_type_node, INT (0), INT (255)));
4624 // (unsigned char)[-350,15] => [0,255].
4625 r0 = int_range<1> (integer_type_node, INT (-350), INT (15));
4626 range_cast (r0, unsigned_char_type_node);
4627 ASSERT_TRUE (r0 == (int_range<1>
4628 (unsigned_char_type_node,
4629 min_limit (unsigned_char_type_node),
4630 max_limit (unsigned_char_type_node))));
4632 // Casting [-120,20] from signed char to unsigned short.
4633 // => [0, 20][0xff88, 0xffff].
4634 r0 = int_range<1> (signed_char_type_node, SCHAR (-120), SCHAR (20));
4635 range_cast (r0, short_unsigned_type_node);
4636 r1 = int_range<1> (short_unsigned_type_node, UINT16 (0), UINT16 (20));
4637 r2 = int_range<1> (short_unsigned_type_node,
4638 UINT16 (0xff88), UINT16 (0xffff));
4639 r1.union_ (r2);
4640 ASSERT_TRUE (r0 == r1);
4641 // A truncating cast back to signed char will work because [-120, 20]
4642 // is representable in signed char.
4643 range_cast (r0, signed_char_type_node);
4644 ASSERT_TRUE (r0 == int_range<1> (signed_char_type_node,
4645 SCHAR (-120), SCHAR (20)));
4647 // unsigned char -> signed short
4648 // (signed short)[(unsigned char)25, (unsigned char)250]
4649 // => [(signed short)25, (signed short)250]
4650 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (25), UCHAR (250));
4651 range_cast (r0, short_integer_type_node);
4652 r1 = int_range<1> (short_integer_type_node, INT16 (25), INT16 (250));
4653 ASSERT_TRUE (r0 == r1);
4654 range_cast (r0, unsigned_char_type_node);
4655 ASSERT_TRUE (r0 == rold);
4657 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4658 r0 = int_range<1> (long_long_integer_type_node,
4659 min_limit (long_long_integer_type_node),
4660 max_limit (long_long_integer_type_node));
4661 range_cast (r0, short_unsigned_type_node);
4662 r1 = int_range<1> (short_unsigned_type_node,
4663 min_limit (short_unsigned_type_node),
4664 max_limit (short_unsigned_type_node));
4665 ASSERT_TRUE (r0 == r1);
4667 // Casting NONZERO to a narrower type will wrap/overflow so
4668 // it's just the entire range for the narrower type.
4670 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4671 // is outside of the range of a smaller range, return the full
4672 // smaller range.
4673 if (TYPE_PRECISION (integer_type_node)
4674 > TYPE_PRECISION (short_integer_type_node))
4676 r0 = range_nonzero (integer_type_node);
4677 range_cast (r0, short_integer_type_node);
4678 r1 = int_range<1> (short_integer_type_node,
4679 min_limit (short_integer_type_node),
4680 max_limit (short_integer_type_node));
4681 ASSERT_TRUE (r0 == r1);
4684 // Casting NONZERO from a narrower signed to a wider signed.
4686 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4687 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4688 r0 = range_nonzero (short_integer_type_node);
4689 range_cast (r0, integer_type_node);
4690 r1 = int_range<1> (integer_type_node, INT (-32768), INT (-1));
4691 r2 = int_range<1> (integer_type_node, INT (1), INT (32767));
4692 r1.union_ (r2);
4693 ASSERT_TRUE (r0 == r1);
4696 static void
4697 range_op_lshift_tests ()
4699 // Test that 0x808.... & 0x8.... still contains 0x8....
4700 // for a large set of numbers.
4702 int_range_max res;
4703 tree big_type = long_long_unsigned_type_node;
4704 unsigned big_prec = TYPE_PRECISION (big_type);
4705 // big_num = 0x808,0000,0000,0000
4706 wide_int big_num = wi::lshift (wi::uhwi (0x808, big_prec),
4707 wi::uhwi (48, big_prec));
4708 op_bitwise_and.fold_range (res, big_type,
4709 int_range <1> (big_type),
4710 int_range <1> (big_type, big_num, big_num));
4711 // val = 0x8,0000,0000,0000
4712 wide_int val = wi::lshift (wi::uhwi (8, big_prec),
4713 wi::uhwi (48, big_prec));
4714 ASSERT_TRUE (res.contains_p (val));
4717 if (TYPE_PRECISION (unsigned_type_node) > 31)
4719 // unsigned VARYING = op1 << 1 should be VARYING.
4720 int_range<2> lhs (unsigned_type_node);
4721 int_range<2> shift (unsigned_type_node, INT (1), INT (1));
4722 int_range_max op1;
4723 op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
4724 ASSERT_TRUE (op1.varying_p ());
4726 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4727 int_range<2> zero (unsigned_type_node, UINT (0), UINT (0));
4728 op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
4729 ASSERT_TRUE (op1.num_pairs () == 2);
4730 // Remove the [0,0] range.
4731 op1.intersect (zero);
4732 ASSERT_TRUE (op1.num_pairs () == 1);
4733 // op1 << 1 should be [0x8000,0x8000] << 1,
4734 // which should result in [0,0].
4735 int_range_max result;
4736 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4737 ASSERT_TRUE (result == zero);
4739 // signed VARYING = op1 << 1 should be VARYING.
4740 if (TYPE_PRECISION (integer_type_node) > 31)
4742 // unsigned VARYING = op1 << 1 should be VARYING.
4743 int_range<2> lhs (integer_type_node);
4744 int_range<2> shift (integer_type_node, INT (1), INT (1));
4745 int_range_max op1;
4746 op_lshift.op1_range (op1, integer_type_node, lhs, shift);
4747 ASSERT_TRUE (op1.varying_p ());
4749 // 0 = op1 << 1 should be [0,0], [0x8000000, 0x8000000].
4750 int_range<2> zero (integer_type_node, INT (0), INT (0));
4751 op_lshift.op1_range (op1, integer_type_node, zero, shift);
4752 ASSERT_TRUE (op1.num_pairs () == 2);
4753 // Remove the [0,0] range.
4754 op1.intersect (zero);
4755 ASSERT_TRUE (op1.num_pairs () == 1);
4756 // op1 << 1 should be [0x8000,0x8000] << 1,
4757 // which should result in [0,0].
4758 int_range_max result;
4759 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4760 ASSERT_TRUE (result == zero);
4764 static void
4765 range_op_rshift_tests ()
4767 // unsigned: [3, MAX] = OP1 >> 1
4769 int_range_max lhs (unsigned_type_node,
4770 UINT (3), max_limit (unsigned_type_node));
4771 int_range_max one (unsigned_type_node,
4772 wi::one (TYPE_PRECISION (unsigned_type_node)),
4773 wi::one (TYPE_PRECISION (unsigned_type_node)));
4774 int_range_max op1;
4775 op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
4776 ASSERT_FALSE (op1.contains_p (UINT (3)));
4779 // signed: [3, MAX] = OP1 >> 1
4781 int_range_max lhs (integer_type_node,
4782 INT (3), max_limit (integer_type_node));
4783 int_range_max one (integer_type_node, INT (1), INT (1));
4784 int_range_max op1;
4785 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4786 ASSERT_FALSE (op1.contains_p (INT (-2)));
4789 // This is impossible, so OP1 should be [].
4790 // signed: [MIN, MIN] = OP1 >> 1
4792 int_range_max lhs (integer_type_node,
4793 min_limit (integer_type_node),
4794 min_limit (integer_type_node));
4795 int_range_max one (integer_type_node, INT (1), INT (1));
4796 int_range_max op1;
4797 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4798 ASSERT_TRUE (op1.undefined_p ());
4801 // signed: ~[-1] = OP1 >> 31
4802 if (TYPE_PRECISION (integer_type_node) > 31)
4804 int_range_max lhs (integer_type_node, INT (-1), INT (-1), VR_ANTI_RANGE);
4805 int_range_max shift (integer_type_node, INT (31), INT (31));
4806 int_range_max op1;
4807 op_rshift.op1_range (op1, integer_type_node, lhs, shift);
4808 int_range_max negatives = range_negatives (integer_type_node);
4809 negatives.intersect (op1);
4810 ASSERT_TRUE (negatives.undefined_p ());
4814 static void
4815 range_op_bitwise_and_tests ()
4817 int_range_max res;
4818 wide_int min = min_limit (integer_type_node);
4819 wide_int max = max_limit (integer_type_node);
4820 wide_int tiny = wi::add (min, wi::one (TYPE_PRECISION (integer_type_node)));
4821 int_range_max i1 (integer_type_node, tiny, max);
4822 int_range_max i2 (integer_type_node, INT (255), INT (255));
4824 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4825 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4826 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4828 // VARYING = OP1 & 255: OP1 is VARYING
4829 i1 = int_range<1> (integer_type_node);
4830 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4831 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4833 // For 0 = x & MASK, x is ~MASK.
4835 int_range<2> zero (integer_type_node, INT (0), INT (0));
4836 int_range<2> mask = int_range<2> (integer_type_node, INT (7), INT (7));
4837 op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
4838 wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
4839 ASSERT_TRUE (res.get_nonzero_bits () == inv);
4842 // (NONZERO | X) is nonzero.
4843 i1.set_nonzero (integer_type_node);
4844 i2.set_varying (integer_type_node);
4845 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4846 ASSERT_TRUE (res.nonzero_p ());
4848 // (NEGATIVE | X) is nonzero.
4849 i1 = int_range<1> (integer_type_node, INT (-5), INT (-3));
4850 i2.set_varying (integer_type_node);
4851 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4852 ASSERT_FALSE (res.contains_p (INT (0)));
4855 static void
4856 range_relational_tests ()
4858 int_range<2> lhs (unsigned_char_type_node);
4859 int_range<2> op1 (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4860 int_range<2> op2 (unsigned_char_type_node, UCHAR (20), UCHAR (20));
4862 // Never wrapping additions mean LHS > OP1.
4863 relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4864 ASSERT_TRUE (code == VREL_GT);
4866 // Most wrapping additions mean nothing...
4867 op1 = int_range<2> (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4868 op2 = int_range<2> (unsigned_char_type_node, UCHAR (0), UCHAR (255));
4869 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4870 ASSERT_TRUE (code == VREL_VARYING);
4872 // However, always wrapping additions mean LHS < OP1.
4873 op1 = int_range<2> (unsigned_char_type_node, UCHAR (1), UCHAR (255));
4874 op2 = int_range<2> (unsigned_char_type_node, UCHAR (255), UCHAR (255));
4875 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4876 ASSERT_TRUE (code == VREL_LT);
4879 void
4880 range_op_tests ()
4882 range_op_rshift_tests ();
4883 range_op_lshift_tests ();
4884 range_op_bitwise_and_tests ();
4885 range_op_cast_tests ();
4886 range_relational_tests ();
4888 extern void range_op_float_tests ();
4889 range_op_float_tests ();
4892 } // namespace selftest
4894 #endif // CHECKING_P