1 /* Code for range operators.
2 Copyright (C) 2017-2023 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "insn-codes.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "cfganal.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
43 #include "tree-eh.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "wide-int.h"
47 #include "value-relation.h"
48 #include "range-op.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
52 // Instantiate the operators which apply to multiple types here.
54 operator_equal op_equal;
55 operator_not_equal op_not_equal;
56 operator_lt op_lt;
57 operator_le op_le;
58 operator_gt op_gt;
59 operator_ge op_ge;
60 operator_identity op_ident;
61 operator_cst op_cst;
62 operator_cast op_cast;
63 operator_plus op_plus;
64 operator_abs op_abs;
65 operator_minus op_minus;
66 operator_negate op_negate;
67 operator_mult op_mult;
68 operator_addr_expr op_addr;
69 operator_bitwise_not op_bitwise_not;
70 operator_bitwise_xor op_bitwise_xor;
71 operator_bitwise_and op_bitwise_and;
72 operator_bitwise_or op_bitwise_or;
73 operator_min op_min;
74 operator_max op_max;
76 // Instantiate a range operator table.
77 range_op_table operator_table;
79 // Invoke the initialization routines for each class of range.
81 range_op_table::range_op_table ()
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
87 set (EQ_EXPR, op_equal);
88 set (NE_EXPR, op_not_equal);
89 set (LT_EXPR, op_lt);
90 set (LE_EXPR, op_le);
91 set (GT_EXPR, op_gt);
92 set (GE_EXPR, op_ge);
93 set (SSA_NAME, op_ident);
94 set (PAREN_EXPR, op_ident);
95 set (OBJ_TYPE_REF, op_ident);
96 set (REAL_CST, op_cst);
97 set (INTEGER_CST, op_cst);
98 set (NOP_EXPR, op_cast);
99 set (CONVERT_EXPR, op_cast);
100 set (PLUS_EXPR, op_plus);
101 set (ABS_EXPR, op_abs);
102 set (MINUS_EXPR, op_minus);
103 set (NEGATE_EXPR, op_negate);
104 set (MULT_EXPR, op_mult);
106 // Occur in both integer and pointer tables, but currently share
107 // integral implementation.
108 set (ADDR_EXPR, op_addr);
109 set (BIT_NOT_EXPR, op_bitwise_not);
110 set (BIT_XOR_EXPR, op_bitwise_xor);
112 // These are in both integer and pointer tables, but pointer has a different
113 // implementation.
114 // If commented out, there is a hybrid version in range-op-ptr.cc which
115 // is used until there is a pointer range class. Then we can simply
116 // uncomment the operator here and use the unified version.
118 // set (BIT_AND_EXPR, op_bitwise_and);
119 // set (BIT_IOR_EXPR, op_bitwise_or);
120 // set (MIN_EXPR, op_min);
121 // set (MAX_EXPR, op_max);
124 // Instantiate a default range operator for opcodes with no entry.
126 range_operator default_operator;
128 // Create a default range_op_handler.
130 range_op_handler::range_op_handler ()
132 m_operator = &default_operator;
135 // Create a range_op_handler for CODE. Use a default operator if CODE
136 // does not have an entry.
138 range_op_handler::range_op_handler (unsigned code)
140 m_operator = operator_table[code];
141 if (!m_operator)
142 m_operator = &default_operator;
145 // Return TRUE if this handler has a non-default operator.
147 range_op_handler::operator bool () const
149 return m_operator != &default_operator;
152 // Return a pointer to the range operator associated with this handler.
153 // If it is a default operator, return NULL.
154 // This is the equivalent of indexing the range table.
156 range_operator *
157 range_op_handler::range_op () const
159 if (m_operator != &default_operator)
160 return m_operator;
161 return NULL;
164 // Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
165 // This is used to produce a unique value for each dispatch pattern. Shift
166 // values are based on the size of the m_discriminator field in value_range.h.
168 constexpr unsigned
169 dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
171 return ((lhs << 8) + (op1 << 4) + (op2));
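// As an illustration: if VR_IRANGE were 1 and VR_FRANGE were 2 (purely
// illustrative values; the real discriminators live in value_range.h), the
// pattern for an integral LHS with two float operands would encode as
// (1 << 8) + (2 << 4) + 2 == 0x122, which is what RO_IFF below captures.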
174 // These are the supported dispatch patterns. These map to the parameter list
175 // of the routines in range_operator. Note the last 3 characters are
176 // shorthand for the LHS, OP1, and OP2 range discriminator class.
178 const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
179 const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
180 const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
181 const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
182 const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
183 const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
185 // Return a dispatch value for parameter types LHS, OP1 and OP2.
187 unsigned
188 range_op_handler::dispatch_kind (const vrange &lhs, const vrange &op1,
189 const vrange& op2) const
191 return dispatch_trio (lhs.m_discriminator, op1.m_discriminator,
192 op2.m_discriminator);
195 // Dispatch a call to fold_range based on the types of R, LH and RH.
197 bool
198 range_op_handler::fold_range (vrange &r, tree type,
199 const vrange &lh,
200 const vrange &rh,
201 relation_trio rel) const
203 gcc_checking_assert (m_operator);
204 switch (dispatch_kind (r, lh, rh))
206 case RO_III:
207 return m_operator->fold_range (as_a <irange> (r), type,
208 as_a <irange> (lh),
209 as_a <irange> (rh), rel);
210 case RO_IFI:
211 return m_operator->fold_range (as_a <irange> (r), type,
212 as_a <frange> (lh),
213 as_a <irange> (rh), rel);
214 case RO_IFF:
215 return m_operator->fold_range (as_a <irange> (r), type,
216 as_a <frange> (lh),
217 as_a <frange> (rh), rel);
218 case RO_FFF:
219 return m_operator->fold_range (as_a <frange> (r), type,
220 as_a <frange> (lh),
221 as_a <frange> (rh), rel);
222 case RO_FII:
223 return m_operator->fold_range (as_a <frange> (r), type,
224 as_a <irange> (lh),
225 as_a <irange> (rh), rel);
226 default:
227 return false;
231 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
233 bool
234 range_op_handler::op1_range (vrange &r, tree type,
235 const vrange &lhs,
236 const vrange &op2,
237 relation_trio rel) const
239 gcc_checking_assert (m_operator);
241 if (lhs.undefined_p ())
242 return false;
243 switch (dispatch_kind (r, lhs, op2))
245 case RO_III:
246 return m_operator->op1_range (as_a <irange> (r), type,
247 as_a <irange> (lhs),
248 as_a <irange> (op2), rel);
249 case RO_FIF:
250 return m_operator->op1_range (as_a <frange> (r), type,
251 as_a <irange> (lhs),
252 as_a <frange> (op2), rel);
253 case RO_FFF:
254 return m_operator->op1_range (as_a <frange> (r), type,
255 as_a <frange> (lhs),
256 as_a <frange> (op2), rel);
257 default:
258 return false;
262 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
264 bool
265 range_op_handler::op2_range (vrange &r, tree type,
266 const vrange &lhs,
267 const vrange &op1,
268 relation_trio rel) const
270 gcc_checking_assert (m_operator);
271 if (lhs.undefined_p ())
272 return false;
274 switch (dispatch_kind (r, lhs, op1))
276 case RO_III:
277 return m_operator->op2_range (as_a <irange> (r), type,
278 as_a <irange> (lhs),
279 as_a <irange> (op1), rel);
280 case RO_FIF:
281 return m_operator->op2_range (as_a <frange> (r), type,
282 as_a <irange> (lhs),
283 as_a <frange> (op1), rel);
284 case RO_FFF:
285 return m_operator->op2_range (as_a <frange> (r), type,
286 as_a <frange> (lhs),
287 as_a <frange> (op1), rel);
288 default:
289 return false;
293 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
295 relation_kind
296 range_op_handler::lhs_op1_relation (const vrange &lhs,
297 const vrange &op1,
298 const vrange &op2,
299 relation_kind rel) const
301 gcc_checking_assert (m_operator);
303 switch (dispatch_kind (lhs, op1, op2))
305 case RO_III:
306 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
307 as_a <irange> (op1),
308 as_a <irange> (op2), rel);
309 case RO_IFF:
310 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
311 as_a <frange> (op1),
312 as_a <frange> (op2), rel);
313 case RO_FFF:
314 return m_operator->lhs_op1_relation (as_a <frange> (lhs),
315 as_a <frange> (op1),
316 as_a <frange> (op2), rel);
317 default:
318 return VREL_VARYING;
322 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
324 relation_kind
325 range_op_handler::lhs_op2_relation (const vrange &lhs,
326 const vrange &op1,
327 const vrange &op2,
328 relation_kind rel) const
330 gcc_checking_assert (m_operator);
331 switch (dispatch_kind (lhs, op1, op2))
333 case RO_III:
334 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
335 as_a <irange> (op1),
336 as_a <irange> (op2), rel);
337 case RO_IFF:
338 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
339 as_a <frange> (op1),
340 as_a <frange> (op2), rel);
341 case RO_FFF:
342 return m_operator->lhs_op2_relation (as_a <frange> (lhs),
343 as_a <frange> (op1),
344 as_a <frange> (op2), rel);
345 default:
346 return VREL_VARYING;
350 // Dispatch a call to op1_op2_relation based on the type of LHS.
352 relation_kind
353 range_op_handler::op1_op2_relation (const vrange &lhs) const
355 gcc_checking_assert (m_operator);
356 switch (dispatch_kind (lhs, lhs, lhs))
358 case RO_III:
359 return m_operator->op1_op2_relation (as_a <irange> (lhs));
361 case RO_FFF:
362 return m_operator->op1_op2_relation (as_a <frange> (lhs));
364 default:
365 return VREL_VARYING;
370 // Update the known bitmasks in R when applying the operation CODE to
371 // LH and RH.
373 void
374 update_known_bitmask (irange &r, tree_code code,
375 const irange &lh, const irange &rh)
377 if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ()
378 || r.singleton_p ())
379 return;
381 widest_int widest_value, widest_mask;
382 tree type = r.type ();
383 signop sign = TYPE_SIGN (type);
384 int prec = TYPE_PRECISION (type);
385 irange_bitmask lh_bits = lh.get_bitmask ();
386 irange_bitmask rh_bits = rh.get_bitmask ();
388 bit_value_binop (code, sign, prec, &widest_value, &widest_mask,
389 TYPE_SIGN (lh.type ()),
390 TYPE_PRECISION (lh.type ()),
391 widest_int::from (lh_bits.value (), sign),
392 widest_int::from (lh_bits.mask (), sign),
393 TYPE_SIGN (rh.type ()),
394 TYPE_PRECISION (rh.type ()),
395 widest_int::from (rh_bits.value (), sign),
396 widest_int::from (rh_bits.mask (), sign));
398 wide_int mask = wide_int::from (widest_mask, prec, sign);
399 wide_int value = wide_int::from (widest_value, prec, sign);
400 // Bitmasks must have the unknown value bits cleared.
401 value &= ~mask;
402 irange_bitmask bm (value, mask);
403 r.update_bitmask (bm);
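// As a simple illustration of the above: if the bitmasks of LH and RH both
// show the low bit is known zero (both operands even), bit_value_binop for
// PLUS_EXPR reports the sum's low bit as known zero too, and that knowledge
// is recorded in R here.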
406 // Return the upper limit for a type.
408 static inline wide_int
409 max_limit (const_tree type)
411 return irange_val_max (type);
414 // Return the lower limit for a type.
416 static inline wide_int
417 min_limit (const_tree type)
419 return irange_val_min (type);
422 // Return false if shifting by OP is undefined behavior. Otherwise, return
423 // true and the range it is to be shifted by. This allows trimming out of
424 // undefined ranges, leaving only valid ranges if there are any.
426 static inline bool
427 get_shift_range (irange &r, tree type, const irange &op)
429 if (op.undefined_p ())
430 return false;
432 // Build valid range and intersect it with the shift range.
433 r = value_range (op.type (),
434 wi::shwi (0, TYPE_PRECISION (op.type ())),
435 wi::shwi (TYPE_PRECISION (type) - 1, TYPE_PRECISION (op.type ())));
436 r.intersect (op);
438 // If there are no valid ranges in the shift range, return false.
439 if (r.undefined_p ())
440 return false;
441 return true;
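// For example, shifting a 32-bit value is only well defined for shift counts
// in [0, 31].  If OP is [16, 40], the intersection above leaves [16, 31]; if
// OP is entirely invalid, say [35, 40], the intersection is empty and we
// return false.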
444 // Default wide_int fold operation returns [MIN, MAX].
446 void
447 range_operator::wi_fold (irange &r, tree type,
448 const wide_int &lh_lb ATTRIBUTE_UNUSED,
449 const wide_int &lh_ub ATTRIBUTE_UNUSED,
450 const wide_int &rh_lb ATTRIBUTE_UNUSED,
451 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
453 gcc_checking_assert (r.supports_type_p (type));
454 r.set_varying (type);
457 // Call wi_fold when both op1 and op2 are equivalent. Further split small
458 // subranges into constants. This can provide better precision.
459 // For x + y when x == y and x has the range [0,4], this produces
460 // [0,0][2,2][4,4][6,6][8,8] instead of [0,8].
461 // LIMIT is the maximum number of elements in range allowed before we
462 // do not process them individually.
464 void
465 range_operator::wi_fold_in_parts_equiv (irange &r, tree type,
466 const wide_int &lh_lb,
467 const wide_int &lh_ub,
468 unsigned limit) const
470 int_range_max tmp;
471 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
472 widest_int::from (lh_lb, TYPE_SIGN (type)));
473 // If there are 1 to LIMIT values in the LH range, split them up.
474 r.set_undefined ();
475 if (lh_range >= 0 && lh_range < limit)
477 for (unsigned x = 0; x <= lh_range; x++)
479 wide_int val = lh_lb + x;
480 wi_fold (tmp, type, val, val, val, val);
481 r.union_ (tmp);
484 // Otherwise just call wi_fold.
485 else
486 wi_fold (r, type, lh_lb, lh_ub, lh_lb, lh_ub);
489 // Call wi_fold, except further split small subranges into constants.
490 // This can provide better precision. For something like 8 << [0,1],
491 // instead of [8, 16] we will produce [8,8][16,16].
493 void
494 range_operator::wi_fold_in_parts (irange &r, tree type,
495 const wide_int &lh_lb,
496 const wide_int &lh_ub,
497 const wide_int &rh_lb,
498 const wide_int &rh_ub) const
500 int_range_max tmp;
501 widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
502 widest_int::from (rh_lb, TYPE_SIGN (type)));
503 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
504 widest_int::from (lh_lb, TYPE_SIGN (type)));
505 // If there are 2, 3, or 4 values in the RH range, do them separately.
506 // The recursive wi_fold_in_parts calls then get a chance to split the LH side.
507 if (rh_range > 0 && rh_range < 4)
509 wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
510 if (rh_range > 1)
512 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
513 r.union_ (tmp);
514 if (rh_range == 3)
516 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
517 r.union_ (tmp);
520 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
521 r.union_ (tmp);
523 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
524 // The RH side has been checked, so no recursion needed.
525 else if (lh_range > 0 && lh_range < 4)
527 wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
528 if (lh_range > 1)
530 wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
531 r.union_ (tmp);
532 if (lh_range == 3)
534 wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
535 r.union_ (tmp);
538 wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
539 r.union_ (tmp);
541 // Otherwise just call wi_fold.
542 else
543 wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
546 // The default for fold is to break all ranges into sub-ranges and
547 // invoke the wi_fold method on each sub-range pair.
549 bool
550 range_operator::fold_range (irange &r, tree type,
551 const irange &lh,
552 const irange &rh,
553 relation_trio trio) const
555 gcc_checking_assert (r.supports_type_p (type));
556 if (empty_range_varying (r, type, lh, rh))
557 return true;
559 relation_kind rel = trio.op1_op2 ();
560 unsigned num_lh = lh.num_pairs ();
561 unsigned num_rh = rh.num_pairs ();
563 // If op1 and op2 are equivalent, then we don't need a complete cross
564 // product, just pairs of matching elements.
565 if (relation_equiv_p (rel) && lh == rh)
567 int_range_max tmp;
568 r.set_undefined ();
569 for (unsigned x = 0; x < num_lh; ++x)
571 // If the number of subranges is too high, limit subrange creation.
572 unsigned limit = (r.num_pairs () > 32) ? 0 : 8;
573 wide_int lh_lb = lh.lower_bound (x);
574 wide_int lh_ub = lh.upper_bound (x);
575 wi_fold_in_parts_equiv (tmp, type, lh_lb, lh_ub, limit);
576 r.union_ (tmp);
577 if (r.varying_p ())
578 break;
580 op1_op2_relation_effect (r, type, lh, rh, rel);
581 update_bitmask (r, lh, rh);
582 return true;
585 // If both ranges are single pairs, fold directly into the result range.
586 // If the number of subranges grows too high, produce a summary result as the
587 // loop becomes exponential with little benefit. See PR 103821.
588 if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
590 wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
591 rh.lower_bound (), rh.upper_bound ());
592 op1_op2_relation_effect (r, type, lh, rh, rel);
593 update_bitmask (r, lh, rh);
594 return true;
597 int_range_max tmp;
598 r.set_undefined ();
599 for (unsigned x = 0; x < num_lh; ++x)
600 for (unsigned y = 0; y < num_rh; ++y)
602 wide_int lh_lb = lh.lower_bound (x);
603 wide_int lh_ub = lh.upper_bound (x);
604 wide_int rh_lb = rh.lower_bound (y);
605 wide_int rh_ub = rh.upper_bound (y);
606 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
607 r.union_ (tmp);
608 if (r.varying_p ())
610 op1_op2_relation_effect (r, type, lh, rh, rel);
611 update_bitmask (r, lh, rh);
612 return true;
615 op1_op2_relation_effect (r, type, lh, rh, rel);
616 update_bitmask (r, lh, rh);
617 return true;
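// A small worked example of the default fold: for PLUS_EXPR with
// lh = [1,2][10,11] and rh = [5,5], the loop folds [1,2] + [5,5] into [6,7]
// and [10,11] + [5,5] into [15,16], then unions them into [6,7][15,16].
// With more than 12 sub-range pairs we would instead fold the overall bounds
// [1,11] + [5,5] into the summary range [6,16].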
620 // The default for op1_range is to return false.
622 bool
623 range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
624 tree type ATTRIBUTE_UNUSED,
625 const irange &lhs ATTRIBUTE_UNUSED,
626 const irange &op2 ATTRIBUTE_UNUSED,
627 relation_trio) const
629 return false;
632 // The default for op2_range is to return false.
634 bool
635 range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
636 tree type ATTRIBUTE_UNUSED,
637 const irange &lhs ATTRIBUTE_UNUSED,
638 const irange &op1 ATTRIBUTE_UNUSED,
639 relation_trio) const
641 return false;
644 // The default relation routines return VREL_VARYING.
646 relation_kind
647 range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
648 const irange &op1 ATTRIBUTE_UNUSED,
649 const irange &op2 ATTRIBUTE_UNUSED,
650 relation_kind rel ATTRIBUTE_UNUSED) const
652 return VREL_VARYING;
655 relation_kind
656 range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
657 const irange &op1 ATTRIBUTE_UNUSED,
658 const irange &op2 ATTRIBUTE_UNUSED,
659 relation_kind rel ATTRIBUTE_UNUSED) const
661 return VREL_VARYING;
664 relation_kind
665 range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED) const
667 return VREL_VARYING;
670 // Default is no relation affects the LHS.
672 bool
673 range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
674 tree type ATTRIBUTE_UNUSED,
675 const irange &op1_range ATTRIBUTE_UNUSED,
676 const irange &op2_range ATTRIBUTE_UNUSED,
677 relation_kind rel ATTRIBUTE_UNUSED) const
679 return false;
682 // Apply any known bitmask updates based on this operator.
684 void
685 range_operator::update_bitmask (irange &, const irange &,
686 const irange &) const
690 // Create and return a range from a pair of wide-ints that are known
691 // to have overflowed (or underflowed).
693 static void
694 value_range_from_overflowed_bounds (irange &r, tree type,
695 const wide_int &wmin,
696 const wide_int &wmax)
698 const signop sgn = TYPE_SIGN (type);
699 const unsigned int prec = TYPE_PRECISION (type);
701 wide_int tmin = wide_int::from (wmin, prec, sgn);
702 wide_int tmax = wide_int::from (wmax, prec, sgn);
704 bool covers = false;
705 wide_int tem = tmin;
706 tmin = tmax + 1;
707 if (wi::cmp (tmin, tmax, sgn) < 0)
708 covers = true;
709 tmax = tem - 1;
710 if (wi::cmp (tmax, tem, sgn) > 0)
711 covers = true;
713 // If the anti-range would cover nothing, drop to varying.
714 // Likewise if the anti-range bounds are outside of the type's
715 // values.
716 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
717 r.set_varying (type);
718 else
719 r.set (type, tmin, tmax, VR_ANTI_RANGE);
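// For instance, adding 10 to the unsigned 8-bit range [5, 250] produces the
// bounds 15 and 260, where only the upper bound overflowed.  Truncated to
// the type, tmin becomes 4 + 1 = 5 and tmax becomes 15 - 1 = 14, so the
// result is the anti-range ~[5, 14], i.e. [0,4][15,255].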
722 // Create and return a range from a pair of wide-ints. MIN_OVF and
723 // MAX_OVF describe any overflow that might have occurred while
724 // calculating WMIN and WMAX respectively.
726 static void
727 value_range_with_overflow (irange &r, tree type,
728 const wide_int &wmin, const wide_int &wmax,
729 wi::overflow_type min_ovf = wi::OVF_NONE,
730 wi::overflow_type max_ovf = wi::OVF_NONE)
732 const signop sgn = TYPE_SIGN (type);
733 const unsigned int prec = TYPE_PRECISION (type);
734 const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
736 // For one-bit precision, if max != min then the range covers all
737 // values.
738 if (prec == 1 && wi::ne_p (wmax, wmin))
740 r.set_varying (type);
741 return;
744 if (overflow_wraps)
746 // If overflow wraps, truncate the values and adjust the range,
747 // kind, and bounds appropriately.
748 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
750 wide_int tmin = wide_int::from (wmin, prec, sgn);
751 wide_int tmax = wide_int::from (wmax, prec, sgn);
752 // If the limits are swapped, we wrapped around and cover
753 // the entire range.
754 if (wi::gt_p (tmin, tmax, sgn))
755 r.set_varying (type);
756 else
757 // No overflow or both overflow or underflow. The range
758 // kind stays normal.
759 r.set (type, tmin, tmax);
760 return;
763 if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
764 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
765 value_range_from_overflowed_bounds (r, type, wmin, wmax);
766 else
767 // Other underflow and/or overflow, drop to VR_VARYING.
768 r.set_varying (type);
770 else
772 // If both bounds either underflowed or overflowed, then the result
773 // is undefined.
774 if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
775 || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
777 r.set_undefined ();
778 return;
781 // If overflow does not wrap, saturate to [MIN, MAX].
782 wide_int new_lb, new_ub;
783 if (min_ovf == wi::OVF_UNDERFLOW)
784 new_lb = wi::min_value (prec, sgn);
785 else if (min_ovf == wi::OVF_OVERFLOW)
786 new_lb = wi::max_value (prec, sgn);
787 else
788 new_lb = wmin;
790 if (max_ovf == wi::OVF_UNDERFLOW)
791 new_ub = wi::min_value (prec, sgn);
792 else if (max_ovf == wi::OVF_OVERFLOW)
793 new_ub = wi::max_value (prec, sgn);
794 else
795 new_ub = wmax;
797 r.set (type, new_lb, new_ub);
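// An example with a wrapping unsigned 8-bit type: adding [250,250] to
// [10,20] produces the bounds 260 and 270, both of which overflowed.  Both
// are truncated, the limits 4 and 14 are not swapped, and the result is the
// plain range [4, 14].  Had only one bound overflowed, the
// value_range_from_overflowed_bounds path would have produced an anti-range.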
801 // Create and return a range from a pair of wide-ints. Canonicalize
802 // the case where the bounds are swapped, in which case we transform
803 // [10,5] into [MIN,5][10,MAX].
805 static inline void
806 create_possibly_reversed_range (irange &r, tree type,
807 const wide_int &new_lb, const wide_int &new_ub)
809 signop s = TYPE_SIGN (type);
810 // If the bounds are swapped, treat the result as if an overflow occurred.
811 if (wi::gt_p (new_lb, new_ub, s))
812 value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
813 else
814 // Otherwise it's just a normal range.
815 r.set (type, new_lb, new_ub);
818 // Return the summary information about boolean range LHS. If EMPTY/FULL,
819 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
821 bool_range_state
822 get_bool_state (vrange &r, const vrange &lhs, tree val_type)
824 // If there is no result, then this is unexecutable.
825 if (lhs.undefined_p ())
827 r.set_undefined ();
828 return BRS_EMPTY;
831 if (lhs.zero_p ())
832 return BRS_FALSE;
834 // For TRUE, we can't just test for [1,1] because Ada can have
835 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
836 if (lhs.contains_p (build_zero_cst (lhs.type ())))
838 r.set_varying (val_type);
839 return BRS_FULL;
842 return BRS_TRUE;
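// For example, for a C-style boolean an LHS of [0,0] reports BRS_FALSE and
// [1,1] reports BRS_TRUE.  An Ada-style multi-bit boolean whose LHS is [0,3]
// still contains zero, so R is set to varying for VAL_TYPE and BRS_FULL is
// returned.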
845 // ------------------------------------------------------------------------
847 void
848 operator_equal::update_bitmask (irange &r, const irange &lh,
849 const irange &rh) const
851 update_known_bitmask (r, EQ_EXPR, lh, rh);
854 // Check if the LHS range indicates a relation between OP1 and OP2.
856 relation_kind
857 operator_equal::op1_op2_relation (const irange &lhs) const
859 if (lhs.undefined_p ())
860 return VREL_UNDEFINED;
862 // FALSE = op1 == op2 indicates NE_EXPR.
863 if (lhs.zero_p ())
864 return VREL_NE;
866 // TRUE = op1 == op2 indicates EQ_EXPR.
867 if (lhs.undefined_p () || !contains_zero_p (lhs))
868 return VREL_EQ;
869 return VREL_VARYING;
872 bool
873 operator_equal::fold_range (irange &r, tree type,
874 const irange &op1,
875 const irange &op2,
876 relation_trio rel) const
878 if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
879 return true;
881 // If both ranges consist of a single value, we can compare those values
882 // directly and know for certain whether the operands are equal.
883 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
884 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
886 if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
887 r = range_true (type);
888 else
889 r = range_false (type);
891 else
893 // If the ranges do not intersect, we know the operands are never equal;
894 // otherwise we don't know anything for sure.
895 int_range_max tmp = op1;
896 tmp.intersect (op2);
897 if (tmp.undefined_p ())
898 r = range_false (type);
899 else
900 r = range_true_and_false (type);
902 return true;
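// For example, [5,5] == [5,5] folds to [1,1] (always true), [5,5] == [7,7]
// folds to [0,0] (always false), [1,3] == [5,9] folds to false because the
// ranges cannot intersect, and [1,5] == [3,8] folds to [0,1] since nothing
// can be concluded either way.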
905 bool
906 operator_equal::op1_range (irange &r, tree type,
907 const irange &lhs,
908 const irange &op2,
909 relation_trio) const
911 switch (get_bool_state (r, lhs, type))
913 case BRS_TRUE:
914 // If it's true, the result is the same as OP2.
915 r = op2;
916 break;
918 case BRS_FALSE:
919 // If the result is false, the only time we know anything is
920 // if OP2 is a constant.
921 if (!op2.undefined_p ()
922 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
924 r = op2;
925 r.invert ();
927 else
928 r.set_varying (type);
929 break;
931 default:
932 break;
934 return true;
937 bool
938 operator_equal::op2_range (irange &r, tree type,
939 const irange &lhs,
940 const irange &op1,
941 relation_trio rel) const
943 return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
946 // -------------------------------------------------------------------------
948 void
949 operator_not_equal::update_bitmask (irange &r, const irange &lh,
950 const irange &rh) const
952 update_known_bitmask (r, NE_EXPR, lh, rh);
955 // Check if the LHS range indicates a relation between OP1 and OP2.
957 relation_kind
958 operator_not_equal::op1_op2_relation (const irange &lhs) const
960 if (lhs.undefined_p ())
961 return VREL_UNDEFINED;
963 // FALSE = op1 != op2 indicates EQ_EXPR.
964 if (lhs.zero_p ())
965 return VREL_EQ;
967 // TRUE = op1 != op2 indicates NE_EXPR.
968 if (lhs.undefined_p () || !contains_zero_p (lhs))
969 return VREL_NE;
970 return VREL_VARYING;
973 bool
974 operator_not_equal::fold_range (irange &r, tree type,
975 const irange &op1,
976 const irange &op2,
977 relation_trio rel) const
979 if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
980 return true;
982 // If both ranges consist of a single value, we can compare those values
983 // directly and know for certain whether the operands are equal.
984 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
985 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
987 if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
988 r = range_true (type);
989 else
990 r = range_false (type);
992 else
994 // If the ranges do not intersect, we know the operands are never equal;
995 // otherwise we don't know anything for sure.
996 int_range_max tmp = op1;
997 tmp.intersect (op2);
998 if (tmp.undefined_p ())
999 r = range_true (type);
1000 else
1001 r = range_true_and_false (type);
1003 return true;
1006 bool
1007 operator_not_equal::op1_range (irange &r, tree type,
1008 const irange &lhs,
1009 const irange &op2,
1010 relation_trio) const
1012 switch (get_bool_state (r, lhs, type))
1014 case BRS_TRUE:
1015 // If the result is true, the only time we know anything is if
1016 // OP2 is a constant.
1017 if (!op2.undefined_p ()
1018 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1020 r = op2;
1021 r.invert ();
1023 else
1024 r.set_varying (type);
1025 break;
1027 case BRS_FALSE:
1028 // If it's false, the result is the same as OP2.
1029 r = op2;
1030 break;
1032 default:
1033 break;
1035 return true;
1039 bool
1040 operator_not_equal::op2_range (irange &r, tree type,
1041 const irange &lhs,
1042 const irange &op1,
1043 relation_trio rel) const
1045 return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1048 // (X < VAL) produces the range of [MIN, VAL - 1].
1050 static void
1051 build_lt (irange &r, tree type, const wide_int &val)
1053 wi::overflow_type ov;
1054 wide_int lim;
1055 signop sgn = TYPE_SIGN (type);
1057 // Signed 1 bit cannot represent 1 for subtraction.
1058 if (sgn == SIGNED)
1059 lim = wi::add (val, -1, sgn, &ov);
1060 else
1061 lim = wi::sub (val, 1, sgn, &ov);
1063 // If val - 1 underflows, the check was for X < MIN, which is an empty range.
1064 if (ov)
1065 r.set_undefined ();
1066 else
1067 r = int_range<1> (type, min_limit (type), lim);
1070 // (X <= VAL) produces the range of [MIN, VAL].
1072 static void
1073 build_le (irange &r, tree type, const wide_int &val)
1075 r = int_range<1> (type, min_limit (type), val);
1078 // (X > VAL) produces the range of [VAL + 1, MAX].
1080 static void
1081 build_gt (irange &r, tree type, const wide_int &val)
1083 wi::overflow_type ov;
1084 wide_int lim;
1085 signop sgn = TYPE_SIGN (type);
1087 // Signed 1 bit cannot represent 1 for addition.
1088 if (sgn == SIGNED)
1089 lim = wi::sub (val, -1, sgn, &ov);
1090 else
1091 lim = wi::add (val, 1, sgn, &ov);
1092 // If val + 1 overflows, the check was for X > MAX, which is an empty range.
1093 if (ov)
1094 r.set_undefined ();
1095 else
1096 r = int_range<1> (type, lim, max_limit (type));
1099 // (X >= val) produces the range of [VAL, MAX].
1101 static void
1102 build_ge (irange &r, tree type, const wide_int &val)
1104 r = int_range<1> (type, val, max_limit (type));
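// For an unsigned 8-bit type and VAL == 10, these helpers produce:
//   build_lt -> [0, 9]      build_le -> [0, 10]
//   build_gt -> [11, 255]   build_ge -> [10, 255]
// With VAL == 0, build_lt underflows and sets the range to undefined since
// no unsigned value is less than 0; build_gt behaves likewise at VAL == MAX.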
1108 void
1109 operator_lt::update_bitmask (irange &r, const irange &lh,
1110 const irange &rh) const
1112 update_known_bitmask (r, LT_EXPR, lh, rh);
1115 // Check if the LHS range indicates a relation between OP1 and OP2.
1117 relation_kind
1118 operator_lt::op1_op2_relation (const irange &lhs) const
1120 if (lhs.undefined_p ())
1121 return VREL_UNDEFINED;
1123 // FALSE = op1 < op2 indicates GE_EXPR.
1124 if (lhs.zero_p ())
1125 return VREL_GE;
1127 // TRUE = op1 < op2 indicates LT_EXPR.
1128 if (lhs.undefined_p () || !contains_zero_p (lhs))
1129 return VREL_LT;
1130 return VREL_VARYING;
1133 bool
1134 operator_lt::fold_range (irange &r, tree type,
1135 const irange &op1,
1136 const irange &op2,
1137 relation_trio rel) const
1139 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
1140 return true;
1142 signop sign = TYPE_SIGN (op1.type ());
1143 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1145 if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
1146 r = range_true (type);
1147 else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
1148 r = range_false (type);
1149 // Use nonzero bits to determine if < 0 is false.
1150 else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
1151 r = range_false (type);
1152 else
1153 r = range_true_and_false (type);
1154 return true;
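// The nonzero-bits check above catches cases the bounds alone cannot.  For
// instance, if op1 is a signed 8-bit value whose recorded range is still
// [-128, 127] but whose known nonzero bits are 0x7f (the sign bit can never
// be set), then op1 < [0,0] folds to false even though the bounds straddle
// zero.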
1157 bool
1158 operator_lt::op1_range (irange &r, tree type,
1159 const irange &lhs,
1160 const irange &op2,
1161 relation_trio) const
1163 if (op2.undefined_p ())
1164 return false;
1166 switch (get_bool_state (r, lhs, type))
1168 case BRS_TRUE:
1169 build_lt (r, type, op2.upper_bound ());
1170 break;
1172 case BRS_FALSE:
1173 build_ge (r, type, op2.lower_bound ());
1174 break;
1176 default:
1177 break;
1179 return true;
1182 bool
1183 operator_lt::op2_range (irange &r, tree type,
1184 const irange &lhs,
1185 const irange &op1,
1186 relation_trio) const
1188 if (op1.undefined_p ())
1189 return false;
1191 switch (get_bool_state (r, lhs, type))
1193 case BRS_TRUE:
1194 build_gt (r, type, op1.lower_bound ());
1195 break;
1197 case BRS_FALSE:
1198 build_le (r, type, op1.upper_bound ());
1199 break;
1201 default:
1202 break;
1204 return true;
1208 void
1209 operator_le::update_bitmask (irange &r, const irange &lh,
1210 const irange &rh) const
1212 update_known_bitmask (r, LE_EXPR, lh, rh);
1215 // Check if the LHS range indicates a relation between OP1 and OP2.
1217 relation_kind
1218 operator_le::op1_op2_relation (const irange &lhs) const
1220 if (lhs.undefined_p ())
1221 return VREL_UNDEFINED;
1223 // FALSE = op1 <= op2 indicates GT_EXPR.
1224 if (lhs.zero_p ())
1225 return VREL_GT;
1227 // TRUE = op1 <= op2 indicates LE_EXPR.
1228 if (lhs.undefined_p () || !contains_zero_p (lhs))
1229 return VREL_LE;
1230 return VREL_VARYING;
1233 bool
1234 operator_le::fold_range (irange &r, tree type,
1235 const irange &op1,
1236 const irange &op2,
1237 relation_trio rel) const
1239 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
1240 return true;
1242 signop sign = TYPE_SIGN (op1.type ());
1243 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1245 if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
1246 r = range_true (type);
1247 else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
1248 r = range_false (type);
1249 else
1250 r = range_true_and_false (type);
1251 return true;
1254 bool
1255 operator_le::op1_range (irange &r, tree type,
1256 const irange &lhs,
1257 const irange &op2,
1258 relation_trio) const
1260 if (op2.undefined_p ())
1261 return false;
1263 switch (get_bool_state (r, lhs, type))
1265 case BRS_TRUE:
1266 build_le (r, type, op2.upper_bound ());
1267 break;
1269 case BRS_FALSE:
1270 build_gt (r, type, op2.lower_bound ());
1271 break;
1273 default:
1274 break;
1276 return true;
1279 bool
1280 operator_le::op2_range (irange &r, tree type,
1281 const irange &lhs,
1282 const irange &op1,
1283 relation_trio) const
1285 if (op1.undefined_p ())
1286 return false;
1288 switch (get_bool_state (r, lhs, type))
1290 case BRS_TRUE:
1291 build_ge (r, type, op1.lower_bound ());
1292 break;
1294 case BRS_FALSE:
1295 build_lt (r, type, op1.upper_bound ());
1296 break;
1298 default:
1299 break;
1301 return true;
1305 void
1306 operator_gt::update_bitmask (irange &r, const irange &lh,
1307 const irange &rh) const
1309 update_known_bitmask (r, GT_EXPR, lh, rh);
1312 // Check if the LHS range indicates a relation between OP1 and OP2.
1314 relation_kind
1315 operator_gt::op1_op2_relation (const irange &lhs) const
1317 if (lhs.undefined_p ())
1318 return VREL_UNDEFINED;
1320 // FALSE = op1 > op2 indicates LE_EXPR.
1321 if (lhs.zero_p ())
1322 return VREL_LE;
1324 // TRUE = op1 > op2 indicates GT_EXPR.
1325 if (!contains_zero_p (lhs))
1326 return VREL_GT;
1327 return VREL_VARYING;
1330 bool
1331 operator_gt::fold_range (irange &r, tree type,
1332 const irange &op1, const irange &op2,
1333 relation_trio rel) const
1335 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
1336 return true;
1338 signop sign = TYPE_SIGN (op1.type ());
1339 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1341 if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
1342 r = range_true (type);
1343 else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
1344 r = range_false (type);
1345 else
1346 r = range_true_and_false (type);
1347 return true;
1350 bool
1351 operator_gt::op1_range (irange &r, tree type,
1352 const irange &lhs, const irange &op2,
1353 relation_trio) const
1355 if (op2.undefined_p ())
1356 return false;
1358 switch (get_bool_state (r, lhs, type))
1360 case BRS_TRUE:
1361 build_gt (r, type, op2.lower_bound ());
1362 break;
1364 case BRS_FALSE:
1365 build_le (r, type, op2.upper_bound ());
1366 break;
1368 default:
1369 break;
1371 return true;
1374 bool
1375 operator_gt::op2_range (irange &r, tree type,
1376 const irange &lhs,
1377 const irange &op1,
1378 relation_trio) const
1380 if (op1.undefined_p ())
1381 return false;
1383 switch (get_bool_state (r, lhs, type))
1385 case BRS_TRUE:
1386 build_lt (r, type, op1.upper_bound ());
1387 break;
1389 case BRS_FALSE:
1390 build_ge (r, type, op1.lower_bound ());
1391 break;
1393 default:
1394 break;
1396 return true;
1400 void
1401 operator_ge::update_bitmask (irange &r, const irange &lh,
1402 const irange &rh) const
1404 update_known_bitmask (r, GE_EXPR, lh, rh);
1407 // Check if the LHS range indicates a relation between OP1 and OP2.
1409 relation_kind
1410 operator_ge::op1_op2_relation (const irange &lhs) const
1412 if (lhs.undefined_p ())
1413 return VREL_UNDEFINED;
1415 // FALSE = op1 >= op2 indicates LT_EXPR.
1416 if (lhs.zero_p ())
1417 return VREL_LT;
1419 // TRUE = op1 >= op2 indicates GE_EXPR.
1420 if (!contains_zero_p (lhs))
1421 return VREL_GE;
1422 return VREL_VARYING;
1425 bool
1426 operator_ge::fold_range (irange &r, tree type,
1427 const irange &op1,
1428 const irange &op2,
1429 relation_trio rel) const
1431 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
1432 return true;
1434 signop sign = TYPE_SIGN (op1.type ());
1435 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1437 if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
1438 r = range_true (type);
1439 else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
1440 r = range_false (type);
1441 else
1442 r = range_true_and_false (type);
1443 return true;
1446 bool
1447 operator_ge::op1_range (irange &r, tree type,
1448 const irange &lhs,
1449 const irange &op2,
1450 relation_trio) const
1452 if (op2.undefined_p ())
1453 return false;
1455 switch (get_bool_state (r, lhs, type))
1457 case BRS_TRUE:
1458 build_ge (r, type, op2.lower_bound ());
1459 break;
1461 case BRS_FALSE:
1462 build_lt (r, type, op2.upper_bound ());
1463 break;
1465 default:
1466 break;
1468 return true;
1471 bool
1472 operator_ge::op2_range (irange &r, tree type,
1473 const irange &lhs,
1474 const irange &op1,
1475 relation_trio) const
1477 if (op1.undefined_p ())
1478 return false;
1480 switch (get_bool_state (r, lhs, type))
1482 case BRS_TRUE:
1483 build_le (r, type, op1.upper_bound ());
1484 break;
1486 case BRS_FALSE:
1487 build_gt (r, type, op1.lower_bound ());
1488 break;
1490 default:
1491 break;
1493 return true;
1497 void
1498 operator_plus::update_bitmask (irange &r, const irange &lh,
1499 const irange &rh) const
1501 update_known_bitmask (r, PLUS_EXPR, lh, rh);
1504 // Check to see if the range of OP2 indicates anything about the relation
1505 // between LHS and OP1.
1507 relation_kind
1508 operator_plus::lhs_op1_relation (const irange &lhs,
1509 const irange &op1,
1510 const irange &op2,
1511 relation_kind) const
1513 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
1514 return VREL_VARYING;
1516 tree type = lhs.type ();
1517 unsigned prec = TYPE_PRECISION (type);
1518 wi::overflow_type ovf1, ovf2;
1519 signop sign = TYPE_SIGN (type);
1521 // LHS = OP1 + 0 indicates LHS == OP1.
1522 if (op2.zero_p ())
1523 return VREL_EQ;
1525 if (TYPE_OVERFLOW_WRAPS (type))
1527 wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
1528 wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
1530 else
1531 ovf1 = ovf2 = wi::OVF_NONE;
1533 // Never wrapping additions.
1534 if (!ovf1 && !ovf2)
1536 // Positive op2 means lhs > op1.
1537 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1538 return VREL_GT;
1539 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1540 return VREL_GE;
1542 // Negative op2 means lhs < op1.
1543 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1544 return VREL_LT;
1545 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1546 return VREL_LE;
1548 // Always wrapping additions.
1549 else if (ovf1 && ovf1 == ovf2)
1551 // Positive op2 means lhs < op1.
1552 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1553 return VREL_LT;
1554 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1555 return VREL_LE;
1557 // Negative op2 means lhs > op1.
1558 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1559 return VREL_GT;
1560 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1561 return VREL_GE;
1564 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1565 if (!range_includes_zero_p (&op2))
1566 return VREL_NE;
1568 return VREL_VARYING;
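// For example, with lhs = op1 + [1,3] and no possible overflow, op2's lower
// bound is strictly positive, so lhs > op1 (VREL_GT); with op2 = [0,3] we
// only get lhs >= op1.  If instead every addition wraps, say unsigned 8-bit
// op1 = [250,255] and op2 = [10,20], the relation flips and a positive op2
// yields lhs < op1.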
1571 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1572 // operands.
1574 relation_kind
1575 operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
1576 const irange &op2, relation_kind rel) const
1578 return lhs_op1_relation (lhs, op2, op1, rel);
1581 void
1582 operator_plus::wi_fold (irange &r, tree type,
1583 const wide_int &lh_lb, const wide_int &lh_ub,
1584 const wide_int &rh_lb, const wide_int &rh_ub) const
1586 wi::overflow_type ov_lb, ov_ub;
1587 signop s = TYPE_SIGN (type);
1588 wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
1589 wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
1590 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1593 // Given addition or subtraction, determine the possible NORMAL ranges and
1594 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1595 // Return the relation that exists between the LHS and OP1 in order for the
1596 // NORMAL range to apply.
1597 // A return value of VREL_VARYING means no ranges were applicable.
1599 static relation_kind
1600 plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
1601 bool add_p)
1603 relation_kind kind = VREL_VARYING;
1604 // For now, only deal with constant adds. This could be extended to ranges
1605 // when someone is so motivated.
1606 if (!offset.singleton_p () || offset.zero_p ())
1607 return kind;
1609 // Always work with a positive offset, i.e. a + -2 -> a - 2 and a - -2 -> a + 2.
1610 wide_int off = offset.lower_bound ();
1611 if (wi::neg_p (off, SIGNED))
1613 add_p = !add_p;
1614 off = wi::neg (off);
1617 wi::overflow_type ov;
1618 tree type = offset.type ();
1619 unsigned prec = TYPE_PRECISION (type);
1620 wide_int ub;
1621 wide_int lb;
1622 // Calculate the normal range and relation for the operation.
1623 if (add_p)
1625 // [ 0 , INF - OFF]
1626 lb = wi::zero (prec);
1627 ub = wi::sub (irange_val_max (type), off, UNSIGNED, &ov);
1628 kind = VREL_GT;
1630 else
1632 // [ OFF, INF ]
1633 lb = off;
1634 ub = irange_val_max (type);
1635 kind = VREL_LT;
1637 int_range<2> normal_range (type, lb, ub);
1638 int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);
1640 r_ov = ov_range;
1641 r_normal = normal_range;
1642 return kind;
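// As an example, for an unsigned 8-bit a + 2 (OFFSET = [2,2], ADD_P true),
// the NORMAL range is [0, 253] with relation VREL_GT: whenever lhs > op1
// holds, the addition did not wrap and op1 must lie in [0, 253].  The
// OVERFLOW range is the remainder, [254, 255].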
1645 // Once op1 has been calculated by operator_plus or operator_minus, check
1646 // to see if the relation passed causes any part of the calculation to
1647 // be impossible, e.g.
1648 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1649 // and that further refines a_2 to [0, 0].
1650 // R is the value of op1, OP2 is the offset being added/subtracted, REL is the
1651 // relation between the LHS and OP1, and ADD_P is true for PLUS, false for
1652 // MINUS. If any adjustment can be made, R will reflect it.
1654 static void
1655 adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
1656 bool add_p)
1658 if (r.undefined_p ())
1659 return;
1660 tree type = r.type ();
1661 // Check for unsigned overflow and calculate the overflow part.
1662 signop s = TYPE_SIGN (type);
1663 if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
1664 return;
1666 // Only work with <, <=, >, >= relations.
1667 if (!relation_lt_le_gt_ge_p (rel))
1668 return;
1670 // Get the ranges for this offset.
1671 int_range_max normal, overflow;
1672 relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);
1674 // VREL_VARYING means there are no adjustments.
1675 if (k == VREL_VARYING)
1676 return;
1678 // If the relations match, use the normal range; otherwise use the overflow range.
1679 if (relation_intersect (k, rel) == k)
1680 r.intersect (normal);
1681 else
1682 r.intersect (overflow);
1683 return;
1686 bool
1687 operator_plus::op1_range (irange &r, tree type,
1688 const irange &lhs,
1689 const irange &op2,
1690 relation_trio trio) const
1692 if (lhs.undefined_p ())
1693 return false;
1694 // Start with the default operation.
1695 range_op_handler minus (MINUS_EXPR);
1696 if (!minus)
1697 return false;
1698 bool res = minus.fold_range (r, type, lhs, op2);
1699 relation_kind rel = trio.lhs_op1 ();
1700 // Check for a relation refinement.
1701 if (res)
1702 adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
1703 return res;
1706 bool
1707 operator_plus::op2_range (irange &r, tree type,
1708 const irange &lhs,
1709 const irange &op1,
1710 relation_trio rel) const
1712 return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1715 class operator_widen_plus_signed : public range_operator
1717 public:
1718 virtual void wi_fold (irange &r, tree type,
1719 const wide_int &lh_lb,
1720 const wide_int &lh_ub,
1721 const wide_int &rh_lb,
1722 const wide_int &rh_ub) const;
1723 } op_widen_plus_signed;
1725 void
1726 operator_widen_plus_signed::wi_fold (irange &r, tree type,
1727 const wide_int &lh_lb,
1728 const wide_int &lh_ub,
1729 const wide_int &rh_lb,
1730 const wide_int &rh_ub) const
1732 wi::overflow_type ov_lb, ov_ub;
1733 signop s = TYPE_SIGN (type);
1735 wide_int lh_wlb
1736 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
1737 wide_int lh_wub
1738 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
1739 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1740 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1742 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1743 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1745 r = int_range<2> (type, new_lb, new_ub);
1748 class operator_widen_plus_unsigned : public range_operator
1750 public:
1751 virtual void wi_fold (irange &r, tree type,
1752 const wide_int &lh_lb,
1753 const wide_int &lh_ub,
1754 const wide_int &rh_lb,
1755 const wide_int &rh_ub) const;
1756 } op_widen_plus_unsigned;
1758 void
1759 operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
1760 const wide_int &lh_lb,
1761 const wide_int &lh_ub,
1762 const wide_int &rh_lb,
1763 const wide_int &rh_ub) const
1765 wi::overflow_type ov_lb, ov_ub;
1766 signop s = TYPE_SIGN (type);
1768 wide_int lh_wlb
1769 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
1770 wide_int lh_wub
1771 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
1772 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1773 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1775 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1776 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1778 r = int_range<2> (type, new_lb, new_ub);
1781 void
1782 operator_minus::update_bitmask (irange &r, const irange &lh,
1783 const irange &rh) const
1785 update_known_bitmask (r, MINUS_EXPR, lh, rh);
1788 void
1789 operator_minus::wi_fold (irange &r, tree type,
1790 const wide_int &lh_lb, const wide_int &lh_ub,
1791 const wide_int &rh_lb, const wide_int &rh_ub) const
1793 wi::overflow_type ov_lb, ov_ub;
1794 signop s = TYPE_SIGN (type);
1795 wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
1796 wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
1797 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1801 // Return the relation between LHS and OP1 based on the relation between
1802 // OP1 and OP2.
1804 relation_kind
1805 operator_minus::lhs_op1_relation (const irange &, const irange &op1,
1806 const irange &, relation_kind rel) const
1808 if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
1809 switch (rel)
1811 case VREL_GT:
1812 case VREL_GE:
1813 return VREL_LE;
1814 default:
1815 break;
1817 return VREL_VARYING;
1820 // Check to see if the relation REL between OP1 and OP2 has any effect on the
1821 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
1822 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
1824 bool
1825 minus_op1_op2_relation_effect (irange &lhs_range, tree type,
1826 const irange &op1_range ATTRIBUTE_UNUSED,
1827 const irange &op2_range ATTRIBUTE_UNUSED,
1828 relation_kind rel)
1830 if (rel == VREL_VARYING)
1831 return false;
1833 int_range<2> rel_range;
1834 unsigned prec = TYPE_PRECISION (type);
1835 signop sgn = TYPE_SIGN (type);
1837 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
1838 if (rel == VREL_EQ)
1839 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
1840 else if (rel == VREL_NE)
1841 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
1842 VR_ANTI_RANGE);
1843 else if (TYPE_OVERFLOW_WRAPS (type))
1845 switch (rel)
1847 // For wrapping signed values and unsigned, if op1 > op2 or
1848 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
1849 case VREL_GT:
1850 case VREL_LT:
1851 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
1852 VR_ANTI_RANGE);
1853 break;
1854 default:
1855 return false;
1858 else
1860 switch (rel)
1862 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
1863 case VREL_GT:
1864 rel_range = int_range<2> (type, wi::one (prec),
1865 wi::max_value (prec, sgn));
1866 break;
1867 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
1868 case VREL_GE:
1869 rel_range = int_range<2> (type, wi::zero (prec),
1870 wi::max_value (prec, sgn));
1871 break;
1872 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
1873 case VREL_LT:
1874 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
1875 wi::minus_one (prec));
1876 break;
1877 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
1878 case VREL_LE:
1879 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
1880 wi::zero (prec));
1881 break;
1882 default:
1883 return false;
1886 lhs_range.intersect (rel_range);
1887 return true;
1890 bool
1891 operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
1892 const irange &op1_range,
1893 const irange &op2_range,
1894 relation_kind rel) const
1896 return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
1897 rel);
1900 bool
1901 operator_minus::op1_range (irange &r, tree type,
1902 const irange &lhs,
1903 const irange &op2,
1904 relation_trio trio) const
1906 if (lhs.undefined_p ())
1907 return false;
1908 // Start with the default operation.
1909 range_op_handler minus (PLUS_EXPR);
1910 if (!minus)
1911 return false;
1912 bool res = minus.fold_range (r, type, lhs, op2);
1913 relation_kind rel = trio.lhs_op1 ();
1914 if (res)
1915 adjust_op1_for_overflow (r, op2, rel, false /* MINUS_EXPR */);
1916 return res;
1920 bool
1921 operator_minus::op2_range (irange &r, tree type,
1922 const irange &lhs,
1923 const irange &op1,
1924 relation_trio) const
1926 if (lhs.undefined_p ())
1927 return false;
1928 return fold_range (r, type, op1, lhs);
1931 void
1932 operator_min::update_bitmask (irange &r, const irange &lh,
1933 const irange &rh) const
1935 update_known_bitmask (r, MIN_EXPR, lh, rh);
1938 void
1939 operator_min::wi_fold (irange &r, tree type,
1940 const wide_int &lh_lb, const wide_int &lh_ub,
1941 const wide_int &rh_lb, const wide_int &rh_ub) const
1943 signop s = TYPE_SIGN (type);
1944 wide_int new_lb = wi::min (lh_lb, rh_lb, s);
1945 wide_int new_ub = wi::min (lh_ub, rh_ub, s);
1946 value_range_with_overflow (r, type, new_lb, new_ub);
1950 void
1951 operator_max::update_bitmask (irange &r, const irange &lh,
1952 const irange &rh) const
1954 update_known_bitmask (r, MAX_EXPR, lh, rh);
1957 void
1958 operator_max::wi_fold (irange &r, tree type,
1959 const wide_int &lh_lb, const wide_int &lh_ub,
1960 const wide_int &rh_lb, const wide_int &rh_ub) const
1962 signop s = TYPE_SIGN (type);
1963 wide_int new_lb = wi::max (lh_lb, rh_lb, s);
1964 wide_int new_ub = wi::max (lh_ub, rh_ub, s);
1965 value_range_with_overflow (r, type, new_lb, new_ub);
1969 // Calculate the cross product of two sets of ranges and return it.
1971 // Multiplications, divisions and shifts are a bit tricky to handle,
1972 // depending on the mix of signs we have in the two ranges, we need to
1973 // operate on different values to get the minimum and maximum values
1974 // for the new range. One approach is to figure out all the
1975 // variations of range combinations and do the operations.
1977 // However, this involves several calls to compare_values and it is
1978 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
1979 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
1980 // figure the smallest and largest values to form the new range.
1982 void
1983 cross_product_operator::wi_cross_product (irange &r, tree type,
1984 const wide_int &lh_lb,
1985 const wide_int &lh_ub,
1986 const wide_int &rh_lb,
1987 const wide_int &rh_ub) const
1989 wide_int cp1, cp2, cp3, cp4;
1990 // Default to varying.
1991 r.set_varying (type);
1993 // Compute the 4 cross operations, bailing if we get an overflow we
1994 // can't handle.
1995 if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
1996 return;
1997 if (wi::eq_p (lh_lb, lh_ub))
1998 cp3 = cp1;
1999 else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
2000 return;
2001 if (wi::eq_p (rh_lb, rh_ub))
2002 cp2 = cp1;
2003 else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
2004 return;
2005 if (wi::eq_p (lh_lb, lh_ub))
2006 cp4 = cp2;
2007 else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
2008 return;
2010 // Order pairs.
2011 signop sign = TYPE_SIGN (type);
2012 if (wi::gt_p (cp1, cp2, sign))
2013 std::swap (cp1, cp2);
2014 if (wi::gt_p (cp3, cp4, sign))
2015 std::swap (cp3, cp4);
2017 // Choose min and max from the ordered pairs.
2018 wide_int res_lb = wi::min (cp1, cp3, sign);
2019 wide_int res_ub = wi::max (cp2, cp4, sign);
2020 value_range_with_overflow (r, type, res_lb, res_ub);
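// For instance, multiplying [-2, 3] by [5, 10] in a signed type wide enough
// not to overflow computes the four cross products -10, -20, 15 and 30;
// after ordering the pairs, the result is [-20, 30].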
2024 void
2025 operator_mult::update_bitmask (irange &r, const irange &lh,
2026 const irange &rh) const
2028 update_known_bitmask (r, MULT_EXPR, lh, rh);
2031 bool
2032 operator_mult::op1_range (irange &r, tree type,
2033 const irange &lhs, const irange &op2,
2034 relation_trio) const
2036 if (lhs.undefined_p ())
2037 return false;
2039 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2040 // For example: for 0 = OP1 * 2, OP1 could be 0 or 128, whereas
2041 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
2042 if (TYPE_OVERFLOW_WRAPS (type))
2043 return false;
2045 wide_int offset;
2046 if (op2.singleton_p (offset) && offset != 0)
2047 return range_op_handler (TRUNC_DIV_EXPR).fold_range (r, type, lhs, op2);
2048 return false;
2051 bool
2052 operator_mult::op2_range (irange &r, tree type,
2053 const irange &lhs, const irange &op1,
2054 relation_trio rel) const
2056 return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
2059 bool
2060 operator_mult::wi_op_overflows (wide_int &res, tree type,
2061 const wide_int &w0, const wide_int &w1) const
2063 wi::overflow_type overflow = wi::OVF_NONE;
2064 signop sign = TYPE_SIGN (type);
2065 res = wi::mul (w0, w1, sign, &overflow);
2066 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2068 // For multiplication, the sign of the overflow is given
2069 // by the comparison of the signs of the operands.
2070 if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
2071 res = wi::max_value (w0.get_precision (), sign);
2072 else
2073 res = wi::min_value (w0.get_precision (), sign);
2074 return false;
2076 return overflow;
2079 void
2080 operator_mult::wi_fold (irange &r, tree type,
2081 const wide_int &lh_lb, const wide_int &lh_ub,
2082 const wide_int &rh_lb, const wide_int &rh_ub) const
2084 if (TYPE_OVERFLOW_UNDEFINED (type))
2086 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2087 return;
2090 // Multiply the ranges when overflow wraps. This is basically fancy
2091 // code so we don't drop to varying with an unsigned
2092 // [-3,-1]*[-3,-1].
2094 // This test requires 2*prec bits if both operands are signed and
2095 // 2*prec + 2 bits if either is not. Therefore, extend the values
2096 // using the sign of the result to PREC2. From here on out,
2097 // everything is just signed math no matter what the input types
2098 // were.
2100 signop sign = TYPE_SIGN (type);
2101 unsigned prec = TYPE_PRECISION (type);
2102 widest2_int min0 = widest2_int::from (lh_lb, sign);
2103 widest2_int max0 = widest2_int::from (lh_ub, sign);
2104 widest2_int min1 = widest2_int::from (rh_lb, sign);
2105 widest2_int max1 = widest2_int::from (rh_ub, sign);
2106 widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
2107 widest2_int size = sizem1 + 1;
2109 // Canonicalize the intervals.
2110 if (sign == UNSIGNED)
2112 if (wi::ltu_p (size, min0 + max0))
2114 min0 -= size;
2115 max0 -= size;
2117 if (wi::ltu_p (size, min1 + max1))
2119 min1 -= size;
2120 max1 -= size;
2124 // Sort the 4 products so that min is in prod0 and max is in
2125 // prod3.
2126 widest2_int prod0 = min0 * min1;
2127 widest2_int prod1 = min0 * max1;
2128 widest2_int prod2 = max0 * min1;
2129 widest2_int prod3 = max0 * max1;
2131 // min0min1 > max0max1
2132 if (prod0 > prod3)
2133 std::swap (prod0, prod3);
2135 // min0max1 > max0min1
2136 if (prod1 > prod2)
2137 std::swap (prod1, prod2);
2139 if (prod0 > prod1)
2140 std::swap (prod0, prod1);
2142 if (prod2 > prod3)
2143 std::swap (prod2, prod3);
2145 // diff = max - min
2146 prod2 = prod3 - prod0;
2147 if (wi::geu_p (prod2, sizem1))
2149 // Multiplying by X, where X is a power of 2 is [0,0][X,+INF].
2150 if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
2151 && wi::exact_log2 (rh_lb) != -1 && prec > 1)
2153 r.set (type, rh_lb, wi::max_value (prec, sign));
2154 int_range<2> zero;
2155 zero.set_zero (type);
2156 r.union_ (zero);
2158 else
2159 // The range covers all values.
2160 r.set_varying (type);
2162 else
2164 wide_int new_lb = wide_int::from (prod0, prec, sign);
2165 wide_int new_ub = wide_int::from (prod3, prec, sign);
2166 create_possibly_reversed_range (r, type, new_lb, new_ub);
2170 class operator_widen_mult_signed : public range_operator
2172 public:
2173 virtual void wi_fold (irange &r, tree type,
2174 const wide_int &lh_lb,
2175 const wide_int &lh_ub,
2176 const wide_int &rh_lb,
2177 const wide_int &rh_ub)
2178 const;
2179 } op_widen_mult_signed;
2181 void
2182 operator_widen_mult_signed::wi_fold (irange &r, tree type,
2183 const wide_int &lh_lb,
2184 const wide_int &lh_ub,
2185 const wide_int &rh_lb,
2186 const wide_int &rh_ub) const
2188 signop s = TYPE_SIGN (type);
2190 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
2191 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
2192 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2193 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2195 /* We don't expect a widening multiplication to be able to overflow but range
2196 calculations for multiplications are complicated. After widening the
2197 operands, let's call the base class. */
2198 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2202 class operator_widen_mult_unsigned : public range_operator
2204 public:
2205 virtual void wi_fold (irange &r, tree type,
2206 const wide_int &lh_lb,
2207 const wide_int &lh_ub,
2208 const wide_int &rh_lb,
2209 const wide_int &rh_ub)
2210 const;
2211 } op_widen_mult_unsigned;
2213 void
2214 operator_widen_mult_unsigned::wi_fold (irange &r, tree type,
2215 const wide_int &lh_lb,
2216 const wide_int &lh_ub,
2217 const wide_int &rh_lb,
2218 const wide_int &rh_ub) const
2220 signop s = TYPE_SIGN (type);
2222 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
2223 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
2224 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2225 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2227 /* We don't expect a widening multiplication to be able to overflow but range
2228 calculations for multiplications are complicated. After widening the
2229 operands, let's call the base class. */
2230 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2233 class operator_div : public cross_product_operator
2235 public:
2236 operator_div (tree_code div_kind) { m_code = div_kind; }
2237 virtual void wi_fold (irange &r, tree type,
2238 const wide_int &lh_lb,
2239 const wide_int &lh_ub,
2240 const wide_int &rh_lb,
2241 const wide_int &rh_ub) const final override;
2242 virtual bool wi_op_overflows (wide_int &res, tree type,
2243 const wide_int &, const wide_int &)
2244 const final override;
2245 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
2246 { update_known_bitmask (r, m_code, lh, rh); }
2247 protected:
2248 tree_code m_code;
2251 static operator_div op_trunc_div (TRUNC_DIV_EXPR);
2252 static operator_div op_floor_div (FLOOR_DIV_EXPR);
2253 static operator_div op_round_div (ROUND_DIV_EXPR);
2254 static operator_div op_ceil_div (CEIL_DIV_EXPR);
2256 bool
2257 operator_div::wi_op_overflows (wide_int &res, tree type,
2258 const wide_int &w0, const wide_int &w1) const
2260 if (w1 == 0)
2261 return true;
2263 wi::overflow_type overflow = wi::OVF_NONE;
2264 signop sign = TYPE_SIGN (type);
2266 switch (m_code)
2268 case EXACT_DIV_EXPR:
2269 case TRUNC_DIV_EXPR:
2270 res = wi::div_trunc (w0, w1, sign, &overflow);
2271 break;
2272 case FLOOR_DIV_EXPR:
2273 res = wi::div_floor (w0, w1, sign, &overflow);
2274 break;
2275 case ROUND_DIV_EXPR:
2276 res = wi::div_round (w0, w1, sign, &overflow);
2277 break;
2278 case CEIL_DIV_EXPR:
2279 res = wi::div_ceil (w0, w1, sign, &overflow);
2280 break;
2281 default:
2282 gcc_unreachable ();
2285 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2287 // For division, the only case is -INF / -1 = +INF.
2288 res = wi::max_value (w0.get_precision (), sign);
2289 return false;
2291 return overflow;
2294 void
2295 operator_div::wi_fold (irange &r, tree type,
2296 const wide_int &lh_lb, const wide_int &lh_ub,
2297 const wide_int &rh_lb, const wide_int &rh_ub) const
2299 const wide_int dividend_min = lh_lb;
2300 const wide_int dividend_max = lh_ub;
2301 const wide_int divisor_min = rh_lb;
2302 const wide_int divisor_max = rh_ub;
2303 signop sign = TYPE_SIGN (type);
2304 unsigned prec = TYPE_PRECISION (type);
2305 wide_int extra_min, extra_max;
2307 // If we know we won't divide by zero, just do the division.
2308 if (!wi_includes_zero_p (type, divisor_min, divisor_max))
2310 wi_cross_product (r, type, dividend_min, dividend_max,
2311 divisor_min, divisor_max);
2312 return;
2315 // If we're definitely dividing by zero, there's nothing to do.
2316 if (wi_zero_p (type, divisor_min, divisor_max))
2318 r.set_undefined ();
2319 return;
2322 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2323 // skip any division by zero.
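// For example, [10, 20] / [-2, 2] is computed as the union of
// [10, 20] / [-2, -1] = [-20, -5] and [10, 20] / [1, 2] = [5, 20].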
2325 // First divide by the negative numbers, if any.
2326 if (wi::neg_p (divisor_min, sign))
2327 wi_cross_product (r, type, dividend_min, dividend_max,
2328 divisor_min, wi::minus_one (prec));
2329 else
2330 r.set_undefined ();
2332 // Then divide by the non-zero positive numbers, if any.
2333 if (wi::gt_p (divisor_max, wi::zero (prec), sign))
2335 int_range_max tmp;
2336 wi_cross_product (tmp, type, dividend_min, dividend_max,
2337 wi::one (prec), divisor_max);
2338 r.union_ (tmp);
2340 // We shouldn't still have undefined here.
2341 gcc_checking_assert (!r.undefined_p ());
2345 class operator_exact_divide : public operator_div
2347 using range_operator::op1_range;
2348 public:
2349 operator_exact_divide () : operator_div (EXACT_DIV_EXPR) { }
2350 virtual bool op1_range (irange &r, tree type,
2351 const irange &lhs,
2352 const irange &op2,
2353 relation_trio) const;
2355 } op_exact_div;
2357 bool
2358 operator_exact_divide::op1_range (irange &r, tree type,
2359 const irange &lhs,
2360 const irange &op2,
2361 relation_trio) const
2363 if (lhs.undefined_p ())
2364 return false;
2365 wide_int offset;
2366 // [2, 4] = op1 / [3,3] since it's an exact divide, no need to worry about
2367 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2368 // We won't bother trying to enumerate all the in-between stuff :-P
2369 // True accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2370 // the time, however.
2371 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2372 if (op2.singleton_p (offset) && offset != 0)
2373 return range_op_handler (MULT_EXPR).fold_range (r, type, lhs, op2);
2374 return false;
2378 class operator_lshift : public cross_product_operator
2380 using range_operator::fold_range;
2381 using range_operator::op1_range;
2382 public:
2383 virtual bool op1_range (irange &r, tree type,
2384 const irange &lhs,
2385 const irange &op2,
2386 relation_trio rel = TRIO_VARYING) const;
2387 virtual bool fold_range (irange &r, tree type,
2388 const irange &op1,
2389 const irange &op2,
2390 relation_trio rel = TRIO_VARYING) const;
2392 virtual void wi_fold (irange &r, tree type,
2393 const wide_int &lh_lb, const wide_int &lh_ub,
2394 const wide_int &rh_lb, const wide_int &rh_ub) const;
2395 virtual bool wi_op_overflows (wide_int &res,
2396 tree type,
2397 const wide_int &,
2398 const wide_int &) const;
2399 void update_bitmask (irange &r, const irange &lh,
2400 const irange &rh) const final override
2401 { update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
2402 } op_lshift;
2404 class operator_rshift : public cross_product_operator
2406 using range_operator::fold_range;
2407 using range_operator::op1_range;
2408 using range_operator::lhs_op1_relation;
2409 public:
2410 virtual bool fold_range (irange &r, tree type,
2411 const irange &op1,
2412 const irange &op2,
2413 relation_trio rel = TRIO_VARYING) const;
2414 virtual void wi_fold (irange &r, tree type,
2415 const wide_int &lh_lb,
2416 const wide_int &lh_ub,
2417 const wide_int &rh_lb,
2418 const wide_int &rh_ub) const;
2419 virtual bool wi_op_overflows (wide_int &res,
2420 tree type,
2421 const wide_int &w0,
2422 const wide_int &w1) const;
2423 virtual bool op1_range (irange &, tree type,
2424 const irange &lhs,
2425 const irange &op2,
2426 relation_trio rel = TRIO_VARYING) const;
2427 virtual relation_kind lhs_op1_relation (const irange &lhs,
2428 const irange &op1,
2429 const irange &op2,
2430 relation_kind rel) const;
2431 void update_bitmask (irange &r, const irange &lh,
2432 const irange &rh) const final override
2433 { update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
2434 } op_rshift;
2437 relation_kind
2438 operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
2439 const irange &op1,
2440 const irange &op2,
2441 relation_kind) const
2443 // If both operand ranges are >= 0, then the LHS <= op1.
2444 if (!op1.undefined_p () && !op2.undefined_p ()
2445 && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
2446 && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
2447 return VREL_LE;
2448 return VREL_VARYING;
2451 bool
2452 operator_lshift::fold_range (irange &r, tree type,
2453 const irange &op1,
2454 const irange &op2,
2455 relation_trio rel) const
2457 int_range_max shift_range;
2458 if (!get_shift_range (shift_range, type, op2))
2460 if (op2.undefined_p ())
2461 r.set_undefined ();
2462 else
2463 r.set_zero (type);
2464 return true;
2467 // Transform left shifts by constants into multiplies.
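// For instance, a shift by the constant 3 becomes a multiplication by 8,
// with wrapping forced so that bits shifted out of the type are discarded.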
2468 if (shift_range.singleton_p ())
2470 unsigned shift = shift_range.lower_bound ().to_uhwi ();
2471 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
2472 int_range<1> mult (type, tmp, tmp);
2474 // Force wrapping multiplication.
2475 bool saved_flag_wrapv = flag_wrapv;
2476 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
2477 flag_wrapv = 1;
2478 flag_wrapv_pointer = 1;
2479 bool b = op_mult.fold_range (r, type, op1, mult);
2480 flag_wrapv = saved_flag_wrapv;
2481 flag_wrapv_pointer = saved_flag_wrapv_pointer;
2482 return b;
2484 else
2485 // Otherwise, invoke the generic fold routine.
2486 return range_operator::fold_range (r, type, op1, shift_range, rel);
2489 void
2490 operator_lshift::wi_fold (irange &r, tree type,
2491 const wide_int &lh_lb, const wide_int &lh_ub,
2492 const wide_int &rh_lb, const wide_int &rh_ub) const
2494 signop sign = TYPE_SIGN (type);
2495 unsigned prec = TYPE_PRECISION (type);
2496 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
2497 int bound_shift = overflow_pos - rh_ub.to_shwi ();
2498 // If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
2499 // overflow. However, for that to happen, rh.max needs to be zero,
2500 // which means rh is a singleton range of zero, so we simply return
2501 // [lh_lb, lh_ub] as the range.
2502 if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
2504 r = int_range<2> (type, lh_lb, lh_ub);
2505 return;
2508 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2509 wide_int complement = ~(bound - 1);
2510 wide_int low_bound, high_bound;
2511 bool in_bounds = false;
2513 if (sign == UNSIGNED)
2515 low_bound = bound;
2516 high_bound = complement;
2517 if (wi::ltu_p (lh_ub, low_bound))
2519 // [5, 6] << [1, 2] == [10, 24].
2520 // We're shifting out only zeroes, the value increases
2521 // monotonically.
2522 in_bounds = true;
2524 else if (wi::ltu_p (high_bound, lh_lb))
2526 // [0xffffff00, 0xffffffff] << [1, 2]
2527 // == [0xfffffc00, 0xfffffffe].
2528 // We're shifting out only ones, the value decreases
2529 // monotonically.
2530 in_bounds = true;
2533 else
2535 // [-1, 1] << [1, 2] == [-4, 4]
2536 low_bound = complement;
2537 high_bound = bound;
2538 if (wi::lts_p (lh_ub, high_bound)
2539 && wi::lts_p (low_bound, lh_lb))
2541 // For non-negative numbers, we're shifting out only zeroes,
2542 // the value increases monotonically. For negative numbers,
2543 // we're shifting out only ones, the value decreases
2544 // monotonically.
2545 in_bounds = true;
2549 if (in_bounds)
2550 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2551 else
2552 r.set_varying (type);
2555 bool
2556 operator_lshift::wi_op_overflows (wide_int &res, tree type,
2557 const wide_int &w0, const wide_int &w1) const
2559 signop sign = TYPE_SIGN (type);
2560 if (wi::neg_p (w1))
2562 // It's unclear from the C standard whether shifts can overflow.
2563 // The following code ignores overflow; perhaps a C standard
2564 // interpretation ruling is needed.
2565 res = wi::rshift (w0, -w1, sign);
2567 else
2568 res = wi::lshift (w0, w1);
2569 return false;
2572 bool
2573 operator_lshift::op1_range (irange &r,
2574 tree type,
2575 const irange &lhs,
2576 const irange &op2,
2577 relation_trio) const
2579 if (lhs.undefined_p ())
2580 return false;
2582 if (!contains_zero_p (lhs))
2583 r.set_nonzero (type);
2584 else
2585 r.set_varying (type);
2587 wide_int shift;
2588 if (op2.singleton_p (shift))
2590 if (wi::lt_p (shift, 0, SIGNED))
2591 return false;
2592 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
2593 TYPE_PRECISION (op2.type ())),
2594 UNSIGNED))
2595 return false;
2596 if (shift == 0)
2598 r.intersect (lhs);
2599 return true;
2602 // Work completely in unsigned mode to start.
2603 tree utype = type;
2604 int_range_max tmp_range;
2605 if (TYPE_SIGN (type) == SIGNED)
2607 int_range_max tmp = lhs;
2608 utype = unsigned_type_for (type);
2609 range_cast (tmp, utype);
2610 op_rshift.fold_range (tmp_range, utype, tmp, op2);
2612 else
2613 op_rshift.fold_range (tmp_range, utype, lhs, op2);
2615 // Start with ranges which can produce the LHS by right shifting the
2616 // result by the shift amount.
2617 // ie [0x08, 0xF0] = op1 << 2 will start with
2618 // [00001000, 11110000] = op1 << 2
2619 // [0x02, 0x4C] aka [00000010, 00111100]
2621 // Then create a range from the LB with the least significant upper bit
2622 // set, to the upper bound with all the bits set.
2623 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2625 // Ideally we do this for each subrange, but just lump them all for now.
2626 unsigned low_bits = TYPE_PRECISION (utype) - shift.to_uhwi ();
2627 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
2628 wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
2629 wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
2630 int_range<2> fill_range (utype, new_lb, new_ub);
2631 tmp_range.union_ (fill_range);
2633 if (utype != type)
2634 range_cast (tmp_range, type);
2636 r.intersect (tmp_range);
2637 return true;
2640 return !r.varying_p ();
2643 bool
2644 operator_rshift::op1_range (irange &r,
2645 tree type,
2646 const irange &lhs,
2647 const irange &op2,
2648 relation_trio) const
2650 if (lhs.undefined_p ())
2651 return false;
2652 wide_int shift;
2653 if (op2.singleton_p (shift))
2655 // Ignore nonsensical shifts.
2656 unsigned prec = TYPE_PRECISION (type);
2657 if (wi::ge_p (shift,
2658 wi::uhwi (prec, TYPE_PRECISION (op2.type ())),
2659 UNSIGNED))
2660 return false;
2661 if (shift == 0)
2663 r = lhs;
2664 return true;
2667 // Folding the original operation may discard some impossible
2668 // ranges from the LHS.
2669 int_range_max lhs_refined;
2670 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
2671 lhs_refined.intersect (lhs);
2672 if (lhs_refined.undefined_p ())
2674 r.set_undefined ();
2675 return true;
2677 int_range_max shift_range (op2.type (), shift, shift);
2678 int_range_max lb, ub;
2679 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
2680 // LHS
2681 // 0000 0111 = OP1 >> 3
2683 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2684 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2685 // right hand side (0x07).
2686 wide_int mask = wi::bit_not (wi::lshift (wi::minus_one (prec), shift));
2687 int_range_max mask_range (type,
2688 wi::zero (TYPE_PRECISION (type)),
2689 mask);
2690 op_plus.fold_range (ub, type, lb, mask_range);
2691 r = lb;
2692 r.union_ (ub);
2693 if (!contains_zero_p (lhs_refined))
2695 mask_range.invert ();
2696 r.intersect (mask_range);
2698 return true;
2700 return false;
2703 bool
2704 operator_rshift::wi_op_overflows (wide_int &res,
2705 tree type,
2706 const wide_int &w0,
2707 const wide_int &w1) const
2709 signop sign = TYPE_SIGN (type);
2710 if (wi::neg_p (w1))
2711 res = wi::lshift (w0, -w1);
2712 else
2714 // It's unclear from the C standard whether shifts can overflow.
2715 // The following code ignores overflow; perhaps a C standard
2716 // interpretation ruling is needed.
2717 res = wi::rshift (w0, w1, sign);
2719 return false;
2722 bool
2723 operator_rshift::fold_range (irange &r, tree type,
2724 const irange &op1,
2725 const irange &op2,
2726 relation_trio rel) const
2728 int_range_max shift;
2729 if (!get_shift_range (shift, type, op2))
2731 if (op2.undefined_p ())
2732 r.set_undefined ();
2733 else
2734 r.set_zero (type);
2735 return true;
2738 return range_operator::fold_range (r, type, op1, shift, rel);
2741 void
2742 operator_rshift::wi_fold (irange &r, tree type,
2743 const wide_int &lh_lb, const wide_int &lh_ub,
2744 const wide_int &rh_lb, const wide_int &rh_ub) const
2746 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2750 // Add a partial equivalence between the LHS and op1 for casts.
2752 relation_kind
2753 operator_cast::lhs_op1_relation (const irange &lhs,
2754 const irange &op1,
2755 const irange &op2 ATTRIBUTE_UNUSED,
2756 relation_kind) const
2758 if (lhs.undefined_p () || op1.undefined_p ())
2759 return VREL_VARYING;
2760 unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
2761 unsigned op1_prec = TYPE_PRECISION (op1.type ());
2762 // If the result gets sign extended into a larger type, check first if this
2763 // qualifies as a partial equivalence.
2764 if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
2766 // If the result is sign extended, and the LHS is larger than op1,
2767 // check if op1's range can be negative as the sign extension will
2768 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2769 int_range<3> negs = range_negatives (op1.type ());
2770 negs.intersect (op1);
2771 if (!negs.undefined_p ())
2772 return VREL_VARYING;
2775 unsigned prec = MIN (lhs_prec, op1_prec);
2776 return bits_to_pe (prec);
2779 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2781 inline bool
2782 operator_cast::truncating_cast_p (const irange &inner,
2783 const irange &outer) const
2785 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
2788 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2790 bool
2791 operator_cast::inside_domain_p (const wide_int &min,
2792 const wide_int &max,
2793 const irange &range) const
2795 wide_int domain_min = irange_val_min (range.type ());
2796 wide_int domain_max = irange_val_max (range.type ());
2797 signop domain_sign = TYPE_SIGN (range.type ());
2798 return (wi::le_p (min, domain_max, domain_sign)
2799 && wi::le_p (max, domain_max, domain_sign)
2800 && wi::ge_p (min, domain_min, domain_sign)
2801 && wi::ge_p (max, domain_min, domain_sign));
2805 // Helper for fold_range which works on one pair at a time.
2807 void
2808 operator_cast::fold_pair (irange &r, unsigned index,
2809 const irange &inner,
2810 const irange &outer) const
2812 tree inner_type = inner.type ();
2813 tree outer_type = outer.type ();
2814 signop inner_sign = TYPE_SIGN (inner_type);
2815 unsigned outer_prec = TYPE_PRECISION (outer_type);
2817 // check to see if casting from INNER to OUTER is a conversion that
2818 // fits in the resulting OUTER type.
2819 wide_int inner_lb = inner.lower_bound (index);
2820 wide_int inner_ub = inner.upper_bound (index);
2821 if (truncating_cast_p (inner, outer))
2823 // We may be able to accommodate a truncating cast if the
2824 // resulting range can be represented in the target type...
2825 if (wi::rshift (wi::sub (inner_ub, inner_lb),
2826 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
2827 inner_sign) != 0)
2829 r.set_varying (outer_type);
2830 return;
2833 // ...but we must still verify that the final range fits in the
2834 // domain. This catches -fstrict-enum restrictions where the domain
2835 // range is smaller than what fits in the underlying type.
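// For instance, a 16-bit unsigned [300, 310] truncated to 8 bits spans only
// 11 values, so it normally survives as [44, 54] rather than going to varying.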
2836 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
2837 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
2838 if (inside_domain_p (min, max, outer))
2839 create_possibly_reversed_range (r, outer_type, min, max);
2840 else
2841 r.set_varying (outer_type);
2845 bool
2846 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
2847 const irange &inner,
2848 const irange &outer,
2849 relation_trio) const
2851 if (empty_range_varying (r, type, inner, outer))
2852 return true;
2854 gcc_checking_assert (outer.varying_p ());
2855 gcc_checking_assert (inner.num_pairs () > 0);
2857 // Avoid a temporary by folding the first pair directly into the result.
2858 fold_pair (r, 0, inner, outer);
2860 // Then process any additional pairs by unioning with their results.
2861 for (unsigned x = 1; x < inner.num_pairs (); ++x)
2863 int_range_max tmp;
2864 fold_pair (tmp, x, inner, outer);
2865 r.union_ (tmp);
2866 if (r.varying_p ())
2867 return true;
2870 // Update the bitmask. Truncating casts are problematic unless
2871 // the conversion fits in the resulting outer type.
2872 irange_bitmask bm = inner.get_bitmask ();
2873 if (truncating_cast_p (inner, outer)
2874 && wi::rshift (bm.mask (),
2875 wi::uhwi (TYPE_PRECISION (outer.type ()),
2876 TYPE_PRECISION (inner.type ())),
2877 TYPE_SIGN (inner.type ())) != 0)
2878 return true;
2879 unsigned prec = TYPE_PRECISION (type);
2880 signop sign = TYPE_SIGN (inner.type ());
2881 bm = irange_bitmask (wide_int::from (bm.value (), prec, sign),
2882 wide_int::from (bm.mask (), prec, sign));
2883 r.update_bitmask (bm);
2885 return true;
2888 bool
2889 operator_cast::op1_range (irange &r, tree type,
2890 const irange &lhs,
2891 const irange &op2,
2892 relation_trio) const
2894 if (lhs.undefined_p ())
2895 return false;
2896 tree lhs_type = lhs.type ();
2897 gcc_checking_assert (types_compatible_p (op2.type(), type));
2899 // If we are calculating a pointer, shortcut to what we really care about.
2900 if (POINTER_TYPE_P (type))
2902 // Conversions from other pointers or a constant (including 0/NULL)
2903 // are straightforward.
2904 if (POINTER_TYPE_P (lhs.type ())
2905 || (lhs.singleton_p ()
2906 && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type)))
2908 r = lhs;
2909 range_cast (r, type);
2911 else
2913 // If the LHS is neither a pointer nor a singleton, then it is
2914 // either VARYING or non-zero.
2915 if (!contains_zero_p (lhs))
2916 r.set_nonzero (type);
2917 else
2918 r.set_varying (type);
2920 r.intersect (op2);
2921 return true;
2924 if (truncating_cast_p (op2, lhs))
2926 if (lhs.varying_p ())
2927 r.set_varying (type);
2928 else
2930 // We want to insert the LHS as an unsigned value since it
2931 // would not trigger the signed bit of the larger type.
2932 int_range_max converted_lhs = lhs;
2933 range_cast (converted_lhs, unsigned_type_for (lhs_type));
2934 range_cast (converted_lhs, type);
2935 // Start by building the positive signed outer range for the type.
2936 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
2937 TYPE_PRECISION (type));
2938 create_possibly_reversed_range (r, type, lim,
2939 wi::max_value (TYPE_PRECISION (type),
2940 SIGNED));
2941 // For the signed part, we need to simply union the 2 ranges now.
2942 r.union_ (converted_lhs);
2944 // Create maximal negative number outside of LHS bits.
2945 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
2946 TYPE_PRECISION (type));
2947 // Add this to the unsigned LHS range(s).
2948 int_range_max lim_range (type, lim, lim);
2949 int_range_max lhs_neg;
2950 range_op_handler (PLUS_EXPR).fold_range (lhs_neg, type,
2951 converted_lhs, lim_range);
2952 // lhs_neg now has all the negative versions of the LHS.
2953 // Now union in all the values from SIGNED MIN (0x80000) to
2954 // lim-1 in order to fill in all the ranges with the upper
2955 // bits set.
2957 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
2958 // we don't need to create a range from min to lim-1, since calculating
2959 // the negative range would trap trying to create [lim, lim - 1].
2960 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
2961 if (lim != min_val)
2963 int_range_max neg (type,
2964 wi::min_value (TYPE_PRECISION (type),
2965 SIGNED),
2966 lim - 1);
2967 lhs_neg.union_ (neg);
2969 // And finally, munge the signed and unsigned portions.
2970 r.union_ (lhs_neg);
2972 // And intersect with any known value passed in the extra operand.
2973 r.intersect (op2);
2974 return true;
2977 int_range_max tmp;
2978 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
2979 tmp = lhs;
2980 else
2982 // The cast is not truncating, and the range is restricted to
2983 // the range of the RHS by this assignment.
2985 // Cast the range of the RHS to the type of the LHS.
2986 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
2987 // Intersecting this with the LHS range will produce the range,
2988 // which will be cast to the RHS type before returning.
2989 tmp.intersect (lhs);
2992 // Cast the calculated range to the type of the RHS.
2993 fold_range (r, type, tmp, int_range<1> (type));
2994 return true;
2998 class operator_logical_and : public range_operator
3000 using range_operator::fold_range;
3001 using range_operator::op1_range;
3002 using range_operator::op2_range;
3003 public:
3004 virtual bool fold_range (irange &r, tree type,
3005 const irange &lh,
3006 const irange &rh,
3007 relation_trio rel = TRIO_VARYING) const;
3008 virtual bool op1_range (irange &r, tree type,
3009 const irange &lhs,
3010 const irange &op2,
3011 relation_trio rel = TRIO_VARYING) const;
3012 virtual bool op2_range (irange &r, tree type,
3013 const irange &lhs,
3014 const irange &op1,
3015 relation_trio rel = TRIO_VARYING) const;
3016 } op_logical_and;
3019 bool
3020 operator_logical_and::fold_range (irange &r, tree type,
3021 const irange &lh,
3022 const irange &rh,
3023 relation_trio) const
3025 if (empty_range_varying (r, type, lh, rh))
3026 return true;
3028 // 0 && anything is 0.
3029 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
3030 || (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
3031 r = range_false (type);
3032 else if (contains_zero_p (lh) || contains_zero_p (rh))
3033 // To reach this point, there must be a logical 1 on each side, and
3034 // the only remaining question is whether there is a zero or not.
3035 r = range_true_and_false (type);
3036 else
3037 r = range_true (type);
3038 return true;
3041 bool
3042 operator_logical_and::op1_range (irange &r, tree type,
3043 const irange &lhs,
3044 const irange &op2 ATTRIBUTE_UNUSED,
3045 relation_trio) const
3047 switch (get_bool_state (r, lhs, type))
3049 case BRS_TRUE:
3050 // A true result means both sides of the AND must be true.
3051 r = range_true (type);
3052 break;
3053 default:
3054 // Any other result means only one side has to be false, the
3055 // other side can be anything. So we cannot be sure of any
3056 // result here.
3057 r = range_true_and_false (type);
3058 break;
3060 return true;
3063 bool
3064 operator_logical_and::op2_range (irange &r, tree type,
3065 const irange &lhs,
3066 const irange &op1,
3067 relation_trio) const
3069 return operator_logical_and::op1_range (r, type, lhs, op1);
3073 void
3074 operator_bitwise_and::update_bitmask (irange &r, const irange &lh,
3075 const irange &rh) const
3077 update_known_bitmask (r, BIT_AND_EXPR, lh, rh);
3080 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3081 // by considering the number of leading redundant sign bit copies.
3082 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3083 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
3084 static bool
3085 wi_optimize_signed_bitwise_op (irange &r, tree type,
3086 const wide_int &lh_lb, const wide_int &lh_ub,
3087 const wide_int &rh_lb, const wide_int &rh_ub)
3089 int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
3090 int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
3091 int new_clrsb = MIN (lh_clrsb, rh_clrsb);
3092 if (new_clrsb == 0)
3093 return false;
3094 int type_prec = TYPE_PRECISION (type);
3095 int rprec = (type_prec - new_clrsb) - 1;
3096 value_range_with_overflow (r, type,
3097 wi::mask (rprec, true, type_prec),
3098 wi::mask (rprec, false, type_prec));
3099 return true;
3102 // An AND of 8, 16, 32 or 64 bits can produce a partial equivalence between
3103 // the LHS and op1.
3105 relation_kind
3106 operator_bitwise_and::lhs_op1_relation (const irange &lhs,
3107 const irange &op1,
3108 const irange &op2,
3109 relation_kind) const
3111 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
3112 return VREL_VARYING;
3113 if (!op2.singleton_p ())
3114 return VREL_VARYING;
3115 // If val == 0xff, 0xffff, 0xffffffff or 0xffffffffffffffff, return the matching partial equivalence.
3116 int prec1 = TYPE_PRECISION (op1.type ());
3117 int prec2 = TYPE_PRECISION (op2.type ());
3118 int mask_prec = 0;
3119 wide_int mask = op2.lower_bound ();
3120 if (wi::eq_p (mask, wi::mask (8, false, prec2)))
3121 mask_prec = 8;
3122 else if (wi::eq_p (mask, wi::mask (16, false, prec2)))
3123 mask_prec = 16;
3124 else if (wi::eq_p (mask, wi::mask (32, false, prec2)))
3125 mask_prec = 32;
3126 else if (wi::eq_p (mask, wi::mask (64, false, prec2)))
3127 mask_prec = 64;
3128 return bits_to_pe (MIN (prec1, mask_prec));
3131 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3132 // possible. Basically, see if we can optimize:
3134 // [LB, UB] op Z
3135 // into:
3136 // [LB op Z, UB op Z]
3138 // If the optimization was successful, accumulate the range in R and
3139 // return TRUE.
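// For example, [0x10, 0x1f] | 0x80 qualifies and folds directly to
// [0x90, 0x9f].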
3141 static bool
3142 wi_optimize_and_or (irange &r,
3143 enum tree_code code,
3144 tree type,
3145 const wide_int &lh_lb, const wide_int &lh_ub,
3146 const wide_int &rh_lb, const wide_int &rh_ub)
3148 // Calculate the singleton mask among the ranges, if any.
3149 wide_int lower_bound, upper_bound, mask;
3150 if (wi::eq_p (rh_lb, rh_ub))
3152 mask = rh_lb;
3153 lower_bound = lh_lb;
3154 upper_bound = lh_ub;
3156 else if (wi::eq_p (lh_lb, lh_ub))
3158 mask = lh_lb;
3159 lower_bound = rh_lb;
3160 upper_bound = rh_ub;
3162 else
3163 return false;
3165 // If Z is a constant which (for op | its bitwise not) has n
3166 // consecutive least significant bits cleared followed by m
3167 // consecutive 1 bits set immediately above them, and either
3168 // m + n == precision or (x >> (m + n)) == (y >> (m + n)), then
3170 // the least significant n bits of all the values in the range are
3171 // cleared or set, the m bits above them are preserved, and any bits
3172 // above these are required to be the same for all values in the
3173 // range.
3174 wide_int w = mask;
3175 int m = 0, n = 0;
3176 if (code == BIT_IOR_EXPR)
3177 w = ~w;
3178 if (wi::eq_p (w, 0))
3179 n = w.get_precision ();
3180 else
3182 n = wi::ctz (w);
3183 w = ~(w | wi::mask (n, false, w.get_precision ()));
3184 if (wi::eq_p (w, 0))
3185 m = w.get_precision () - n;
3186 else
3187 m = wi::ctz (w) - n;
3189 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
3190 if ((new_mask & lower_bound) != (new_mask & upper_bound))
3191 return false;
3193 wide_int res_lb, res_ub;
3194 if (code == BIT_AND_EXPR)
3196 res_lb = wi::bit_and (lower_bound, mask);
3197 res_ub = wi::bit_and (upper_bound, mask);
3199 else if (code == BIT_IOR_EXPR)
3201 res_lb = wi::bit_or (lower_bound, mask);
3202 res_ub = wi::bit_or (upper_bound, mask);
3204 else
3205 gcc_unreachable ();
3206 value_range_with_overflow (r, type, res_lb, res_ub);
3208 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3209 if (code == BIT_IOR_EXPR && wi::ne_p (mask, 0))
3211 int_range<2> tmp;
3212 tmp.set_nonzero (type);
3213 r.intersect (tmp);
3215 return true;
3218 // For range [LB, UB] compute two wide_int bit masks.
3220 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3221 // for all numbers in the range the bit is 0, otherwise it might be 0
3222 // or 1.
3224 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3225 // for all numbers in the range the bit is 1, otherwise it might be 0
3226 // or 1.
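// For example, for the range [0x10, 0x13] we get MAYBE_NONZERO == 0x13 and
// MUSTBE_NONZERO == 0x10, since only the low two bits vary across the range.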
3228 void
3229 wi_set_zero_nonzero_bits (tree type,
3230 const wide_int &lb, const wide_int &ub,
3231 wide_int &maybe_nonzero,
3232 wide_int &mustbe_nonzero)
3234 signop sign = TYPE_SIGN (type);
3236 if (wi::eq_p (lb, ub))
3237 maybe_nonzero = mustbe_nonzero = lb;
3238 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
3240 wide_int xor_mask = lb ^ ub;
3241 maybe_nonzero = lb | ub;
3242 mustbe_nonzero = lb & ub;
3243 if (xor_mask != 0)
3245 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
3246 maybe_nonzero.get_precision ());
3247 maybe_nonzero = maybe_nonzero | mask;
3248 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
3251 else
3253 maybe_nonzero = wi::minus_one (lb.get_precision ());
3254 mustbe_nonzero = wi::zero (lb.get_precision ());
3258 void
3259 operator_bitwise_and::wi_fold (irange &r, tree type,
3260 const wide_int &lh_lb,
3261 const wide_int &lh_ub,
3262 const wide_int &rh_lb,
3263 const wide_int &rh_ub) const
3265 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3266 return;
3268 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3269 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3270 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3271 maybe_nonzero_lh, mustbe_nonzero_lh);
3272 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3273 maybe_nonzero_rh, mustbe_nonzero_rh);
3275 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
3276 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
3277 signop sign = TYPE_SIGN (type);
3278 unsigned prec = TYPE_PRECISION (type);
3279 // If both input ranges contain only negative values, we can
3280 // truncate the result range maximum to the minimum of the
3281 // input range maxima.
3282 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
3284 new_ub = wi::min (new_ub, lh_ub, sign);
3285 new_ub = wi::min (new_ub, rh_ub, sign);
3287 // If either input range contains only non-negative values
3288 // we can truncate the result range maximum to the respective
3289 // maximum of the input range.
3290 if (wi::ge_p (lh_lb, 0, sign))
3291 new_ub = wi::min (new_ub, lh_ub, sign);
3292 if (wi::ge_p (rh_lb, 0, sign))
3293 new_ub = wi::min (new_ub, rh_ub, sign);
3294 // PR68217: In case of signed & sign-bit-CST should
3295 // result in [-INF, 0] instead of [-INF, INF].
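// For example, in a signed 8-bit type, X & 0x80 can only be -128 or 0,
// so the result is [-128, 0].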
3296 if (wi::gt_p (new_lb, new_ub, sign))
3298 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
3299 if (sign == SIGNED
3300 && ((wi::eq_p (lh_lb, lh_ub)
3301 && !wi::cmps (lh_lb, sign_bit))
3302 || (wi::eq_p (rh_lb, rh_ub)
3303 && !wi::cmps (rh_lb, sign_bit))))
3305 new_lb = wi::min_value (prec, sign);
3306 new_ub = wi::zero (prec);
3309 // If the limits got swapped around, return varying.
3310 if (wi::gt_p (new_lb, new_ub,sign))
3312 if (sign == SIGNED
3313 && wi_optimize_signed_bitwise_op (r, type,
3314 lh_lb, lh_ub,
3315 rh_lb, rh_ub))
3316 return;
3317 r.set_varying (type);
3319 else
3320 value_range_with_overflow (r, type, new_lb, new_ub);
3323 static void
3324 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
3326 if (!contains_zero_p (lhs))
3327 r = range_nonzero (type);
3328 else
3329 r.set_varying (type);
3332 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3333 (otherwise return VAL). VAL and MASK must be zero-extended for
3334 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3335 (to transform signed values into unsigned) and at the end xor
3336 SGNBIT back. */
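// For example, with PREC 8, VAL 5 and MASK 0b1100, the smallest RES greater
// than VAL with (RES & MASK) == RES is 8, so 8 is returned.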
3338 wide_int
3339 masked_increment (const wide_int &val_in, const wide_int &mask,
3340 const wide_int &sgnbit, unsigned int prec)
3342 wide_int bit = wi::one (prec), res;
3343 unsigned int i;
3345 wide_int val = val_in ^ sgnbit;
3346 for (i = 0; i < prec; i++, bit += bit)
3348 res = mask;
3349 if ((res & bit) == 0)
3350 continue;
3351 res = bit - 1;
3352 res = wi::bit_and_not (val + bit, res);
3353 res &= mask;
3354 if (wi::gtu_p (res, val))
3355 return res ^ sgnbit;
3357 return val ^ sgnbit;
3360 // This was shamelessly stolen from register_edge_assert_for_2 and
3361 // adjusted to work with iranges.
3363 void
3364 operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
3365 const irange &lhs,
3366 const irange &op2) const
3368 if (!op2.singleton_p ())
3370 set_nonzero_range_from_mask (r, type, lhs);
3371 return;
3373 unsigned int nprec = TYPE_PRECISION (type);
3374 wide_int cst2v = op2.lower_bound ();
3375 bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
3376 wide_int sgnbit;
3377 if (cst2n)
3378 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3379 else
3380 sgnbit = wi::zero (nprec);
3382 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3384 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3385 // maximum unsigned value is ~0. For signed comparison, if CST2
3386 // doesn't have the most significant bit set, handle it similarly. If
3387 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3388 wide_int valv = lhs.lower_bound ();
3389 wide_int minv = valv & cst2v, maxv;
3390 bool we_know_nothing = false;
3391 if (minv != valv)
3393 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3394 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3395 if (minv == valv)
3397 // If we can't determine anything on this bound, fall
3398 // through and conservatively solve for the other end point.
3399 we_know_nothing = true;
3402 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3403 if (we_know_nothing)
3404 r.set_varying (type);
3405 else
3406 create_possibly_reversed_range (r, type, minv, maxv);
3408 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3410 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3411 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3412 // VAL2 where
3413 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3414 // as maximum.
3415 // For signed comparison, if CST2 doesn't have most significant bit
3416 // set, handle it similarly. If CST2 has MSB set, the maximum is
3417 // the same and minimum is INT_MIN.
3418 valv = lhs.upper_bound ();
3419 minv = valv & cst2v;
3420 if (minv == valv)
3421 maxv = valv;
3422 else
3424 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3425 if (maxv == valv)
3427 // If we couldn't determine anything on either bound, return
3428 // undefined.
3429 if (we_know_nothing)
3430 r.set_undefined ();
3431 return;
3433 maxv -= 1;
3435 maxv |= ~cst2v;
3436 minv = sgnbit;
3437 int_range<2> upper_bits;
3438 create_possibly_reversed_range (upper_bits, type, minv, maxv);
3439 r.intersect (upper_bits);
3442 bool
3443 operator_bitwise_and::op1_range (irange &r, tree type,
3444 const irange &lhs,
3445 const irange &op2,
3446 relation_trio) const
3448 if (lhs.undefined_p ())
3449 return false;
3450 if (types_compatible_p (type, boolean_type_node))
3451 return op_logical_and.op1_range (r, type, lhs, op2);
3453 r.set_undefined ();
3454 for (unsigned i = 0; i < lhs.num_pairs (); ++i)
3456 int_range_max chunk (lhs.type (),
3457 lhs.lower_bound (i),
3458 lhs.upper_bound (i));
3459 int_range_max res;
3460 simple_op1_range_solver (res, type, chunk, op2);
3461 r.union_ (res);
3463 if (r.undefined_p ())
3464 set_nonzero_range_from_mask (r, type, lhs);
3466 // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
3467 wide_int mask;
3468 if (lhs == op2 && lhs.singleton_p (mask))
3470 r.update_bitmask (irange_bitmask (mask, ~mask));
3471 return true;
3474 // For 0 = op1 & MASK, op1 is ~MASK.
3475 if (lhs.zero_p () && op2.singleton_p ())
3477 wide_int nz = wi::bit_not (op2.get_nonzero_bits ());
3478 int_range<2> tmp (type);
3479 tmp.set_nonzero_bits (nz);
3480 r.intersect (tmp);
3482 return true;
3485 bool
3486 operator_bitwise_and::op2_range (irange &r, tree type,
3487 const irange &lhs,
3488 const irange &op1,
3489 relation_trio) const
3491 return operator_bitwise_and::op1_range (r, type, lhs, op1);
3495 class operator_logical_or : public range_operator
3497 using range_operator::fold_range;
3498 using range_operator::op1_range;
3499 using range_operator::op2_range;
3500 public:
3501 virtual bool fold_range (irange &r, tree type,
3502 const irange &lh,
3503 const irange &rh,
3504 relation_trio rel = TRIO_VARYING) const;
3505 virtual bool op1_range (irange &r, tree type,
3506 const irange &lhs,
3507 const irange &op2,
3508 relation_trio rel = TRIO_VARYING) const;
3509 virtual bool op2_range (irange &r, tree type,
3510 const irange &lhs,
3511 const irange &op1,
3512 relation_trio rel = TRIO_VARYING) const;
3513 } op_logical_or;
3515 bool
3516 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3517 const irange &lh,
3518 const irange &rh,
3519 relation_trio) const
3521 if (empty_range_varying (r, type, lh, rh))
3522 return true;
3524 r = lh;
3525 r.union_ (rh);
3526 return true;
3529 bool
3530 operator_logical_or::op1_range (irange &r, tree type,
3531 const irange &lhs,
3532 const irange &op2 ATTRIBUTE_UNUSED,
3533 relation_trio) const
3535 switch (get_bool_state (r, lhs, type))
3537 case BRS_FALSE:
3538 // A false result means both sides of the OR must be false.
3539 r = range_false (type);
3540 break;
3541 default:
3542 // Any other result means only one side has to be true, the
3543 // other side can be anything, so we can't be sure of any result
3544 // here.
3545 r = range_true_and_false (type);
3546 break;
3548 return true;
3551 bool
3552 operator_logical_or::op2_range (irange &r, tree type,
3553 const irange &lhs,
3554 const irange &op1,
3555 relation_trio) const
3557 return operator_logical_or::op1_range (r, type, lhs, op1);
3561 void
3562 operator_bitwise_or::update_bitmask (irange &r, const irange &lh,
3563 const irange &rh) const
3565 update_known_bitmask (r, BIT_IOR_EXPR, lh, rh);
3568 void
3569 operator_bitwise_or::wi_fold (irange &r, tree type,
3570 const wide_int &lh_lb,
3571 const wide_int &lh_ub,
3572 const wide_int &rh_lb,
3573 const wide_int &rh_ub) const
3575 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3576 return;
3578 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3579 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3580 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3581 maybe_nonzero_lh, mustbe_nonzero_lh);
3582 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3583 maybe_nonzero_rh, mustbe_nonzero_rh);
3584 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
3585 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
3586 signop sign = TYPE_SIGN (type);
3587 // If the input ranges contain only positive values we can
3588 // truncate the minimum of the result range to the maximum
3589 // of the input range minima.
3590 if (wi::ge_p (lh_lb, 0, sign)
3591 && wi::ge_p (rh_lb, 0, sign))
3593 new_lb = wi::max (new_lb, lh_lb, sign);
3594 new_lb = wi::max (new_lb, rh_lb, sign);
3596 // If either input range contains only negative values
3597 // we can truncate the minimum of the result range to the
3598 // respective minimum range.
3599 if (wi::lt_p (lh_ub, 0, sign))
3600 new_lb = wi::max (new_lb, lh_lb, sign);
3601 if (wi::lt_p (rh_ub, 0, sign))
3602 new_lb = wi::max (new_lb, rh_lb, sign);
3603 // If the limits got swapped around, return a conservative range.
3604 if (wi::gt_p (new_lb, new_ub, sign))
3606 // Make sure that nonzero|X is nonzero.
3607 if (wi::gt_p (lh_lb, 0, sign)
3608 || wi::gt_p (rh_lb, 0, sign)
3609 || wi::lt_p (lh_ub, 0, sign)
3610 || wi::lt_p (rh_ub, 0, sign))
3611 r.set_nonzero (type);
3612 else if (sign == SIGNED
3613 && wi_optimize_signed_bitwise_op (r, type,
3614 lh_lb, lh_ub,
3615 rh_lb, rh_ub))
3616 return;
3617 else
3618 r.set_varying (type);
3619 return;
3621 value_range_with_overflow (r, type, new_lb, new_ub);
3624 bool
3625 operator_bitwise_or::op1_range (irange &r, tree type,
3626 const irange &lhs,
3627 const irange &op2,
3628 relation_trio) const
3630 if (lhs.undefined_p ())
3631 return false;
3632 // If this is really a logical wi_fold, call that.
3633 if (types_compatible_p (type, boolean_type_node))
3634 return op_logical_or.op1_range (r, type, lhs, op2);
3636 if (lhs.zero_p ())
3638 r.set_zero (type);
3639 return true;
3641 r.set_varying (type);
3642 return true;
3645 bool
3646 operator_bitwise_or::op2_range (irange &r, tree type,
3647 const irange &lhs,
3648 const irange &op1,
3649 relation_trio) const
3651 return operator_bitwise_or::op1_range (r, type, lhs, op1);
3654 void
3655 operator_bitwise_xor::update_bitmask (irange &r, const irange &lh,
3656 const irange &rh) const
3658 update_known_bitmask (r, BIT_XOR_EXPR, lh, rh);
3661 void
3662 operator_bitwise_xor::wi_fold (irange &r, tree type,
3663 const wide_int &lh_lb,
3664 const wide_int &lh_ub,
3665 const wide_int &rh_lb,
3666 const wide_int &rh_ub) const
3668 signop sign = TYPE_SIGN (type);
3669 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3670 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3671 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3672 maybe_nonzero_lh, mustbe_nonzero_lh);
3673 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3674 maybe_nonzero_rh, mustbe_nonzero_rh);
3676 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
3677 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
3678 wide_int result_one_bits
3679 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
3680 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
3681 wide_int new_ub = ~result_zero_bits;
3682 wide_int new_lb = result_one_bits;
3684 // If the range has all positive or all negative values, the result
3685 // is better than VARYING.
3686 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
3687 value_range_with_overflow (r, type, new_lb, new_ub);
3688 else if (sign == SIGNED
3689 && wi_optimize_signed_bitwise_op (r, type,
3690 lh_lb, lh_ub,
3691 rh_lb, rh_ub))
3692 ; /* Do nothing. */
3693 else
3694 r.set_varying (type);
3696 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
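// For instance, [0, 3] ^ [5, 7] can never be zero because the operands can
// never be equal.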
3697 if (wi::lt_p (lh_ub, rh_lb, sign)
3698 || wi::lt_p (rh_ub, lh_lb, sign)
3699 || wi::ne_p (result_one_bits, 0))
3701 int_range<2> tmp;
3702 tmp.set_nonzero (type);
3703 r.intersect (tmp);
3707 bool
3708 operator_bitwise_xor::op1_op2_relation_effect (irange &lhs_range,
3709 tree type,
3710 const irange &,
3711 const irange &,
3712 relation_kind rel) const
3714 if (rel == VREL_VARYING)
3715 return false;
3717 int_range<2> rel_range;
3719 switch (rel)
3721 case VREL_EQ:
3722 rel_range.set_zero (type);
3723 break;
3724 case VREL_NE:
3725 rel_range.set_nonzero (type);
3726 break;
3727 default:
3728 return false;
3731 lhs_range.intersect (rel_range);
3732 return true;
3735 bool
3736 operator_bitwise_xor::op1_range (irange &r, tree type,
3737 const irange &lhs,
3738 const irange &op2,
3739 relation_trio) const
3741 if (lhs.undefined_p () || lhs.varying_p ())
3743 r = lhs;
3744 return true;
3746 if (types_compatible_p (type, boolean_type_node))
3748 switch (get_bool_state (r, lhs, type))
3750 case BRS_TRUE:
3751 if (op2.varying_p ())
3752 r.set_varying (type);
3753 else if (op2.zero_p ())
3754 r = range_true (type);
3755 // See get_bool_state for the rationale
3756 else if (contains_zero_p (op2))
3757 r = range_true_and_false (type);
3758 else
3759 r = range_false (type);
3760 break;
3761 case BRS_FALSE:
3762 r = op2;
3763 break;
3764 default:
3765 break;
3767 return true;
3769 r.set_varying (type);
3770 return true;
3773 bool
3774 operator_bitwise_xor::op2_range (irange &r, tree type,
3775 const irange &lhs,
3776 const irange &op1,
3777 relation_trio) const
3779 return operator_bitwise_xor::op1_range (r, type, lhs, op1);
3782 class operator_trunc_mod : public range_operator
3784 using range_operator::op1_range;
3785 using range_operator::op2_range;
3786 public:
3787 virtual void wi_fold (irange &r, tree type,
3788 const wide_int &lh_lb,
3789 const wide_int &lh_ub,
3790 const wide_int &rh_lb,
3791 const wide_int &rh_ub) const;
3792 virtual bool op1_range (irange &r, tree type,
3793 const irange &lhs,
3794 const irange &op2,
3795 relation_trio) const;
3796 virtual bool op2_range (irange &r, tree type,
3797 const irange &lhs,
3798 const irange &op1,
3799 relation_trio) const;
3800 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
3801 { update_known_bitmask (r, TRUNC_MOD_EXPR, lh, rh); }
3802 } op_trunc_mod;
3804 void
3805 operator_trunc_mod::wi_fold (irange &r, tree type,
3806 const wide_int &lh_lb,
3807 const wide_int &lh_ub,
3808 const wide_int &rh_lb,
3809 const wide_int &rh_ub) const
3811 wide_int new_lb, new_ub, tmp;
3812 signop sign = TYPE_SIGN (type);
3813 unsigned prec = TYPE_PRECISION (type);
3815 // Mod 0 is undefined.
3816 if (wi_zero_p (type, rh_lb, rh_ub))
3818 r.set_undefined ();
3819 return;
3822 // Check for constant and try to fold.
3823 if (lh_lb == lh_ub && rh_lb == rh_ub)
3825 wi::overflow_type ov = wi::OVF_NONE;
3826 tmp = wi::mod_trunc (lh_lb, rh_lb, sign, &ov);
3827 if (ov == wi::OVF_NONE)
3829 r = int_range<2> (type, tmp, tmp);
3830 return;
3834 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
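// For example, [0, 100] % [1, 10] yields [0, 9].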
3835 new_ub = rh_ub - 1;
3836 if (sign == SIGNED)
3838 tmp = -1 - rh_lb;
3839 new_ub = wi::smax (new_ub, tmp);
3842 if (sign == UNSIGNED)
3843 new_lb = wi::zero (prec);
3844 else
3846 new_lb = -new_ub;
3847 tmp = lh_lb;
3848 if (wi::gts_p (tmp, 0))
3849 tmp = wi::zero (prec);
3850 new_lb = wi::smax (new_lb, tmp);
3852 tmp = lh_ub;
3853 if (sign == SIGNED && wi::neg_p (tmp))
3854 tmp = wi::zero (prec);
3855 new_ub = wi::min (new_ub, tmp, sign);
3857 value_range_with_overflow (r, type, new_lb, new_ub);
3860 bool
3861 operator_trunc_mod::op1_range (irange &r, tree type,
3862 const irange &lhs,
3863 const irange &,
3864 relation_trio) const
3866 if (lhs.undefined_p ())
3867 return false;
3868 // PR 91029.
3869 signop sign = TYPE_SIGN (type);
3870 unsigned prec = TYPE_PRECISION (type);
3871 // If (a % b) >= x && x > 0, then a >= x.
3872 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3874 r = value_range (type, lhs.lower_bound (), wi::max_value (prec, sign));
3875 return true;
3877 // If (a % b) <= x && x < 0, then a <= x.
3878 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3880 r = value_range (type, wi::min_value (prec, sign), lhs.upper_bound ());
3881 return true;
3883 return false;
3886 bool
3887 operator_trunc_mod::op2_range (irange &r, tree type,
3888 const irange &lhs,
3889 const irange &,
3890 relation_trio) const
3892 if (lhs.undefined_p ())
3893 return false;
3894 // PR 91029.
3895 signop sign = TYPE_SIGN (type);
3896 unsigned prec = TYPE_PRECISION (type);
3897 // If (a % b) >= x && x > 0, then b is in ~[-x, x] for signed
3898 // or b > x for unsigned.
3899 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3901 if (sign == SIGNED)
3902 r = value_range (type, wi::neg (lhs.lower_bound ()),
3903 lhs.lower_bound (), VR_ANTI_RANGE);
3904 else if (wi::lt_p (lhs.lower_bound (), wi::max_value (prec, sign),
3905 sign))
3906 r = value_range (type, lhs.lower_bound () + 1,
3907 wi::max_value (prec, sign));
3908 else
3909 return false;
3910 return true;
3912 // If (a % b) <= x && x < 0, then b is in ~[x, -x].
3913 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3915 if (wi::gt_p (lhs.upper_bound (), wi::min_value (prec, sign), sign))
3916 r = value_range (type, lhs.upper_bound (),
3917 wi::neg (lhs.upper_bound ()), VR_ANTI_RANGE);
3918 else
3919 return false;
3920 return true;
3922 return false;
3926 class operator_logical_not : public range_operator
3928 using range_operator::fold_range;
3929 using range_operator::op1_range;
3930 public:
3931 virtual bool fold_range (irange &r, tree type,
3932 const irange &lh,
3933 const irange &rh,
3934 relation_trio rel = TRIO_VARYING) const;
3935 virtual bool op1_range (irange &r, tree type,
3936 const irange &lhs,
3937 const irange &op2,
3938 relation_trio rel = TRIO_VARYING) const;
3939 } op_logical_not;
3941 // Folding a logical NOT, oddly enough, involves doing nothing on the
3942 // forward pass through. During the initial walk backwards, the
3943 // logical NOT reversed the desired outcome on the way back, so on the
3944 // way forward all we do is pass the range forward.
3946 // b_2 = x_1 < 20
3947 // b_3 = !b_2
3948 // if (b_3)
3949 // to determine the TRUE branch, walking backward
3950 // if (b_3) if ([1,1])
3951 // b_3 = !b_2 [1,1] = ![0,0]
3952 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
3953 // which is the result we are looking for.. so.. pass it through.
3955 bool
3956 operator_logical_not::fold_range (irange &r, tree type,
3957 const irange &lh,
3958 const irange &rh ATTRIBUTE_UNUSED,
3959 relation_trio) const
3961 if (empty_range_varying (r, type, lh, rh))
3962 return true;
3964 r = lh;
3965 if (!lh.varying_p () && !lh.undefined_p ())
3966 r.invert ();
3968 return true;
3971 bool
3972 operator_logical_not::op1_range (irange &r,
3973 tree type,
3974 const irange &lhs,
3975 const irange &op2,
3976 relation_trio) const
3978 // Logical NOT is involutory... do it again.
3979 return fold_range (r, type, lhs, op2);
3983 bool
3984 operator_bitwise_not::fold_range (irange &r, tree type,
3985 const irange &lh,
3986 const irange &rh,
3987 relation_trio) const
3989 if (empty_range_varying (r, type, lh, rh))
3990 return true;
3992 if (types_compatible_p (type, boolean_type_node))
3993 return op_logical_not.fold_range (r, type, lh, rh);
3995 // ~X is simply -1 - X.
3996 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
3997 wi::minus_one (TYPE_PRECISION (type)));
3998 return range_op_handler (MINUS_EXPR).fold_range (r, type, minusone, lh);
4001 bool
4002 operator_bitwise_not::op1_range (irange &r, tree type,
4003 const irange &lhs,
4004 const irange &op2,
4005 relation_trio) const
4007 if (lhs.undefined_p ())
4008 return false;
4009 if (types_compatible_p (type, boolean_type_node))
4010 return op_logical_not.op1_range (r, type, lhs, op2);
4012 // ~X is -1 - X and since bitwise NOT is involutory... do it again.
4013 return fold_range (r, type, lhs, op2);
4017 bool
4018 operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4019 const irange &lh,
4020 const irange &rh ATTRIBUTE_UNUSED,
4021 relation_trio) const
4023 r = lh;
4024 return true;
4028 // Determine if there is a relationship between LHS and OP1.
4030 relation_kind
4031 operator_identity::lhs_op1_relation (const irange &lhs,
4032 const irange &op1 ATTRIBUTE_UNUSED,
4033 const irange &op2 ATTRIBUTE_UNUSED,
4034 relation_kind) const
4036 if (lhs.undefined_p ())
4037 return VREL_VARYING;
4038 // Simply a copy, so they are equivalent.
4039 return VREL_EQ;
4042 bool
4043 operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4044 const irange &lh,
4045 const irange &rh ATTRIBUTE_UNUSED,
4046 relation_trio) const
4048 r = lh;
4049 return true;
4052 bool
4053 operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
4054 const irange &lhs,
4055 const irange &op2 ATTRIBUTE_UNUSED,
4056 relation_trio) const
4058 r = lhs;
4059 return true;
4063 class operator_unknown : public range_operator
4065 using range_operator::fold_range;
4066 public:
4067 virtual bool fold_range (irange &r, tree type,
4068 const irange &op1,
4069 const irange &op2,
4070 relation_trio rel = TRIO_VARYING) const;
4071 } op_unknown;
4073 bool
4074 operator_unknown::fold_range (irange &r, tree type,
4075 const irange &lh ATTRIBUTE_UNUSED,
4076 const irange &rh ATTRIBUTE_UNUSED,
4077 relation_trio) const
4079 r.set_varying (type);
4080 return true;
4084 void
4085 operator_abs::wi_fold (irange &r, tree type,
4086 const wide_int &lh_lb, const wide_int &lh_ub,
4087 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4088 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4090 wide_int min, max;
4091 signop sign = TYPE_SIGN (type);
4092 unsigned prec = TYPE_PRECISION (type);
4094 // Pass through LH for the easy cases.
4095 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
4097 r = int_range<1> (type, lh_lb, lh_ub);
4098 return;
4101 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4102 // a useful range.
4103 wide_int min_value = wi::min_value (prec, sign);
4104 wide_int max_value = wi::max_value (prec, sign);
4105 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
4107 r.set_varying (type);
4108 return;
4111 // ABS_EXPR may flip the range around, if the original range
4112 // included negative values.
4113 if (wi::eq_p (lh_lb, min_value))
4115 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4116 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4117 if (wi::eq_p (lh_ub, min_value))
4119 r = int_range<1> (type, min_value, min_value);
4120 return;
4122 min = max_value;
4124 else
4125 min = wi::abs (lh_lb);
4127 if (wi::eq_p (lh_ub, min_value))
4128 max = max_value;
4129 else
4130 max = wi::abs (lh_ub);
4132 // If the range contains zero then we know that the minimum value in the
4133 // range will be zero.
4134 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
4136 if (wi::gt_p (min, max, sign))
4137 max = min;
4138 min = wi::zero (prec);
4140 else
4142 // If the range was reversed, swap MIN and MAX.
4143 if (wi::gt_p (min, max, sign))
4144 std::swap (min, max);
4147 // If the new range has its limits swapped around (MIN > MAX), then
4148 // the operation caused one of them to wrap around. The only thing
4149 // we know is that the result is positive.
4150 if (wi::gt_p (min, max, sign))
4152 min = wi::zero (prec);
4153 max = max_value;
4155 r = int_range<1> (type, min, max);
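// A hypothetical worked example (function name invented here) of the
// zero-straddling case above, assuming signed char: for [-5, 3] the
// magnitudes are 5 and 3, the range contains zero, so the lower bound
// drops to 0 and the larger magnitude becomes the upper bound.
static void
sketch_abs_wi_fold ()
{
  unsigned prec = TYPE_PRECISION (signed_char_type_node);
  int_range_max r;
  op_abs.fold_range (r, signed_char_type_node,
		     int_range<1> (signed_char_type_node,
				   wi::shwi (-5, prec), wi::shwi (3, prec)),
		     int_range<1> (signed_char_type_node));
  // r is now [0, 5].
}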
4158 bool
4159 operator_abs::op1_range (irange &r, tree type,
4160 const irange &lhs,
4161 const irange &op2,
4162 relation_trio) const
4164 if (empty_range_varying (r, type, lhs, op2))
4165 return true;
4166 if (TYPE_UNSIGNED (type))
4168 r = lhs;
4169 return true;
4171 // Start with the positives because negatives are an impossible result.
4172 int_range_max positives = range_positives (type);
4173 positives.intersect (lhs);
4174 r = positives;
4175 // Then add the negative of each pair:
4176 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4177 for (unsigned i = 0; i < positives.num_pairs (); ++i)
4178 r.union_ (int_range<1> (type,
4179 -positives.upper_bound (i),
4180 -positives.lower_bound (i)));
4181 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4182 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4183 wide_int min_value = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4184 wide_int lb = lhs.lower_bound ();
4185 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lb, min_value))
4186 r.union_ (int_range<2> (type, lb, lb));
4187 return true;
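// A hypothetical sketch (function name invented here) of the [5,20]
// example from the comment above, solving ABS (op1) == [5,20] for op1.
static void
sketch_abs_op1_range ()
{
  unsigned prec = TYPE_PRECISION (integer_type_node);
  int_range_max op1;
  op_abs.op1_range (op1, integer_type_node,
		    int_range<1> (integer_type_node,
				  wi::shwi (5, prec), wi::shwi (20, prec)),
		    int_range<1> (integer_type_node));
  // op1 is now [-20,-5][5,20]: the positive pair plus its mirror image.
}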
4191 class operator_absu : public range_operator
4193 public:
4194 virtual void wi_fold (irange &r, tree type,
4195 const wide_int &lh_lb, const wide_int &lh_ub,
4196 const wide_int &rh_lb, const wide_int &rh_ub) const;
4197 } op_absu;
4199 void
4200 operator_absu::wi_fold (irange &r, tree type,
4201 const wide_int &lh_lb, const wide_int &lh_ub,
4202 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4203 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4205 wide_int new_lb, new_ub;
4207 // Pass through LH for the easy cases.
4208 if (wi::ges_p (lh_lb, 0))
4210 new_lb = lh_lb;
4211 new_ub = lh_ub;
4213 else
4215 new_lb = wi::abs (lh_lb);
4216 new_ub = wi::abs (lh_ub);
4218 // If the range contains zero then we know that the minimum
4219 // value in the range will be zero.
4220 if (wi::ges_p (lh_ub, 0))
4222 if (wi::gtu_p (new_lb, new_ub))
4223 new_ub = new_lb;
4224 new_lb = wi::zero (TYPE_PRECISION (type));
4226 else
4227 std::swap (new_lb, new_ub);
4230 gcc_checking_assert (TYPE_UNSIGNED (type));
4231 r = int_range<1> (type, new_lb, new_ub);
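// A hypothetical sketch (function name invented here) exercising the
// fold above directly, assuming a signed char operand: ABSU of
// [-128, -1] is [1, 128] in unsigned char, a value the signed type
// could not represent.
static void
sketch_absu_wi_fold ()
{
  unsigned prec = TYPE_PRECISION (signed_char_type_node);
  int_range_max r;
  op_absu.wi_fold (r, unsigned_char_type_node,
		   wi::shwi (-128, prec), wi::shwi (-1, prec),
		   wi::uhwi (0, prec), wi::uhwi (0, prec));
  // r is now [1, 128] in unsigned char.
}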
4235 bool
4236 operator_negate::fold_range (irange &r, tree type,
4237 const irange &lh,
4238 const irange &rh,
4239 relation_trio) const
4241 if (empty_range_varying (r, type, lh, rh))
4242 return true;
4243 // -X is simply 0 - X.
4244 return range_op_handler (MINUS_EXPR).fold_range (r, type,
4245 range_zero (type), lh);
4248 bool
4249 operator_negate::op1_range (irange &r, tree type,
4250 const irange &lhs,
4251 const irange &op2,
4252 relation_trio) const
4254 // NEGATE is involutory.
4255 return fold_range (r, type, lhs, op2);
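// A hypothetical sketch (function name invented here) of the 0 - X
// rewrite and its involution: -[3,7] folds to [-7,-3], and folding the
// LHS again in op1_range recovers [3,7].
static void
sketch_negate_fold ()
{
  unsigned prec = TYPE_PRECISION (integer_type_node);
  int_range<1> x (integer_type_node, wi::shwi (3, prec), wi::shwi (7, prec));
  int_range_max r, back;
  op_negate.fold_range (r, integer_type_node, x, x);
  // r is now [-7, -3].
  op_negate.op1_range (back, integer_type_node, r, r);
  // back is [3, 7] again.
}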
4259 bool
4260 operator_addr_expr::fold_range (irange &r, tree type,
4261 const irange &lh,
4262 const irange &rh,
4263 relation_trio) const
4265 if (empty_range_varying (r, type, lh, rh))
4266 return true;
4268 // Return a non-null pointer of the LHS type (passed in op2).
4269 if (lh.zero_p ())
4270 r = range_zero (type);
4271 else if (!contains_zero_p (lh))
4272 r = range_nonzero (type);
4273 else
4274 r.set_varying (type);
4275 return true;
4278 bool
4279 operator_addr_expr::op1_range (irange &r, tree type,
4280 const irange &lhs,
4281 const irange &op2,
4282 relation_trio) const
4284 return operator_addr_expr::fold_range (r, type, lhs, op2);
4287 // Initialize the integral operators in the primary table.
4289 void
4290 range_op_table::initialize_integral_ops ()
4292 set (TRUNC_DIV_EXPR, op_trunc_div);
4293 set (FLOOR_DIV_EXPR, op_floor_div);
4294 set (ROUND_DIV_EXPR, op_round_div);
4295 set (CEIL_DIV_EXPR, op_ceil_div);
4296 set (EXACT_DIV_EXPR, op_exact_div);
4297 set (LSHIFT_EXPR, op_lshift);
4298 set (RSHIFT_EXPR, op_rshift);
4299 set (TRUTH_AND_EXPR, op_logical_and);
4300 set (TRUTH_OR_EXPR, op_logical_or);
4301 set (TRUNC_MOD_EXPR, op_trunc_mod);
4302 set (TRUTH_NOT_EXPR, op_logical_not);
4303 set (IMAGPART_EXPR, op_unknown);
4304 set (REALPART_EXPR, op_unknown);
4305 set (ABSU_EXPR, op_absu);
4306 set (OP_WIDEN_MULT_SIGNED, op_widen_mult_signed);
4307 set (OP_WIDEN_MULT_UNSIGNED, op_widen_mult_unsigned);
4308 set (OP_WIDEN_PLUS_SIGNED, op_widen_plus_signed);
4309 set (OP_WIDEN_PLUS_UNSIGNED, op_widen_plus_unsigned);
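// Dispatch through the table goes via range_op_handler, just as the
// unary operators above already do for MINUS_EXPR.  A hypothetical
// sketch (function name invented here) of that lookup-and-fold path:
static void
sketch_table_dispatch ()
{
  unsigned prec = TYPE_PRECISION (integer_type_node);
  int_range<1> num (integer_type_node, wi::shwi (10, prec), wi::shwi (20, prec));
  int_range<1> den (integer_type_node, wi::shwi (3, prec), wi::shwi (3, prec));
  int_range_max r;
  range_op_handler (TRUNC_DIV_EXPR).fold_range (r, integer_type_node, num, den);
  // r is now [3, 6]: 10/3 truncates to 3 and 20/3 to 6.
}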
4313 #if CHECKING_P
4314 #include "selftest.h"
4316 namespace selftest
4318 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4319 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4320 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4321 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4322 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4323 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4325 static void
4326 range_op_cast_tests ()
4328 int_range<2> r0, r1, r2, rold;
4329 r0.set_varying (integer_type_node);
4330 wide_int maxint = r0.upper_bound ();
4332 // If a range is in any way outside of the range of the type being
4333 // converted to, default to the full range of the new type.
4334 r0.set_varying (short_integer_type_node);
4335 wide_int minshort = r0.lower_bound ();
4336 wide_int maxshort = r0.upper_bound ();
4337 if (TYPE_PRECISION (integer_type_node)
4338 > TYPE_PRECISION (short_integer_type_node))
4340 r1 = int_range<1> (integer_type_node,
4341 wi::zero (TYPE_PRECISION (integer_type_node)),
4342 maxint);
4343 range_cast (r1, short_integer_type_node);
4344 ASSERT_TRUE (r1.lower_bound () == minshort
4345 && r1.upper_bound() == maxshort);
4348 // (unsigned char)[-5,-1] => [251,255].
4349 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (-1));
4350 range_cast (r0, unsigned_char_type_node);
4351 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node,
4352 UCHAR (251), UCHAR (255)));
4353 range_cast (r0, signed_char_type_node);
4354 ASSERT_TRUE (r0 == rold);
4356 // (signed char)[15, 150] => [-128,-106][15,127].
4357 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (15), UCHAR (150));
4358 range_cast (r0, signed_char_type_node);
4359 r1 = int_range<1> (signed_char_type_node, SCHAR (15), SCHAR (127));
4360 r2 = int_range<1> (signed_char_type_node, SCHAR (-128), SCHAR (-106));
4361 r1.union_ (r2);
4362 ASSERT_TRUE (r1 == r0);
4363 range_cast (r0, unsigned_char_type_node);
4364 ASSERT_TRUE (r0 == rold);
4366 // (unsigned char)[-5, 5] => [0,5][251,255].
4367 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (5));
4368 range_cast (r0, unsigned_char_type_node);
4369 r1 = int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255));
4370 r2 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4371 r1.union_ (r2);
4372 ASSERT_TRUE (r0 == r1);
4373 range_cast (r0, signed_char_type_node);
4374 ASSERT_TRUE (r0 == rold);
4376 // (unsigned char)[-5,5] => [0,5][251,255].
4377 r0 = int_range<1> (integer_type_node, INT (-5), INT (5));
4378 range_cast (r0, unsigned_char_type_node);
4379 r1 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4380 r1.union_ (int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255)));
4381 ASSERT_TRUE (r0 == r1);
4383 // (unsigned char)[5U,1974U] => [0,255].
4384 r0 = int_range<1> (unsigned_type_node, UINT (5), UINT (1974));
4385 range_cast (r0, unsigned_char_type_node);
4386 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (255)));
4387 range_cast (r0, integer_type_node);
4388 // Going to a wider type should not sign extend.
4389 ASSERT_TRUE (r0 == int_range<1> (integer_type_node, INT (0), INT (255)));
4391 // (unsigned char)[-350,15] => [0,255].
4392 r0 = int_range<1> (integer_type_node, INT (-350), INT (15));
4393 range_cast (r0, unsigned_char_type_node);
4394 ASSERT_TRUE (r0 == (int_range<1>
4395 (unsigned_char_type_node,
4396 min_limit (unsigned_char_type_node),
4397 max_limit (unsigned_char_type_node))));
4399 // Casting [-120,20] from signed char to unsigned short.
4400 // => [0, 20][0xff88, 0xffff].
4401 r0 = int_range<1> (signed_char_type_node, SCHAR (-120), SCHAR (20));
4402 range_cast (r0, short_unsigned_type_node);
4403 r1 = int_range<1> (short_unsigned_type_node, UINT16 (0), UINT16 (20));
4404 r2 = int_range<1> (short_unsigned_type_node,
4405 UINT16 (0xff88), UINT16 (0xffff));
4406 r1.union_ (r2);
4407 ASSERT_TRUE (r0 == r1);
4408 // A truncating cast back to signed char will work because [-120, 20]
4409 // is representable in signed char.
4410 range_cast (r0, signed_char_type_node);
4411 ASSERT_TRUE (r0 == int_range<1> (signed_char_type_node,
4412 SCHAR (-120), SCHAR (20)));
4414 // unsigned char -> signed short
4415 // (signed short)[(unsigned char)25, (unsigned char)250]
4416 // => [(signed short)25, (signed short)250]
4417 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (25), UCHAR (250));
4418 range_cast (r0, short_integer_type_node);
4419 r1 = int_range<1> (short_integer_type_node, INT16 (25), INT16 (250));
4420 ASSERT_TRUE (r0 == r1);
4421 range_cast (r0, unsigned_char_type_node);
4422 ASSERT_TRUE (r0 == rold);
4424 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4425 r0 = int_range<1> (long_long_integer_type_node,
4426 min_limit (long_long_integer_type_node),
4427 max_limit (long_long_integer_type_node));
4428 range_cast (r0, short_unsigned_type_node);
4429 r1 = int_range<1> (short_unsigned_type_node,
4430 min_limit (short_unsigned_type_node),
4431 max_limit (short_unsigned_type_node));
4432 ASSERT_TRUE (r0 == r1);
4434 // Casting NONZERO to a narrower type will wrap/overflow so
4435 // it's just the entire range for the narrower type.
4437 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4438 // is outside of the range of a smaller range, return the full
4439 // smaller range.
4440 if (TYPE_PRECISION (integer_type_node)
4441 > TYPE_PRECISION (short_integer_type_node))
4443 r0 = range_nonzero (integer_type_node);
4444 range_cast (r0, short_integer_type_node);
4445 r1 = int_range<1> (short_integer_type_node,
4446 min_limit (short_integer_type_node),
4447 max_limit (short_integer_type_node));
4448 ASSERT_TRUE (r0 == r1);
4451 // Casting NONZERO from a narrower signed to a wider signed.
4453 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4454 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4455 r0 = range_nonzero (short_integer_type_node);
4456 range_cast (r0, integer_type_node);
4457 r1 = int_range<1> (integer_type_node, INT (-32768), INT (-1));
4458 r2 = int_range<1> (integer_type_node, INT (1), INT (32767));
4459 r1.union_ (r2);
4460 ASSERT_TRUE (r0 == r1);
4463 static void
4464 range_op_lshift_tests ()
4466 // Test that 0x808.... & 0x8.... still contains 0x8....
4467 // for a large set of numbers.
4469 int_range_max res;
4470 tree big_type = long_long_unsigned_type_node;
4471 unsigned big_prec = TYPE_PRECISION (big_type);
4472 // big_num = 0x808,0000,0000,0000
4473 wide_int big_num = wi::lshift (wi::uhwi (0x808, big_prec),
4474 wi::uhwi (48, big_prec));
4475 op_bitwise_and.fold_range (res, big_type,
4476 int_range <1> (big_type),
4477 int_range <1> (big_type, big_num, big_num));
4478 // val = 0x8,0000,0000,0000
4479 wide_int val = wi::lshift (wi::uhwi (8, big_prec),
4480 wi::uhwi (48, big_prec));
4481 ASSERT_TRUE (res.contains_p (val));
4484 if (TYPE_PRECISION (unsigned_type_node) > 31)
4486 // unsigned VARYING = op1 << 1 should be VARYING.
4487 int_range<2> lhs (unsigned_type_node);
4488 int_range<2> shift (unsigned_type_node, INT (1), INT (1));
4489 int_range_max op1;
4490 op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
4491 ASSERT_TRUE (op1.varying_p ());
4493 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4494 int_range<2> zero (unsigned_type_node, UINT (0), UINT (0));
4495 op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
4496 ASSERT_TRUE (op1.num_pairs () == 2);
4497 // Remove the [0,0] range.
4498 op1.intersect (zero);
4499 ASSERT_TRUE (op1.num_pairs () == 1);
4500 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4501 // which should result in [0,0].
4502 int_range_max result;
4503 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4504 ASSERT_TRUE (result == zero);
4506 // signed VARYING = op1 << 1 should be VARYING.
4507 if (TYPE_PRECISION (integer_type_node) > 31)
4509 // signed VARYING = op1 << 1 should be VARYING.
4510 int_range<2> lhs (integer_type_node);
4511 int_range<2> shift (integer_type_node, INT (1), INT (1));
4512 int_range_max op1;
4513 op_lshift.op1_range (op1, integer_type_node, lhs, shift);
4514 ASSERT_TRUE (op1.varying_p ());
4516 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4517 int_range<2> zero (integer_type_node, INT (0), INT (0));
4518 op_lshift.op1_range (op1, integer_type_node, zero, shift);
4519 ASSERT_TRUE (op1.num_pairs () == 2);
4520 // Remove the [0,0] range.
4521 op1.intersect (zero);
4522 ASSERT_TRUE (op1.num_pairs () == 1);
4523 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4524 // which should result in [0,0].
4525 int_range_max result;
4526 op_lshift.fold_range (result, integer_type_node, op1, shift);
4527 ASSERT_TRUE (result == zero);
4531 static void
4532 range_op_rshift_tests ()
4534 // unsigned: [3, MAX] = OP1 >> 1
4536 int_range_max lhs (unsigned_type_node,
4537 UINT (3), max_limit (unsigned_type_node));
4538 int_range_max one (unsigned_type_node,
4539 wi::one (TYPE_PRECISION (unsigned_type_node)),
4540 wi::one (TYPE_PRECISION (unsigned_type_node)));
4541 int_range_max op1;
4542 op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
4543 ASSERT_FALSE (op1.contains_p (UINT (3)));
4546 // signed: [3, MAX] = OP1 >> 1
4548 int_range_max lhs (integer_type_node,
4549 INT (3), max_limit (integer_type_node));
4550 int_range_max one (integer_type_node, INT (1), INT (1));
4551 int_range_max op1;
4552 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4553 ASSERT_FALSE (op1.contains_p (INT (-2)));
4556 // This is impossible, so OP1 should be [].
4557 // signed: [MIN, MIN] = OP1 >> 1
4559 int_range_max lhs (integer_type_node,
4560 min_limit (integer_type_node),
4561 min_limit (integer_type_node));
4562 int_range_max one (integer_type_node, INT (1), INT (1));
4563 int_range_max op1;
4564 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4565 ASSERT_TRUE (op1.undefined_p ());
4568 // signed: ~[-1] = OP1 >> 31
4569 if (TYPE_PRECISION (integer_type_node) > 31)
4571 int_range_max lhs (integer_type_node, INT (-1), INT (-1), VR_ANTI_RANGE);
4572 int_range_max shift (integer_type_node, INT (31), INT (31));
4573 int_range_max op1;
4574 op_rshift.op1_range (op1, integer_type_node, lhs, shift);
4575 int_range_max negatives = range_negatives (integer_type_node);
4576 negatives.intersect (op1);
4577 ASSERT_TRUE (negatives.undefined_p ());
4581 static void
4582 range_op_bitwise_and_tests ()
4584 int_range_max res;
4585 wide_int min = min_limit (integer_type_node);
4586 wide_int max = max_limit (integer_type_node);
4587 wide_int tiny = wi::add (min, wi::one (TYPE_PRECISION (integer_type_node)));
4588 int_range_max i1 (integer_type_node, tiny, max);
4589 int_range_max i2 (integer_type_node, INT (255), INT (255));
4591 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4592 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4593 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4595 // VARYING = OP1 & 255: OP1 is VARYING
4596 i1 = int_range<1> (integer_type_node);
4597 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4598 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4600 // For 0 = x & MASK, x is ~MASK.
4602 int_range<2> zero (integer_type_node, INT (0), INT (0));
4603 int_range<2> mask = int_range<2> (integer_type_node, INT (7), INT (7));
4604 op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
4605 wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
4606 ASSERT_TRUE (res.get_nonzero_bits () == inv);
4609 // (NONZERO | X) is nonzero.
4610 i1.set_nonzero (integer_type_node);
4611 i2.set_varying (integer_type_node);
4612 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4613 ASSERT_TRUE (res.nonzero_p ());
4615 // (NEGATIVE | X) is nonzero.
4616 i1 = int_range<1> (integer_type_node, INT (-5), INT (-3));
4617 i2.set_varying (integer_type_node);
4618 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4619 ASSERT_FALSE (res.contains_p (INT (0)));
4622 static void
4623 range_relational_tests ()
4625 int_range<2> lhs (unsigned_char_type_node);
4626 int_range<2> op1 (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4627 int_range<2> op2 (unsigned_char_type_node, UCHAR (20), UCHAR (20));
4629 // Never-wrapping additions mean LHS > OP1.
4630 relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4631 ASSERT_TRUE (code == VREL_GT);
4633 // Additions that may or may not wrap tell us nothing...
4634 op1 = int_range<2> (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4635 op2 = int_range<2> (unsigned_char_type_node, UCHAR (0), UCHAR (255));
4636 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4637 ASSERT_TRUE (code == VREL_VARYING);
4639 // However, always-wrapping additions mean LHS < OP1.
4640 op1 = int_range<2> (unsigned_char_type_node, UCHAR (1), UCHAR (255));
4641 op2 = int_range<2> (unsigned_char_type_node, UCHAR (255), UCHAR (255));
4642 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4643 ASSERT_TRUE (code == VREL_LT);
4646 void
4647 range_op_tests ()
4649 range_op_rshift_tests ();
4650 range_op_lshift_tests ();
4651 range_op_bitwise_and_tests ();
4652 range_op_cast_tests ();
4653 range_relational_tests ();
4655 extern void range_op_float_tests ();
4656 range_op_float_tests ();
4659 } // namespace selftest
4661 #endif // CHECKING_P