gcc/range-op.cc
1 /* Code for range operators.
2 Copyright (C) 2017-2023 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "insn-codes.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "cfganal.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
43 #include "tree-eh.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "wide-int.h"
47 #include "value-relation.h"
48 #include "range-op.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
52 // Instantiate the operators which apply to multiple types here.
54 operator_equal op_equal;
55 operator_not_equal op_not_equal;
56 operator_lt op_lt;
57 operator_le op_le;
58 operator_gt op_gt;
59 operator_ge op_ge;
60 operator_identity op_ident;
61 operator_cst op_cst;
62 operator_cast op_cast;
63 operator_plus op_plus;
64 operator_abs op_abs;
65 operator_minus op_minus;
66 operator_negate op_negate;
67 operator_mult op_mult;
68 operator_addr_expr op_addr;
69 operator_bitwise_not op_bitwise_not;
70 operator_bitwise_xor op_bitwise_xor;
71 operator_bitwise_and op_bitwise_and;
72 operator_bitwise_or op_bitwise_or;
73 operator_min op_min;
74 operator_max op_max;
76 // Instantiate a range operator table.
77 range_op_table operator_table;
79 // Invoke the initialization routines for each class of range.
81 range_op_table::range_op_table ()
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
87 set (EQ_EXPR, op_equal);
88 set (NE_EXPR, op_not_equal);
89 set (LT_EXPR, op_lt);
90 set (LE_EXPR, op_le);
91 set (GT_EXPR, op_gt);
92 set (GE_EXPR, op_ge);
93 set (SSA_NAME, op_ident);
94 set (PAREN_EXPR, op_ident);
95 set (OBJ_TYPE_REF, op_ident);
96 set (REAL_CST, op_cst);
97 set (INTEGER_CST, op_cst);
98 set (NOP_EXPR, op_cast);
99 set (CONVERT_EXPR, op_cast);
100 set (PLUS_EXPR, op_plus);
101 set (ABS_EXPR, op_abs);
102 set (MINUS_EXPR, op_minus);
103 set (NEGATE_EXPR, op_negate);
104 set (MULT_EXPR, op_mult);
106 // These occur in both the integer and pointer tables, but currently share
107 // the integral implementation.
108 set (ADDR_EXPR, op_addr);
109 set (BIT_NOT_EXPR, op_bitwise_not);
110 set (BIT_XOR_EXPR, op_bitwise_xor);
112 // These are in both integer and pointer tables, but pointer has a different
113 // implementation.
114 // If commented out, there is a hybrid version in range-op-ptr.cc which
115 // is used until there is a pointer range class. Then we can simply
116 // uncomment the operator here and use the unified version.
118 // set (BIT_AND_EXPR, op_bitwise_and);
119 // set (BIT_IOR_EXPR, op_bitwise_or);
120 // set (MIN_EXPR, op_min);
121 // set (MAX_EXPR, op_max);
124 // Instantiate a default range operator for opcodes with no entry.
126 range_operator default_operator;
128 // Create a default range_op_handler.
130 range_op_handler::range_op_handler ()
132 m_operator = &default_operator;
135 // Create a range_op_handler for CODE. Use a default operator if CODE
136 // does not have an entry.
138 range_op_handler::range_op_handler (unsigned code)
140 m_operator = operator_table[code];
141 if (!m_operator)
142 m_operator = &default_operator;
145 // Return TRUE if this handler has a non-default operator.
147 range_op_handler::operator bool () const
149 return m_operator != &default_operator;
152 // Return a pointer to the range operator associated with this handler.
153 // If it is a default operator, return NULL.
154 // This is the equivalent of indexing the range table.
156 range_operator *
157 range_op_handler::range_op () const
159 if (m_operator != &default_operator)
160 return m_operator;
161 return NULL;
164 // Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
165 // This is used to produce a unique value for each dispatch pattern. Shift
166 // values are based on the size of the m_discriminator field in value_range.h.
168 constexpr unsigned
169 dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
171 return ((lhs << 8) + (op1 << 4) + (op2));
174 // These are the supported dispatch patterns. These map to the parameter list
175 // of the routines in range_operator. Note the last 3 characters are
176 // shorthand for the LHS, OP1, and OP2 range discriminator class.
178 const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
179 const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
180 const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
181 const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
182 const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
183 const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
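// For illustration, the encoding above works out as follows (the numeric
// values of the VR_* discriminators are whatever value_range.h defines):
//   RO_III == (VR_IRANGE << 8) + (VR_IRANGE << 4) + VR_IRANGE
// so a call with three iranges produces a dispatch value equal to RO_III and
// selects the all-integral case in the switches below.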
185 // Return a dispatch value for parameter types LHS, OP1 and OP2.
187 unsigned
188 range_op_handler::dispatch_kind (const vrange &lhs, const vrange &op1,
189 const vrange& op2) const
191 return dispatch_trio (lhs.m_discriminator, op1.m_discriminator,
192 op2.m_discriminator);
195 // Dispatch a call to fold_range based on the types of R, LH and RH.
197 bool
198 range_op_handler::fold_range (vrange &r, tree type,
199 const vrange &lh,
200 const vrange &rh,
201 relation_trio rel) const
203 gcc_checking_assert (m_operator);
204 switch (dispatch_kind (r, lh, rh))
206 case RO_III:
207 return m_operator->fold_range (as_a <irange> (r), type,
208 as_a <irange> (lh),
209 as_a <irange> (rh), rel);
210 case RO_IFI:
211 return m_operator->fold_range (as_a <irange> (r), type,
212 as_a <frange> (lh),
213 as_a <irange> (rh), rel);
214 case RO_IFF:
215 return m_operator->fold_range (as_a <irange> (r), type,
216 as_a <frange> (lh),
217 as_a <frange> (rh), rel);
218 case RO_FFF:
219 return m_operator->fold_range (as_a <frange> (r), type,
220 as_a <frange> (lh),
221 as_a <frange> (rh), rel);
222 case RO_FII:
223 return m_operator->fold_range (as_a <frange> (r), type,
224 as_a <irange> (lh),
225 as_a <irange> (rh), rel);
226 default:
227 return false;
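// A minimal usage sketch (the helper name below is invented for illustration
// and assumes an integral TYPE): folding [1,5] + [10,20] dispatches through
// the RO_III case above and yields [11,25].

static void
example_fold_plus (tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  int_range<2> op1 (type, wi::shwi (1, prec), wi::shwi (5, prec));
  int_range<2> op2 (type, wi::shwi (10, prec), wi::shwi (20, prec));
  int_range_max res;
  // Dispatch is based on the discriminators of RES, OP1 and OP2 (all iranges).
  range_op_handler (PLUS_EXPR).fold_range (res, type, op1, op2);
}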
231 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
233 bool
234 range_op_handler::op1_range (vrange &r, tree type,
235 const vrange &lhs,
236 const vrange &op2,
237 relation_trio rel) const
239 gcc_checking_assert (m_operator);
241 if (lhs.undefined_p ())
242 return false;
243 switch (dispatch_kind (r, lhs, op2))
245 case RO_III:
246 return m_operator->op1_range (as_a <irange> (r), type,
247 as_a <irange> (lhs),
248 as_a <irange> (op2), rel);
249 case RO_FIF:
250 return m_operator->op1_range (as_a <frange> (r), type,
251 as_a <irange> (lhs),
252 as_a <frange> (op2), rel);
253 case RO_FFF:
254 return m_operator->op1_range (as_a <frange> (r), type,
255 as_a <frange> (lhs),
256 as_a <frange> (op2), rel);
257 default:
258 return false;
262 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
264 bool
265 range_op_handler::op2_range (vrange &r, tree type,
266 const vrange &lhs,
267 const vrange &op1,
268 relation_trio rel) const
270 gcc_checking_assert (m_operator);
271 if (lhs.undefined_p ())
272 return false;
274 switch (dispatch_kind (r, lhs, op1))
276 case RO_III:
277 return m_operator->op2_range (as_a <irange> (r), type,
278 as_a <irange> (lhs),
279 as_a <irange> (op1), rel);
280 case RO_FIF:
281 return m_operator->op2_range (as_a <frange> (r), type,
282 as_a <irange> (lhs),
283 as_a <frange> (op1), rel);
284 case RO_FFF:
285 return m_operator->op2_range (as_a <frange> (r), type,
286 as_a <frange> (lhs),
287 as_a <frange> (op1), rel);
288 default:
289 return false;
293 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
295 relation_kind
296 range_op_handler::lhs_op1_relation (const vrange &lhs,
297 const vrange &op1,
298 const vrange &op2,
299 relation_kind rel) const
301 gcc_checking_assert (m_operator);
303 switch (dispatch_kind (lhs, op1, op2))
305 case RO_III:
306 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
307 as_a <irange> (op1),
308 as_a <irange> (op2), rel);
309 case RO_IFF:
310 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
311 as_a <frange> (op1),
312 as_a <frange> (op2), rel);
313 case RO_FFF:
314 return m_operator->lhs_op1_relation (as_a <frange> (lhs),
315 as_a <frange> (op1),
316 as_a <frange> (op2), rel);
317 default:
318 return VREL_VARYING;
322 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
324 relation_kind
325 range_op_handler::lhs_op2_relation (const vrange &lhs,
326 const vrange &op1,
327 const vrange &op2,
328 relation_kind rel) const
330 gcc_checking_assert (m_operator);
331 switch (dispatch_kind (lhs, op1, op2))
333 case RO_III:
334 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
335 as_a <irange> (op1),
336 as_a <irange> (op2), rel);
337 case RO_IFF:
338 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
339 as_a <frange> (op1),
340 as_a <frange> (op2), rel);
341 case RO_FFF:
342 return m_operator->lhs_op2_relation (as_a <frange> (lhs),
343 as_a <frange> (op1),
344 as_a <frange> (op2), rel);
345 default:
346 return VREL_VARYING;
350 // Dispatch a call to op1_op2_relation based on the type of LHS.
352 relation_kind
353 range_op_handler::op1_op2_relation (const vrange &lhs) const
355 gcc_checking_assert (m_operator);
356 switch (dispatch_kind (lhs, lhs, lhs))
358 case RO_III:
359 return m_operator->op1_op2_relation (as_a <irange> (lhs));
361 case RO_FFF:
362 return m_operator->op1_op2_relation (as_a <frange> (lhs));
364 default:
365 return VREL_VARYING;
370 // Convert irange bitmasks into a VALUE MASK pair suitable for calling CCP.
372 static void
373 irange_to_masked_value (const irange &r, widest_int &value, widest_int &mask)
375 if (r.singleton_p ())
377 mask = 0;
378 value = widest_int::from (r.lower_bound (), TYPE_SIGN (r.type ()));
380 else
382 mask = widest_int::from (r.get_nonzero_bits (), TYPE_SIGN (r.type ()));
383 value = 0;
387 // Update the known bitmasks in R when applying the operation CODE to
388 // LH and RH.
390 void
391 update_known_bitmask (irange &r, tree_code code,
392 const irange &lh, const irange &rh)
394 if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ())
395 return;
397 widest_int value, mask, lh_mask, rh_mask, lh_value, rh_value;
398 tree type = r.type ();
399 signop sign = TYPE_SIGN (type);
400 int prec = TYPE_PRECISION (type);
401 signop lh_sign = TYPE_SIGN (lh.type ());
402 signop rh_sign = TYPE_SIGN (rh.type ());
403 int lh_prec = TYPE_PRECISION (lh.type ());
404 int rh_prec = TYPE_PRECISION (rh.type ());
406 irange_to_masked_value (lh, lh_value, lh_mask);
407 irange_to_masked_value (rh, rh_value, rh_mask);
408 bit_value_binop (code, sign, prec, &value, &mask,
409 lh_sign, lh_prec, lh_value, lh_mask,
410 rh_sign, rh_prec, rh_value, rh_mask);
411 wide_int tmp = wide_int::from (value | mask, prec, sign);
412 r.set_nonzero_bits (tmp);
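// Rough worked example: folding _1 * 4 where _1 is otherwise unknown.  LH is
// not a singleton, so it contributes mask = its nonzero bits and value = 0;
// RH is the singleton 4 (mask 0, value 4).  bit_value_binop for MULT_EXPR
// then reports the low two bits of the product as known zero, and the
// VALUE|MASK stored above gives R nonzero bits of the form ...11111100.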
415 // Return the upper limit for a type.
417 static inline wide_int
418 max_limit (const_tree type)
420 return irange_val_max (type);
423 // Return the lower limit for a type.
425 static inline wide_int
426 min_limit (const_tree type)
428 return irange_val_min (type);
431 // Return false if shifting by OP is undefined behavior. Otherwise, return
432 // true and the range it is to be shifted by.  This allows trimming out
433 // the undefined parts, leaving only the valid shift amounts if there are any.
435 static inline bool
436 get_shift_range (irange &r, tree type, const irange &op)
438 if (op.undefined_p ())
439 return false;
441 // Build valid range and intersect it with the shift range.
442 r = value_range (op.type (),
443 wi::shwi (0, TYPE_PRECISION (op.type ())),
444 wi::shwi (TYPE_PRECISION (type) - 1, TYPE_PRECISION (op.type ())));
445 r.intersect (op);
447 // If there are no valid ranges in the shift range, return false.
448 if (r.undefined_p ())
449 return false;
450 return true;
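// For example, with a 32-bit shifted TYPE and OP = [-5, 40], the valid shift
// range is [0, 31] and the intersection leaves R = [0, 31].  With
// OP = [35, 40] the intersection is empty, so every shift amount is undefined
// and the routine returns false.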
453 // Default wide_int fold operation returns [MIN, MAX].
455 void
456 range_operator::wi_fold (irange &r, tree type,
457 const wide_int &lh_lb ATTRIBUTE_UNUSED,
458 const wide_int &lh_ub ATTRIBUTE_UNUSED,
459 const wide_int &rh_lb ATTRIBUTE_UNUSED,
460 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
462 gcc_checking_assert (r.supports_type_p (type));
463 r.set_varying (type);
466 // Call wi_fold when both op1 and op2 are equivalent. Further split small
467 // subranges into constants. This can provide better precision.
468 // For x + y, when x == y and x has the range [0, 4], produce
469 // [0,0][2,2][4,4][6,6][8,8] instead of [0, 8].
470 // LIMIT is the maximum number of elements in range allowed before we
471 // do not process them individually.
473 void
474 range_operator::wi_fold_in_parts_equiv (irange &r, tree type,
475 const wide_int &lh_lb,
476 const wide_int &lh_ub,
477 unsigned limit) const
479 int_range_max tmp;
480 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
481 widest_int::from (lh_lb, TYPE_SIGN (type)));
482 // If there are 1 to 8 values in the LH range, split them up.
483 r.set_undefined ();
484 if (lh_range >= 0 && lh_range < limit)
486 for (unsigned x = 0; x <= lh_range; x++)
488 wide_int val = lh_lb + x;
489 wi_fold (tmp, type, val, val, val, val);
490 r.union_ (tmp);
493 // Otherwise just call wi_fold.
494 else
495 wi_fold (r, type, lh_lb, lh_ub, lh_lb, lh_ub);
498 // Call wi_fold, except further split small subranges into constants.
499 // This can provide better precision.  For something like 8 << [0,1],
500 // instead of [8, 16] we will produce [8,8][16,16].
502 void
503 range_operator::wi_fold_in_parts (irange &r, tree type,
504 const wide_int &lh_lb,
505 const wide_int &lh_ub,
506 const wide_int &rh_lb,
507 const wide_int &rh_ub) const
509 int_range_max tmp;
510 widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
511 widest_int::from (rh_lb, TYPE_SIGN (type)));
512 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
513 widest_int::from (lh_lb, TYPE_SIGN (type)));
514 // If there are 2, 3, or 4 values in the RH range, do them separately.
515 // Call wi_fold_in_parts to check the RH side.
516 if (rh_range > 0 && rh_range < 4)
518 wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
519 if (rh_range > 1)
521 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
522 r.union_ (tmp);
523 if (rh_range == 3)
525 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
526 r.union_ (tmp);
529 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
530 r.union_ (tmp);
532 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
533 // The RH side has been checked, so no recursion needed.
534 else if (lh_range > 0 && lh_range < 4)
536 wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
537 if (lh_range > 1)
539 wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
540 r.union_ (tmp);
541 if (lh_range == 3)
543 wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
544 r.union_ (tmp);
547 wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
548 r.union_ (tmp);
550 // Otherwise just call wi_fold.
551 else
552 wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
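// A short worked trace of the splitting above: for [16, 16] >> [0, 2],
// rh_range is 2, so the fold is done for shift counts 0, 1 and 2 separately
// and the results are unioned, giving [4,4][8,8][16,16] instead of the
// single span [4, 16].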
555 // The default for fold is to break all ranges into sub-ranges and
556 // invoke the wi_fold method on each sub-range pair.
558 bool
559 range_operator::fold_range (irange &r, tree type,
560 const irange &lh,
561 const irange &rh,
562 relation_trio trio) const
564 gcc_checking_assert (r.supports_type_p (type));
565 if (empty_range_varying (r, type, lh, rh))
566 return true;
568 relation_kind rel = trio.op1_op2 ();
569 unsigned num_lh = lh.num_pairs ();
570 unsigned num_rh = rh.num_pairs ();
572 // If op1 and op2 are equivalences, then we don't need a complete cross
573 // product, just pairs of matching elements.
574 if (relation_equiv_p (rel) && lh == rh)
576 int_range_max tmp;
577 r.set_undefined ();
578 for (unsigned x = 0; x < num_lh; ++x)
580 // If the number of subranges is too high, limit subrange creation.
581 unsigned limit = (r.num_pairs () > 32) ? 0 : 8;
582 wide_int lh_lb = lh.lower_bound (x);
583 wide_int lh_ub = lh.upper_bound (x);
584 wi_fold_in_parts_equiv (tmp, type, lh_lb, lh_ub, limit);
585 r.union_ (tmp);
586 if (r.varying_p ())
587 break;
589 op1_op2_relation_effect (r, type, lh, rh, rel);
590 update_bitmask (r, lh, rh);
591 return true;
594 // If both ranges are single pairs, fold directly into the result range.
595 // If the number of subranges grows too high, produce a summary result as the
596 // loop becomes exponential with little benefit. See PR 103821.
597 if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
599 wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
600 rh.lower_bound (), rh.upper_bound ());
601 op1_op2_relation_effect (r, type, lh, rh, rel);
602 update_bitmask (r, lh, rh);
603 return true;
606 int_range_max tmp;
607 r.set_undefined ();
608 for (unsigned x = 0; x < num_lh; ++x)
609 for (unsigned y = 0; y < num_rh; ++y)
611 wide_int lh_lb = lh.lower_bound (x);
612 wide_int lh_ub = lh.upper_bound (x);
613 wide_int rh_lb = rh.lower_bound (y);
614 wide_int rh_ub = rh.upper_bound (y);
615 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
616 r.union_ (tmp);
617 if (r.varying_p ())
619 op1_op2_relation_effect (r, type, lh, rh, rel);
620 update_bitmask (r, lh, rh);
621 return true;
624 op1_op2_relation_effect (r, type, lh, rh, rel);
625 update_bitmask (r, lh, rh);
626 return true;
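// A small example of the sub-range walk above: folding [1,2][10,11] + [5,5]
// iterates over the pairs ([1,2],[5,5]) and ([10,11],[5,5]), folds them to
// [6,7] and [15,16], and unions the results into [6,7][15,16].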
629 // The default for op1_range is to return false.
631 bool
632 range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
633 tree type ATTRIBUTE_UNUSED,
634 const irange &lhs ATTRIBUTE_UNUSED,
635 const irange &op2 ATTRIBUTE_UNUSED,
636 relation_trio) const
638 return false;
641 // The default for op2_range is to return false.
643 bool
644 range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
645 tree type ATTRIBUTE_UNUSED,
646 const irange &lhs ATTRIBUTE_UNUSED,
647 const irange &op1 ATTRIBUTE_UNUSED,
648 relation_trio) const
650 return false;
653 // The default relation routines return VREL_VARYING.
655 relation_kind
656 range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
657 const irange &op1 ATTRIBUTE_UNUSED,
658 const irange &op2 ATTRIBUTE_UNUSED,
659 relation_kind rel ATTRIBUTE_UNUSED) const
661 return VREL_VARYING;
664 relation_kind
665 range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
666 const irange &op1 ATTRIBUTE_UNUSED,
667 const irange &op2 ATTRIBUTE_UNUSED,
668 relation_kind rel ATTRIBUTE_UNUSED) const
670 return VREL_VARYING;
673 relation_kind
674 range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED) const
676 return VREL_VARYING;
679 // Default is no relation affects the LHS.
681 bool
682 range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
683 tree type ATTRIBUTE_UNUSED,
684 const irange &op1_range ATTRIBUTE_UNUSED,
685 const irange &op2_range ATTRIBUTE_UNUSED,
686 relation_kind rel ATTRIBUTE_UNUSED) const
688 return false;
691 // Apply any known bitmask updates based on this operator.
693 void
694 range_operator::update_bitmask (irange &, const irange &,
695 const irange &) const
699 // Create and return a range from a pair of wide-ints that are known
700 // to have overflowed (or underflowed).
702 static void
703 value_range_from_overflowed_bounds (irange &r, tree type,
704 const wide_int &wmin,
705 const wide_int &wmax)
707 const signop sgn = TYPE_SIGN (type);
708 const unsigned int prec = TYPE_PRECISION (type);
710 wide_int tmin = wide_int::from (wmin, prec, sgn);
711 wide_int tmax = wide_int::from (wmax, prec, sgn);
713 bool covers = false;
714 wide_int tem = tmin;
715 tmin = tmax + 1;
716 if (wi::cmp (tmin, tmax, sgn) < 0)
717 covers = true;
718 tmax = tem - 1;
719 if (wi::cmp (tmax, tem, sgn) > 0)
720 covers = true;
722 // If the anti-range would cover nothing, drop to varying.
723 // Likewise if the anti-range bounds are outside of the type's
724 // values.
725 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
726 r.set_varying (type);
727 else
728 r.set (type, tmin, tmax, VR_ANTI_RANGE);
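// Worked example, 8-bit unsigned: if an addition produced WMIN = 250 and an
// overflowed WMAX corresponding to 260, the truncated bounds are 250 and 4.
// The swap above turns this into the anti-range ~[5, 249], i.e.
// [0,4][250,255], which is exactly the set of wrapped results.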
731 // Create and return a range from a pair of wide-ints. MIN_OVF and
732 // MAX_OVF describe any overflow that might have occurred while
733 // calculating WMIN and WMAX respectively.
735 static void
736 value_range_with_overflow (irange &r, tree type,
737 const wide_int &wmin, const wide_int &wmax,
738 wi::overflow_type min_ovf = wi::OVF_NONE,
739 wi::overflow_type max_ovf = wi::OVF_NONE)
741 const signop sgn = TYPE_SIGN (type);
742 const unsigned int prec = TYPE_PRECISION (type);
743 const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
745 // For one bit precision if max != min, then the range covers all
746 // values.
747 if (prec == 1 && wi::ne_p (wmax, wmin))
749 r.set_varying (type);
750 return;
753 if (overflow_wraps)
755 // If overflow wraps, truncate the values and adjust the range,
756 // kind, and bounds appropriately.
757 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
759 wide_int tmin = wide_int::from (wmin, prec, sgn);
760 wide_int tmax = wide_int::from (wmax, prec, sgn);
761 // If the limits are swapped, we wrapped around and cover
762 // the entire range.
763 if (wi::gt_p (tmin, tmax, sgn))
764 r.set_varying (type);
765 else
766 // No overflow or both overflow or underflow. The range
767 // kind stays normal.
768 r.set (type, tmin, tmax);
769 return;
772 if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
773 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
774 value_range_from_overflowed_bounds (r, type, wmin, wmax);
775 else
776 // Other underflow and/or overflow, drop to VR_VARYING.
777 r.set_varying (type);
779 else
781 // If both bounds either underflowed or overflowed, then the result
782 // is undefined.
783 if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
784 || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
786 r.set_undefined ();
787 return;
790 // If overflow does not wrap, saturate to [MIN, MAX].
791 wide_int new_lb, new_ub;
792 if (min_ovf == wi::OVF_UNDERFLOW)
793 new_lb = wi::min_value (prec, sgn);
794 else if (min_ovf == wi::OVF_OVERFLOW)
795 new_lb = wi::max_value (prec, sgn);
796 else
797 new_lb = wmin;
799 if (max_ovf == wi::OVF_UNDERFLOW)
800 new_ub = wi::min_value (prec, sgn);
801 else if (max_ovf == wi::OVF_OVERFLOW)
802 new_ub = wi::max_value (prec, sgn);
803 else
804 new_ub = wmax;
806 r.set (type, new_lb, new_ub);
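// Worked example for the non-wrapping branch: signed 8-bit [100,120] + [10,20]
// computes WMIN = 110 with no overflow and an overflowed WMAX, so the bounds
// saturate and the result is [110, 127].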
810 // Create and return a range from a pair of wide-ints. Canonicalize
811 // the case where the bounds are swapped, transforming e.g.
812 // [10,5] into [MIN,5][10,MAX].
814 static inline void
815 create_possibly_reversed_range (irange &r, tree type,
816 const wide_int &new_lb, const wide_int &new_ub)
818 signop s = TYPE_SIGN (type);
819 // If the bounds are swapped, treat the result as if an overflow occurred.
820 if (wi::gt_p (new_lb, new_ub, s))
821 value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
822 else
823 // Otherwise it's just a normal range.
824 r.set (type, new_lb, new_ub);
827 // Return the summary information about boolean range LHS. If EMPTY/FULL,
828 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
830 bool_range_state
831 get_bool_state (vrange &r, const vrange &lhs, tree val_type)
833 // If there is no result, then this is unexecutable.
834 if (lhs.undefined_p ())
836 r.set_undefined ();
837 return BRS_EMPTY;
840 if (lhs.zero_p ())
841 return BRS_FALSE;
843 // For TRUE, we can't just test for [1,1] because Ada can have
844 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
845 if (lhs.contains_p (build_zero_cst (lhs.type ())))
847 r.set_varying (val_type);
848 return BRS_FULL;
851 return BRS_TRUE;
854 // ------------------------------------------------------------------------
856 void
857 operator_equal::update_bitmask (irange &r, const irange &lh,
858 const irange &rh) const
860 update_known_bitmask (r, EQ_EXPR, lh, rh);
863 // Check if the LHS range indicates a relation between OP1 and OP2.
865 relation_kind
866 operator_equal::op1_op2_relation (const irange &lhs) const
868 if (lhs.undefined_p ())
869 return VREL_UNDEFINED;
871 // FALSE = op1 == op2 indicates NE_EXPR.
872 if (lhs.zero_p ())
873 return VREL_NE;
875 // TRUE = op1 == op2 indicates EQ_EXPR.
876 if (lhs.undefined_p () || !contains_zero_p (lhs))
877 return VREL_EQ;
878 return VREL_VARYING;
881 bool
882 operator_equal::fold_range (irange &r, tree type,
883 const irange &op1,
884 const irange &op2,
885 relation_trio rel) const
887 if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
888 return true;
890 // If both ranges consist of a single value, we can compare those values
891 // directly and be certain whether they are always equal or not.
892 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
893 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
895 if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
896 r = range_true (type);
897 else
898 r = range_false (type);
900 else
902 // If ranges do not intersect, we know the range is not equal,
903 // otherwise we don't know anything for sure.
904 int_range_max tmp = op1;
905 tmp.intersect (op2);
906 if (tmp.undefined_p ())
907 r = range_false (type);
908 else
909 r = range_true_and_false (type);
911 return true;
914 bool
915 operator_equal::op1_range (irange &r, tree type,
916 const irange &lhs,
917 const irange &op2,
918 relation_trio) const
920 switch (get_bool_state (r, lhs, type))
922 case BRS_TRUE:
923 // If it's true, the result is the same as OP2.
924 r = op2;
925 break;
927 case BRS_FALSE:
928 // If the result is false, the only time we know anything is
929 // if OP2 is a constant.
930 if (!op2.undefined_p ()
931 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
933 r = op2;
934 r.invert ();
936 else
937 r.set_varying (type);
938 break;
940 default:
941 break;
943 return true;
946 bool
947 operator_equal::op2_range (irange &r, tree type,
948 const irange &lhs,
949 const irange &op1,
950 relation_trio rel) const
952 return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
955 // -------------------------------------------------------------------------
957 void
958 operator_not_equal::update_bitmask (irange &r, const irange &lh,
959 const irange &rh) const
961 update_known_bitmask (r, NE_EXPR, lh, rh);
964 // Check if the LHS range indicates a relation between OP1 and OP2.
966 relation_kind
967 operator_not_equal::op1_op2_relation (const irange &lhs) const
969 if (lhs.undefined_p ())
970 return VREL_UNDEFINED;
972 // FALSE = op1 != op2 indicates EQ_EXPR.
973 if (lhs.zero_p ())
974 return VREL_EQ;
976 // TRUE = op1 != op2 indicates NE_EXPR.
977 if (lhs.undefined_p () || !contains_zero_p (lhs))
978 return VREL_NE;
979 return VREL_VARYING;
982 bool
983 operator_not_equal::fold_range (irange &r, tree type,
984 const irange &op1,
985 const irange &op2,
986 relation_trio rel) const
988 if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
989 return true;
991 // If both ranges consist of a single value, we can compare those values
992 // directly and be certain whether they are always equal or not.
993 if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
994 && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
996 if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
997 r = range_true (type);
998 else
999 r = range_false (type);
1001 else
1003 // If ranges do not intersect, we know the range is not equal,
1004 // otherwise we don't know anything for sure.
1005 int_range_max tmp = op1;
1006 tmp.intersect (op2);
1007 if (tmp.undefined_p ())
1008 r = range_true (type);
1009 else
1010 r = range_true_and_false (type);
1012 return true;
1015 bool
1016 operator_not_equal::op1_range (irange &r, tree type,
1017 const irange &lhs,
1018 const irange &op2,
1019 relation_trio) const
1021 switch (get_bool_state (r, lhs, type))
1023 case BRS_TRUE:
1024 // If the result is true, the only time we know anything is if
1025 // OP2 is a constant.
1026 if (!op2.undefined_p ()
1027 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1029 r = op2;
1030 r.invert ();
1032 else
1033 r.set_varying (type);
1034 break;
1036 case BRS_FALSE:
1037 // If it's false, the result is the same as OP2.
1038 r = op2;
1039 break;
1041 default:
1042 break;
1044 return true;
1048 bool
1049 operator_not_equal::op2_range (irange &r, tree type,
1050 const irange &lhs,
1051 const irange &op1,
1052 relation_trio rel) const
1054 return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1057 // (X < VAL) produces the range of [MIN, VAL - 1].
1059 static void
1060 build_lt (irange &r, tree type, const wide_int &val)
1062 wi::overflow_type ov;
1063 wide_int lim;
1064 signop sgn = TYPE_SIGN (type);
1066 // Signed 1 bit cannot represent 1 for subtraction.
1067 if (sgn == SIGNED)
1068 lim = wi::add (val, -1, sgn, &ov);
1069 else
1070 lim = wi::sub (val, 1, sgn, &ov);
1072 // If val - 1 underflows, check if X < MIN, which is an empty range.
1073 if (ov)
1074 r.set_undefined ();
1075 else
1076 r = int_range<1> (type, min_limit (type), lim);
1079 // (X <= VAL) produces the range of [MIN, VAL].
1081 static void
1082 build_le (irange &r, tree type, const wide_int &val)
1084 r = int_range<1> (type, min_limit (type), val);
1087 // (X > VAL) produces the range of [VAL + 1, MAX].
1089 static void
1090 build_gt (irange &r, tree type, const wide_int &val)
1092 wi::overflow_type ov;
1093 wide_int lim;
1094 signop sgn = TYPE_SIGN (type);
1096 // Signed 1 bit cannot represent 1 for addition.
1097 if (sgn == SIGNED)
1098 lim = wi::sub (val, -1, sgn, &ov);
1099 else
1100 lim = wi::add (val, 1, sgn, &ov);
1101 // If val + 1 overflows, the check is for X > MAX, which is an empty range.
1102 if (ov)
1103 r.set_undefined ();
1104 else
1105 r = int_range<1> (type, lim, max_limit (type));
1108 // (X >= val) produces the range of [VAL, MAX].
1110 static void
1111 build_ge (irange &r, tree type, const wide_int &val)
1113 r = int_range<1> (type, val, max_limit (type));
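// For example, operator_lt::op1_range below combines these helpers: for
// x < y with y in [5, 10], a TRUE result yields x = [MIN, 9] via build_lt on
// y's upper bound, and a FALSE result yields x = [5, MAX] via build_ge on
// y's lower bound.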
1117 void
1118 operator_lt::update_bitmask (irange &r, const irange &lh,
1119 const irange &rh) const
1121 update_known_bitmask (r, LT_EXPR, lh, rh);
1124 // Check if the LHS range indicates a relation between OP1 and OP2.
1126 relation_kind
1127 operator_lt::op1_op2_relation (const irange &lhs) const
1129 if (lhs.undefined_p ())
1130 return VREL_UNDEFINED;
1132 // FALSE = op1 < op2 indicates GE_EXPR.
1133 if (lhs.zero_p ())
1134 return VREL_GE;
1136 // TRUE = op1 < op2 indicates LT_EXPR.
1137 if (lhs.undefined_p () || !contains_zero_p (lhs))
1138 return VREL_LT;
1139 return VREL_VARYING;
1142 bool
1143 operator_lt::fold_range (irange &r, tree type,
1144 const irange &op1,
1145 const irange &op2,
1146 relation_trio rel) const
1148 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
1149 return true;
1151 signop sign = TYPE_SIGN (op1.type ());
1152 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1154 if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
1155 r = range_true (type);
1156 else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
1157 r = range_false (type);
1158 // Use nonzero bits to determine if < 0 is false.
1159 else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
1160 r = range_false (type);
1161 else
1162 r = range_true_and_false (type);
1163 return true;
1166 bool
1167 operator_lt::op1_range (irange &r, tree type,
1168 const irange &lhs,
1169 const irange &op2,
1170 relation_trio) const
1172 if (op2.undefined_p ())
1173 return false;
1175 switch (get_bool_state (r, lhs, type))
1177 case BRS_TRUE:
1178 build_lt (r, type, op2.upper_bound ());
1179 break;
1181 case BRS_FALSE:
1182 build_ge (r, type, op2.lower_bound ());
1183 break;
1185 default:
1186 break;
1188 return true;
1191 bool
1192 operator_lt::op2_range (irange &r, tree type,
1193 const irange &lhs,
1194 const irange &op1,
1195 relation_trio) const
1197 if (op1.undefined_p ())
1198 return false;
1200 switch (get_bool_state (r, lhs, type))
1202 case BRS_TRUE:
1203 build_gt (r, type, op1.lower_bound ());
1204 break;
1206 case BRS_FALSE:
1207 build_le (r, type, op1.upper_bound ());
1208 break;
1210 default:
1211 break;
1213 return true;
1217 void
1218 operator_le::update_bitmask (irange &r, const irange &lh,
1219 const irange &rh) const
1221 update_known_bitmask (r, LE_EXPR, lh, rh);
1224 // Check if the LHS range indicates a relation between OP1 and OP2.
1226 relation_kind
1227 operator_le::op1_op2_relation (const irange &lhs) const
1229 if (lhs.undefined_p ())
1230 return VREL_UNDEFINED;
1232 // FALSE = op1 <= op2 indicates GT_EXPR.
1233 if (lhs.zero_p ())
1234 return VREL_GT;
1236 // TRUE = op1 <= op2 indicates LE_EXPR.
1237 if (lhs.undefined_p () || !contains_zero_p (lhs))
1238 return VREL_LE;
1239 return VREL_VARYING;
1242 bool
1243 operator_le::fold_range (irange &r, tree type,
1244 const irange &op1,
1245 const irange &op2,
1246 relation_trio rel) const
1248 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
1249 return true;
1251 signop sign = TYPE_SIGN (op1.type ());
1252 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1254 if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
1255 r = range_true (type);
1256 else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
1257 r = range_false (type);
1258 else
1259 r = range_true_and_false (type);
1260 return true;
1263 bool
1264 operator_le::op1_range (irange &r, tree type,
1265 const irange &lhs,
1266 const irange &op2,
1267 relation_trio) const
1269 if (op2.undefined_p ())
1270 return false;
1272 switch (get_bool_state (r, lhs, type))
1274 case BRS_TRUE:
1275 build_le (r, type, op2.upper_bound ());
1276 break;
1278 case BRS_FALSE:
1279 build_gt (r, type, op2.lower_bound ());
1280 break;
1282 default:
1283 break;
1285 return true;
1288 bool
1289 operator_le::op2_range (irange &r, tree type,
1290 const irange &lhs,
1291 const irange &op1,
1292 relation_trio) const
1294 if (op1.undefined_p ())
1295 return false;
1297 switch (get_bool_state (r, lhs, type))
1299 case BRS_TRUE:
1300 build_ge (r, type, op1.lower_bound ());
1301 break;
1303 case BRS_FALSE:
1304 build_lt (r, type, op1.upper_bound ());
1305 break;
1307 default:
1308 break;
1310 return true;
1314 void
1315 operator_gt::update_bitmask (irange &r, const irange &lh,
1316 const irange &rh) const
1318 update_known_bitmask (r, GT_EXPR, lh, rh);
1321 // Check if the LHS range indicates a relation between OP1 and OP2.
1323 relation_kind
1324 operator_gt::op1_op2_relation (const irange &lhs) const
1326 if (lhs.undefined_p ())
1327 return VREL_UNDEFINED;
1329 // FALSE = op1 > op2 indicates LE_EXPR.
1330 if (lhs.zero_p ())
1331 return VREL_LE;
1333 // TRUE = op1 > op2 indicates GT_EXPR.
1334 if (!contains_zero_p (lhs))
1335 return VREL_GT;
1336 return VREL_VARYING;
1339 bool
1340 operator_gt::fold_range (irange &r, tree type,
1341 const irange &op1, const irange &op2,
1342 relation_trio rel) const
1344 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
1345 return true;
1347 signop sign = TYPE_SIGN (op1.type ());
1348 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1350 if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
1351 r = range_true (type);
1352 else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
1353 r = range_false (type);
1354 else
1355 r = range_true_and_false (type);
1356 return true;
1359 bool
1360 operator_gt::op1_range (irange &r, tree type,
1361 const irange &lhs, const irange &op2,
1362 relation_trio) const
1364 if (op2.undefined_p ())
1365 return false;
1367 switch (get_bool_state (r, lhs, type))
1369 case BRS_TRUE:
1370 build_gt (r, type, op2.lower_bound ());
1371 break;
1373 case BRS_FALSE:
1374 build_le (r, type, op2.upper_bound ());
1375 break;
1377 default:
1378 break;
1380 return true;
1383 bool
1384 operator_gt::op2_range (irange &r, tree type,
1385 const irange &lhs,
1386 const irange &op1,
1387 relation_trio) const
1389 if (op1.undefined_p ())
1390 return false;
1392 switch (get_bool_state (r, lhs, type))
1394 case BRS_TRUE:
1395 build_lt (r, type, op1.upper_bound ());
1396 break;
1398 case BRS_FALSE:
1399 build_ge (r, type, op1.lower_bound ());
1400 break;
1402 default:
1403 break;
1405 return true;
1409 void
1410 operator_ge::update_bitmask (irange &r, const irange &lh,
1411 const irange &rh) const
1413 update_known_bitmask (r, GE_EXPR, lh, rh);
1416 // Check if the LHS range indicates a relation between OP1 and OP2.
1418 relation_kind
1419 operator_ge::op1_op2_relation (const irange &lhs) const
1421 if (lhs.undefined_p ())
1422 return VREL_UNDEFINED;
1424 // FALSE = op1 >= op2 indicates LT_EXPR.
1425 if (lhs.zero_p ())
1426 return VREL_LT;
1428 // TRUE = op1 >= op2 indicates GE_EXPR.
1429 if (!contains_zero_p (lhs))
1430 return VREL_GE;
1431 return VREL_VARYING;
1434 bool
1435 operator_ge::fold_range (irange &r, tree type,
1436 const irange &op1,
1437 const irange &op2,
1438 relation_trio rel) const
1440 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
1441 return true;
1443 signop sign = TYPE_SIGN (op1.type ());
1444 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1446 if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
1447 r = range_true (type);
1448 else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
1449 r = range_false (type);
1450 else
1451 r = range_true_and_false (type);
1452 return true;
1455 bool
1456 operator_ge::op1_range (irange &r, tree type,
1457 const irange &lhs,
1458 const irange &op2,
1459 relation_trio) const
1461 if (op2.undefined_p ())
1462 return false;
1464 switch (get_bool_state (r, lhs, type))
1466 case BRS_TRUE:
1467 build_ge (r, type, op2.lower_bound ());
1468 break;
1470 case BRS_FALSE:
1471 build_lt (r, type, op2.upper_bound ());
1472 break;
1474 default:
1475 break;
1477 return true;
1480 bool
1481 operator_ge::op2_range (irange &r, tree type,
1482 const irange &lhs,
1483 const irange &op1,
1484 relation_trio) const
1486 if (op1.undefined_p ())
1487 return false;
1489 switch (get_bool_state (r, lhs, type))
1491 case BRS_TRUE:
1492 build_le (r, type, op1.upper_bound ());
1493 break;
1495 case BRS_FALSE:
1496 build_gt (r, type, op1.lower_bound ());
1497 break;
1499 default:
1500 break;
1502 return true;
1506 void
1507 operator_plus::update_bitmask (irange &r, const irange &lh,
1508 const irange &rh) const
1510 update_known_bitmask (r, PLUS_EXPR, lh, rh);
1513 // Check to see if the range of OP2 indicates anything about the relation
1514 // between LHS and OP1.
1516 relation_kind
1517 operator_plus::lhs_op1_relation (const irange &lhs,
1518 const irange &op1,
1519 const irange &op2,
1520 relation_kind) const
1522 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
1523 return VREL_VARYING;
1525 tree type = lhs.type ();
1526 unsigned prec = TYPE_PRECISION (type);
1527 wi::overflow_type ovf1, ovf2;
1528 signop sign = TYPE_SIGN (type);
1530 // LHS = OP1 + 0 indicates LHS == OP1.
1531 if (op2.zero_p ())
1532 return VREL_EQ;
1534 if (TYPE_OVERFLOW_WRAPS (type))
1536 wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
1537 wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
1539 else
1540 ovf1 = ovf2 = wi::OVF_NONE;
1542 // Never wrapping additions.
1543 if (!ovf1 && !ovf2)
1545 // Positive op2 means lhs > op1.
1546 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1547 return VREL_GT;
1548 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1549 return VREL_GE;
1551 // Negative op2 means lhs < op1.
1552 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1553 return VREL_LT;
1554 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1555 return VREL_LE;
1557 // Always wrapping additions.
1558 else if (ovf1 && ovf1 == ovf2)
1560 // Positive op2 means lhs < op1.
1561 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1562 return VREL_LT;
1563 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1564 return VREL_LE;
1566 // Negative op2 means lhs > op1.
1567 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1568 return VREL_GT;
1569 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1570 return VREL_GE;
1573 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1574 if (!range_includes_zero_p (&op2))
1575 return VREL_NE;
1577 return VREL_VARYING;
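// For example, with unsigned 8-bit operands, OP1 = [0, 100] and OP2 = [1, 5]
// can never wrap and OP2 is strictly positive, so the result is VREL_GT.
// With OP1 = [250, 250] and OP2 = [10, 20] both bound additions wrap, so the
// result is VREL_LT instead.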
1580 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1581 // operands.
1583 relation_kind
1584 operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
1585 const irange &op2, relation_kind rel) const
1587 return lhs_op1_relation (lhs, op2, op1, rel);
1590 void
1591 operator_plus::wi_fold (irange &r, tree type,
1592 const wide_int &lh_lb, const wide_int &lh_ub,
1593 const wide_int &rh_lb, const wide_int &rh_ub) const
1595 wi::overflow_type ov_lb, ov_ub;
1596 signop s = TYPE_SIGN (type);
1597 wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
1598 wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
1599 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1602 // Given addition or subtraction, determine the possible NORMAL ranges and
1603 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1604 // Return the relation that exists between the LHS and OP1 in order for the
1605 // NORMAL range to apply.
1606 // A return value of VREL_VARYING means no ranges were applicable.
1608 static relation_kind
1609 plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
1610 bool add_p)
1612 relation_kind kind = VREL_VARYING;
1613 // For now, only deal with constant adds. This could be extended to ranges
1614 // when someone is so motivated.
1615 if (!offset.singleton_p () || offset.zero_p ())
1616 return kind;
1618 // Always work with a positive offset, i.e. a + -2 -> a - 2 and a - -2 -> a + 2.
1619 wide_int off = offset.lower_bound ();
1620 if (wi::neg_p (off, SIGNED))
1622 add_p = !add_p;
1623 off = wi::neg (off);
1626 wi::overflow_type ov;
1627 tree type = offset.type ();
1628 unsigned prec = TYPE_PRECISION (type);
1629 wide_int ub;
1630 wide_int lb;
1631 // Calculate the normal range and relation for the operation.
1632 if (add_p)
1634 // [ 0 , INF - OFF]
1635 lb = wi::zero (prec);
1636 ub = wi::sub (irange_val_max (type), off, UNSIGNED, &ov);
1637 kind = VREL_GT;
1639 else
1641 // [ OFF, INF ]
1642 lb = off;
1643 ub = irange_val_max (type);
1644 kind = VREL_LT;
1646 int_range<2> normal_range (type, lb, ub);
1647 int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);
1649 r_ov = ov_range;
1650 r_normal = normal_range;
1651 return kind;
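// For example, for an unsigned 8-bit offset of +1 (ADD_P true): the normal
// inputs are [0, 254] with relation VREL_GT (LHS > OP1 when no wrap occurs),
// and the overflow range is the complement [255, 255], the only value for
// which the addition wraps.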
1654 // Once op1 has been calculated by operator_plus or operator_minus, check
1655 // to see if the relation passed causes any part of the calculation to
1656 // be impossible.  For example,
1657 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1658 // and that further refines a_2 to [0, 0].
1659 // R is the value of op1, OP2 is the offset being added/subtracted, REL is the
1660 // relation between the LHS and OP1, and ADD_P is true for PLUS, false for
1661 // MINUS.  If any adjustment can be made, R will reflect it.
1663 static void
1664 adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
1665 bool add_p)
1667 if (r.undefined_p ())
1668 return;
1669 tree type = r.type ();
1670 // Check for unsigned overflow and calculate the overflow part.
1671 signop s = TYPE_SIGN (type);
1672 if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
1673 return;
1675 // Only work with <, <=, >, >= relations.
1676 if (!relation_lt_le_gt_ge_p (rel))
1677 return;
1679 // Get the ranges for this offset.
1680 int_range_max normal, overflow;
1681 relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);
1683 // VREL_VARYING means there are no adjustments.
1684 if (k == VREL_VARYING)
1685 return;
1687 // If the relations match use the normal range, otherwise use overflow range.
1688 if (relation_intersect (k, rel) == k)
1689 r.intersect (normal);
1690 else
1691 r.intersect (overflow);
1692 return;
1695 bool
1696 operator_plus::op1_range (irange &r, tree type,
1697 const irange &lhs,
1698 const irange &op2,
1699 relation_trio trio) const
1701 if (lhs.undefined_p ())
1702 return false;
1703 // Start with the default operation.
1704 range_op_handler minus (MINUS_EXPR);
1705 if (!minus)
1706 return false;
1707 bool res = minus.fold_range (r, type, lhs, op2);
1708 relation_kind rel = trio.lhs_op1 ();
1709 // Check for a relation refinement.
1710 if (res)
1711 adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
1712 return res;
1715 bool
1716 operator_plus::op2_range (irange &r, tree type,
1717 const irange &lhs,
1718 const irange &op1,
1719 relation_trio rel) const
1721 return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1724 class operator_widen_plus_signed : public range_operator
1726 public:
1727 virtual void wi_fold (irange &r, tree type,
1728 const wide_int &lh_lb,
1729 const wide_int &lh_ub,
1730 const wide_int &rh_lb,
1731 const wide_int &rh_ub) const;
1732 } op_widen_plus_signed;
1734 void
1735 operator_widen_plus_signed::wi_fold (irange &r, tree type,
1736 const wide_int &lh_lb,
1737 const wide_int &lh_ub,
1738 const wide_int &rh_lb,
1739 const wide_int &rh_ub) const
1741 wi::overflow_type ov_lb, ov_ub;
1742 signop s = TYPE_SIGN (type);
1744 wide_int lh_wlb
1745 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
1746 wide_int lh_wub
1747 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
1748 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1749 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1751 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1752 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1754 r = int_range<2> (type, new_lb, new_ub);
1757 class operator_widen_plus_unsigned : public range_operator
1759 public:
1760 virtual void wi_fold (irange &r, tree type,
1761 const wide_int &lh_lb,
1762 const wide_int &lh_ub,
1763 const wide_int &rh_lb,
1764 const wide_int &rh_ub) const;
1765 } op_widen_plus_unsigned;
1767 void
1768 operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
1769 const wide_int &lh_lb,
1770 const wide_int &lh_ub,
1771 const wide_int &rh_lb,
1772 const wide_int &rh_ub) const
1774 wi::overflow_type ov_lb, ov_ub;
1775 signop s = TYPE_SIGN (type);
1777 wide_int lh_wlb
1778 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
1779 wide_int lh_wub
1780 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
1781 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1782 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1784 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1785 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1787 r = int_range<2> (type, new_lb, new_ub);
1790 void
1791 operator_minus::update_bitmask (irange &r, const irange &lh,
1792 const irange &rh) const
1794 update_known_bitmask (r, MINUS_EXPR, lh, rh);
1797 void
1798 operator_minus::wi_fold (irange &r, tree type,
1799 const wide_int &lh_lb, const wide_int &lh_ub,
1800 const wide_int &rh_lb, const wide_int &rh_ub) const
1802 wi::overflow_type ov_lb, ov_ub;
1803 signop s = TYPE_SIGN (type);
1804 wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
1805 wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
1806 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1810 // Return the relation between LHS and OP1 based on the relation between
1811 // OP1 and OP2.
1813 relation_kind
1814 operator_minus::lhs_op1_relation (const irange &, const irange &op1,
1815 const irange &, relation_kind rel) const
1817 if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
1818 switch (rel)
1820 case VREL_GT:
1821 case VREL_GE:
1822 return VREL_LE;
1823 default:
1824 break;
1826 return VREL_VARYING;
1829 // Check to see if the relation REL between OP1 and OP2 has any effect on the
1830 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
1831 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
1833 bool
1834 minus_op1_op2_relation_effect (irange &lhs_range, tree type,
1835 const irange &op1_range ATTRIBUTE_UNUSED,
1836 const irange &op2_range ATTRIBUTE_UNUSED,
1837 relation_kind rel)
1839 if (rel == VREL_VARYING)
1840 return false;
1842 int_range<2> rel_range;
1843 unsigned prec = TYPE_PRECISION (type);
1844 signop sgn = TYPE_SIGN (type);
1846 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
1847 if (rel == VREL_EQ)
1848 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
1849 else if (rel == VREL_NE)
1850 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
1851 VR_ANTI_RANGE);
1852 else if (TYPE_OVERFLOW_WRAPS (type))
1854 switch (rel)
1856 // For wrapping signed values and unsigned, if op1 > op2 or
1857 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
1858 case VREL_GT:
1859 case VREL_LT:
1860 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
1861 VR_ANTI_RANGE);
1862 break;
1863 default:
1864 return false;
1867 else
1869 switch (rel)
1871 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
1872 case VREL_GT:
1873 rel_range = int_range<2> (type, wi::one (prec),
1874 wi::max_value (prec, sgn));
1875 break;
1876 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
1877 case VREL_GE:
1878 rel_range = int_range<2> (type, wi::zero (prec),
1879 wi::max_value (prec, sgn));
1880 break;
1881 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
1882 case VREL_LT:
1883 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
1884 wi::minus_one (prec));
1885 break;
1886 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
1887 case VREL_LE:
1888 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
1889 wi::zero (prec));
1890 break;
1891 default:
1892 return false;
1895 lhs_range.intersect (rel_range);
1896 return true;
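// For example, with undefined signed overflow and relation VREL_GT between
// OP1 and OP2, an LHS range of [-10, 10] for OP1 - OP2 is intersected with
// [1, +INF] and refined to [1, 10].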
1899 bool
1900 operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
1901 const irange &op1_range,
1902 const irange &op2_range,
1903 relation_kind rel) const
1905 return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
1906 rel);
1909 bool
1910 operator_minus::op1_range (irange &r, tree type,
1911 const irange &lhs,
1912 const irange &op2,
1913 relation_trio trio) const
1915 if (lhs.undefined_p ())
1916 return false;
1917 // Start with the default operation.
1918 range_op_handler minus (PLUS_EXPR);
1919 if (!minus)
1920 return false;
1921 bool res = minus.fold_range (r, type, lhs, op2);
1922 relation_kind rel = trio.lhs_op1 ();
1923 if (res)
1924 adjust_op1_for_overflow (r, op2, rel, false /* MINUS_EXPR */);
1925 return res;
1929 bool
1930 operator_minus::op2_range (irange &r, tree type,
1931 const irange &lhs,
1932 const irange &op1,
1933 relation_trio) const
1935 if (lhs.undefined_p ())
1936 return false;
1937 return fold_range (r, type, op1, lhs);
1940 void
1941 operator_min::update_bitmask (irange &r, const irange &lh,
1942 const irange &rh) const
1944 update_known_bitmask (r, MIN_EXPR, lh, rh);
1947 void
1948 operator_min::wi_fold (irange &r, tree type,
1949 const wide_int &lh_lb, const wide_int &lh_ub,
1950 const wide_int &rh_lb, const wide_int &rh_ub) const
1952 signop s = TYPE_SIGN (type);
1953 wide_int new_lb = wi::min (lh_lb, rh_lb, s);
1954 wide_int new_ub = wi::min (lh_ub, rh_ub, s);
1955 value_range_with_overflow (r, type, new_lb, new_ub);
1959 void
1960 operator_max::update_bitmask (irange &r, const irange &lh,
1961 const irange &rh) const
1963 update_known_bitmask (r, MAX_EXPR, lh, rh);
1966 void
1967 operator_max::wi_fold (irange &r, tree type,
1968 const wide_int &lh_lb, const wide_int &lh_ub,
1969 const wide_int &rh_lb, const wide_int &rh_ub) const
1971 signop s = TYPE_SIGN (type);
1972 wide_int new_lb = wi::max (lh_lb, rh_lb, s);
1973 wide_int new_ub = wi::max (lh_ub, rh_ub, s);
1974 value_range_with_overflow (r, type, new_lb, new_ub);
1978 // Calculate the cross product of two sets of ranges and return it.
1980 // Multiplications, divisions and shifts are a bit tricky to handle,
1981 // depending on the mix of signs we have in the two ranges, we need to
1982 // operate on different values to get the minimum and maximum values
1983 // for the new range. One approach is to figure out all the
1984 // variations of range combinations and do the operations.
1986 // However, this involves several calls to compare_values and it is
1987 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
1988 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
1989 // figure the smallest and largest values to form the new range.
1991 void
1992 cross_product_operator::wi_cross_product (irange &r, tree type,
1993 const wide_int &lh_lb,
1994 const wide_int &lh_ub,
1995 const wide_int &rh_lb,
1996 const wide_int &rh_ub) const
1998 wide_int cp1, cp2, cp3, cp4;
1999 // Default to varying.
2000 r.set_varying (type);
2002 // Compute the 4 cross operations, bailing if we get an overflow we
2003 // can't handle.
2004 if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
2005 return;
2006 if (wi::eq_p (lh_lb, lh_ub))
2007 cp3 = cp1;
2008 else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
2009 return;
2010 if (wi::eq_p (rh_lb, rh_ub))
2011 cp2 = cp1;
2012 else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
2013 return;
2014 if (wi::eq_p (lh_lb, lh_ub))
2015 cp4 = cp2;
2016 else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
2017 return;
2019 // Order pairs.
2020 signop sign = TYPE_SIGN (type);
2021 if (wi::gt_p (cp1, cp2, sign))
2022 std::swap (cp1, cp2);
2023 if (wi::gt_p (cp3, cp4, sign))
2024 std::swap (cp3, cp4);
2026 // Choose min and max from the ordered pairs.
2027 wide_int res_lb = wi::min (cp1, cp3, sign);
2028 wide_int res_ub = wi::max (cp2, cp4, sign);
2029 value_range_with_overflow (r, type, res_lb, res_ub);
2033 void
2034 operator_mult::update_bitmask (irange &r, const irange &lh,
2035 const irange &rh) const
2037 update_known_bitmask (r, MULT_EXPR, lh, rh);
2040 bool
2041 operator_mult::op1_range (irange &r, tree type,
2042 const irange &lhs, const irange &op2,
2043 relation_trio) const
2045 if (lhs.undefined_p ())
2046 return false;
2048 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2049 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
2050 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
2051 if (TYPE_OVERFLOW_WRAPS (type))
2052 return false;
2054 wide_int offset;
2055 if (op2.singleton_p (offset) && offset != 0)
2056 return range_op_handler (TRUNC_DIV_EXPR).fold_range (r, type, lhs, op2);
2057 return false;
2060 bool
2061 operator_mult::op2_range (irange &r, tree type,
2062 const irange &lhs, const irange &op1,
2063 relation_trio rel) const
2065 return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
2068 bool
2069 operator_mult::wi_op_overflows (wide_int &res, tree type,
2070 const wide_int &w0, const wide_int &w1) const
2072 wi::overflow_type overflow = wi::OVF_NONE;
2073 signop sign = TYPE_SIGN (type);
2074 res = wi::mul (w0, w1, sign, &overflow);
2075 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2077 // For multiplication, the sign of the overflow is given
2078 // by the comparison of the signs of the operands.
2079 if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
2080 res = wi::max_value (w0.get_precision (), sign);
2081 else
2082 res = wi::min_value (w0.get_precision (), sign);
2083 return false;
2085 return overflow;
2088 void
2089 operator_mult::wi_fold (irange &r, tree type,
2090 const wide_int &lh_lb, const wide_int &lh_ub,
2091 const wide_int &rh_lb, const wide_int &rh_ub) const
2093 if (TYPE_OVERFLOW_UNDEFINED (type))
2095 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2096 return;
2099 // Multiply the ranges when overflow wraps. This is basically fancy
2100 // code so we don't drop to varying with an unsigned
2101 // [-3,-1]*[-3,-1].
2103 // This test requires 2*prec bits if both operands are signed and
2104 // 2*prec + 2 bits if either is not. Therefore, extend the values
2105 // using the sign of the result to PREC2. From here on out,
2106 // everything is just signed math no matter what the input types
2107 // were.
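// As an illustration, for unsigned 8-bit [253,255] * [253,255] both
// intervals canonicalize to [-3,-1]; the double-precision products are
// {1,3,3,9} and max - min = 8 < 255, so the result becomes the 8-bit
// range [1, 9] rather than dropping to varying.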
2109 signop sign = TYPE_SIGN (type);
2110 unsigned prec = TYPE_PRECISION (type);
2111 widest2_int min0 = widest2_int::from (lh_lb, sign);
2112 widest2_int max0 = widest2_int::from (lh_ub, sign);
2113 widest2_int min1 = widest2_int::from (rh_lb, sign);
2114 widest2_int max1 = widest2_int::from (rh_ub, sign);
2115 widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
2116 widest2_int size = sizem1 + 1;
2118 // Canonicalize the intervals.
2119 if (sign == UNSIGNED)
2121 if (wi::ltu_p (size, min0 + max0))
2123 min0 -= size;
2124 max0 -= size;
2126 if (wi::ltu_p (size, min1 + max1))
2128 min1 -= size;
2129 max1 -= size;
2133 // Sort the 4 products so that min is in prod0 and max is in
2134 // prod3.
2135 widest2_int prod0 = min0 * min1;
2136 widest2_int prod1 = min0 * max1;
2137 widest2_int prod2 = max0 * min1;
2138 widest2_int prod3 = max0 * max1;
2140 // min0min1 > max0max1
2141 if (prod0 > prod3)
2142 std::swap (prod0, prod3);
2144 // min0max1 > max0min1
2145 if (prod1 > prod2)
2146 std::swap (prod1, prod2);
2148 if (prod0 > prod1)
2149 std::swap (prod0, prod1);
2151 if (prod2 > prod3)
2152 std::swap (prod2, prod3);
2154 // diff = max - min
2155 prod2 = prod3 - prod0;
2156 if (wi::geu_p (prod2, sizem1))
2158 // Multiplying by X, where X is a power of 2, yields [0,0][X,+INF].
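// For instance, in unsigned 8 bits OP1 * 16 can only produce multiples
// of 16, so any nonzero result is at least 16 and [0,0][16, 255] is a
// valid, if conservative, answer.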
2159 if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
2160 && wi::exact_log2 (rh_lb) != -1 && prec > 1)
2162 r.set (type, rh_lb, wi::max_value (prec, sign));
2163 int_range<2> zero;
2164 zero.set_zero (type);
2165 r.union_ (zero);
2167 else
2168 // The range covers all values.
2169 r.set_varying (type);
2171 else
2173 wide_int new_lb = wide_int::from (prod0, prec, sign);
2174 wide_int new_ub = wide_int::from (prod3, prec, sign);
2175 create_possibly_reversed_range (r, type, new_lb, new_ub);
2179 class operator_widen_mult_signed : public range_operator
2181 public:
2182 virtual void wi_fold (irange &r, tree type,
2183 const wide_int &lh_lb,
2184 const wide_int &lh_ub,
2185 const wide_int &rh_lb,
2186 const wide_int &rh_ub)
2187 const;
2188 } op_widen_mult_signed;
2190 void
2191 operator_widen_mult_signed::wi_fold (irange &r, tree type,
2192 const wide_int &lh_lb,
2193 const wide_int &lh_ub,
2194 const wide_int &rh_lb,
2195 const wide_int &rh_ub) const
2197 signop s = TYPE_SIGN (type);
2199 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
2200 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
2201 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2202 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2204 /* We don't expect a widening multiplication to be able to overflow but
2205    range calculations for multiplications are complicated.  After widening
2206    the operands, let's call the base class.  */
2207 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2211 class operator_widen_mult_unsigned : public range_operator
2213 public:
2214 virtual void wi_fold (irange &r, tree type,
2215 const wide_int &lh_lb,
2216 const wide_int &lh_ub,
2217 const wide_int &rh_lb,
2218 const wide_int &rh_ub)
2219 const;
2220 } op_widen_mult_unsigned;
2222 void
2223 operator_widen_mult_unsigned::wi_fold (irange &r, tree type,
2224 const wide_int &lh_lb,
2225 const wide_int &lh_ub,
2226 const wide_int &rh_lb,
2227 const wide_int &rh_ub) const
2229 signop s = TYPE_SIGN (type);
2231 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
2232 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
2233 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2234 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2236 /* We don't expect a widening multiplication to be able to overflow but
2237    range calculations for multiplications are complicated.  After widening
2238    the operands, let's call the base class.  */
2239 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2242 class operator_div : public cross_product_operator
2244 public:
2245 operator_div (tree_code div_kind) { m_code = div_kind; }
2246 virtual void wi_fold (irange &r, tree type,
2247 const wide_int &lh_lb,
2248 const wide_int &lh_ub,
2249 const wide_int &rh_lb,
2250 const wide_int &rh_ub) const final override;
2251 virtual bool wi_op_overflows (wide_int &res, tree type,
2252 const wide_int &, const wide_int &)
2253 const final override;
2254 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
2255 { update_known_bitmask (r, m_code, lh, rh); }
2256 protected:
2257 tree_code m_code;
2260 static operator_div op_trunc_div (TRUNC_DIV_EXPR);
2261 static operator_div op_floor_div (FLOOR_DIV_EXPR);
2262 static operator_div op_round_div (ROUND_DIV_EXPR);
2263 static operator_div op_ceil_div (CEIL_DIV_EXPR);
2265 bool
2266 operator_div::wi_op_overflows (wide_int &res, tree type,
2267 const wide_int &w0, const wide_int &w1) const
2269 if (w1 == 0)
2270 return true;
2272 wi::overflow_type overflow = wi::OVF_NONE;
2273 signop sign = TYPE_SIGN (type);
2275 switch (m_code)
2277 case EXACT_DIV_EXPR:
2278 case TRUNC_DIV_EXPR:
2279 res = wi::div_trunc (w0, w1, sign, &overflow);
2280 break;
2281 case FLOOR_DIV_EXPR:
2282 res = wi::div_floor (w0, w1, sign, &overflow);
2283 break;
2284 case ROUND_DIV_EXPR:
2285 res = wi::div_round (w0, w1, sign, &overflow);
2286 break;
2287 case CEIL_DIV_EXPR:
2288 res = wi::div_ceil (w0, w1, sign, &overflow);
2289 break;
2290 default:
2291 gcc_unreachable ();
2294 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2296 // For division, the only case is -INF / -1 = +INF.
2297 res = wi::max_value (w0.get_precision (), sign);
2298 return false;
2300 return overflow;
2303 void
2304 operator_div::wi_fold (irange &r, tree type,
2305 const wide_int &lh_lb, const wide_int &lh_ub,
2306 const wide_int &rh_lb, const wide_int &rh_ub) const
2308 const wide_int dividend_min = lh_lb;
2309 const wide_int dividend_max = lh_ub;
2310 const wide_int divisor_min = rh_lb;
2311 const wide_int divisor_max = rh_ub;
2312 signop sign = TYPE_SIGN (type);
2313 unsigned prec = TYPE_PRECISION (type);
2314 wide_int extra_min, extra_max;
2316 // If we know we won't divide by zero, just do the division.
2317 if (!wi_includes_zero_p (type, divisor_min, divisor_max))
2319 wi_cross_product (r, type, dividend_min, dividend_max,
2320 divisor_min, divisor_max);
2321 return;
2324 // If we're definitely dividing by zero, there's nothing to do.
2325 if (wi_zero_p (type, divisor_min, divisor_max))
2327 r.set_undefined ();
2328 return;
2331 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2332 // skip any division by zero.
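// For example, [10,20] / [-2,4] is computed as
// [10,20] / [-2,-1] = [-20,-5] unioned with [10,20] / [1,4] = [2,20],
// never dividing by the excluded value zero.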
2334 // First divide by the negative numbers, if any.
2335 if (wi::neg_p (divisor_min, sign))
2336 wi_cross_product (r, type, dividend_min, dividend_max,
2337 divisor_min, wi::minus_one (prec));
2338 else
2339 r.set_undefined ();
2341 // Then divide by the non-zero positive numbers, if any.
2342 if (wi::gt_p (divisor_max, wi::zero (prec), sign))
2344 int_range_max tmp;
2345 wi_cross_product (tmp, type, dividend_min, dividend_max,
2346 wi::one (prec), divisor_max);
2347 r.union_ (tmp);
2349 // We shouldn't still have undefined here.
2350 gcc_checking_assert (!r.undefined_p ());
2354 class operator_exact_divide : public operator_div
2356 using range_operator::op1_range;
2357 public:
2358 operator_exact_divide () : operator_div (EXACT_DIV_EXPR) { }
2359 virtual bool op1_range (irange &r, tree type,
2360 const irange &lhs,
2361 const irange &op2,
2362 relation_trio) const;
2364 } op_exact_div;
2366 bool
2367 operator_exact_divide::op1_range (irange &r, tree type,
2368 const irange &lhs,
2369 const irange &op2,
2370 relation_trio) const
2372 if (lhs.undefined_p ())
2373 return false;
2374 wide_int offset;
2375 // [2, 4] = op1 / [3,3]: since it's an exact divide, there is no need to
2376 // worry about remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2377 // We won't bother trying to enumerate all the in-between values :-P
2378 // The TRUE accuracy is [6,6][9,9][12,12].  This is unlikely to matter
2379 // most of the time however.
2380 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2381 if (op2.singleton_p (offset) && offset != 0)
2382 return range_op_handler (MULT_EXPR).fold_range (r, type, lhs, op2);
2383 return false;
2387 class operator_lshift : public cross_product_operator
2389 using range_operator::fold_range;
2390 using range_operator::op1_range;
2391 public:
2392 virtual bool op1_range (irange &r, tree type,
2393 const irange &lhs,
2394 const irange &op2,
2395 relation_trio rel = TRIO_VARYING) const;
2396 virtual bool fold_range (irange &r, tree type,
2397 const irange &op1,
2398 const irange &op2,
2399 relation_trio rel = TRIO_VARYING) const;
2401 virtual void wi_fold (irange &r, tree type,
2402 const wide_int &lh_lb, const wide_int &lh_ub,
2403 const wide_int &rh_lb, const wide_int &rh_ub) const;
2404 virtual bool wi_op_overflows (wide_int &res,
2405 tree type,
2406 const wide_int &,
2407 const wide_int &) const;
2408 void update_bitmask (irange &r, const irange &lh,
2409 const irange &rh) const final override
2410 { update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
2411 } op_lshift;
2413 class operator_rshift : public cross_product_operator
2415 using range_operator::fold_range;
2416 using range_operator::op1_range;
2417 using range_operator::lhs_op1_relation;
2418 public:
2419 virtual bool fold_range (irange &r, tree type,
2420 const irange &op1,
2421 const irange &op2,
2422 relation_trio rel = TRIO_VARYING) const;
2423 virtual void wi_fold (irange &r, tree type,
2424 const wide_int &lh_lb,
2425 const wide_int &lh_ub,
2426 const wide_int &rh_lb,
2427 const wide_int &rh_ub) const;
2428 virtual bool wi_op_overflows (wide_int &res,
2429 tree type,
2430 const wide_int &w0,
2431 const wide_int &w1) const;
2432 virtual bool op1_range (irange &, tree type,
2433 const irange &lhs,
2434 const irange &op2,
2435 relation_trio rel = TRIO_VARYING) const;
2436 virtual relation_kind lhs_op1_relation (const irange &lhs,
2437 const irange &op1,
2438 const irange &op2,
2439 relation_kind rel) const;
2440 void update_bitmask (irange &r, const irange &lh,
2441 const irange &rh) const final override
2442 { update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
2443 } op_rshift;
2446 relation_kind
2447 operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
2448 const irange &op1,
2449 const irange &op2,
2450 relation_kind) const
2452 // If both operand ranges are >= 0, then LHS <= op1.
2453 if (!op1.undefined_p () && !op2.undefined_p ()
2454 && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
2455 && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
2456 return VREL_LE;
2457 return VREL_VARYING;
2460 bool
2461 operator_lshift::fold_range (irange &r, tree type,
2462 const irange &op1,
2463 const irange &op2,
2464 relation_trio rel) const
2466 int_range_max shift_range;
2467 if (!get_shift_range (shift_range, type, op2))
2469 if (op2.undefined_p ())
2470 r.set_undefined ();
2471 else
2472 r.set_zero (type);
2473 return true;
2476 // Transform left shifts by constants into multiplies.
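// For example, [1,5] << 3 is folded as [1,5] * [8,8] = [8, 40].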
2477 if (shift_range.singleton_p ())
2479 unsigned shift = shift_range.lower_bound ().to_uhwi ();
2480 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
2481 int_range<1> mult (type, tmp, tmp);
2483 // Force wrapping multiplication.
2484 bool saved_flag_wrapv = flag_wrapv;
2485 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
2486 flag_wrapv = 1;
2487 flag_wrapv_pointer = 1;
2488 bool b = op_mult.fold_range (r, type, op1, mult);
2489 flag_wrapv = saved_flag_wrapv;
2490 flag_wrapv_pointer = saved_flag_wrapv_pointer;
2491 return b;
2493 else
2494 // Otherwise, invoke the generic fold routine.
2495 return range_operator::fold_range (r, type, op1, shift_range, rel);
2498 void
2499 operator_lshift::wi_fold (irange &r, tree type,
2500 const wide_int &lh_lb, const wide_int &lh_ub,
2501 const wide_int &rh_lb, const wide_int &rh_ub) const
2503 signop sign = TYPE_SIGN (type);
2504 unsigned prec = TYPE_PRECISION (type);
2505 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
2506 int bound_shift = overflow_pos - rh_ub.to_shwi ();
2507 // If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
2508 // overflow. However, for that to happen, rh.max needs to be zero,
2509 // which means rh is a singleton range of zero, which means we simply return
2510 // [lh_lb, lh_ub] as the range.
2511 if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
2513 r = int_range<2> (type, lh_lb, lh_ub);
2514 return;
2517 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2518 wide_int complement = ~(bound - 1);
2519 wide_int low_bound, high_bound;
2520 bool in_bounds = false;
2522 if (sign == UNSIGNED)
2524 low_bound = bound;
2525 high_bound = complement;
2526 if (wi::ltu_p (lh_ub, low_bound))
2528 // [5, 6] << [1, 2] == [10, 24].
2529 // We're shifting out only zeroes, the value increases
2530 // monotonically.
2531 in_bounds = true;
2533 else if (wi::ltu_p (high_bound, lh_lb))
2535 // [0xffffff00, 0xffffffff] << [1, 2]
2536 // == [0xfffffc00, 0xfffffffe].
2537 // We're shifting out only ones, the value decreases
2538 // monotonically.
2539 in_bounds = true;
2542 else
2544 // [-1, 1] << [1, 2] == [-4, 4]
2545 low_bound = complement;
2546 high_bound = bound;
2547 if (wi::lts_p (lh_ub, high_bound)
2548 && wi::lts_p (low_bound, lh_lb))
2550 // For non-negative numbers, we're shifting out only zeroes,
2551 // the value increases monotonically. For negative numbers,
2552 // we're shifting out only ones, the value decreases
2553 // monotonically.
2554 in_bounds = true;
2558 if (in_bounds)
2559 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2560 else
2561 r.set_varying (type);
2564 bool
2565 operator_lshift::wi_op_overflows (wide_int &res, tree type,
2566 const wide_int &w0, const wide_int &w1) const
2568 signop sign = TYPE_SIGN (type);
2569 if (wi::neg_p (w1))
2571 // It's unclear from the C standard whether shifts can overflow.
2572 // The following code ignores overflow; perhaps a C standard
2573 // interpretation ruling is needed.
2574 res = wi::rshift (w0, -w1, sign);
2576 else
2577 res = wi::lshift (w0, w1);
2578 return false;
2581 bool
2582 operator_lshift::op1_range (irange &r,
2583 tree type,
2584 const irange &lhs,
2585 const irange &op2,
2586 relation_trio) const
2588 if (lhs.undefined_p ())
2589 return false;
2591 if (!contains_zero_p (lhs))
2592 r.set_nonzero (type);
2593 else
2594 r.set_varying (type);
2596 wide_int shift;
2597 if (op2.singleton_p (shift))
2599 if (wi::lt_p (shift, 0, SIGNED))
2600 return false;
2601 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
2602 TYPE_PRECISION (op2.type ())),
2603 UNSIGNED))
2604 return false;
2605 if (shift == 0)
2607 r.intersect (lhs);
2608 return true;
2611 // Work completely in unsigned mode to start.
2612 tree utype = type;
2613 int_range_max tmp_range;
2614 if (TYPE_SIGN (type) == SIGNED)
2616 int_range_max tmp = lhs;
2617 utype = unsigned_type_for (type);
2618 range_cast (tmp, utype);
2619 op_rshift.fold_range (tmp_range, utype, tmp, op2);
2621 else
2622 op_rshift.fold_range (tmp_range, utype, lhs, op2);
2624 // Start with ranges which can produce the LHS by right shifting the
2625 // result by the shift amount.
2626 // ie [0x08, 0xF0] = op1 << 2 will start with
2627 // [00001000, 11110000] = op1 << 2
2628 // [0x02, 0x3C] aka [00000010, 00111100]
2630 // Then create a range from the LB with the least significant upper bit
2631 // set, to the upper bound with all the bits set.
2632 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2634 // Ideally we do this for each subrange, but just lump them all for now.
2635 unsigned low_bits = TYPE_PRECISION (utype) - shift.to_uhwi ();
2636 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
2637 wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
2638 wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
2639 int_range<2> fill_range (utype, new_lb, new_ub);
2640 tmp_range.union_ (fill_range);
2642 if (utype != type)
2643 range_cast (tmp_range, type);
2645 r.intersect (tmp_range);
2646 return true;
2649 return !r.varying_p ();
2652 bool
2653 operator_rshift::op1_range (irange &r,
2654 tree type,
2655 const irange &lhs,
2656 const irange &op2,
2657 relation_trio) const
2659 if (lhs.undefined_p ())
2660 return false;
2661 wide_int shift;
2662 if (op2.singleton_p (shift))
2664 // Ignore nonsensical shifts.
2665 unsigned prec = TYPE_PRECISION (type);
2666 if (wi::ge_p (shift,
2667 wi::uhwi (prec, TYPE_PRECISION (op2.type ())),
2668 UNSIGNED))
2669 return false;
2670 if (shift == 0)
2672 r = lhs;
2673 return true;
2676 // Folding the original operation may discard some impossible
2677 // ranges from the LHS.
2678 int_range_max lhs_refined;
2679 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
2680 lhs_refined.intersect (lhs);
2681 if (lhs_refined.undefined_p ())
2683 r.set_undefined ();
2684 return true;
2686 int_range_max shift_range (op2.type (), shift, shift);
2687 int_range_max lb, ub;
2688 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
2689 // LHS
2690 // 0000 0111 = OP1 >> 3
2692 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2693 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2694 // right hand side (0x07).
2695 wide_int mask = wi::bit_not (wi::lshift (wi::minus_one (prec), shift));
2696 int_range_max mask_range (type,
2697 wi::zero (TYPE_PRECISION (type)),
2698 mask);
2699 op_plus.fold_range (ub, type, lb, mask_range);
2700 r = lb;
2701 r.union_ (ub);
2702 if (!contains_zero_p (lhs_refined))
2704 mask_range.invert ();
2705 r.intersect (mask_range);
2707 return true;
2709 return false;
2712 bool
2713 operator_rshift::wi_op_overflows (wide_int &res,
2714 tree type,
2715 const wide_int &w0,
2716 const wide_int &w1) const
2718 signop sign = TYPE_SIGN (type);
2719 if (wi::neg_p (w1))
2720 res = wi::lshift (w0, -w1);
2721 else
2723 // It's unclear from the C standard whether shifts can overflow.
2724 // The following code ignores overflow; perhaps a C standard
2725 // interpretation ruling is needed.
2726 res = wi::rshift (w0, w1, sign);
2728 return false;
2731 bool
2732 operator_rshift::fold_range (irange &r, tree type,
2733 const irange &op1,
2734 const irange &op2,
2735 relation_trio rel) const
2737 int_range_max shift;
2738 if (!get_shift_range (shift, type, op2))
2740 if (op2.undefined_p ())
2741 r.set_undefined ();
2742 else
2743 r.set_zero (type);
2744 return true;
2747 return range_operator::fold_range (r, type, op1, shift, rel);
2750 void
2751 operator_rshift::wi_fold (irange &r, tree type,
2752 const wide_int &lh_lb, const wide_int &lh_ub,
2753 const wide_int &rh_lb, const wide_int &rh_ub) const
2755 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2759 // Add a partial equivalence between the LHS and op1 for casts.
2761 relation_kind
2762 operator_cast::lhs_op1_relation (const irange &lhs,
2763 const irange &op1,
2764 const irange &op2 ATTRIBUTE_UNUSED,
2765 relation_kind) const
2767 if (lhs.undefined_p () || op1.undefined_p ())
2768 return VREL_VARYING;
2769 unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
2770 unsigned op1_prec = TYPE_PRECISION (op1.type ());
2771 // If the result gets sign extended into a larger type, first check whether
2772 // this qualifies as a partial equivalence.
2773 if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
2775 // If the result is sign extended, and the LHS is larger than op1,
2776 // check if op1's range can be negative as the sign extension will
2777 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2778 int_range<3> negs = range_negatives (op1.type ());
2779 negs.intersect (op1);
2780 if (!negs.undefined_p ())
2781 return VREL_VARYING;
2784 unsigned prec = MIN (lhs_prec, op1_prec);
2785 return bits_to_pe (prec);
2788 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2790 inline bool
2791 operator_cast::truncating_cast_p (const irange &inner,
2792 const irange &outer) const
2794 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
2797 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2799 bool
2800 operator_cast::inside_domain_p (const wide_int &min,
2801 const wide_int &max,
2802 const irange &range) const
2804 wide_int domain_min = irange_val_min (range.type ());
2805 wide_int domain_max = irange_val_max (range.type ());
2806 signop domain_sign = TYPE_SIGN (range.type ());
2807 return (wi::le_p (min, domain_max, domain_sign)
2808 && wi::le_p (max, domain_max, domain_sign)
2809 && wi::ge_p (min, domain_min, domain_sign)
2810 && wi::ge_p (max, domain_min, domain_sign));
2814 // Helper for fold_range which works on one pair at a time.
2816 void
2817 operator_cast::fold_pair (irange &r, unsigned index,
2818 const irange &inner,
2819 const irange &outer) const
2821 tree inner_type = inner.type ();
2822 tree outer_type = outer.type ();
2823 signop inner_sign = TYPE_SIGN (inner_type);
2824 unsigned outer_prec = TYPE_PRECISION (outer_type);
2826 // Check to see if casting from INNER to OUTER is a conversion that
2827 // fits in the resulting OUTER type.
2828 wide_int inner_lb = inner.lower_bound (index);
2829 wide_int inner_ub = inner.upper_bound (index);
2830 if (truncating_cast_p (inner, outer))
2832 // We may be able to accommodate a truncating cast if the
2833 // resulting range can be represented in the target type...
2834 if (wi::rshift (wi::sub (inner_ub, inner_lb),
2835 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
2836 inner_sign) != 0)
2838 r.set_varying (outer_type);
2839 return;
2842 // ...but we must still verify that the final range fits in the
2843 // domain. This catches -fstrict-enum restrictions where the domain
2844 // range is smaller than what fits in the underlying type.
2845 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
2846 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
2847 if (inside_domain_p (min, max, outer))
2848 create_possibly_reversed_range (r, outer_type, min, max);
2849 else
2850 r.set_varying (outer_type);
2854 bool
2855 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
2856 const irange &inner,
2857 const irange &outer,
2858 relation_trio) const
2860 if (empty_range_varying (r, type, inner, outer))
2861 return true;
2863 gcc_checking_assert (outer.varying_p ());
2864 gcc_checking_assert (inner.num_pairs () > 0);
2866 // Avoid a temporary by folding the first pair directly into the result.
2867 fold_pair (r, 0, inner, outer);
2869 // Then process any additional pairs by unioning with their results.
2870 for (unsigned x = 1; x < inner.num_pairs (); ++x)
2872 int_range_max tmp;
2873 fold_pair (tmp, x, inner, outer);
2874 r.union_ (tmp);
2875 if (r.varying_p ())
2876 return true;
2879 // Update the nonzero mask. Truncating casts are problematic unless
2880 // the conversion fits in the resulting outer type.
2881 wide_int nz = inner.get_nonzero_bits ();
2882 if (truncating_cast_p (inner, outer)
2883 && wi::rshift (nz, wi::uhwi (TYPE_PRECISION (outer.type ()),
2884 TYPE_PRECISION (inner.type ())),
2885 TYPE_SIGN (inner.type ())) != 0)
2886 return true;
2887 nz = wide_int::from (nz, TYPE_PRECISION (type), TYPE_SIGN (inner.type ()));
2888 r.set_nonzero_bits (nz);
2890 return true;
2893 bool
2894 operator_cast::op1_range (irange &r, tree type,
2895 const irange &lhs,
2896 const irange &op2,
2897 relation_trio) const
2899 if (lhs.undefined_p ())
2900 return false;
2901 tree lhs_type = lhs.type ();
2902 gcc_checking_assert (types_compatible_p (op2.type (), type));
2904 // If we are calculating a pointer, shortcut to what we really care about.
2905 if (POINTER_TYPE_P (type))
2907 // Conversion from other pointers or a constant (including 0/NULL)
2908 // are straightforward.
2909 if (POINTER_TYPE_P (lhs.type ())
2910 || (lhs.singleton_p ()
2911 && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type)))
2913 r = lhs;
2914 range_cast (r, type);
2916 else
2918 // If the LHS is not a pointer nor a singleton, then it is
2919 // either VARYING or non-zero.
2920 if (!contains_zero_p (lhs))
2921 r.set_nonzero (type);
2922 else
2923 r.set_varying (type);
2925 r.intersect (op2);
2926 return true;
2929 if (truncating_cast_p (op2, lhs))
2931 if (lhs.varying_p ())
2932 r.set_varying (type);
2933 else
2935 // We want to insert the LHS as an unsigned value since it
2936 // would not trigger the signed bit of the larger type.
2937 int_range_max converted_lhs = lhs;
2938 range_cast (converted_lhs, unsigned_type_for (lhs_type));
2939 range_cast (converted_lhs, type);
2940 // Start by building the positive signed outer range for the type.
2941 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
2942 TYPE_PRECISION (type));
2943 create_possibly_reversed_range (r, type, lim,
2944 wi::max_value (TYPE_PRECISION (type),
2945 SIGNED));
2946 // For the signed part, we need to simply union the 2 ranges now.
2947 r.union_ (converted_lhs);
2949 // Create maximal negative number outside of LHS bits.
2950 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
2951 TYPE_PRECISION (type));
2952 // Add this to the unsigned LHS range(s).
2953 int_range_max lim_range (type, lim, lim);
2954 int_range_max lhs_neg;
2955 range_op_handler (PLUS_EXPR).fold_range (lhs_neg, type,
2956 converted_lhs, lim_range);
2957 // lhs_neg now has all the negative versions of the LHS.
2958 // Now union in all the values from SIGNED MIN (0x80000) to
2959 // lim-1 in order to fill in all the ranges with the upper
2960 // bits set.
2962 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
2963 // we don't need to create a range from min to lim-1, as calculating
2964 // the negative range would trap trying to create [lim, lim - 1].
2965 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
2966 if (lim != min_val)
2968 int_range_max neg (type,
2969 wi::min_value (TYPE_PRECISION (type),
2970 SIGNED),
2971 lim - 1);
2972 lhs_neg.union_ (neg);
2974 // And finally, munge the signed and unsigned portions.
2975 r.union_ (lhs_neg);
2977 // And intersect with any known value passed in the extra operand.
2978 r.intersect (op2);
2979 return true;
2982 int_range_max tmp;
2983 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
2984 tmp = lhs;
2985 else
2987 // The cast is not truncating, and the range is restricted to
2988 // the range of the RHS by this assignment.
2990 // Cast the range of the RHS to the type of the LHS.
2991 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
2992 // Intersecting this with the LHS range will produce the range,
2993 // which will be cast to the RHS type before returning.
2994 tmp.intersect (lhs);
2997 // Cast the calculated range to the type of the RHS.
2998 fold_range (r, type, tmp, int_range<1> (type));
2999 return true;
3003 class operator_logical_and : public range_operator
3005 using range_operator::fold_range;
3006 using range_operator::op1_range;
3007 using range_operator::op2_range;
3008 public:
3009 virtual bool fold_range (irange &r, tree type,
3010 const irange &lh,
3011 const irange &rh,
3012 relation_trio rel = TRIO_VARYING) const;
3013 virtual bool op1_range (irange &r, tree type,
3014 const irange &lhs,
3015 const irange &op2,
3016 relation_trio rel = TRIO_VARYING) const;
3017 virtual bool op2_range (irange &r, tree type,
3018 const irange &lhs,
3019 const irange &op1,
3020 relation_trio rel = TRIO_VARYING) const;
3021 } op_logical_and;
3024 bool
3025 operator_logical_and::fold_range (irange &r, tree type,
3026 const irange &lh,
3027 const irange &rh,
3028 relation_trio) const
3030 if (empty_range_varying (r, type, lh, rh))
3031 return true;
3033 // 0 && anything is 0.
3034 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
3035 || (wi::eq_p (rh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
3036 r = range_false (type);
3037 else if (contains_zero_p (lh) || contains_zero_p (rh))
3038 // To reach this point, there must be a logical 1 on each side, and
3039 // the only remaining question is whether there is a zero or not.
3040 r = range_true_and_false (type);
3041 else
3042 r = range_true (type);
3043 return true;
3046 bool
3047 operator_logical_and::op1_range (irange &r, tree type,
3048 const irange &lhs,
3049 const irange &op2 ATTRIBUTE_UNUSED,
3050 relation_trio) const
3052 switch (get_bool_state (r, lhs, type))
3054 case BRS_TRUE:
3055 // A true result means both sides of the AND must be true.
3056 r = range_true (type);
3057 break;
3058 default:
3059 // Any other result means only one side has to be false, the
3060 // other side can be anything. So we cannot be sure of any
3061 // result here.
3062 r = range_true_and_false (type);
3063 break;
3065 return true;
3068 bool
3069 operator_logical_and::op2_range (irange &r, tree type,
3070 const irange &lhs,
3071 const irange &op1,
3072 relation_trio) const
3074 return operator_logical_and::op1_range (r, type, lhs, op1);
3078 void
3079 operator_bitwise_and::update_bitmask (irange &r, const irange &lh,
3080 const irange &rh) const
3082 update_known_bitmask (r, BIT_AND_EXPR, lh, rh);
3085 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3086 // by considering the number of leading redundant sign bit copies.
3087 // clrsb (X op Y) >= min (clrsb (X), clrsb (Y)), so for example
3088 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
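// As an illustration, for 8-bit [-1, 0] op [-1, 0] the clrsb of every
// bound is 7, so rprec is (8 - 7) - 1 = 0 and the computed range is
// [wi::mask (0, true), wi::mask (0, false)] = [-1, 0].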
3089 static bool
3090 wi_optimize_signed_bitwise_op (irange &r, tree type,
3091 const wide_int &lh_lb, const wide_int &lh_ub,
3092 const wide_int &rh_lb, const wide_int &rh_ub)
3094 int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
3095 int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
3096 int new_clrsb = MIN (lh_clrsb, rh_clrsb);
3097 if (new_clrsb == 0)
3098 return false;
3099 int type_prec = TYPE_PRECISION (type);
3100 int rprec = (type_prec - new_clrsb) - 1;
3101 value_range_with_overflow (r, type,
3102 wi::mask (rprec, true, type_prec),
3103 wi::mask (rprec, false, type_prec));
3104 return true;
3107 // An AND of 8, 16, 32 or 64 bits can produce a partial equivalence between
3108 // the LHS and op1.
3110 relation_kind
3111 operator_bitwise_and::lhs_op1_relation (const irange &lhs,
3112 const irange &op1,
3113 const irange &op2,
3114 relation_kind) const
3116 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
3117 return VREL_VARYING;
3118 if (!op2.singleton_p ())
3119 return VREL_VARYING;
3120 // If val == 0xff, 0xffff, 0xffffffff or 0xffffffffffffffff, return a partial equivalence.
3121 int prec1 = TYPE_PRECISION (op1.type ());
3122 int prec2 = TYPE_PRECISION (op2.type ());
3123 int mask_prec = 0;
3124 wide_int mask = op2.lower_bound ();
3125 if (wi::eq_p (mask, wi::mask (8, false, prec2)))
3126 mask_prec = 8;
3127 else if (wi::eq_p (mask, wi::mask (16, false, prec2)))
3128 mask_prec = 16;
3129 else if (wi::eq_p (mask, wi::mask (32, false, prec2)))
3130 mask_prec = 32;
3131 else if (wi::eq_p (mask, wi::mask (64, false, prec2)))
3132 mask_prec = 64;
3133 return bits_to_pe (MIN (prec1, mask_prec));
3136 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3137 // possible. Basically, see if we can optimize:
3139 // [LB, UB] op Z
3140 // into:
3141 // [LB op Z, UB op Z]
3143 // If the optimization was successful, accumulate the range in R and
3144 // return TRUE.
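// For example, [0x10, 0x1f] | 3 qualifies: the low two bits are simply
// forced to 1 in every value, so the result is
// [0x10 | 3, 0x1f | 3] = [0x13, 0x1f].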
3146 static bool
3147 wi_optimize_and_or (irange &r,
3148 enum tree_code code,
3149 tree type,
3150 const wide_int &lh_lb, const wide_int &lh_ub,
3151 const wide_int &rh_lb, const wide_int &rh_ub)
3153 // Calculate the singleton mask among the ranges, if any.
3154 wide_int lower_bound, upper_bound, mask;
3155 if (wi::eq_p (rh_lb, rh_ub))
3157 mask = rh_lb;
3158 lower_bound = lh_lb;
3159 upper_bound = lh_ub;
3161 else if (wi::eq_p (lh_lb, lh_ub))
3163 mask = lh_lb;
3164 lower_bound = rh_lb;
3165 upper_bound = rh_ub;
3167 else
3168 return false;
3170 // This works if Z is a constant which (for op | its bitwise not) has n
3171 // consecutive least significant bits cleared followed by m consecutive
3172 // bits set immediately above them, and either m + n == precision or
3173 // (x >> (m + n)) == (y >> (m + n)).
3175 // In that case the least significant n bits of all the values in the
3176 // range are cleared or set, the m bits above them are preserved, and
3177 // any bits above these are required to be the same for all values in
3178 // the range.
3179 wide_int w = mask;
3180 int m = 0, n = 0;
3181 if (code == BIT_IOR_EXPR)
3182 w = ~w;
3183 if (wi::eq_p (w, 0))
3184 n = w.get_precision ();
3185 else
3187 n = wi::ctz (w);
3188 w = ~(w | wi::mask (n, false, w.get_precision ()));
3189 if (wi::eq_p (w, 0))
3190 m = w.get_precision () - n;
3191 else
3192 m = wi::ctz (w) - n;
3194 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
3195 if ((new_mask & lower_bound) != (new_mask & upper_bound))
3196 return false;
3198 wide_int res_lb, res_ub;
3199 if (code == BIT_AND_EXPR)
3201 res_lb = wi::bit_and (lower_bound, mask);
3202 res_ub = wi::bit_and (upper_bound, mask);
3204 else if (code == BIT_IOR_EXPR)
3206 res_lb = wi::bit_or (lower_bound, mask);
3207 res_ub = wi::bit_or (upper_bound, mask);
3209 else
3210 gcc_unreachable ();
3211 value_range_with_overflow (r, type, res_lb, res_ub);
3213 // Furthermore, if the mask is non-zero, the result of an IOR cannot be zero.
3214 if (code == BIT_IOR_EXPR && wi::ne_p (mask, 0))
3216 int_range<2> tmp;
3217 tmp.set_nonzero (type);
3218 r.intersect (tmp);
3220 return true;
3223 // For range [LB, UB] compute two wide_int bit masks.
3225 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3226 // for all numbers in the range the bit is 0, otherwise it might be 0
3227 // or 1.
3229 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3230 // for all numbers in the range the bit is 1, otherwise it might be 0
3231 // or 1.
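// For example, for the 8-bit range [0x30, 0x33] every value has the
// 0x30 bits set, so MUSTBE_NONZERO is 0x30, and no value has a bit set
// outside 0x33, so MAYBE_NONZERO is 0x33.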
3233 void
3234 wi_set_zero_nonzero_bits (tree type,
3235 const wide_int &lb, const wide_int &ub,
3236 wide_int &maybe_nonzero,
3237 wide_int &mustbe_nonzero)
3239 signop sign = TYPE_SIGN (type);
3241 if (wi::eq_p (lb, ub))
3242 maybe_nonzero = mustbe_nonzero = lb;
3243 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
3245 wide_int xor_mask = lb ^ ub;
3246 maybe_nonzero = lb | ub;
3247 mustbe_nonzero = lb & ub;
3248 if (xor_mask != 0)
3250 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
3251 maybe_nonzero.get_precision ());
3252 maybe_nonzero = maybe_nonzero | mask;
3253 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
3256 else
3258 maybe_nonzero = wi::minus_one (lb.get_precision ());
3259 mustbe_nonzero = wi::zero (lb.get_precision ());
3263 void
3264 operator_bitwise_and::wi_fold (irange &r, tree type,
3265 const wide_int &lh_lb,
3266 const wide_int &lh_ub,
3267 const wide_int &rh_lb,
3268 const wide_int &rh_ub) const
3270 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3271 return;
3273 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3274 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3275 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3276 maybe_nonzero_lh, mustbe_nonzero_lh);
3277 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3278 maybe_nonzero_rh, mustbe_nonzero_rh);
3280 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
3281 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
3282 signop sign = TYPE_SIGN (type);
3283 unsigned prec = TYPE_PRECISION (type);
3284 // If both input ranges contain only negative values, we can
3285 // truncate the result range maximum to the minimum of the
3286 // input range maxima.
3287 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
3289 new_ub = wi::min (new_ub, lh_ub, sign);
3290 new_ub = wi::min (new_ub, rh_ub, sign);
3292 // If either input range contains only non-negative values
3293 // we can truncate the result range maximum to the respective
3294 // maximum of the input range.
3295 if (wi::ge_p (lh_lb, 0, sign))
3296 new_ub = wi::min (new_ub, lh_ub, sign);
3297 if (wi::ge_p (rh_lb, 0, sign))
3298 new_ub = wi::min (new_ub, rh_ub, sign);
3299 // PR68217: In the case of signed & sign-bit-CST, the result should
3300 // be [-INF, 0] instead of [-INF, INF].
3301 if (wi::gt_p (new_lb, new_ub, sign))
3303 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
3304 if (sign == SIGNED
3305 && ((wi::eq_p (lh_lb, lh_ub)
3306 && !wi::cmps (lh_lb, sign_bit))
3307 || (wi::eq_p (rh_lb, rh_ub)
3308 && !wi::cmps (rh_lb, sign_bit))))
3310 new_lb = wi::min_value (prec, sign);
3311 new_ub = wi::zero (prec);
3314 // If the limits got swapped around, return varying.
3315 if (wi::gt_p (new_lb, new_ub, sign))
3317 if (sign == SIGNED
3318 && wi_optimize_signed_bitwise_op (r, type,
3319 lh_lb, lh_ub,
3320 rh_lb, rh_ub))
3321 return;
3322 r.set_varying (type);
3324 else
3325 value_range_with_overflow (r, type, new_lb, new_ub);
3328 static void
3329 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
3331 if (!contains_zero_p (lhs))
3332 r = range_nonzero (type);
3333 else
3334 r.set_varying (type);
3337 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
3338 (otherwise return VAL). VAL and MASK must be zero-extended for
3339 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3340 (to transform signed values into unsigned) and at the end xor
3341 SGNBIT back. */
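/* For example, with VAL = 5 (0101), MASK = 0xc (1100), SGNBIT = 0 and
   PREC = 8, the smallest RES > VAL satisfying (RES & MASK) == RES is
   8 (1000).  */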
3343 wide_int
3344 masked_increment (const wide_int &val_in, const wide_int &mask,
3345 const wide_int &sgnbit, unsigned int prec)
3347 wide_int bit = wi::one (prec), res;
3348 unsigned int i;
3350 wide_int val = val_in ^ sgnbit;
3351 for (i = 0; i < prec; i++, bit += bit)
3353 res = mask;
3354 if ((res & bit) == 0)
3355 continue;
3356 res = bit - 1;
3357 res = wi::bit_and_not (val + bit, res);
3358 res &= mask;
3359 if (wi::gtu_p (res, val))
3360 return res ^ sgnbit;
3362 return val ^ sgnbit;
3365 // This was shamelessly stolen from register_edge_assert_for_2 and
3366 // adjusted to work with iranges.
3368 void
3369 operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
3370 const irange &lhs,
3371 const irange &op2) const
3373 if (!op2.singleton_p ())
3375 set_nonzero_range_from_mask (r, type, lhs);
3376 return;
3378 unsigned int nprec = TYPE_PRECISION (type);
3379 wide_int cst2v = op2.lower_bound ();
3380 bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
3381 wide_int sgnbit;
3382 if (cst2n)
3383 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3384 else
3385 sgnbit = wi::zero (nprec);
3387 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3389 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3390 // maximum unsigned value is ~0. For signed comparison, if CST2
3391 // doesn't have the most significant bit set, handle it similarly. If
3392 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
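// For example, solving [5, +INF] = x & 0xc: the smallest value of
// x & 0xc that is greater than 5 is 8 (found by masked_increment), so
// x itself must lie in [8, +INF].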
3393 wide_int valv = lhs.lower_bound ();
3394 wide_int minv = valv & cst2v, maxv;
3395 bool we_know_nothing = false;
3396 if (minv != valv)
3398 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3399 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3400 if (minv == valv)
3402 // If we can't determine anything on this bound, fall
3403 // through and conservatively solve for the other end point.
3404 we_know_nothing = true;
3407 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3408 if (we_know_nothing)
3409 r.set_varying (type);
3410 else
3411 create_possibly_reversed_range (r, type, minv, maxv);
3413 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3415 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3416 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3417 // VAL2 where
3418 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3419 // as maximum.
3420 // For signed comparison, if CST2 doesn't have most significant bit
3421 // set, handle it similarly. If CST2 has MSB set, the maximum is
3422 // the same and minimum is INT_MIN.
3423 valv = lhs.upper_bound ();
3424 minv = valv & cst2v;
3425 if (minv == valv)
3426 maxv = valv;
3427 else
3429 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3430 if (maxv == valv)
3432 // If we couldn't determine anything on either bound, return
3433 // undefined.
3434 if (we_know_nothing)
3435 r.set_undefined ();
3436 return;
3438 maxv -= 1;
3440 maxv |= ~cst2v;
3441 minv = sgnbit;
3442 int_range<2> upper_bits;
3443 create_possibly_reversed_range (upper_bits, type, minv, maxv);
3444 r.intersect (upper_bits);
3447 bool
3448 operator_bitwise_and::op1_range (irange &r, tree type,
3449 const irange &lhs,
3450 const irange &op2,
3451 relation_trio) const
3453 if (lhs.undefined_p ())
3454 return false;
3455 if (types_compatible_p (type, boolean_type_node))
3456 return op_logical_and.op1_range (r, type, lhs, op2);
3458 r.set_undefined ();
3459 for (unsigned i = 0; i < lhs.num_pairs (); ++i)
3461 int_range_max chunk (lhs.type (),
3462 lhs.lower_bound (i),
3463 lhs.upper_bound (i));
3464 int_range_max res;
3465 simple_op1_range_solver (res, type, chunk, op2);
3466 r.union_ (res);
3468 if (r.undefined_p ())
3469 set_nonzero_range_from_mask (r, type, lhs);
3471 // For 0 = op1 & MASK, op1 is ~MASK.
3472 if (lhs.zero_p () && op2.singleton_p ())
3474 wide_int nz = wi::bit_not (op2.get_nonzero_bits ());
3475 int_range<2> tmp (type);
3476 tmp.set_nonzero_bits (nz);
3477 r.intersect (tmp);
3479 return true;
3482 bool
3483 operator_bitwise_and::op2_range (irange &r, tree type,
3484 const irange &lhs,
3485 const irange &op1,
3486 relation_trio) const
3488 return operator_bitwise_and::op1_range (r, type, lhs, op1);
3492 class operator_logical_or : public range_operator
3494 using range_operator::fold_range;
3495 using range_operator::op1_range;
3496 using range_operator::op2_range;
3497 public:
3498 virtual bool fold_range (irange &r, tree type,
3499 const irange &lh,
3500 const irange &rh,
3501 relation_trio rel = TRIO_VARYING) const;
3502 virtual bool op1_range (irange &r, tree type,
3503 const irange &lhs,
3504 const irange &op2,
3505 relation_trio rel = TRIO_VARYING) const;
3506 virtual bool op2_range (irange &r, tree type,
3507 const irange &lhs,
3508 const irange &op1,
3509 relation_trio rel = TRIO_VARYING) const;
3510 } op_logical_or;
3512 bool
3513 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3514 const irange &lh,
3515 const irange &rh,
3516 relation_trio) const
3518 if (empty_range_varying (r, type, lh, rh))
3519 return true;
3521 r = lh;
3522 r.union_ (rh);
3523 return true;
3526 bool
3527 operator_logical_or::op1_range (irange &r, tree type,
3528 const irange &lhs,
3529 const irange &op2 ATTRIBUTE_UNUSED,
3530 relation_trio) const
3532 switch (get_bool_state (r, lhs, type))
3534 case BRS_FALSE:
3535 // A false result means both sides of the OR must be false.
3536 r = range_false (type);
3537 break;
3538 default:
3539 // Any other result means only one side has to be true, the
3540 // other side can be anything.  So we can't be sure of any result
3541 // here.
3542 r = range_true_and_false (type);
3543 break;
3545 return true;
3548 bool
3549 operator_logical_or::op2_range (irange &r, tree type,
3550 const irange &lhs,
3551 const irange &op1,
3552 relation_trio) const
3554 return operator_logical_or::op1_range (r, type, lhs, op1);
3558 void
3559 operator_bitwise_or::update_bitmask (irange &r, const irange &lh,
3560 const irange &rh) const
3562 update_known_bitmask (r, BIT_IOR_EXPR, lh, rh);
3565 void
3566 operator_bitwise_or::wi_fold (irange &r, tree type,
3567 const wide_int &lh_lb,
3568 const wide_int &lh_ub,
3569 const wide_int &rh_lb,
3570 const wide_int &rh_ub) const
3572 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3573 return;
3575 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3576 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3577 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3578 maybe_nonzero_lh, mustbe_nonzero_lh);
3579 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3580 maybe_nonzero_rh, mustbe_nonzero_rh);
3581 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
3582 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
3583 signop sign = TYPE_SIGN (type);
3584 // If the input ranges contain only positive values we can
3585 // truncate the minimum of the result range to the maximum
3586 // of the input range minima.
3587 if (wi::ge_p (lh_lb, 0, sign)
3588 && wi::ge_p (rh_lb, 0, sign))
3590 new_lb = wi::max (new_lb, lh_lb, sign);
3591 new_lb = wi::max (new_lb, rh_lb, sign);
3593 // If either input range contains only negative values,
3594 // we can truncate the minimum of the result range to the
3595 // respective minimum of that input range.
3596 if (wi::lt_p (lh_ub, 0, sign))
3597 new_lb = wi::max (new_lb, lh_lb, sign);
3598 if (wi::lt_p (rh_ub, 0, sign))
3599 new_lb = wi::max (new_lb, rh_lb, sign);
3600 // If the limits got swapped around, return a conservative range.
3601 if (wi::gt_p (new_lb, new_ub, sign))
3603 // Make sure that nonzero|X is nonzero.
3604 if (wi::gt_p (lh_lb, 0, sign)
3605 || wi::gt_p (rh_lb, 0, sign)
3606 || wi::lt_p (lh_ub, 0, sign)
3607 || wi::lt_p (rh_ub, 0, sign))
3608 r.set_nonzero (type);
3609 else if (sign == SIGNED
3610 && wi_optimize_signed_bitwise_op (r, type,
3611 lh_lb, lh_ub,
3612 rh_lb, rh_ub))
3613 return;
3614 else
3615 r.set_varying (type);
3616 return;
3618 value_range_with_overflow (r, type, new_lb, new_ub);
3621 bool
3622 operator_bitwise_or::op1_range (irange &r, tree type,
3623 const irange &lhs,
3624 const irange &op2,
3625 relation_trio) const
3627 if (lhs.undefined_p ())
3628 return false;
3629 // If this is really a logical wi_fold, call that.
3630 if (types_compatible_p (type, boolean_type_node))
3631 return op_logical_or.op1_range (r, type, lhs, op2);
3633 if (lhs.zero_p ())
3635 r.set_zero (type);
3636 return true;
3638 r.set_varying (type);
3639 return true;
3642 bool
3643 operator_bitwise_or::op2_range (irange &r, tree type,
3644 const irange &lhs,
3645 const irange &op1,
3646 relation_trio) const
3648 return operator_bitwise_or::op1_range (r, type, lhs, op1);
3651 void
3652 operator_bitwise_xor::update_bitmask (irange &r, const irange &lh,
3653 const irange &rh) const
3655 update_known_bitmask (r, BIT_XOR_EXPR, lh, rh);
3658 void
3659 operator_bitwise_xor::wi_fold (irange &r, tree type,
3660 const wide_int &lh_lb,
3661 const wide_int &lh_ub,
3662 const wide_int &rh_lb,
3663 const wide_int &rh_ub) const
3665 signop sign = TYPE_SIGN (type);
3666 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3667 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3668 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3669 maybe_nonzero_lh, mustbe_nonzero_lh);
3670 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3671 maybe_nonzero_rh, mustbe_nonzero_rh);
3673 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
3674 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
3675 wide_int result_one_bits
3676 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
3677 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
3678 wide_int new_ub = ~result_zero_bits;
3679 wide_int new_lb = result_one_bits;
3681 // If the range has all positive or all negative values, the result
3682 // is better than VARYING.
3683 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
3684 value_range_with_overflow (r, type, new_lb, new_ub);
3685 else if (sign == SIGNED
3686 && wi_optimize_signed_bitwise_op (r, type,
3687 lh_lb, lh_ub,
3688 rh_lb, rh_ub))
3689 ; /* Do nothing. */
3690 else
3691 r.set_varying (type);
3693 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3694 if (wi::lt_p (lh_ub, rh_lb, sign)
3695 || wi::lt_p (rh_ub, lh_lb, sign)
3696 || wi::ne_p (result_one_bits, 0))
3698 int_range<2> tmp;
3699 tmp.set_nonzero (type);
3700 r.intersect (tmp);
3704 bool
3705 operator_bitwise_xor::op1_op2_relation_effect (irange &lhs_range,
3706 tree type,
3707 const irange &,
3708 const irange &,
3709 relation_kind rel) const
3711 if (rel == VREL_VARYING)
3712 return false;
3714 int_range<2> rel_range;
3716 switch (rel)
3718 case VREL_EQ:
3719 rel_range.set_zero (type);
3720 break;
3721 case VREL_NE:
3722 rel_range.set_nonzero (type);
3723 break;
3724 default:
3725 return false;
3728 lhs_range.intersect (rel_range);
3729 return true;
3732 bool
3733 operator_bitwise_xor::op1_range (irange &r, tree type,
3734 const irange &lhs,
3735 const irange &op2,
3736 relation_trio) const
3738 if (lhs.undefined_p () || lhs.varying_p ())
3740 r = lhs;
3741 return true;
3743 if (types_compatible_p (type, boolean_type_node))
3745 switch (get_bool_state (r, lhs, type))
3747 case BRS_TRUE:
3748 if (op2.varying_p ())
3749 r.set_varying (type);
3750 else if (op2.zero_p ())
3751 r = range_true (type);
3752 // See get_bool_state for the rationale
3753 else if (contains_zero_p (op2))
3754 r = range_true_and_false (type);
3755 else
3756 r = range_false (type);
3757 break;
3758 case BRS_FALSE:
3759 r = op2;
3760 break;
3761 default:
3762 break;
3764 return true;
3766 r.set_varying (type);
3767 return true;
3770 bool
3771 operator_bitwise_xor::op2_range (irange &r, tree type,
3772 const irange &lhs,
3773 const irange &op1,
3774 relation_trio) const
3776 return operator_bitwise_xor::op1_range (r, type, lhs, op1);
3779 class operator_trunc_mod : public range_operator
3781 using range_operator::op1_range;
3782 using range_operator::op2_range;
3783 public:
3784 virtual void wi_fold (irange &r, tree type,
3785 const wide_int &lh_lb,
3786 const wide_int &lh_ub,
3787 const wide_int &rh_lb,
3788 const wide_int &rh_ub) const;
3789 virtual bool op1_range (irange &r, tree type,
3790 const irange &lhs,
3791 const irange &op2,
3792 relation_trio) const;
3793 virtual bool op2_range (irange &r, tree type,
3794 const irange &lhs,
3795 const irange &op1,
3796 relation_trio) const;
3797 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
3798 { update_known_bitmask (r, TRUNC_MOD_EXPR, lh, rh); }
3799 } op_trunc_mod;
3801 void
3802 operator_trunc_mod::wi_fold (irange &r, tree type,
3803 const wide_int &lh_lb,
3804 const wide_int &lh_ub,
3805 const wide_int &rh_lb,
3806 const wide_int &rh_ub) const
3808 wide_int new_lb, new_ub, tmp;
3809 signop sign = TYPE_SIGN (type);
3810 unsigned prec = TYPE_PRECISION (type);
3812 // Mod 0 is undefined.
3813 if (wi_zero_p (type, rh_lb, rh_ub))
3815 r.set_undefined ();
3816 return;
3819 // Check for constant and try to fold.
3820 if (lh_lb == lh_ub && rh_lb == rh_ub)
3822 wi::overflow_type ov = wi::OVF_NONE;
3823 tmp = wi::mod_trunc (lh_lb, rh_lb, sign, &ov);
3824 if (ov == wi::OVF_NONE)
3826 r = int_range<2> (type, tmp, tmp);
3827 return;
3831 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
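// For example, signed [-5,7] % [2,3] folds to [-2, 2]: the magnitude of
// the result is bounded by the largest divisor magnitude minus one, and
// the sign follows the dividend.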
3832 new_ub = rh_ub - 1;
3833 if (sign == SIGNED)
3835 tmp = -1 - rh_lb;
3836 new_ub = wi::smax (new_ub, tmp);
3839 if (sign == UNSIGNED)
3840 new_lb = wi::zero (prec);
3841 else
3843 new_lb = -new_ub;
3844 tmp = lh_lb;
3845 if (wi::gts_p (tmp, 0))
3846 tmp = wi::zero (prec);
3847 new_lb = wi::smax (new_lb, tmp);
3849 tmp = lh_ub;
3850 if (sign == SIGNED && wi::neg_p (tmp))
3851 tmp = wi::zero (prec);
3852 new_ub = wi::min (new_ub, tmp, sign);
3854 value_range_with_overflow (r, type, new_lb, new_ub);
3857 bool
3858 operator_trunc_mod::op1_range (irange &r, tree type,
3859 const irange &lhs,
3860 const irange &,
3861 relation_trio) const
3863 if (lhs.undefined_p ())
3864 return false;
3865 // PR 91029.
3866 signop sign = TYPE_SIGN (type);
3867 unsigned prec = TYPE_PRECISION (type);
3868 // If (a % b) >= x && x > 0, then a >= x.
3869 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3871 r = value_range (type, lhs.lower_bound (), wi::max_value (prec, sign));
3872 return true;
3874 // If (a % b) <= x && x < 0, then a <= x.
3875 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3877 r = value_range (type, wi::min_value (prec, sign), lhs.upper_bound ());
3878 return true;
3880 return false;
3883 bool
3884 operator_trunc_mod::op2_range (irange &r, tree type,
3885 const irange &lhs,
3886 const irange &,
3887 relation_trio) const
3889 if (lhs.undefined_p ())
3890 return false;
3891 // PR 91029.
3892 signop sign = TYPE_SIGN (type);
3893 unsigned prec = TYPE_PRECISION (type);
3894 // If (a % b) >= x && x > 0, then b is in ~[-x, x] for signed
3895 // or b > x for unsigned.
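// For example, if (a % b) >= 5 the divisor's magnitude must exceed 5,
// so b is in ~[-5, 5] for signed types and in [6, +INF] for unsigned.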
3896 if (wi::gt_p (lhs.lower_bound (), 0, sign))
3898 if (sign == SIGNED)
3899 r = value_range (type, wi::neg (lhs.lower_bound ()),
3900 lhs.lower_bound (), VR_ANTI_RANGE);
3901 else if (wi::lt_p (lhs.lower_bound (), wi::max_value (prec, sign),
3902 sign))
3903 r = value_range (type, lhs.lower_bound () + 1,
3904 wi::max_value (prec, sign));
3905 else
3906 return false;
3907 return true;
3909 // If (a % b) <= x && x < 0, then b is in ~[x, -x].
3910 if (wi::lt_p (lhs.upper_bound (), 0, sign))
3912 if (wi::gt_p (lhs.upper_bound (), wi::min_value (prec, sign), sign))
3913 r = value_range (type, lhs.upper_bound (),
3914 wi::neg (lhs.upper_bound ()), VR_ANTI_RANGE);
3915 else
3916 return false;
3917 return true;
3919 return false;
3923 class operator_logical_not : public range_operator
3925 using range_operator::fold_range;
3926 using range_operator::op1_range;
3927 public:
3928 virtual bool fold_range (irange &r, tree type,
3929 const irange &lh,
3930 const irange &rh,
3931 relation_trio rel = TRIO_VARYING) const;
3932 virtual bool op1_range (irange &r, tree type,
3933 const irange &lhs,
3934 const irange &op2,
3935 relation_trio rel = TRIO_VARYING) const;
3936 } op_logical_not;
3938 // Folding a logical NOT, oddly enough, involves doing nothing on the
3939 // forward pass through. During the initial walk backwards, the
3940 // logical NOT reversed the desired outcome on the way back, so on the
3941 // way forward all we do is pass the range forward.
3943 // b_2 = x_1 < 20
3944 // b_3 = !b_2
3945 // if (b_3)
3946 // to determine the TRUE branch, walking backward
3947 // if (b_3) if ([1,1])
3948 // b_3 = !b_2 [1,1] = ![0,0]
3949 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
3950 // which is the result we are looking for... so pass it through.
3952 bool
3953 operator_logical_not::fold_range (irange &r, tree type,
3954 const irange &lh,
3955 const irange &rh ATTRIBUTE_UNUSED,
3956 relation_trio) const
3958 if (empty_range_varying (r, type, lh, rh))
3959 return true;
3961 r = lh;
3962 if (!lh.varying_p () && !lh.undefined_p ())
3963 r.invert ();
3965 return true;
3968 bool
3969 operator_logical_not::op1_range (irange &r,
3970 tree type,
3971 const irange &lhs,
3972 const irange &op2,
3973 relation_trio) const
3975 // Logical NOT is involutory... do it again.
3976 return fold_range (r, type, lhs, op2);
3980 bool
3981 operator_bitwise_not::fold_range (irange &r, tree type,
3982 const irange &lh,
3983 const irange &rh,
3984 relation_trio) const
3986 if (empty_range_varying (r, type, lh, rh))
3987 return true;
3989 if (types_compatible_p (type, boolean_type_node))
3990 return op_logical_not.fold_range (r, type, lh, rh);
3992 // ~X is simply -1 - X.
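// For example, in 8 bits ~5 == 0xfa == -6, which is exactly -1 - 5.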
3993 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
3994 wi::minus_one (TYPE_PRECISION (type)));
3995 return range_op_handler (MINUS_EXPR).fold_range (r, type, minusone, lh);
3998 bool
3999 operator_bitwise_not::op1_range (irange &r, tree type,
4000 const irange &lhs,
4001 const irange &op2,
4002 relation_trio) const
4004 if (lhs.undefined_p ())
4005 return false;
4006 if (types_compatible_p (type, boolean_type_node))
4007 return op_logical_not.op1_range (r, type, lhs, op2);
4009 // ~X is -1 - X and since bitwise NOT is involutory... do it again.
4010 return fold_range (r, type, lhs, op2);
4014 bool
4015 operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4016 const irange &lh,
4017 const irange &rh ATTRIBUTE_UNUSED,
4018 relation_trio) const
4020 r = lh;
4021 return true;
4025 // Determine if there is a relationship between LHS and OP1.
4027 relation_kind
4028 operator_identity::lhs_op1_relation (const irange &lhs,
4029 const irange &op1 ATTRIBUTE_UNUSED,
4030 const irange &op2 ATTRIBUTE_UNUSED,
4031 relation_kind) const
4033 if (lhs.undefined_p ())
4034 return VREL_VARYING;
4035 // Simply a copy, so they are equivalent.
4036 return VREL_EQ;
4039 bool
4040 operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4041 const irange &lh,
4042 const irange &rh ATTRIBUTE_UNUSED,
4043 relation_trio) const
4045 r = lh;
4046 return true;
4049 bool
4050 operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
4051 const irange &lhs,
4052 const irange &op2 ATTRIBUTE_UNUSED,
4053 relation_trio) const
4055 r = lhs;
4056 return true;
4060 class operator_unknown : public range_operator
4062 using range_operator::fold_range;
4063 public:
4064 virtual bool fold_range (irange &r, tree type,
4065 const irange &op1,
4066 const irange &op2,
4067 relation_trio rel = TRIO_VARYING) const;
4068 } op_unknown;
4070 bool
4071 operator_unknown::fold_range (irange &r, tree type,
4072 const irange &lh ATTRIBUTE_UNUSED,
4073 const irange &rh ATTRIBUTE_UNUSED,
4074 relation_trio) const
4076 r.set_varying (type);
4077 return true;
4081 void
4082 operator_abs::wi_fold (irange &r, tree type,
4083 const wide_int &lh_lb, const wide_int &lh_ub,
4084 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4085 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4087 wide_int min, max;
4088 signop sign = TYPE_SIGN (type);
4089 unsigned prec = TYPE_PRECISION (type);
4091 // Pass through LH for the easy cases.
4092 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
4094 r = int_range<1> (type, lh_lb, lh_ub);
4095 return;
4098 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4099 // a useful range.
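// For example (editorial note): with wrapping signed chars,
// ABS (-128) = -(-128) wraps back to -128, so a range whose lower bound
// is the minimum may still yield a negative result and we give up.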
4100 wide_int min_value = wi::min_value (prec, sign);
4101 wide_int max_value = wi::max_value (prec, sign);
4102 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
4104 r.set_varying (type);
4105 return;
4108 // ABS_EXPR may flip the range around, if the original range
4109 // included negative values.
4110 if (wi::eq_p (lh_lb, min_value))
4112 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4113 // returned [-MIN, -MIN], so this preserves that behavior.  See PR37078.
4114 if (wi::eq_p (lh_ub, min_value))
4116 r = int_range<1> (type, min_value, min_value);
4117 return;
4119 min = max_value;
4121 else
4122 min = wi::abs (lh_lb);
4124 if (wi::eq_p (lh_ub, min_value))
4125 max = max_value;
4126 else
4127 max = wi::abs (lh_ub);
4129 // If the range contains zero then we know that the minimum value in the
4130 // range will be zero.
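// For example (editorial note): ABS ([-3, 7]) = [0, 7]; the lower bound
// collapses to zero and the upper bound is max (3, 7).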
4131 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
4133 if (wi::gt_p (min, max, sign))
4134 max = min;
4135 min = wi::zero (prec);
4137 else
4139 // If the range was reversed, swap MIN and MAX.
4140 if (wi::gt_p (min, max, sign))
4141 std::swap (min, max);
4144 // If the new range has its limits swapped around (MIN > MAX), then
4145 // the operation caused one of them to wrap around. The only thing
4146 // we know is that the result is positive.
4147 if (wi::gt_p (min, max, sign))
4149 min = wi::zero (prec);
4150 max = max_value;
4152 r = int_range<1> (type, min, max);
4155 bool
4156 operator_abs::op1_range (irange &r, tree type,
4157 const irange &lhs,
4158 const irange &op2,
4159 relation_trio) const
4161 if (empty_range_varying (r, type, lhs, op2))
4162 return true;
4163 if (TYPE_UNSIGNED (type))
4165 r = lhs;
4166 return true;
4168 // Start with the positives because negatives are an impossible result.
4169 int_range_max positives = range_positives (type);
4170 positives.intersect (lhs);
4171 r = positives;
4172 // Then add the negative of each pair:
4173 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4174 for (unsigned i = 0; i < positives.num_pairs (); ++i)
4175 r.union_ (int_range<1> (type,
4176 -positives.upper_bound (i),
4177 -positives.lower_bound (i)));
4178 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE, which is
4179 // unrepresentable, so add TYPE_MIN_VALUE itself back in that case.
4180 wide_int min_value = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4181 wide_int lb = lhs.lower_bound ();
4182 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lb, min_value))
4183 r.union_ (int_range<2> (type, lb, lb));
4184 return true;
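// Editorial illustration (not part of the original source): under the
// selftest macros defined below, the example in the comment above can be
// checked like this.
//
//   int_range<2> lhs (signed_char_type_node, SCHAR (5), SCHAR (20));
//   int_range_max op1;
//   op_abs.op1_range (op1, signed_char_type_node, lhs,
//                     int_range<2> (signed_char_type_node));
//   // op1 is now [-20,-5][5,20].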
4188 class operator_absu : public range_operator
4190 public:
4191 virtual void wi_fold (irange &r, tree type,
4192 const wide_int &lh_lb, const wide_int &lh_ub,
4193 const wide_int &rh_lb, const wide_int &rh_ub) const;
4194 } op_absu;
4196 void
4197 operator_absu::wi_fold (irange &r, tree type,
4198 const wide_int &lh_lb, const wide_int &lh_ub,
4199 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4200 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4202 wide_int new_lb, new_ub;
4204 // Pass through VR0 for the easy cases.
4205 if (wi::ges_p (lh_lb, 0))
4207 new_lb = lh_lb;
4208 new_ub = lh_ub;
4210 else
4212 new_lb = wi::abs (lh_lb);
4213 new_ub = wi::abs (lh_ub);
4215 // If the range contains zero then we know that the minimum
4216 // value in the range will be zero.
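// For example (editorial note): ABSU ([-5, 3]) = [0, 5]; the bounds start
// as [5, 3], the upper bound is raised to 5 and the lower bound drops to 0.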
4217 if (wi::ges_p (lh_ub, 0))
4219 if (wi::gtu_p (new_lb, new_ub))
4220 new_ub = new_lb;
4221 new_lb = wi::zero (TYPE_PRECISION (type));
4223 else
4224 std::swap (new_lb, new_ub);
4227 gcc_checking_assert (TYPE_UNSIGNED (type));
4228 r = int_range<1> (type, new_lb, new_ub);
4232 bool
4233 operator_negate::fold_range (irange &r, tree type,
4234 const irange &lh,
4235 const irange &rh,
4236 relation_trio) const
4238 if (empty_range_varying (r, type, lh, rh))
4239 return true;
4240 // -X is simply 0 - X.
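// For example (editorial note): folding NEGATE over [-10, 3] is the same
// as folding 0 - [-10, 3], which yields [-3, 10].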
4241 return range_op_handler (MINUS_EXPR).fold_range (r, type,
4242 range_zero (type), lh);
4245 bool
4246 operator_negate::op1_range (irange &r, tree type,
4247 const irange &lhs,
4248 const irange &op2,
4249 relation_trio) const
4251 // NEGATE is involutory.
4252 return fold_range (r, type, lhs, op2);
4256 bool
4257 operator_addr_expr::fold_range (irange &r, tree type,
4258 const irange &lh,
4259 const irange &rh,
4260 relation_trio) const
4262 if (empty_range_varying (r, type, lh, rh))
4263 return true;
4265 // Return a non-null pointer of the LHS type (passed in op2).
4266 if (lh.zero_p ())
4267 r = range_zero (type);
4268 else if (!contains_zero_p (lh))
4269 r = range_nonzero (type);
4270 else
4271 r.set_varying (type);
4272 return true;
4275 bool
4276 operator_addr_expr::op1_range (irange &r, tree type,
4277 const irange &lhs,
4278 const irange &op2,
4279 relation_trio) const
4281 return operator_addr_expr::fold_range (r, type, lhs, op2);
4284 // Initialize the integral operators in the primary table.
4286 void
4287 range_op_table::initialize_integral_ops ()
4289 set (TRUNC_DIV_EXPR, op_trunc_div);
4290 set (FLOOR_DIV_EXPR, op_floor_div);
4291 set (ROUND_DIV_EXPR, op_round_div);
4292 set (CEIL_DIV_EXPR, op_ceil_div);
4293 set (EXACT_DIV_EXPR, op_exact_div);
4294 set (LSHIFT_EXPR, op_lshift);
4295 set (RSHIFT_EXPR, op_rshift);
4296 set (TRUTH_AND_EXPR, op_logical_and);
4297 set (TRUTH_OR_EXPR, op_logical_or);
4298 set (TRUNC_MOD_EXPR, op_trunc_mod);
4299 set (TRUTH_NOT_EXPR, op_logical_not);
4300 set (IMAGPART_EXPR, op_unknown);
4301 set (REALPART_EXPR, op_unknown);
4302 set (ABSU_EXPR, op_absu);
4303 set (OP_WIDEN_MULT_SIGNED, op_widen_mult_signed);
4304 set (OP_WIDEN_MULT_UNSIGNED, op_widen_mult_unsigned);
4305 set (OP_WIDEN_PLUS_SIGNED, op_widen_plus_signed);
4306 set (OP_WIDEN_PLUS_UNSIGNED, op_widen_plus_unsigned);
4310 #if CHECKING_P
4311 #include "selftest.h"
4313 namespace selftest
4315 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4316 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4317 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4318 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4319 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4320 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4322 static void
4323 range_op_cast_tests ()
4325 int_range<2> r0, r1, r2, rold;
4326 r0.set_varying (integer_type_node);
4327 wide_int maxint = r0.upper_bound ();
4329 // If a range is in any way outside of the range of the type being
4330 // converted to, default to the full range of the new type.
4331 r0.set_varying (short_integer_type_node);
4332 wide_int minshort = r0.lower_bound ();
4333 wide_int maxshort = r0.upper_bound ();
4334 if (TYPE_PRECISION (integer_type_node)
4335 > TYPE_PRECISION (short_integer_type_node))
4337 r1 = int_range<1> (integer_type_node,
4338 wi::zero (TYPE_PRECISION (integer_type_node)),
4339 maxint);
4340 range_cast (r1, short_integer_type_node);
4341 ASSERT_TRUE (r1.lower_bound () == minshort
4342 && r1.upper_bound() == maxshort);
4345 // (unsigned char)[-5,-1] => [251,255].
4346 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (-1));
4347 range_cast (r0, unsigned_char_type_node);
4348 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node,
4349 UCHAR (251), UCHAR (255)));
4350 range_cast (r0, signed_char_type_node);
4351 ASSERT_TRUE (r0 == rold);
4353 // (signed char)[15, 150] => [-128,-106][15,127].
4354 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (15), UCHAR (150));
4355 range_cast (r0, signed_char_type_node);
4356 r1 = int_range<1> (signed_char_type_node, SCHAR (15), SCHAR (127));
4357 r2 = int_range<1> (signed_char_type_node, SCHAR (-128), SCHAR (-106));
4358 r1.union_ (r2);
4359 ASSERT_TRUE (r1 == r0);
4360 range_cast (r0, unsigned_char_type_node);
4361 ASSERT_TRUE (r0 == rold);
4363 // (unsigned char)[-5, 5] => [0,5][251,255].
4364 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (5));
4365 range_cast (r0, unsigned_char_type_node);
4366 r1 = int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255));
4367 r2 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4368 r1.union_ (r2);
4369 ASSERT_TRUE (r0 == r1);
4370 range_cast (r0, signed_char_type_node);
4371 ASSERT_TRUE (r0 == rold);
4373 // The same conversion from a wider int: (unsigned char)[-5,5] => [0,5][251,255].
4374 r0 = int_range<1> (integer_type_node, INT (-5), INT (5));
4375 range_cast (r0, unsigned_char_type_node);
4376 r1 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4377 r1.union_ (int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255)));
4378 ASSERT_TRUE (r0 == r1);
4380 // (unsigned char)[5U,1974U] => [0,255].
4381 r0 = int_range<1> (unsigned_type_node, UINT (5), UINT (1974));
4382 range_cast (r0, unsigned_char_type_node);
4383 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (255)));
4384 range_cast (r0, integer_type_node);
4385 // Going to a wider type should not sign extend.
4386 ASSERT_TRUE (r0 == int_range<1> (integer_type_node, INT (0), INT (255)));
4388 // (unsigned char)[-350,15] => [0,255].
4389 r0 = int_range<1> (integer_type_node, INT (-350), INT (15));
4390 range_cast (r0, unsigned_char_type_node);
4391 ASSERT_TRUE (r0 == (int_range<1>
4392 (unsigned_char_type_node,
4393 min_limit (unsigned_char_type_node),
4394 max_limit (unsigned_char_type_node))));
4396 // Casting [-120,20] from signed char to unsigned short.
4397 // => [0, 20][0xff88, 0xffff].
4398 r0 = int_range<1> (signed_char_type_node, SCHAR (-120), SCHAR (20));
4399 range_cast (r0, short_unsigned_type_node);
4400 r1 = int_range<1> (short_unsigned_type_node, UINT16 (0), UINT16 (20));
4401 r2 = int_range<1> (short_unsigned_type_node,
4402 UINT16 (0xff88), UINT16 (0xffff));
4403 r1.union_ (r2);
4404 ASSERT_TRUE (r0 == r1);
4405 // A truncating cast back to signed char will work because [-120, 20]
4406 // is representable in signed char.
4407 range_cast (r0, signed_char_type_node);
4408 ASSERT_TRUE (r0 == int_range<1> (signed_char_type_node,
4409 SCHAR (-120), SCHAR (20)));
4411 // unsigned char -> signed short
4412 // (signed short)[(unsigned char)25, (unsigned char)250]
4413 // => [(signed short)25, (signed short)250]
4414 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (25), UCHAR (250));
4415 range_cast (r0, short_integer_type_node);
4416 r1 = int_range<1> (short_integer_type_node, INT16 (25), INT16 (250));
4417 ASSERT_TRUE (r0 == r1);
4418 range_cast (r0, unsigned_char_type_node);
4419 ASSERT_TRUE (r0 == rold);
4421 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4422 r0 = int_range<1> (long_long_integer_type_node,
4423 min_limit (long_long_integer_type_node),
4424 max_limit (long_long_integer_type_node));
4425 range_cast (r0, short_unsigned_type_node);
4426 r1 = int_range<1> (short_unsigned_type_node,
4427 min_limit (short_unsigned_type_node),
4428 max_limit (short_unsigned_type_node));
4429 ASSERT_TRUE (r0 == r1);
4431 // Casting NONZERO to a narrower type will wrap/overflow so
4432 // it's just the entire range for the narrower type.
4434 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4435 // is outside of the range of a smaller range, return the full
4436 // smaller range.
4437 if (TYPE_PRECISION (integer_type_node)
4438 > TYPE_PRECISION (short_integer_type_node))
4440 r0 = range_nonzero (integer_type_node);
4441 range_cast (r0, short_integer_type_node);
4442 r1 = int_range<1> (short_integer_type_node,
4443 min_limit (short_integer_type_node),
4444 max_limit (short_integer_type_node));
4445 ASSERT_TRUE (r0 == r1);
4448 // Casting NONZERO from a narrower signed to a wider signed.
4450 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4451 // Converting this to 32-bit signed leaves it as [-MIN_16,-1][1, +MAX_16].
4452 r0 = range_nonzero (short_integer_type_node);
4453 range_cast (r0, integer_type_node);
4454 r1 = int_range<1> (integer_type_node, INT (-32768), INT (-1));
4455 r2 = int_range<1> (integer_type_node, INT (1), INT (32767));
4456 r1.union_ (r2);
4457 ASSERT_TRUE (r0 == r1);
4460 static void
4461 range_op_lshift_tests ()
4463 // Test that 0x808.... & 0x8.... still contains 0x8....
4464 // for a large set of numbers.
4466 int_range_max res;
4467 tree big_type = long_long_unsigned_type_node;
4468 unsigned big_prec = TYPE_PRECISION (big_type);
4469 // big_num = 0x808,0000,0000,0000
4470 wide_int big_num = wi::lshift (wi::uhwi (0x808, big_prec),
4471 wi::uhwi (48, big_prec));
4472 op_bitwise_and.fold_range (res, big_type,
4473 int_range <1> (big_type),
4474 int_range <1> (big_type, big_num, big_num));
4475 // val = 0x8,0000,0000,0000
4476 wide_int val = wi::lshift (wi::uhwi (8, big_prec),
4477 wi::uhwi (48, big_prec));
4478 ASSERT_TRUE (res.contains_p (val));
4481 if (TYPE_PRECISION (unsigned_type_node) > 31)
4483 // unsigned VARYING = op1 << 1 should be VARYING.
4484 int_range<2> lhs (unsigned_type_node);
4485 int_range<2> shift (unsigned_type_node, INT (1), INT (1));
4486 int_range_max op1;
4487 op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
4488 ASSERT_TRUE (op1.varying_p ());
4490 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4491 int_range<2> zero (unsigned_type_node, UINT (0), UINT (0));
4492 op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
4493 ASSERT_TRUE (op1.num_pairs () == 2);
4494 // Remove the [0,0] range.
4495 op1.intersect (zero);
4496 ASSERT_TRUE (op1.num_pairs () == 1);
4497 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4498 // which should result in [0,0].
4499 int_range_max result;
4500 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4501 ASSERT_TRUE (result == zero);
4503 // signed VARYING = op1 << 1 should be VARYING.
4504 if (TYPE_PRECISION (integer_type_node) > 31)
4506 // signed VARYING = op1 << 1 should be VARYING.
4507 int_range<2> lhs (integer_type_node);
4508 int_range<2> shift (integer_type_node, INT (1), INT (1));
4509 int_range_max op1;
4510 op_lshift.op1_range (op1, integer_type_node, lhs, shift);
4511 ASSERT_TRUE (op1.varying_p ());
4513 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4514 int_range<2> zero (integer_type_node, INT (0), INT (0));
4515 op_lshift.op1_range (op1, integer_type_node, zero, shift);
4516 ASSERT_TRUE (op1.num_pairs () == 2);
4517 // Remove the [0,0] range.
4518 op1.intersect (zero);
4519 ASSERT_TRUE (op1.num_pairs () == 1);
4520 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4521 // which should result in [0,0].
4522 int_range_max result;
4523 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4524 ASSERT_TRUE (result == zero);
4528 static void
4529 range_op_rshift_tests ()
4531 // unsigned: [3, MAX] = OP1 >> 1
4533 int_range_max lhs (unsigned_type_node,
4534 UINT (3), max_limit (unsigned_type_node));
4535 int_range_max one (unsigned_type_node,
4536 wi::one (TYPE_PRECISION (unsigned_type_node)),
4537 wi::one (TYPE_PRECISION (unsigned_type_node)));
4538 int_range_max op1;
4539 op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
4540 ASSERT_FALSE (op1.contains_p (UINT (3)));
4543 // signed: [3, MAX] = OP1 >> 1
4545 int_range_max lhs (integer_type_node,
4546 INT (3), max_limit (integer_type_node));
4547 int_range_max one (integer_type_node, INT (1), INT (1));
4548 int_range_max op1;
4549 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4550 ASSERT_FALSE (op1.contains_p (INT (-2)));
4553 // This is impossible, so OP1 should be [].
4554 // signed: [MIN, MIN] = OP1 >> 1
4556 int_range_max lhs (integer_type_node,
4557 min_limit (integer_type_node),
4558 min_limit (integer_type_node));
4559 int_range_max one (integer_type_node, INT (1), INT (1));
4560 int_range_max op1;
4561 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4562 ASSERT_TRUE (op1.undefined_p ());
4565 // signed: ~[-1] = OP1 >> 31
4566 if (TYPE_PRECISION (integer_type_node) > 31)
4568 int_range_max lhs (integer_type_node, INT (-1), INT (-1), VR_ANTI_RANGE);
4569 int_range_max shift (integer_type_node, INT (31), INT (31));
4570 int_range_max op1;
4571 op_rshift.op1_range (op1, integer_type_node, lhs, shift);
4572 int_range_max negatives = range_negatives (integer_type_node);
4573 negatives.intersect (op1);
4574 ASSERT_TRUE (negatives.undefined_p ());
4578 static void
4579 range_op_bitwise_and_tests ()
4581 int_range_max res;
4582 wide_int min = min_limit (integer_type_node);
4583 wide_int max = max_limit (integer_type_node);
4584 wide_int tiny = wi::add (min, wi::one (TYPE_PRECISION (integer_type_node)));
4585 int_range_max i1 (integer_type_node, tiny, max);
4586 int_range_max i2 (integer_type_node, INT (255), INT (255));
4588 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4589 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4590 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4592 // VARYING = OP1 & 255: OP1 is VARYING
4593 i1 = int_range<1> (integer_type_node);
4594 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4595 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4597 // For 0 = x & MASK, the nonzero bits of x are bounded by ~MASK.
4599 int_range<2> zero (integer_type_node, INT (0), INT (0));
4600 int_range<2> mask = int_range<2> (integer_type_node, INT (7), INT (7));
4601 op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
4602 wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
4603 ASSERT_TRUE (res.get_nonzero_bits () == inv);
4606 // (NONZERO | X) is nonzero.
4607 i1.set_nonzero (integer_type_node);
4608 i2.set_varying (integer_type_node);
4609 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4610 ASSERT_TRUE (res.nonzero_p ());
4612 // (NEGATIVE | X) is nonzero.
4613 i1 = int_range<1> (integer_type_node, INT (-5), INT (-3));
4614 i2.set_varying (integer_type_node);
4615 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4616 ASSERT_FALSE (res.contains_p (INT (0)));
4619 static void
4620 range_relational_tests ()
4622 int_range<2> lhs (unsigned_char_type_node);
4623 int_range<2> op1 (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4624 int_range<2> op2 (unsigned_char_type_node, UCHAR (20), UCHAR (20));
4626 // Never wrapping additions mean LHS > OP1.
4627 relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4628 ASSERT_TRUE (code == VREL_GT);
4630 // Most wrapping additions mean nothing...
4631 op1 = int_range<2> (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4632 op2 = int_range<2> (unsigned_char_type_node, UCHAR (0), UCHAR (255));
4633 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4634 ASSERT_TRUE (code == VREL_VARYING);
4636 // However, always wrapping additions mean LHS < OP1.
4637 op1 = int_range<2> (unsigned_char_type_node, UCHAR (1), UCHAR (255));
4638 op2 = int_range<2> (unsigned_char_type_node, UCHAR (255), UCHAR (255));
4639 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4640 ASSERT_TRUE (code == VREL_LT);
4643 void
4644 range_op_tests ()
4646 range_op_rshift_tests ();
4647 range_op_lshift_tests ();
4648 range_op_bitwise_and_tests ();
4649 range_op_cast_tests ();
4650 range_relational_tests ();
4652 extern void range_op_float_tests ();
4653 range_op_float_tests ();
4656 } // namespace selftest
4658 #endif // CHECKING_P