gcc/range-op.cc
1 /* Code for range operators.
2 Copyright (C) 2017-2024 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "insn-codes.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
36 #include "flags.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "calls.h"
40 #include "cfganal.h"
41 #include "gimple-iterator.h"
42 #include "gimple-fold.h"
43 #include "tree-eh.h"
44 #include "gimple-walk.h"
45 #include "tree-cfg.h"
46 #include "wide-int.h"
47 #include "value-relation.h"
48 #include "range-op.h"
49 #include "tree-ssa-ccp.h"
50 #include "range-op-mixed.h"
52 // Instantiate the operators which apply to multiple types here.
54 operator_equal op_equal;
55 operator_not_equal op_not_equal;
56 operator_lt op_lt;
57 operator_le op_le;
58 operator_gt op_gt;
59 operator_ge op_ge;
60 operator_identity op_ident;
61 operator_cst op_cst;
62 operator_cast op_cast;
63 operator_plus op_plus;
64 operator_abs op_abs;
65 operator_minus op_minus;
66 operator_negate op_negate;
67 operator_mult op_mult;
68 operator_addr_expr op_addr;
69 operator_bitwise_not op_bitwise_not;
70 operator_bitwise_xor op_bitwise_xor;
71 operator_bitwise_and op_bitwise_and;
72 operator_bitwise_or op_bitwise_or;
73 operator_min op_min;
74 operator_max op_max;
76 // Instantiate a range operator table.
77 range_op_table operator_table;
79 // Invoke the initialization routines for each class of range.
81 range_op_table::range_op_table ()
83 initialize_integral_ops ();
84 initialize_pointer_ops ();
85 initialize_float_ops ();
87 set (EQ_EXPR, op_equal);
88 set (NE_EXPR, op_not_equal);
89 set (LT_EXPR, op_lt);
90 set (LE_EXPR, op_le);
91 set (GT_EXPR, op_gt);
92 set (GE_EXPR, op_ge);
93 set (SSA_NAME, op_ident);
94 set (PAREN_EXPR, op_ident);
95 set (OBJ_TYPE_REF, op_ident);
96 set (REAL_CST, op_cst);
97 set (INTEGER_CST, op_cst);
98 set (NOP_EXPR, op_cast);
99 set (CONVERT_EXPR, op_cast);
100 set (PLUS_EXPR, op_plus);
101 set (ABS_EXPR, op_abs);
102 set (MINUS_EXPR, op_minus);
103 set (NEGATE_EXPR, op_negate);
104 set (MULT_EXPR, op_mult);
105 set (ADDR_EXPR, op_addr);
106 set (BIT_NOT_EXPR, op_bitwise_not);
107 set (BIT_XOR_EXPR, op_bitwise_xor);
108 set (BIT_AND_EXPR, op_bitwise_and);
109 set (BIT_IOR_EXPR, op_bitwise_or);
110 set (MIN_EXPR, op_min);
111 set (MAX_EXPR, op_max);
114 // Instantiate a default range operator for opcodes with no entry.
116 range_operator default_operator;
118 // Create a default range_op_handler.
120 range_op_handler::range_op_handler ()
122 m_operator = &default_operator;
125 // Create a range_op_handler for CODE.  Use a default operator if CODE
126 // does not have an entry.
128 range_op_handler::range_op_handler (unsigned code)
130 m_operator = operator_table[code];
131 if (!m_operator)
132 m_operator = &default_operator;
135 // Return TRUE if this handler has a non-default operator.
137 range_op_handler::operator bool () const
139 return m_operator != &default_operator;
142 // Return a pointer to the range operator associated with this handler.
143 // If it is a default operator, return NULL.
144 // This is the equivalent of indexing the range table.
146 range_operator *
147 range_op_handler::range_op () const
149 if (m_operator != &default_operator)
150 return m_operator;
151 return NULL;
154 // Create a dispatch pattern for value range discriminators LHS, OP1, and OP2.
155 // This is used to produce a unique value for each dispatch pattern. Shift
156 // values are based on the size of the m_discriminator field in value_range.h.
158 constexpr unsigned
159 dispatch_trio (unsigned lhs, unsigned op1, unsigned op2)
161 return ((lhs << 8) + (op1 << 4) + (op2));
164 // These are the supported dispatch patterns. These map to the parameter list
165 // of the routines in range_operator. Note the last 3 characters are
166 // shorthand for the LHS, OP1, and OP2 range discriminator class.
168 const unsigned RO_III = dispatch_trio (VR_IRANGE, VR_IRANGE, VR_IRANGE);
169 const unsigned RO_IFI = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_IRANGE);
170 const unsigned RO_IFF = dispatch_trio (VR_IRANGE, VR_FRANGE, VR_FRANGE);
171 const unsigned RO_FFF = dispatch_trio (VR_FRANGE, VR_FRANGE, VR_FRANGE);
172 const unsigned RO_FIF = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_FRANGE);
173 const unsigned RO_FII = dispatch_trio (VR_FRANGE, VR_IRANGE, VR_IRANGE);
174 const unsigned RO_PPP = dispatch_trio (VR_PRANGE, VR_PRANGE, VR_PRANGE);
175 const unsigned RO_PPI = dispatch_trio (VR_PRANGE, VR_PRANGE, VR_IRANGE);
176 const unsigned RO_IPP = dispatch_trio (VR_IRANGE, VR_PRANGE, VR_PRANGE);
177 const unsigned RO_IPI = dispatch_trio (VR_IRANGE, VR_PRANGE, VR_IRANGE);
178 const unsigned RO_PIP = dispatch_trio (VR_PRANGE, VR_IRANGE, VR_PRANGE);
179 const unsigned RO_PII = dispatch_trio (VR_PRANGE, VR_IRANGE, VR_IRANGE);
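// As a worked example of the encoding (using hypothetical discriminator
// values; the actual enumerators live alongside m_discriminator in
// value_range.h): if VR_IRANGE were 1 and VR_FRANGE were 2, then
// RO_IFI == dispatch_trio (1, 2, 1) == (1 << 8) + (2 << 4) + 1 == 0x121,
// and dispatch_kind on an (irange, frange, irange) triple yields the same
// value, selecting the RO_IFI case in the dispatch switches below.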
181 // Return a dispatch value for parameter types LHS, OP1 and OP2.
183 unsigned
184 range_op_handler::dispatch_kind (const vrange &lhs, const vrange &op1,
185 const vrange& op2) const
187 return dispatch_trio (lhs.m_discriminator, op1.m_discriminator,
188 op2.m_discriminator);
191 void
192 range_op_handler::discriminator_fail (const vrange &r1,
193 const vrange &r2,
194 const vrange &r3) const
196 const char name[] = "IPF";
197 gcc_checking_assert (r1.m_discriminator < sizeof (name) - 1);
198 gcc_checking_assert (r2.m_discriminator < sizeof (name) - 1);
199 gcc_checking_assert (r3.m_discriminator < sizeof (name) - 1);
200 fprintf (stderr,
201 "Unsupported operand combination in dispatch: RO_%c%c%c\n",
202 name[r1.m_discriminator],
203 name[r2.m_discriminator],
204 name[r3.m_discriminator]);
205 gcc_unreachable ();
208 static inline bool
209 has_pointer_operand_p (const vrange &r1, const vrange &r2, const vrange &r3)
211 return is_a <prange> (r1) || is_a <prange> (r2) || is_a <prange> (r3);
214 // Dispatch a call to fold_range based on the types of R, LH and RH.
216 bool
217 range_op_handler::fold_range (vrange &r, tree type,
218 const vrange &lh,
219 const vrange &rh,
220 relation_trio rel) const
222 gcc_checking_assert (m_operator);
223 #if CHECKING_P
224 if (!lh.undefined_p () && !rh.undefined_p ())
225 gcc_assert (m_operator->operand_check_p (type, lh.type (), rh.type ()));
226 #endif
227 switch (dispatch_kind (r, lh, rh))
229 case RO_III:
230 return m_operator->fold_range (as_a <irange> (r), type,
231 as_a <irange> (lh),
232 as_a <irange> (rh), rel);
233 case RO_IFI:
234 return m_operator->fold_range (as_a <irange> (r), type,
235 as_a <frange> (lh),
236 as_a <irange> (rh), rel);
237 case RO_IFF:
238 return m_operator->fold_range (as_a <irange> (r), type,
239 as_a <frange> (lh),
240 as_a <frange> (rh), rel);
241 case RO_FFF:
242 return m_operator->fold_range (as_a <frange> (r), type,
243 as_a <frange> (lh),
244 as_a <frange> (rh), rel);
245 case RO_FII:
246 return m_operator->fold_range (as_a <frange> (r), type,
247 as_a <irange> (lh),
248 as_a <irange> (rh), rel);
249 case RO_PPP:
250 return m_operator->fold_range (as_a <prange> (r), type,
251 as_a <prange> (lh),
252 as_a <prange> (rh), rel);
253 case RO_PPI:
254 return m_operator->fold_range (as_a <prange> (r), type,
255 as_a <prange> (lh),
256 as_a <irange> (rh), rel);
257 case RO_IPP:
258 return m_operator->fold_range (as_a <irange> (r), type,
259 as_a <prange> (lh),
260 as_a <prange> (rh), rel);
261 case RO_PIP:
262 return m_operator->fold_range (as_a <prange> (r), type,
263 as_a <irange> (lh),
264 as_a <prange> (rh), rel);
265 case RO_IPI:
266 return m_operator->fold_range (as_a <irange> (r), type,
267 as_a <prange> (lh),
268 as_a <irange> (rh), rel);
269 default:
270 return false;
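// An illustrative sketch of the dispatch in action (assuming a 32-bit
// integer_type_node):
//
//   unsigned prec = TYPE_PRECISION (integer_type_node);
//   int_range<1> op1 (integer_type_node, wi::shwi (1, prec), wi::shwi (5, prec));
//   int_range<1> op2 (integer_type_node, wi::shwi (10, prec), wi::shwi (20, prec));
//   int_range_max res;
//   range_op_handler (PLUS_EXPR).fold_range (res, integer_type_node, op1, op2);
//
// All three operands are iranges, so the RO_III case above forwards to
// operator_plus::fold_range and RES becomes [11, 25].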
274 // Dispatch a call to op1_range based on the types of R, LHS and OP2.
276 bool
277 range_op_handler::op1_range (vrange &r, tree type,
278 const vrange &lhs,
279 const vrange &op2,
280 relation_trio rel) const
282 gcc_checking_assert (m_operator);
283 if (lhs.undefined_p ())
284 return false;
285 #if CHECKING_P
286 if (!op2.undefined_p ())
287 gcc_assert (m_operator->operand_check_p (lhs.type (), type, op2.type ()));
288 #endif
289 switch (dispatch_kind (r, lhs, op2))
291 case RO_III:
292 return m_operator->op1_range (as_a <irange> (r), type,
293 as_a <irange> (lhs),
294 as_a <irange> (op2), rel);
295 case RO_PPP:
296 return m_operator->op1_range (as_a <prange> (r), type,
297 as_a <prange> (lhs),
298 as_a <prange> (op2), rel);
299 case RO_PIP:
300 return m_operator->op1_range (as_a <prange> (r), type,
301 as_a <irange> (lhs),
302 as_a <prange> (op2), rel);
303 case RO_PPI:
304 return m_operator->op1_range (as_a <prange> (r), type,
305 as_a <prange> (lhs),
306 as_a <irange> (op2), rel);
307 case RO_IPI:
308 return m_operator->op1_range (as_a <irange> (r), type,
309 as_a <prange> (lhs),
310 as_a <irange> (op2), rel);
311 case RO_FIF:
312 return m_operator->op1_range (as_a <frange> (r), type,
313 as_a <irange> (lhs),
314 as_a <frange> (op2), rel);
315 case RO_FFF:
316 return m_operator->op1_range (as_a <frange> (r), type,
317 as_a <frange> (lhs),
318 as_a <frange> (op2), rel);
319 default:
320 return false;
324 // Dispatch a call to op2_range based on the types of R, LHS and OP1.
326 bool
327 range_op_handler::op2_range (vrange &r, tree type,
328 const vrange &lhs,
329 const vrange &op1,
330 relation_trio rel) const
332 gcc_checking_assert (m_operator);
333 if (lhs.undefined_p ())
334 return false;
335 #if CHECKING_P
336 if (!op1.undefined_p ())
337 gcc_assert (m_operator->operand_check_p (lhs.type (), op1.type (), type));
338 #endif
339 switch (dispatch_kind (r, lhs, op1))
341 case RO_III:
342 return m_operator->op2_range (as_a <irange> (r), type,
343 as_a <irange> (lhs),
344 as_a <irange> (op1), rel);
345 case RO_PIP:
346 return m_operator->op2_range (as_a <prange> (r), type,
347 as_a <irange> (lhs),
348 as_a <prange> (op1), rel);
349 case RO_IPP:
350 return m_operator->op2_range (as_a <irange> (r), type,
351 as_a <prange> (lhs),
352 as_a <prange> (op1), rel);
353 case RO_FIF:
354 return m_operator->op2_range (as_a <frange> (r), type,
355 as_a <irange> (lhs),
356 as_a <frange> (op1), rel);
357 case RO_FFF:
358 return m_operator->op2_range (as_a <frange> (r), type,
359 as_a <frange> (lhs),
360 as_a <frange> (op1), rel);
361 default:
362 return false;
366 // Dispatch a call to lhs_op1_relation based on the types of LHS, OP1 and OP2.
368 relation_kind
369 range_op_handler::lhs_op1_relation (const vrange &lhs,
370 const vrange &op1,
371 const vrange &op2,
372 relation_kind rel) const
374 gcc_checking_assert (m_operator);
375 switch (dispatch_kind (lhs, op1, op2))
377 case RO_III:
378 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
379 as_a <irange> (op1),
380 as_a <irange> (op2), rel);
381 case RO_PPP:
382 return m_operator->lhs_op1_relation (as_a <prange> (lhs),
383 as_a <prange> (op1),
384 as_a <prange> (op2), rel);
385 case RO_IPP:
386 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
387 as_a <prange> (op1),
388 as_a <prange> (op2), rel);
389 case RO_PII:
390 return m_operator->lhs_op1_relation (as_a <prange> (lhs),
391 as_a <irange> (op1),
392 as_a <irange> (op2), rel);
393 case RO_IFF:
394 return m_operator->lhs_op1_relation (as_a <irange> (lhs),
395 as_a <frange> (op1),
396 as_a <frange> (op2), rel);
397 case RO_FFF:
398 return m_operator->lhs_op1_relation (as_a <frange> (lhs),
399 as_a <frange> (op1),
400 as_a <frange> (op2), rel);
401 default:
402 return VREL_VARYING;
406 // Dispatch a call to lhs_op2_relation based on the types of LHS, OP1 and OP2.
408 relation_kind
409 range_op_handler::lhs_op2_relation (const vrange &lhs,
410 const vrange &op1,
411 const vrange &op2,
412 relation_kind rel) const
414 gcc_checking_assert (m_operator);
415 switch (dispatch_kind (lhs, op1, op2))
417 case RO_III:
418 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
419 as_a <irange> (op1),
420 as_a <irange> (op2), rel);
421 case RO_IFF:
422 return m_operator->lhs_op2_relation (as_a <irange> (lhs),
423 as_a <frange> (op1),
424 as_a <frange> (op2), rel);
425 case RO_FFF:
426 return m_operator->lhs_op2_relation (as_a <frange> (lhs),
427 as_a <frange> (op1),
428 as_a <frange> (op2), rel);
429 default:
430 return VREL_VARYING;
434 // Dispatch a call to op1_op2_relation based on the type of LHS.
436 relation_kind
437 range_op_handler::op1_op2_relation (const vrange &lhs,
438 const vrange &op1,
439 const vrange &op2) const
441 gcc_checking_assert (m_operator);
443 switch (dispatch_kind (lhs, op1, op2))
445 case RO_III:
446 return m_operator->op1_op2_relation (as_a <irange> (lhs),
447 as_a <irange> (op1),
448 as_a <irange> (op2));
450 case RO_IPP:
451 return m_operator->op1_op2_relation (as_a <irange> (lhs),
452 as_a <prange> (op1),
453 as_a <prange> (op2));
455 case RO_IFF:
456 return m_operator->op1_op2_relation (as_a <irange> (lhs),
457 as_a <frange> (op1),
458 as_a <frange> (op2));
460 case RO_FFF:
461 return m_operator->op1_op2_relation (as_a <frange> (lhs),
462 as_a <frange> (op1),
463 as_a <frange> (op2));
465 default:
466 return VREL_VARYING;
470 bool
471 range_op_handler::overflow_free_p (const vrange &lh,
472 const vrange &rh,
473 relation_trio rel) const
475 gcc_checking_assert (m_operator);
476 switch (dispatch_kind (lh, lh, rh))
478 case RO_III:
479 return m_operator->overflow_free_p(as_a <irange> (lh),
480 as_a <irange> (rh),
481 rel);
482 default:
483 return false;
487 bool
488 range_op_handler::operand_check_p (tree t1, tree t2, tree t3) const
490 gcc_checking_assert (m_operator);
491 return m_operator->operand_check_p (t1, t2, t3);
494 // Update the known bitmasks in R when applying the operation CODE to
495 // LH and RH.
497 void
498 update_known_bitmask (vrange &r, tree_code code,
499 const vrange &lh, const vrange &rh)
501 if (r.undefined_p () || lh.undefined_p () || rh.undefined_p ()
502 || r.singleton_p ())
503 return;
505 widest_int widest_value, widest_mask;
506 tree type = r.type ();
507 signop sign = TYPE_SIGN (type);
508 int prec = TYPE_PRECISION (type);
509 irange_bitmask lh_bits = lh.get_bitmask ();
510 irange_bitmask rh_bits = rh.get_bitmask ();
512 switch (get_gimple_rhs_class (code))
514 case GIMPLE_UNARY_RHS:
515 bit_value_unop (code, sign, prec, &widest_value, &widest_mask,
516 TYPE_SIGN (lh.type ()),
517 TYPE_PRECISION (lh.type ()),
518 widest_int::from (lh_bits.value (),
519 TYPE_SIGN (lh.type ())),
520 widest_int::from (lh_bits.mask (),
521 TYPE_SIGN (lh.type ())));
522 break;
523 case GIMPLE_BINARY_RHS:
524 bit_value_binop (code, sign, prec, &widest_value, &widest_mask,
525 TYPE_SIGN (lh.type ()),
526 TYPE_PRECISION (lh.type ()),
527 widest_int::from (lh_bits.value (), sign),
528 widest_int::from (lh_bits.mask (), sign),
529 TYPE_SIGN (rh.type ()),
530 TYPE_PRECISION (rh.type ()),
531 widest_int::from (rh_bits.value (), sign),
532 widest_int::from (rh_bits.mask (), sign));
533 break;
534 default:
535 gcc_unreachable ();
538 wide_int mask = wide_int::from (widest_mask, prec, sign);
539 wide_int value = wide_int::from (widest_value, prec, sign);
540 // Bitmasks must have the unknown value bits cleared.
541 value &= ~mask;
542 irange_bitmask bm (value, mask);
543 r.update_bitmask (bm);
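// For instance, if LH is known to have its low two bits clear (a multiple of
// 4) and CODE is PLUS_EXPR with RH == [4, 4], bit_value_binop can deduce that
// the low two bits of the result are still zero, and that fact is recorded in
// R's bitmask even when the value range itself stays wide.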
546 // Return the upper limit for a type.
548 static inline wide_int
549 max_limit (const_tree type)
551 return irange_val_max (type);
554 // Return the lower limit for a type.
556 static inline wide_int
557 min_limit (const_tree type)
559 return irange_val_min (type);
562 // Return false if shifting by OP is undefined behavior. Otherwise, return
563 // true and the range it is to be shifted by. This allows trimming out of
564 // undefined ranges, leaving only valid ranges if there are any.
566 static inline bool
567 get_shift_range (irange &r, tree type, const irange &op)
569 if (op.undefined_p ())
570 return false;
572 // Build valid range and intersect it with the shift range.
573 r.set (op.type (),
574 wi::shwi (0, TYPE_PRECISION (op.type ())),
575 wi::shwi (TYPE_PRECISION (type) - 1, TYPE_PRECISION (op.type ())));
576 r.intersect (op);
578   // If there are no valid ranges in the shift range, return false.
579 if (r.undefined_p ())
580 return false;
581 return true;
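// For example, when shifting a 32-bit value the valid shift range is [0, 31],
// so a shift-count range of [28, 40] is trimmed to [28, 31], while [40, 50]
// contains no valid values, R ends up undefined, and false is returned.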
584 // Default wide_int fold operation returns [MIN, MAX].
586 void
587 range_operator::wi_fold (irange &r, tree type,
588 const wide_int &lh_lb ATTRIBUTE_UNUSED,
589 const wide_int &lh_ub ATTRIBUTE_UNUSED,
590 const wide_int &rh_lb ATTRIBUTE_UNUSED,
591 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
593 gcc_checking_assert (r.supports_type_p (type));
594 r.set_varying (type);
597 // Call wi_fold when both op1 and op2 are equivalent. Further split small
598 // subranges into constants. This can provide better precision.
599 // For x + y, when x == y with a range of [0,4], produce
600 // [0,0][2,2][4,4][6,6][8,8] instead of [0,8].
601 // LIMIT is the maximum number of elements in range allowed before we
602 // do not process them individually.
604 void
605 range_operator::wi_fold_in_parts_equiv (irange &r, tree type,
606 const wide_int &lh_lb,
607 const wide_int &lh_ub,
608 unsigned limit) const
610 int_range_max tmp;
611 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
612 widest_int::from (lh_lb, TYPE_SIGN (type)));
613   // If there are no more than LIMIT values in the LH range, split them up.
614 r.set_undefined ();
615 if (lh_range >= 0 && lh_range < limit)
617 for (unsigned x = 0; x <= lh_range; x++)
619 wide_int val = lh_lb + x;
620 wi_fold (tmp, type, val, val, val, val);
621 r.union_ (tmp);
624 // Otherwise just call wi_fold.
625 else
626 wi_fold (r, type, lh_lb, lh_ub, lh_lb, lh_ub);
629 // Call wi_fold, except further split small subranges into constants.
630 // This can provide better precision.  For example, 8 << [0,1]
631 // produces [8,8][16,16] instead of [8, 16].
633 void
634 range_operator::wi_fold_in_parts (irange &r, tree type,
635 const wide_int &lh_lb,
636 const wide_int &lh_ub,
637 const wide_int &rh_lb,
638 const wide_int &rh_ub) const
640 int_range_max tmp;
641 widest_int rh_range = wi::sub (widest_int::from (rh_ub, TYPE_SIGN (type)),
642 widest_int::from (rh_lb, TYPE_SIGN (type)));
643 widest_int lh_range = wi::sub (widest_int::from (lh_ub, TYPE_SIGN (type)),
644 widest_int::from (lh_lb, TYPE_SIGN (type)));
645 // If there are 2, 3, or 4 values in the RH range, do them separately.
646 // Call wi_fold_in_parts to check the RH side.
647 if (rh_range > 0 && rh_range < 4)
649 wi_fold_in_parts (r, type, lh_lb, lh_ub, rh_lb, rh_lb);
650 if (rh_range > 1)
652 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 1, rh_lb + 1);
653 r.union_ (tmp);
654 if (rh_range == 3)
656 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb + 2, rh_lb + 2);
657 r.union_ (tmp);
660 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_ub, rh_ub);
661 r.union_ (tmp);
663 // Otherwise check for 2, 3, or 4 values in the LH range and split them up.
664 // The RH side has been checked, so no recursion needed.
665 else if (lh_range > 0 && lh_range < 4)
667 wi_fold (r, type, lh_lb, lh_lb, rh_lb, rh_ub);
668 if (lh_range > 1)
670 wi_fold (tmp, type, lh_lb + 1, lh_lb + 1, rh_lb, rh_ub);
671 r.union_ (tmp);
672 if (lh_range == 3)
674 wi_fold (tmp, type, lh_lb + 2, lh_lb + 2, rh_lb, rh_ub);
675 r.union_ (tmp);
678 wi_fold (tmp, type, lh_ub, lh_ub, rh_lb, rh_ub);
679 r.union_ (tmp);
681 // Otherwise just call wi_fold.
682 else
683 wi_fold (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
686 // The default for fold is to break all ranges into sub-ranges and
687 // invoke the wi_fold method on each sub-range pair.
689 bool
690 range_operator::fold_range (irange &r, tree type,
691 const irange &lh,
692 const irange &rh,
693 relation_trio trio) const
695 gcc_checking_assert (r.supports_type_p (type));
696 if (empty_range_varying (r, type, lh, rh))
697 return true;
699 relation_kind rel = trio.op1_op2 ();
700 unsigned num_lh = lh.num_pairs ();
701 unsigned num_rh = rh.num_pairs ();
703 // If op1 and op2 are equivalences, then we don't need a complete cross
704 // product, just pairs of matching elements.
705 if (relation_equiv_p (rel) && lh == rh)
707 int_range_max tmp;
708 r.set_undefined ();
709 for (unsigned x = 0; x < num_lh; ++x)
711 // If the number of subranges is too high, limit subrange creation.
712 unsigned limit = (r.num_pairs () > 32) ? 0 : 8;
713 wide_int lh_lb = lh.lower_bound (x);
714 wide_int lh_ub = lh.upper_bound (x);
715 wi_fold_in_parts_equiv (tmp, type, lh_lb, lh_ub, limit);
716 r.union_ (tmp);
717 if (r.varying_p ())
718 break;
720 op1_op2_relation_effect (r, type, lh, rh, rel);
721 update_bitmask (r, lh, rh);
722 return true;
725 // If both ranges are single pairs, fold directly into the result range.
726 // If the number of subranges grows too high, produce a summary result as the
727 // loop becomes exponential with little benefit. See PR 103821.
728 if ((num_lh == 1 && num_rh == 1) || num_lh * num_rh > 12)
730 wi_fold_in_parts (r, type, lh.lower_bound (), lh.upper_bound (),
731 rh.lower_bound (), rh.upper_bound ());
732 op1_op2_relation_effect (r, type, lh, rh, rel);
733 update_bitmask (r, lh, rh);
734 return true;
737 int_range_max tmp;
738 r.set_undefined ();
739 for (unsigned x = 0; x < num_lh; ++x)
740 for (unsigned y = 0; y < num_rh; ++y)
742 wide_int lh_lb = lh.lower_bound (x);
743 wide_int lh_ub = lh.upper_bound (x);
744 wide_int rh_lb = rh.lower_bound (y);
745 wide_int rh_ub = rh.upper_bound (y);
746 wi_fold_in_parts (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
747 r.union_ (tmp);
748 if (r.varying_p ())
750 op1_op2_relation_effect (r, type, lh, rh, rel);
751 update_bitmask (r, lh, rh);
752 return true;
755 op1_op2_relation_effect (r, type, lh, rh, rel);
756 update_bitmask (r, lh, rh);
757 return true;
760 // The default for op1_range is to return false.
762 bool
763 range_operator::op1_range (irange &r ATTRIBUTE_UNUSED,
764 tree type ATTRIBUTE_UNUSED,
765 const irange &lhs ATTRIBUTE_UNUSED,
766 const irange &op2 ATTRIBUTE_UNUSED,
767 relation_trio) const
769 return false;
772 // The default for op2_range is to return false.
774 bool
775 range_operator::op2_range (irange &r ATTRIBUTE_UNUSED,
776 tree type ATTRIBUTE_UNUSED,
777 const irange &lhs ATTRIBUTE_UNUSED,
778 const irange &op1 ATTRIBUTE_UNUSED,
779 relation_trio) const
781 return false;
784 // The default relation routines return VREL_VARYING.
786 relation_kind
787 range_operator::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
788 const irange &op1 ATTRIBUTE_UNUSED,
789 const irange &op2 ATTRIBUTE_UNUSED,
790 relation_kind rel ATTRIBUTE_UNUSED) const
792 return VREL_VARYING;
795 relation_kind
796 range_operator::lhs_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
797 const irange &op1 ATTRIBUTE_UNUSED,
798 const irange &op2 ATTRIBUTE_UNUSED,
799 relation_kind rel ATTRIBUTE_UNUSED) const
801 return VREL_VARYING;
804 relation_kind
805 range_operator::op1_op2_relation (const irange &lhs ATTRIBUTE_UNUSED,
806 const irange &op1 ATTRIBUTE_UNUSED,
807 const irange &op2 ATTRIBUTE_UNUSED) const
809 return VREL_VARYING;
812 // By default, no relation affects the LHS.
814 bool
815 range_operator::op1_op2_relation_effect (irange &lhs_range ATTRIBUTE_UNUSED,
816 tree type ATTRIBUTE_UNUSED,
817 const irange &op1_range ATTRIBUTE_UNUSED,
818 const irange &op2_range ATTRIBUTE_UNUSED,
819 relation_kind rel ATTRIBUTE_UNUSED) const
821 return false;
824 bool
825 range_operator::overflow_free_p (const irange &, const irange &,
826 relation_trio) const
828 return false;
831 // Apply any known bitmask updates based on this operator.
833 void
834 range_operator::update_bitmask (irange &, const irange &,
835 const irange &) const
839 // Check that operand types are OK. Default to always OK.
841 bool
842 range_operator::operand_check_p (tree, tree, tree) const
844 return true;
847 // Create and return a range from a pair of wide-ints that are known
848 // to have overflowed (or underflowed).
850 static void
851 value_range_from_overflowed_bounds (irange &r, tree type,
852 const wide_int &wmin,
853 const wide_int &wmax)
855 const signop sgn = TYPE_SIGN (type);
856 const unsigned int prec = TYPE_PRECISION (type);
858 wide_int tmin = wide_int::from (wmin, prec, sgn);
859 wide_int tmax = wide_int::from (wmax, prec, sgn);
861 bool covers = false;
862 wide_int tem = tmin;
863 tmin = tmax + 1;
864 if (wi::cmp (tmin, tmax, sgn) < 0)
865 covers = true;
866 tmax = tem - 1;
867 if (wi::cmp (tmax, tem, sgn) > 0)
868 covers = true;
870 // If the anti-range would cover nothing, drop to varying.
871 // Likewise if the anti-range bounds are outside of the type's
872 // values.
873 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
874 r.set_varying (type);
875 else
876 r.set (type, tmin, tmax, VR_ANTI_RANGE);
879 // Create and return a range from a pair of wide-ints. MIN_OVF and
880 // MAX_OVF describe any overflow that might have occurred while
881 // calculating WMIN and WMAX respectively.
883 static void
884 value_range_with_overflow (irange &r, tree type,
885 const wide_int &wmin, const wide_int &wmax,
886 wi::overflow_type min_ovf = wi::OVF_NONE,
887 wi::overflow_type max_ovf = wi::OVF_NONE)
889 const signop sgn = TYPE_SIGN (type);
890 const unsigned int prec = TYPE_PRECISION (type);
891 const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
893 // For one bit precision if max != min, then the range covers all
894 // values.
895 if (prec == 1 && wi::ne_p (wmax, wmin))
897 r.set_varying (type);
898 return;
901 if (overflow_wraps)
903 // If overflow wraps, truncate the values and adjust the range,
904 // kind, and bounds appropriately.
905 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
907 wide_int tmin = wide_int::from (wmin, prec, sgn);
908 wide_int tmax = wide_int::from (wmax, prec, sgn);
909 // If the limits are swapped, we wrapped around and cover
910 // the entire range.
911 if (wi::gt_p (tmin, tmax, sgn))
912 r.set_varying (type);
913 else
914 // No overflow or both overflow or underflow. The range
915 // kind stays normal.
916 r.set (type, tmin, tmax);
917 return;
920 if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
921 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
922 value_range_from_overflowed_bounds (r, type, wmin, wmax);
923 else
924 // Other underflow and/or overflow, drop to VR_VARYING.
925 r.set_varying (type);
927 else
929 // If both bounds either underflowed or overflowed, then the result
930 // is undefined.
931 if ((min_ovf == wi::OVF_OVERFLOW && max_ovf == wi::OVF_OVERFLOW)
932 || (min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_UNDERFLOW))
934 r.set_undefined ();
935 return;
938 // If overflow does not wrap, saturate to [MIN, MAX].
939 wide_int new_lb, new_ub;
940 if (min_ovf == wi::OVF_UNDERFLOW)
941 new_lb = wi::min_value (prec, sgn);
942 else if (min_ovf == wi::OVF_OVERFLOW)
943 new_lb = wi::max_value (prec, sgn);
944 else
945 new_lb = wmin;
947 if (max_ovf == wi::OVF_UNDERFLOW)
948 new_ub = wi::min_value (prec, sgn);
949 else if (max_ovf == wi::OVF_OVERFLOW)
950 new_ub = wi::max_value (prec, sgn);
951 else
952 new_ub = wmax;
954 r.set (type, new_lb, new_ub);
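// For example, with a wrapping 8-bit unsigned type, WMIN == 250 and
// WMAX == 260 (MAX_OVF == OVF_OVERFLOW) produce the anti-range ~[5, 249],
// i.e. the wrapped values [250, 255] and [0, 4].  For a signed char whose
// overflow does not wrap, bounds 120 and 130 with the same overflow instead
// saturate to [120, 127].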
958 // Create and return a range from a pair of wide-ints. Canonicalize
959 // the case where the bounds are swapped, in which case we transform
960 // [10,5] into [MIN,5][10,MAX].
962 static inline void
963 create_possibly_reversed_range (irange &r, tree type,
964 const wide_int &new_lb, const wide_int &new_ub)
966 signop s = TYPE_SIGN (type);
967 // If the bounds are swapped, treat the result as if an overflow occurred.
968 if (wi::gt_p (new_lb, new_ub, s))
969 value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
970 else
971 // Otherwise it's just a normal range.
972 r.set (type, new_lb, new_ub);
975 // Return the summary information about boolean range LHS. If EMPTY/FULL,
976 // return the equivalent range for TYPE in R; if FALSE/TRUE, do nothing.
978 bool_range_state
979 get_bool_state (vrange &r, const vrange &lhs, tree val_type)
981 // If there is no result, then this is unexecutable.
982 if (lhs.undefined_p ())
984 r.set_undefined ();
985 return BRS_EMPTY;
988 if (lhs.zero_p ())
989 return BRS_FALSE;
991 // For TRUE, we can't just test for [1,1] because Ada can have
992 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
993 if (lhs.contains_p (build_zero_cst (lhs.type ())))
995 r.set_varying (val_type);
996 return BRS_FULL;
999 return BRS_TRUE;
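// For example, an LHS of [0, 0] yields BRS_FALSE, a range not containing
// zero such as [1, 1] yields BRS_TRUE, and a boolean LHS of [0, 1] contains
// zero without being zero_p, so R is set to varying for VAL_TYPE and
// BRS_FULL is returned.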
1002 // ------------------------------------------------------------------------
1004 void
1005 operator_equal::update_bitmask (irange &r, const irange &lh,
1006 const irange &rh) const
1008 update_known_bitmask (r, EQ_EXPR, lh, rh);
1011 // Check if the LHS range indicates a relation between OP1 and OP2.
1013 relation_kind
1014 operator_equal::op1_op2_relation (const irange &lhs, const irange &,
1015 const irange &) const
1017 if (lhs.undefined_p ())
1018 return VREL_UNDEFINED;
1020 // FALSE = op1 == op2 indicates NE_EXPR.
1021 if (lhs.zero_p ())
1022 return VREL_NE;
1024 // TRUE = op1 == op2 indicates EQ_EXPR.
1025 if (!contains_zero_p (lhs))
1026 return VREL_EQ;
1027 return VREL_VARYING;
1030 bool
1031 operator_equal::fold_range (irange &r, tree type,
1032 const irange &op1,
1033 const irange &op2,
1034 relation_trio rel) const
1036 if (relop_early_resolve (r, type, op1, op2, rel, VREL_EQ))
1037 return true;
1039   // If both ranges consist of a single value, we can compare those values
1040   // and know for certain whether they are equal.
1041 bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
1042 bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
1043 if (op1_const && op2_const)
1045 if (wi::eq_p (op1.lower_bound (), op2.upper_bound()))
1046 r = range_true (type);
1047 else
1048 r = range_false (type);
1050 else
1052 // If ranges do not intersect, we know the range is not equal,
1053 // otherwise we don't know anything for sure.
1054 int_range_max tmp = op1;
1055 tmp.intersect (op2);
1056 if (tmp.undefined_p ())
1057 r = range_false (type);
1058 // Check if a constant cannot satisfy the bitmask requirements.
1059 else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
1060 r = range_false (type);
1061 else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
1062 r = range_false (type);
1063 else
1064 r = range_true_and_false (type);
1066 return true;
1069 bool
1070 operator_equal::op1_range (irange &r, tree type,
1071 const irange &lhs,
1072 const irange &op2,
1073 relation_trio) const
1075 switch (get_bool_state (r, lhs, type))
1077 case BRS_TRUE:
1078 // If it's true, the result is the same as OP2.
1079 r = op2;
1080 break;
1082 case BRS_FALSE:
1083 // If the result is false, the only time we know anything is
1084 // if OP2 is a constant.
1085 if (!op2.undefined_p ()
1086 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1088 r = op2;
1089 r.invert ();
1091 else
1092 r.set_varying (type);
1093 break;
1095 default:
1096 break;
1098 return true;
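// For instance, if (x == y) is known to be true and y has the range [3, 7],
// x can be narrowed to [3, 7]; if the comparison is known to be false and y
// is the singleton [5, 5], x becomes ~[5, 5].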
1101 bool
1102 operator_equal::op2_range (irange &r, tree type,
1103 const irange &lhs,
1104 const irange &op1,
1105 relation_trio rel) const
1107 return operator_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1110 // -------------------------------------------------------------------------
1112 void
1113 operator_not_equal::update_bitmask (irange &r, const irange &lh,
1114 const irange &rh) const
1116 update_known_bitmask (r, NE_EXPR, lh, rh);
1119 // Check if the LHS range indicates a relation between OP1 and OP2.
1121 relation_kind
1122 operator_not_equal::op1_op2_relation (const irange &lhs, const irange &,
1123 const irange &) const
1125 if (lhs.undefined_p ())
1126 return VREL_UNDEFINED;
1128 // FALSE = op1 != op2 indicates EQ_EXPR.
1129 if (lhs.zero_p ())
1130 return VREL_EQ;
1132 // TRUE = op1 != op2 indicates NE_EXPR.
1133 if (!contains_zero_p (lhs))
1134 return VREL_NE;
1135 return VREL_VARYING;
1138 bool
1139 operator_not_equal::fold_range (irange &r, tree type,
1140 const irange &op1,
1141 const irange &op2,
1142 relation_trio rel) const
1144 if (relop_early_resolve (r, type, op1, op2, rel, VREL_NE))
1145 return true;
1147   // If both ranges consist of a single value, we can compare those values
1148   // and know for certain whether they are equal.
1149 bool op1_const = wi::eq_p (op1.lower_bound (), op1.upper_bound ());
1150 bool op2_const = wi::eq_p (op2.lower_bound (), op2.upper_bound ());
1151 if (op1_const && op2_const)
1153 if (wi::ne_p (op1.lower_bound (), op2.upper_bound()))
1154 r = range_true (type);
1155 else
1156 r = range_false (type);
1158 else
1160 // If ranges do not intersect, we know the range is not equal,
1161 // otherwise we don't know anything for sure.
1162 int_range_max tmp = op1;
1163 tmp.intersect (op2);
1164 if (tmp.undefined_p ())
1165 r = range_true (type);
1166 // Check if a constant cannot satisfy the bitmask requirements.
1167 else if (op2_const && !op1.get_bitmask ().member_p (op2.lower_bound ()))
1168 r = range_true (type);
1169 else if (op1_const && !op2.get_bitmask ().member_p (op1.lower_bound ()))
1170 r = range_true (type);
1171 else
1172 r = range_true_and_false (type);
1174 return true;
1177 bool
1178 operator_not_equal::op1_range (irange &r, tree type,
1179 const irange &lhs,
1180 const irange &op2,
1181 relation_trio) const
1183 switch (get_bool_state (r, lhs, type))
1185 case BRS_TRUE:
1186 // If the result is true, the only time we know anything is if
1187 // OP2 is a constant.
1188 if (!op2.undefined_p ()
1189 && wi::eq_p (op2.lower_bound(), op2.upper_bound()))
1191 r = op2;
1192 r.invert ();
1194 else
1195 r.set_varying (type);
1196 break;
1198 case BRS_FALSE:
1199 // If it's false, the result is the same as OP2.
1200 r = op2;
1201 break;
1203 default:
1204 break;
1206 return true;
1210 bool
1211 operator_not_equal::op2_range (irange &r, tree type,
1212 const irange &lhs,
1213 const irange &op1,
1214 relation_trio rel) const
1216 return operator_not_equal::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1219 // (X < VAL) produces the range of [MIN, VAL - 1].
1221 static void
1222 build_lt (irange &r, tree type, const wide_int &val)
1224 wi::overflow_type ov;
1225 wide_int lim;
1226 signop sgn = TYPE_SIGN (type);
1228 // Signed 1 bit cannot represent 1 for subtraction.
1229 if (sgn == SIGNED)
1230 lim = wi::add (val, -1, sgn, &ov);
1231 else
1232 lim = wi::sub (val, 1, sgn, &ov);
1234 // If val - 1 underflows, check if X < MIN, which is an empty range.
1235 if (ov)
1236 r.set_undefined ();
1237 else
1238 r = int_range<1> (type, min_limit (type), lim);
1241 // (X <= VAL) produces the range of [MIN, VAL].
1243 static void
1244 build_le (irange &r, tree type, const wide_int &val)
1246 r = int_range<1> (type, min_limit (type), val);
1249 // (X > VAL) produces the range of [VAL + 1, MAX].
1251 static void
1252 build_gt (irange &r, tree type, const wide_int &val)
1254 wi::overflow_type ov;
1255 wide_int lim;
1256 signop sgn = TYPE_SIGN (type);
1258 // Signed 1 bit cannot represent 1 for addition.
1259 if (sgn == SIGNED)
1260 lim = wi::sub (val, -1, sgn, &ov);
1261 else
1262 lim = wi::add (val, 1, sgn, &ov);
1263   // If val + 1 overflows, check if X > MAX, which is an empty range.
1264 if (ov)
1265 r.set_undefined ();
1266 else
1267 r = int_range<1> (type, lim, max_limit (type));
1270 // (X >= val) produces the range of [VAL, MAX].
1272 static void
1273 build_ge (irange &r, tree type, const wide_int &val)
1275 r = int_range<1> (type, val, max_limit (type));
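// For an unsigned char X, build_lt with VAL == 10 produces [0, 9], build_le
// produces [0, 10], build_gt produces [11, 255] and build_ge produces
// [10, 255].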
1279 void
1280 operator_lt::update_bitmask (irange &r, const irange &lh,
1281 const irange &rh) const
1283 update_known_bitmask (r, LT_EXPR, lh, rh);
1286 // Check if the LHS range indicates a relation between OP1 and OP2.
1288 relation_kind
1289 operator_lt::op1_op2_relation (const irange &lhs, const irange &,
1290 const irange &) const
1292 if (lhs.undefined_p ())
1293 return VREL_UNDEFINED;
1295 // FALSE = op1 < op2 indicates GE_EXPR.
1296 if (lhs.zero_p ())
1297 return VREL_GE;
1299 // TRUE = op1 < op2 indicates LT_EXPR.
1300 if (!contains_zero_p (lhs))
1301 return VREL_LT;
1302 return VREL_VARYING;
1305 bool
1306 operator_lt::fold_range (irange &r, tree type,
1307 const irange &op1,
1308 const irange &op2,
1309 relation_trio rel) const
1311 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LT))
1312 return true;
1314 signop sign = TYPE_SIGN (op1.type ());
1315 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1317 if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
1318 r = range_true (type);
1319 else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
1320 r = range_false (type);
1321 // Use nonzero bits to determine if < 0 is false.
1322 else if (op2.zero_p () && !wi::neg_p (op1.get_nonzero_bits (), sign))
1323 r = range_false (type);
1324 else
1325 r = range_true_and_false (type);
1326 return true;
1329 bool
1330 operator_lt::op1_range (irange &r, tree type,
1331 const irange &lhs,
1332 const irange &op2,
1333 relation_trio) const
1335 if (op2.undefined_p ())
1336 return false;
1338 switch (get_bool_state (r, lhs, type))
1340 case BRS_TRUE:
1341 build_lt (r, type, op2.upper_bound ());
1342 break;
1344 case BRS_FALSE:
1345 build_ge (r, type, op2.lower_bound ());
1346 break;
1348 default:
1349 break;
1351 return true;
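// As an illustration, for unsigned operands with (x < y) known to be true
// and y in [0, 100], x is narrowed to [0, 99]; on the false edge x >= y's
// lower bound, which for a lower bound of 0 leaves x varying.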
1354 bool
1355 operator_lt::op2_range (irange &r, tree type,
1356 const irange &lhs,
1357 const irange &op1,
1358 relation_trio) const
1360 if (op1.undefined_p ())
1361 return false;
1363 switch (get_bool_state (r, lhs, type))
1365 case BRS_TRUE:
1366 build_gt (r, type, op1.lower_bound ());
1367 break;
1369 case BRS_FALSE:
1370 build_le (r, type, op1.upper_bound ());
1371 break;
1373 default:
1374 break;
1376 return true;
1380 void
1381 operator_le::update_bitmask (irange &r, const irange &lh,
1382 const irange &rh) const
1384 update_known_bitmask (r, LE_EXPR, lh, rh);
1387 // Check if the LHS range indicates a relation between OP1 and OP2.
1389 relation_kind
1390 operator_le::op1_op2_relation (const irange &lhs, const irange &,
1391 const irange &) const
1393 if (lhs.undefined_p ())
1394 return VREL_UNDEFINED;
1396 // FALSE = op1 <= op2 indicates GT_EXPR.
1397 if (lhs.zero_p ())
1398 return VREL_GT;
1400 // TRUE = op1 <= op2 indicates LE_EXPR.
1401 if (!contains_zero_p (lhs))
1402 return VREL_LE;
1403 return VREL_VARYING;
1406 bool
1407 operator_le::fold_range (irange &r, tree type,
1408 const irange &op1,
1409 const irange &op2,
1410 relation_trio rel) const
1412 if (relop_early_resolve (r, type, op1, op2, rel, VREL_LE))
1413 return true;
1415 signop sign = TYPE_SIGN (op1.type ());
1416 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1418 if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
1419 r = range_true (type);
1420 else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
1421 r = range_false (type);
1422 else
1423 r = range_true_and_false (type);
1424 return true;
1427 bool
1428 operator_le::op1_range (irange &r, tree type,
1429 const irange &lhs,
1430 const irange &op2,
1431 relation_trio) const
1433 if (op2.undefined_p ())
1434 return false;
1436 switch (get_bool_state (r, lhs, type))
1438 case BRS_TRUE:
1439 build_le (r, type, op2.upper_bound ());
1440 break;
1442 case BRS_FALSE:
1443 build_gt (r, type, op2.lower_bound ());
1444 break;
1446 default:
1447 break;
1449 return true;
1452 bool
1453 operator_le::op2_range (irange &r, tree type,
1454 const irange &lhs,
1455 const irange &op1,
1456 relation_trio) const
1458 if (op1.undefined_p ())
1459 return false;
1461 switch (get_bool_state (r, lhs, type))
1463 case BRS_TRUE:
1464 build_ge (r, type, op1.lower_bound ());
1465 break;
1467 case BRS_FALSE:
1468 build_lt (r, type, op1.upper_bound ());
1469 break;
1471 default:
1472 break;
1474 return true;
1478 void
1479 operator_gt::update_bitmask (irange &r, const irange &lh,
1480 const irange &rh) const
1482 update_known_bitmask (r, GT_EXPR, lh, rh);
1485 // Check if the LHS range indicates a relation between OP1 and OP2.
1487 relation_kind
1488 operator_gt::op1_op2_relation (const irange &lhs, const irange &,
1489 const irange &) const
1491 if (lhs.undefined_p ())
1492 return VREL_UNDEFINED;
1494 // FALSE = op1 > op2 indicates LE_EXPR.
1495 if (lhs.zero_p ())
1496 return VREL_LE;
1498 // TRUE = op1 > op2 indicates GT_EXPR.
1499 if (!contains_zero_p (lhs))
1500 return VREL_GT;
1501 return VREL_VARYING;
1504 bool
1505 operator_gt::fold_range (irange &r, tree type,
1506 const irange &op1, const irange &op2,
1507 relation_trio rel) const
1509 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GT))
1510 return true;
1512 signop sign = TYPE_SIGN (op1.type ());
1513 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1515 if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
1516 r = range_true (type);
1517 else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
1518 r = range_false (type);
1519 else
1520 r = range_true_and_false (type);
1521 return true;
1524 bool
1525 operator_gt::op1_range (irange &r, tree type,
1526 const irange &lhs, const irange &op2,
1527 relation_trio) const
1529 if (op2.undefined_p ())
1530 return false;
1532 switch (get_bool_state (r, lhs, type))
1534 case BRS_TRUE:
1535 build_gt (r, type, op2.lower_bound ());
1536 break;
1538 case BRS_FALSE:
1539 build_le (r, type, op2.upper_bound ());
1540 break;
1542 default:
1543 break;
1545 return true;
1548 bool
1549 operator_gt::op2_range (irange &r, tree type,
1550 const irange &lhs,
1551 const irange &op1,
1552 relation_trio) const
1554 if (op1.undefined_p ())
1555 return false;
1557 switch (get_bool_state (r, lhs, type))
1559 case BRS_TRUE:
1560 build_lt (r, type, op1.upper_bound ());
1561 break;
1563 case BRS_FALSE:
1564 build_ge (r, type, op1.lower_bound ());
1565 break;
1567 default:
1568 break;
1570 return true;
1574 void
1575 operator_ge::update_bitmask (irange &r, const irange &lh,
1576 const irange &rh) const
1578 update_known_bitmask (r, GE_EXPR, lh, rh);
1581 // Check if the LHS range indicates a relation between OP1 and OP2.
1583 relation_kind
1584 operator_ge::op1_op2_relation (const irange &lhs, const irange &,
1585 const irange &) const
1587 if (lhs.undefined_p ())
1588 return VREL_UNDEFINED;
1590 // FALSE = op1 >= op2 indicates LT_EXPR.
1591 if (lhs.zero_p ())
1592 return VREL_LT;
1594 // TRUE = op1 >= op2 indicates GE_EXPR.
1595 if (!contains_zero_p (lhs))
1596 return VREL_GE;
1597 return VREL_VARYING;
1600 bool
1601 operator_ge::fold_range (irange &r, tree type,
1602 const irange &op1,
1603 const irange &op2,
1604 relation_trio rel) const
1606 if (relop_early_resolve (r, type, op1, op2, rel, VREL_GE))
1607 return true;
1609 signop sign = TYPE_SIGN (op1.type ());
1610 gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));
1612 if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
1613 r = range_true (type);
1614 else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
1615 r = range_false (type);
1616 else
1617 r = range_true_and_false (type);
1618 return true;
1621 bool
1622 operator_ge::op1_range (irange &r, tree type,
1623 const irange &lhs,
1624 const irange &op2,
1625 relation_trio) const
1627 if (op2.undefined_p ())
1628 return false;
1630 switch (get_bool_state (r, lhs, type))
1632 case BRS_TRUE:
1633 build_ge (r, type, op2.lower_bound ());
1634 break;
1636 case BRS_FALSE:
1637 build_lt (r, type, op2.upper_bound ());
1638 break;
1640 default:
1641 break;
1643 return true;
1646 bool
1647 operator_ge::op2_range (irange &r, tree type,
1648 const irange &lhs,
1649 const irange &op1,
1650 relation_trio) const
1652 if (op1.undefined_p ())
1653 return false;
1655 switch (get_bool_state (r, lhs, type))
1657 case BRS_TRUE:
1658 build_le (r, type, op1.upper_bound ());
1659 break;
1661 case BRS_FALSE:
1662 build_gt (r, type, op1.lower_bound ());
1663 break;
1665 default:
1666 break;
1668 return true;
1672 void
1673 operator_plus::update_bitmask (irange &r, const irange &lh,
1674 const irange &rh) const
1676 update_known_bitmask (r, PLUS_EXPR, lh, rh);
1679 // Check to see if the range of OP2 indicates anything about the relation
1680 // between LHS and OP1.
1682 relation_kind
1683 operator_plus::lhs_op1_relation (const irange &lhs,
1684 const irange &op1,
1685 const irange &op2,
1686 relation_kind) const
1688 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
1689 return VREL_VARYING;
1691 tree type = lhs.type ();
1692 unsigned prec = TYPE_PRECISION (type);
1693 wi::overflow_type ovf1, ovf2;
1694 signop sign = TYPE_SIGN (type);
1696 // LHS = OP1 + 0 indicates LHS == OP1.
1697 if (op2.zero_p ())
1698 return VREL_EQ;
1700 if (TYPE_OVERFLOW_WRAPS (type))
1702 wi::add (op1.lower_bound (), op2.lower_bound (), sign, &ovf1);
1703 wi::add (op1.upper_bound (), op2.upper_bound (), sign, &ovf2);
1705 else
1706 ovf1 = ovf2 = wi::OVF_NONE;
1708 // Never wrapping additions.
1709 if (!ovf1 && !ovf2)
1711 // Positive op2 means lhs > op1.
1712 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1713 return VREL_GT;
1714 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1715 return VREL_GE;
1717 // Negative op2 means lhs < op1.
1718 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1719 return VREL_LT;
1720 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1721 return VREL_LE;
1723 // Always wrapping additions.
1724 else if (ovf1 && ovf1 == ovf2)
1726 // Positive op2 means lhs < op1.
1727 if (wi::gt_p (op2.lower_bound (), wi::zero (prec), sign))
1728 return VREL_LT;
1729 if (wi::ge_p (op2.lower_bound (), wi::zero (prec), sign))
1730 return VREL_LE;
1732 // Negative op2 means lhs > op1.
1733 if (wi::lt_p (op2.upper_bound (), wi::zero (prec), sign))
1734 return VREL_GT;
1735 if (wi::le_p (op2.upper_bound (), wi::zero (prec), sign))
1736 return VREL_GE;
1739 // If op2 does not contain 0, then LHS and OP1 can never be equal.
1740 if (!range_includes_zero_p (op2))
1741 return VREL_NE;
1743 return VREL_VARYING;
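// For example, for unsigned x = y + 3 with y in [0, 10], neither bound
// addition wraps and, since 3 > 0, the routine reports x > y (VREL_GT).
// If y were [UINT_MAX - 1, UINT_MAX] instead, both bound additions would
// wrap and the result would be x < y (VREL_LT).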
1746 // PLUS is symmetrical, so we can simply call lhs_op1_relation with reversed
1747 // operands.
1749 relation_kind
1750 operator_plus::lhs_op2_relation (const irange &lhs, const irange &op1,
1751 const irange &op2, relation_kind rel) const
1753 return lhs_op1_relation (lhs, op2, op1, rel);
1756 void
1757 operator_plus::wi_fold (irange &r, tree type,
1758 const wide_int &lh_lb, const wide_int &lh_ub,
1759 const wide_int &rh_lb, const wide_int &rh_ub) const
1761 wi::overflow_type ov_lb, ov_ub;
1762 signop s = TYPE_SIGN (type);
1763 wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
1764 wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
1765 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1768 // Given addition or subtraction, determine the possible NORMAL ranges and
1769 // OVERFLOW ranges given an OFFSET range. ADD_P is true for addition.
1770 // Return the relation that exists between the LHS and OP1 in order for the
1771 // NORMAL range to apply.
1772 // A return value of VREL_VARYING means no ranges were applicable.
1774 static relation_kind
1775 plus_minus_ranges (irange &r_ov, irange &r_normal, const irange &offset,
1776 bool add_p)
1778 relation_kind kind = VREL_VARYING;
1779 // For now, only deal with constant adds. This could be extended to ranges
1780 // when someone is so motivated.
1781 if (!offset.singleton_p () || offset.zero_p ())
1782 return kind;
1784   // Always work with a positive offset, i.e. a + -2 -> a - 2 and a - -2 -> a + 2.
1785 wide_int off = offset.lower_bound ();
1786 if (wi::neg_p (off, SIGNED))
1788 add_p = !add_p;
1789 off = wi::neg (off);
1792 wi::overflow_type ov;
1793 tree type = offset.type ();
1794 unsigned prec = TYPE_PRECISION (type);
1795 wide_int ub;
1796 wide_int lb;
1797   // Calculate the normal range and relation for the operation.
1798 if (add_p)
1800 // [ 0 , INF - OFF]
1801 lb = wi::zero (prec);
1802 ub = wi::sub (irange_val_max (type), off, UNSIGNED, &ov);
1803 kind = VREL_GT;
1805 else
1807 // [ OFF, INF ]
1808 lb = off;
1809 ub = irange_val_max (type);
1810 kind = VREL_LT;
1812 int_range<2> normal_range (type, lb, ub);
1813 int_range<2> ov_range (type, lb, ub, VR_ANTI_RANGE);
1815 r_ov = ov_range;
1816 r_normal = normal_range;
1817 return kind;
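// For example, for an unsigned char operand and OFFSET == [2, 2] with ADD_P
// true, the normal (non-wrapping) range is [0, 253] with relation VREL_GT,
// and the overflow range is the remaining [254, 255].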
1820 // Once op1 has been calculated by operator_plus or operator_minus, check
1821 // to see if the relation passed causes any part of the calculation to
1822 // be impossible.  For example,
1823 // a_2 = b_3 + 1 with a_2 < b_3 can refine the range of b_3 to [INF, INF]
1824 // and that further refines a_2 to [0, 0].
1825 // R is the value of op1, OP2 is the offset being added/subtracted, REL is
1826 // the relation between the LHS and OP1, and ADD_P is true for PLUS, false
1827 // for MINUS.  If any adjustment can be made, R will reflect it.
1829 static void
1830 adjust_op1_for_overflow (irange &r, const irange &op2, relation_kind rel,
1831 bool add_p)
1833 if (r.undefined_p ())
1834 return;
1835 tree type = r.type ();
1836 // Check for unsigned overflow and calculate the overflow part.
1837 signop s = TYPE_SIGN (type);
1838 if (!TYPE_OVERFLOW_WRAPS (type) || s == SIGNED)
1839 return;
1841 // Only work with <, <=, >, >= relations.
1842 if (!relation_lt_le_gt_ge_p (rel))
1843 return;
1845 // Get the ranges for this offset.
1846 int_range_max normal, overflow;
1847 relation_kind k = plus_minus_ranges (overflow, normal, op2, add_p);
1849 // VREL_VARYING means there are no adjustments.
1850 if (k == VREL_VARYING)
1851 return;
1853 // If the relations match use the normal range, otherwise use overflow range.
1854 if (relation_intersect (k, rel) == k)
1855 r.intersect (normal);
1856 else
1857 r.intersect (overflow);
1858 return;
1861 bool
1862 operator_plus::op1_range (irange &r, tree type,
1863 const irange &lhs,
1864 const irange &op2,
1865 relation_trio trio) const
1867 if (lhs.undefined_p ())
1868 return false;
1869 // Start with the default operation.
1870 range_op_handler minus (MINUS_EXPR);
1871 if (!minus)
1872 return false;
1873 bool res = minus.fold_range (r, type, lhs, op2);
1874 relation_kind rel = trio.lhs_op1 ();
1875 // Check for a relation refinement.
1876 if (res)
1877 adjust_op1_for_overflow (r, op2, rel, true /* PLUS_EXPR */);
1878 return res;
1881 bool
1882 operator_plus::op2_range (irange &r, tree type,
1883 const irange &lhs,
1884 const irange &op1,
1885 relation_trio rel) const
1887 return op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
1890 class operator_widen_plus_signed : public range_operator
1892 public:
1893 virtual void wi_fold (irange &r, tree type,
1894 const wide_int &lh_lb,
1895 const wide_int &lh_ub,
1896 const wide_int &rh_lb,
1897 const wide_int &rh_ub) const;
1898 } op_widen_plus_signed;
1900 void
1901 operator_widen_plus_signed::wi_fold (irange &r, tree type,
1902 const wide_int &lh_lb,
1903 const wide_int &lh_ub,
1904 const wide_int &rh_lb,
1905 const wide_int &rh_ub) const
1907 wi::overflow_type ov_lb, ov_ub;
1908 signop s = TYPE_SIGN (type);
1910 wide_int lh_wlb
1911 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
1912 wide_int lh_wub
1913 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
1914 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1915 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1917 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1918 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1920 r = int_range<2> (type, new_lb, new_ub);
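// For example, widening two 8-bit operands [100, 120] and [100, 120]
// performs the additions in 16-bit precision, so the result is [200, 240]
// with no wrapping.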
1923 class operator_widen_plus_unsigned : public range_operator
1925 public:
1926 virtual void wi_fold (irange &r, tree type,
1927 const wide_int &lh_lb,
1928 const wide_int &lh_ub,
1929 const wide_int &rh_lb,
1930 const wide_int &rh_ub) const;
1931 } op_widen_plus_unsigned;
1933 void
1934 operator_widen_plus_unsigned::wi_fold (irange &r, tree type,
1935 const wide_int &lh_lb,
1936 const wide_int &lh_ub,
1937 const wide_int &rh_lb,
1938 const wide_int &rh_ub) const
1940 wi::overflow_type ov_lb, ov_ub;
1941 signop s = TYPE_SIGN (type);
1943 wide_int lh_wlb
1944 = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
1945 wide_int lh_wub
1946 = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
1947 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
1948 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
1950 wide_int new_lb = wi::add (lh_wlb, rh_wlb, s, &ov_lb);
1951 wide_int new_ub = wi::add (lh_wub, rh_wub, s, &ov_ub);
1953 r = int_range<2> (type, new_lb, new_ub);
1956 void
1957 operator_minus::update_bitmask (irange &r, const irange &lh,
1958 const irange &rh) const
1960 update_known_bitmask (r, MINUS_EXPR, lh, rh);
1963 void
1964 operator_minus::wi_fold (irange &r, tree type,
1965 const wide_int &lh_lb, const wide_int &lh_ub,
1966 const wide_int &rh_lb, const wide_int &rh_ub) const
1968 wi::overflow_type ov_lb, ov_ub;
1969 signop s = TYPE_SIGN (type);
1970 wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
1971 wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
1972 value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
1976 // Return the relation between LHS and OP1 based on the relation between
1977 // OP1 and OP2.
1979 relation_kind
1980 operator_minus::lhs_op1_relation (const irange &, const irange &op1,
1981 const irange &, relation_kind rel) const
1983 if (!op1.undefined_p () && TYPE_SIGN (op1.type ()) == UNSIGNED)
1984 switch (rel)
1986 case VREL_GT:
1987 case VREL_GE:
1988 return VREL_LE;
1989 default:
1990 break;
1992 return VREL_VARYING;
1995 // Check to see if the relation REL between OP1 and OP2 has any effect on the
1996 // LHS of the expression. If so, apply it to LHS_RANGE. This is a helper
1997 // function for both MINUS_EXPR and POINTER_DIFF_EXPR.
1999 bool
2000 minus_op1_op2_relation_effect (irange &lhs_range, tree type,
2001 const irange &op1_range ATTRIBUTE_UNUSED,
2002 const irange &op2_range ATTRIBUTE_UNUSED,
2003 relation_kind rel)
2005 if (rel == VREL_VARYING)
2006 return false;
2008 int_range<2> rel_range;
2009 unsigned prec = TYPE_PRECISION (type);
2010 signop sgn = TYPE_SIGN (type);
2012 // == and != produce [0,0] and ~[0,0] regardless of wrapping.
2013 if (rel == VREL_EQ)
2014 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec));
2015 else if (rel == VREL_NE)
2016 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
2017 VR_ANTI_RANGE);
2018 else if (TYPE_OVERFLOW_WRAPS (type))
2020 switch (rel)
2022 // For wrapping signed values and unsigned, if op1 > op2 or
2023 // op1 < op2, then op1 - op2 can be restricted to ~[0, 0].
2024 case VREL_GT:
2025 case VREL_LT:
2026 rel_range = int_range<2> (type, wi::zero (prec), wi::zero (prec),
2027 VR_ANTI_RANGE);
2028 break;
2029 default:
2030 return false;
2033 else
2035 switch (rel)
2037 // op1 > op2, op1 - op2 can be restricted to [1, +INF]
2038 case VREL_GT:
2039 rel_range = int_range<2> (type, wi::one (prec),
2040 wi::max_value (prec, sgn));
2041 break;
2042 // op1 >= op2, op1 - op2 can be restricted to [0, +INF]
2043 case VREL_GE:
2044 rel_range = int_range<2> (type, wi::zero (prec),
2045 wi::max_value (prec, sgn));
2046 break;
2047 // op1 < op2, op1 - op2 can be restricted to [-INF, -1]
2048 case VREL_LT:
2049 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
2050 wi::minus_one (prec));
2051 break;
2052 // op1 <= op2, op1 - op2 can be restricted to [-INF, 0]
2053 case VREL_LE:
2054 rel_range = int_range<2> (type, wi::min_value (prec, sgn),
2055 wi::zero (prec));
2056 break;
2057 default:
2058 return false;
2061 lhs_range.intersect (rel_range);
2062 return true;
2065 bool
2066 operator_minus::op1_op2_relation_effect (irange &lhs_range, tree type,
2067 const irange &op1_range,
2068 const irange &op2_range,
2069 relation_kind rel) const
2071 return minus_op1_op2_relation_effect (lhs_range, type, op1_range, op2_range,
2072 rel);
2075 bool
2076 operator_minus::op1_range (irange &r, tree type,
2077 const irange &lhs,
2078 const irange &op2,
2079 relation_trio trio) const
2081 if (lhs.undefined_p ())
2082 return false;
2083 // Start with the default operation.
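// (Illustrative example, not from the sources: for [5, 10] = OP1 - [2, 3],
// the PLUS fold below gives OP1 = [5, 10] + [2, 3] = [7, 13], which
// adjust_op1_for_overflow may then widen if the subtraction could have
// wrapped.)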
2084 range_op_handler minus (PLUS_EXPR);
2085 if (!minus)
2086 return false;
2087 bool res = minus.fold_range (r, type, lhs, op2);
2088 relation_kind rel = trio.lhs_op1 ();
2089 if (res)
2090 adjust_op1_for_overflow (r, op2, rel, false /* PLUS_EXPR */);
2091 return res;
2095 bool
2096 operator_minus::op2_range (irange &r, tree type,
2097 const irange &lhs,
2098 const irange &op1,
2099 relation_trio) const
2101 if (lhs.undefined_p ())
2102 return false;
2103 return fold_range (r, type, op1, lhs);
2106 void
2107 operator_min::update_bitmask (irange &r, const irange &lh,
2108 const irange &rh) const
2110 update_known_bitmask (r, MIN_EXPR, lh, rh);
2113 void
2114 operator_min::wi_fold (irange &r, tree type,
2115 const wide_int &lh_lb, const wide_int &lh_ub,
2116 const wide_int &rh_lb, const wide_int &rh_ub) const
2118 signop s = TYPE_SIGN (type);
2119 wide_int new_lb = wi::min (lh_lb, rh_lb, s);
2120 wide_int new_ub = wi::min (lh_ub, rh_ub, s);
2121 value_range_with_overflow (r, type, new_lb, new_ub);
2125 void
2126 operator_max::update_bitmask (irange &r, const irange &lh,
2127 const irange &rh) const
2129 update_known_bitmask (r, MAX_EXPR, lh, rh);
2132 void
2133 operator_max::wi_fold (irange &r, tree type,
2134 const wide_int &lh_lb, const wide_int &lh_ub,
2135 const wide_int &rh_lb, const wide_int &rh_ub) const
2137 signop s = TYPE_SIGN (type);
2138 wide_int new_lb = wi::max (lh_lb, rh_lb, s);
2139 wide_int new_ub = wi::max (lh_ub, rh_ub, s);
2140 value_range_with_overflow (r, type, new_lb, new_ub);
2144 // Calculate the cross product of two sets of ranges and return it.
2146 // Multiplications, divisions and shifts are a bit tricky to handle;
2147 // depending on the mix of signs we have in the two ranges, we need to
2148 // operate on different values to get the minimum and maximum values
2149 // for the new range. One approach is to figure out all the
2150 // variations of range combinations and do the operations.
2152 // However, this involves several calls to compare_values and it is
2153 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
2154 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
2155 // figure the smallest and largest values to form the new range.
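// For example (illustrative, not from the sources): for [2, 3] * [4, 5] the
// four products are 8, 10, 12 and 15, so the resulting range is [8, 15].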
2157 void
2158 cross_product_operator::wi_cross_product (irange &r, tree type,
2159 const wide_int &lh_lb,
2160 const wide_int &lh_ub,
2161 const wide_int &rh_lb,
2162 const wide_int &rh_ub) const
2164 wide_int cp1, cp2, cp3, cp4;
2165 // Default to varying.
2166 r.set_varying (type);
2168 // Compute the 4 cross operations, bailing if we get an overflow we
2169 // can't handle.
2170 if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
2171 return;
2172 if (wi::eq_p (lh_lb, lh_ub))
2173 cp3 = cp1;
2174 else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
2175 return;
2176 if (wi::eq_p (rh_lb, rh_ub))
2177 cp2 = cp1;
2178 else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
2179 return;
2180 if (wi::eq_p (lh_lb, lh_ub))
2181 cp4 = cp2;
2182 else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
2183 return;
2185 // Order pairs.
2186 signop sign = TYPE_SIGN (type);
2187 if (wi::gt_p (cp1, cp2, sign))
2188 std::swap (cp1, cp2);
2189 if (wi::gt_p (cp3, cp4, sign))
2190 std::swap (cp3, cp4);
2192 // Choose min and max from the ordered pairs.
2193 wide_int res_lb = wi::min (cp1, cp3, sign);
2194 wide_int res_ub = wi::max (cp2, cp4, sign);
2195 value_range_with_overflow (r, type, res_lb, res_ub);
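// Illustrative usage sketch (not part of this file; "type" and the 32-bit
// precision are assumptions): a client asking for the forward range of a
// multiplication through the generic dispatcher might do roughly:
//
//   int_range_max res;
//   int_range<1> a (type, wi::shwi (2, 32), wi::shwi (3, 32));
//   int_range<1> b (type, wi::shwi (4, 32), wi::shwi (5, 32));
//   range_op_handler (MULT_EXPR).fold_range (res, type, a, b);
//   // res is now [8, 15], the cross product computed above.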
2199 void
2200 operator_mult::update_bitmask (irange &r, const irange &lh,
2201 const irange &rh) const
2203 update_known_bitmask (r, MULT_EXPR, lh, rh);
2206 bool
2207 operator_mult::op1_range (irange &r, tree type,
2208 const irange &lhs, const irange &op2,
2209 relation_trio) const
2211 if (lhs.undefined_p ())
2212 return false;
2214 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
2215 // For example: for 0 = OP1 * 2, OP1 could be 0 or 0x80, whereas
2216 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit).
2217 if (TYPE_OVERFLOW_WRAPS (type))
2218 return false;
2220 wide_int offset;
2221 if (op2.singleton_p (offset) && offset != 0)
2222 return range_op_handler (TRUNC_DIV_EXPR).fold_range (r, type, lhs, op2);
2223 return false;
2226 bool
2227 operator_mult::op2_range (irange &r, tree type,
2228 const irange &lhs, const irange &op1,
2229 relation_trio rel) const
2231 return operator_mult::op1_range (r, type, lhs, op1, rel.swap_op1_op2 ());
2234 bool
2235 operator_mult::wi_op_overflows (wide_int &res, tree type,
2236 const wide_int &w0, const wide_int &w1) const
2238 wi::overflow_type overflow = wi::OVF_NONE;
2239 signop sign = TYPE_SIGN (type);
2240 res = wi::mul (w0, w1, sign, &overflow);
2241 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2243 // For multiplication, the sign of the overflow is given
2244 // by the comparison of the signs of the operands.
2245 if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
2246 res = wi::max_value (w0.get_precision (), sign);
2247 else
2248 res = wi::min_value (w0.get_precision (), sign);
2249 return false;
2251 return overflow;
2254 void
2255 operator_mult::wi_fold (irange &r, tree type,
2256 const wide_int &lh_lb, const wide_int &lh_ub,
2257 const wide_int &rh_lb, const wide_int &rh_ub) const
2259 if (TYPE_OVERFLOW_UNDEFINED (type))
2261 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2262 return;
2265 // Multiply the ranges when overflow wraps. This is basically fancy
2266 // code so we don't drop to varying with an unsigned
2267 // [-3,-1]*[-3,-1].
2269 // This test requires 2*prec bits if both operands are signed and
2270 // 2*prec + 2 bits if either is not. Therefore, extend the values
2271 // using the sign of the result to PREC2. From here on out,
2272 // everything is just signed math no matter what the input types
2273 // were.
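// For instance (illustrative): with 8-bit unsigned operands, [253, 255] *
// [253, 255] is canonicalized below to [-3, -1] * [-3, -1]; the four signed
// products span [1, 9], which is well under the type size, so the result is
// [1, 9] instead of varying.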
2275 signop sign = TYPE_SIGN (type);
2276 unsigned prec = TYPE_PRECISION (type);
2277 widest2_int min0 = widest2_int::from (lh_lb, sign);
2278 widest2_int max0 = widest2_int::from (lh_ub, sign);
2279 widest2_int min1 = widest2_int::from (rh_lb, sign);
2280 widest2_int max1 = widest2_int::from (rh_ub, sign);
2281 widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
2282 widest2_int size = sizem1 + 1;
2284 // Canonicalize the intervals.
2285 if (sign == UNSIGNED)
2287 if (wi::ltu_p (size, min0 + max0))
2289 min0 -= size;
2290 max0 -= size;
2292 if (wi::ltu_p (size, min1 + max1))
2294 min1 -= size;
2295 max1 -= size;
2299 // Sort the 4 products so that min is in prod0 and max is in
2300 // prod3.
2301 widest2_int prod0 = min0 * min1;
2302 widest2_int prod1 = min0 * max1;
2303 widest2_int prod2 = max0 * min1;
2304 widest2_int prod3 = max0 * max1;
2306 // min0min1 > max0max1
2307 if (prod0 > prod3)
2308 std::swap (prod0, prod3);
2310 // min0max1 > max0min1
2311 if (prod1 > prod2)
2312 std::swap (prod1, prod2);
2314 if (prod0 > prod1)
2315 std::swap (prod0, prod1);
2317 if (prod2 > prod3)
2318 std::swap (prod2, prod3);
2320 // diff = max - min
2321 prod2 = prod3 - prod0;
2322 if (wi::geu_p (prod2, sizem1))
2324 // Multiplying by X, where X is a power of 2, is [0,0][X,+INF].
2325 if (TYPE_UNSIGNED (type) && rh_lb == rh_ub
2326 && wi::exact_log2 (rh_lb) != -1 && prec > 1)
2328 r.set (type, rh_lb, wi::max_value (prec, sign));
2329 int_range<2> zero;
2330 zero.set_zero (type);
2331 r.union_ (zero);
2333 else
2334 // The range covers all values.
2335 r.set_varying (type);
2337 else
2339 wide_int new_lb = wide_int::from (prod0, prec, sign);
2340 wide_int new_ub = wide_int::from (prod3, prec, sign);
2341 create_possibly_reversed_range (r, type, new_lb, new_ub);
2345 class operator_widen_mult_signed : public range_operator
2347 public:
2348 virtual void wi_fold (irange &r, tree type,
2349 const wide_int &lh_lb,
2350 const wide_int &lh_ub,
2351 const wide_int &rh_lb,
2352 const wide_int &rh_ub)
2353 const;
2354 } op_widen_mult_signed;
2356 void
2357 operator_widen_mult_signed::wi_fold (irange &r, tree type,
2358 const wide_int &lh_lb,
2359 const wide_int &lh_ub,
2360 const wide_int &rh_lb,
2361 const wide_int &rh_ub) const
2363 signop s = TYPE_SIGN (type);
2365 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, SIGNED);
2366 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, SIGNED);
2367 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2368 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2370 /* We don't expect a widening multiplication to be able to overflow but range
2371 calculations for multiplications are complicated. After widening the
2372 operands let's call the base class. */
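// Illustrative example: an 8-bit [10, 20] times an 8-bit [3, 5] in a 16-bit
// widening multiply yields [30, 100]; the widened operands cannot overflow
// the double-width result.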
2373 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2377 class operator_widen_mult_unsigned : public range_operator
2379 public:
2380 virtual void wi_fold (irange &r, tree type,
2381 const wide_int &lh_lb,
2382 const wide_int &lh_ub,
2383 const wide_int &rh_lb,
2384 const wide_int &rh_ub)
2385 const;
2386 } op_widen_mult_unsigned;
2388 void
2389 operator_widen_mult_unsigned::wi_fold (irange &r, tree type,
2390 const wide_int &lh_lb,
2391 const wide_int &lh_ub,
2392 const wide_int &rh_lb,
2393 const wide_int &rh_ub) const
2395 signop s = TYPE_SIGN (type);
2397 wide_int lh_wlb = wide_int::from (lh_lb, wi::get_precision (lh_lb) * 2, UNSIGNED);
2398 wide_int lh_wub = wide_int::from (lh_ub, wi::get_precision (lh_ub) * 2, UNSIGNED);
2399 wide_int rh_wlb = wide_int::from (rh_lb, wi::get_precision (rh_lb) * 2, s);
2400 wide_int rh_wub = wide_int::from (rh_ub, wi::get_precision (rh_ub) * 2, s);
2402 /* We don't expect a widening multiplication to be able to overflow but range
2403 calculations for multiplications are complicated. After widening the
2404 operands let's call the base class. */
2405 return op_mult.wi_fold (r, type, lh_wlb, lh_wub, rh_wlb, rh_wub);
2408 class operator_div : public cross_product_operator
2410 using range_operator::update_bitmask;
2411 public:
2412 operator_div (tree_code div_kind) { m_code = div_kind; }
2413 virtual void wi_fold (irange &r, tree type,
2414 const wide_int &lh_lb,
2415 const wide_int &lh_ub,
2416 const wide_int &rh_lb,
2417 const wide_int &rh_ub) const final override;
2418 virtual bool wi_op_overflows (wide_int &res, tree type,
2419 const wide_int &, const wide_int &)
2420 const final override;
2421 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
2422 { update_known_bitmask (r, m_code, lh, rh); }
2423 protected:
2424 tree_code m_code;
2427 static operator_div op_trunc_div (TRUNC_DIV_EXPR);
2428 static operator_div op_floor_div (FLOOR_DIV_EXPR);
2429 static operator_div op_round_div (ROUND_DIV_EXPR);
2430 static operator_div op_ceil_div (CEIL_DIV_EXPR);
2432 bool
2433 operator_div::wi_op_overflows (wide_int &res, tree type,
2434 const wide_int &w0, const wide_int &w1) const
2436 if (w1 == 0)
2437 return true;
2439 wi::overflow_type overflow = wi::OVF_NONE;
2440 signop sign = TYPE_SIGN (type);
2442 switch (m_code)
2444 case EXACT_DIV_EXPR:
2445 case TRUNC_DIV_EXPR:
2446 res = wi::div_trunc (w0, w1, sign, &overflow);
2447 break;
2448 case FLOOR_DIV_EXPR:
2449 res = wi::div_floor (w0, w1, sign, &overflow);
2450 break;
2451 case ROUND_DIV_EXPR:
2452 res = wi::div_round (w0, w1, sign, &overflow);
2453 break;
2454 case CEIL_DIV_EXPR:
2455 res = wi::div_ceil (w0, w1, sign, &overflow);
2456 break;
2457 default:
2458 gcc_unreachable ();
2461 if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
2463 // For division, the only overflow case is -INF / -1 = +INF.
2464 res = wi::max_value (w0.get_precision (), sign);
2465 return false;
2467 return overflow;
2470 void
2471 operator_div::wi_fold (irange &r, tree type,
2472 const wide_int &lh_lb, const wide_int &lh_ub,
2473 const wide_int &rh_lb, const wide_int &rh_ub) const
2475 const wide_int dividend_min = lh_lb;
2476 const wide_int dividend_max = lh_ub;
2477 const wide_int divisor_min = rh_lb;
2478 const wide_int divisor_max = rh_ub;
2479 signop sign = TYPE_SIGN (type);
2480 unsigned prec = TYPE_PRECISION (type);
2481 wide_int extra_min, extra_max;
2483 // If we know we won't divide by zero, just do the division.
2484 if (!wi_includes_zero_p (type, divisor_min, divisor_max))
2486 wi_cross_product (r, type, dividend_min, dividend_max,
2487 divisor_min, divisor_max);
2488 return;
2491 // If we're definitely dividing by zero, there's nothing to do.
2492 if (wi_zero_p (type, divisor_min, divisor_max))
2494 r.set_undefined ();
2495 return;
2498 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
2499 // skip any division by zero.
2501 // First divide by the negative numbers, if any.
2502 if (wi::neg_p (divisor_min, sign))
2503 wi_cross_product (r, type, dividend_min, dividend_max,
2504 divisor_min, wi::minus_one (prec));
2505 else
2506 r.set_undefined ();
2508 // Then divide by the non-zero positive numbers, if any.
2509 if (wi::gt_p (divisor_max, wi::zero (prec), sign))
2511 int_range_max tmp;
2512 wi_cross_product (tmp, type, dividend_min, dividend_max,
2513 wi::one (prec), divisor_max);
2514 r.union_ (tmp);
2516 // We shouldn't still have undefined here.
2517 gcc_checking_assert (!r.undefined_p ());
2521 class operator_exact_divide : public operator_div
2523 using range_operator::op1_range;
2524 public:
2525 operator_exact_divide () : operator_div (EXACT_DIV_EXPR) { }
2526 virtual bool op1_range (irange &r, tree type,
2527 const irange &lhs,
2528 const irange &op2,
2529 relation_trio) const;
2531 } op_exact_div;
2533 bool
2534 operator_exact_divide::op1_range (irange &r, tree type,
2535 const irange &lhs,
2536 const irange &op2,
2537 relation_trio) const
2539 if (lhs.undefined_p ())
2540 return false;
2541 wide_int offset;
2542 // [2, 4] = op1 / [3,3]: since it's an exact divide, no need to worry about
2543 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
2544 // We won't bother trying to enumerate all the in-between stuff :-P
2545 // TRUE accuracy is [6,6][9,9][12,12]. This is unlikely to matter most of
2546 // the time however.
2547 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
2548 if (op2.singleton_p (offset) && offset != 0)
2549 return range_op_handler (MULT_EXPR).fold_range (r, type, lhs, op2);
2550 return false;
2554 class operator_lshift : public cross_product_operator
2556 using range_operator::fold_range;
2557 using range_operator::op1_range;
2558 using range_operator::update_bitmask;
2559 public:
2560 virtual bool op1_range (irange &r, tree type, const irange &lhs,
2561 const irange &op2, relation_trio rel = TRIO_VARYING)
2562 const final override;
2563 virtual bool fold_range (irange &r, tree type, const irange &op1,
2564 const irange &op2, relation_trio rel = TRIO_VARYING)
2565 const final override;
2567 virtual void wi_fold (irange &r, tree type,
2568 const wide_int &lh_lb, const wide_int &lh_ub,
2569 const wide_int &rh_lb,
2570 const wide_int &rh_ub) const final override;
2571 virtual bool wi_op_overflows (wide_int &res,
2572 tree type,
2573 const wide_int &,
2574 const wide_int &) const final override;
2575 void update_bitmask (irange &r, const irange &lh,
2576 const irange &rh) const final override
2577 { update_known_bitmask (r, LSHIFT_EXPR, lh, rh); }
2578 // Check compatibility of LHS and op1.
2579 bool operand_check_p (tree t1, tree t2, tree) const final override
2580 { return range_compatible_p (t1, t2); }
2581 } op_lshift;
2583 class operator_rshift : public cross_product_operator
2585 using range_operator::fold_range;
2586 using range_operator::op1_range;
2587 using range_operator::lhs_op1_relation;
2588 using range_operator::update_bitmask;
2589 public:
2590 virtual bool fold_range (irange &r, tree type, const irange &op1,
2591 const irange &op2, relation_trio rel = TRIO_VARYING)
2592 const final override;
2593 virtual void wi_fold (irange &r, tree type,
2594 const wide_int &lh_lb,
2595 const wide_int &lh_ub,
2596 const wide_int &rh_lb,
2597 const wide_int &rh_ub) const final override;
2598 virtual bool wi_op_overflows (wide_int &res,
2599 tree type,
2600 const wide_int &w0,
2601 const wide_int &w1) const final override;
2602 virtual bool op1_range (irange &, tree type, const irange &lhs,
2603 const irange &op2, relation_trio rel = TRIO_VARYING)
2604 const final override;
2605 virtual relation_kind lhs_op1_relation (const irange &lhs, const irange &op1,
2606 const irange &op2, relation_kind rel)
2607 const final override;
2608 void update_bitmask (irange &r, const irange &lh,
2609 const irange &rh) const final override
2610 { update_known_bitmask (r, RSHIFT_EXPR, lh, rh); }
2611 // Check compatibility of LHS and op1.
2612 bool operand_check_p (tree t1, tree t2, tree) const final override
2613 { return range_compatible_p (t1, t2); }
2614 } op_rshift;
2617 relation_kind
2618 operator_rshift::lhs_op1_relation (const irange &lhs ATTRIBUTE_UNUSED,
2619 const irange &op1,
2620 const irange &op2,
2621 relation_kind) const
2623 // If both operands range are >= 0, then the LHS <= op1.
2624 if (!op1.undefined_p () && !op2.undefined_p ()
2625 && wi::ge_p (op1.lower_bound (), 0, TYPE_SIGN (op1.type ()))
2626 && wi::ge_p (op2.lower_bound (), 0, TYPE_SIGN (op2.type ())))
2627 return VREL_LE;
2628 return VREL_VARYING;
2631 bool
2632 operator_lshift::fold_range (irange &r, tree type,
2633 const irange &op1,
2634 const irange &op2,
2635 relation_trio rel) const
2637 int_range_max shift_range;
2638 if (!get_shift_range (shift_range, type, op2))
2640 if (op2.undefined_p ())
2641 r.set_undefined ();
2642 else
2643 r.set_zero (type);
2644 return true;
2647 // Transform left shifts by constants into multiplies.
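// For example (illustrative): [1, 4] << [3, 3] is folded as the wrapping
// multiply [1, 4] * [8, 8], giving [8, 32].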
2648 if (shift_range.singleton_p ())
2650 unsigned shift = shift_range.lower_bound ().to_uhwi ();
2651 wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
2652 int_range<1> mult (type, tmp, tmp);
2654 // Force wrapping multiplication.
2655 bool saved_flag_wrapv = flag_wrapv;
2656 bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
2657 flag_wrapv = 1;
2658 flag_wrapv_pointer = 1;
2659 bool b = op_mult.fold_range (r, type, op1, mult);
2660 flag_wrapv = saved_flag_wrapv;
2661 flag_wrapv_pointer = saved_flag_wrapv_pointer;
2662 return b;
2664 else
2665 // Otherwise, invoke the generic fold routine.
2666 return range_operator::fold_range (r, type, op1, shift_range, rel);
2669 void
2670 operator_lshift::wi_fold (irange &r, tree type,
2671 const wide_int &lh_lb, const wide_int &lh_ub,
2672 const wide_int &rh_lb, const wide_int &rh_ub) const
2674 signop sign = TYPE_SIGN (type);
2675 unsigned prec = TYPE_PRECISION (type);
2676 int overflow_pos = sign == SIGNED ? prec - 1 : prec;
2677 int bound_shift = overflow_pos - rh_ub.to_shwi ();
2678 // If bound_shift == HOST_BITS_PER_WIDE_INT, the left shift can
2679 // overflow. However, for that to happen, rh.max needs to be zero,
2680 // which means rh is a singleton range of zero, which means we simply return
2681 // [lh_lb, lh_ub] as the range.
2682 if (wi::eq_p (rh_ub, rh_lb) && wi::eq_p (rh_ub, 0))
2684 r = int_range<2> (type, lh_lb, lh_ub);
2685 return;
2688 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2689 wide_int complement = ~(bound - 1);
2690 wide_int low_bound, high_bound;
2691 bool in_bounds = false;
2693 if (sign == UNSIGNED)
2695 low_bound = bound;
2696 high_bound = complement;
2697 if (wi::ltu_p (lh_ub, low_bound))
2699 // [5, 6] << [1, 2] == [10, 24].
2700 // We're shifting out only zeroes, the value increases
2701 // monotonically.
2702 in_bounds = true;
2704 else if (wi::ltu_p (high_bound, lh_lb))
2706 // [0xffffff00, 0xffffffff] << [1, 2]
2707 // == [0xfffffc00, 0xfffffffe].
2708 // We're shifting out only ones, the value decreases
2709 // monotonically.
2710 in_bounds = true;
2713 else
2715 // [-1, 1] << [1, 2] == [-4, 4]
2716 low_bound = complement;
2717 high_bound = bound;
2718 if (wi::lts_p (lh_ub, high_bound)
2719 && wi::lts_p (low_bound, lh_lb))
2721 // For non-negative numbers, we're shifting out only zeroes,
2722 // the value increases monotonically. For negative numbers,
2723 // we're shifting out only ones, the value decreases
2724 // monotonically.
2725 in_bounds = true;
2729 if (in_bounds)
2730 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2731 else
2732 r.set_varying (type);
2735 bool
2736 operator_lshift::wi_op_overflows (wide_int &res, tree type,
2737 const wide_int &w0, const wide_int &w1) const
2739 signop sign = TYPE_SIGN (type);
2740 if (wi::neg_p (w1))
2742 // It's unclear from the C standard whether shifts can overflow.
2743 // The following code ignores overflow; perhaps a C standard
2744 // interpretation ruling is needed.
2745 res = wi::rshift (w0, -w1, sign);
2747 else
2748 res = wi::lshift (w0, w1);
2749 return false;
2752 bool
2753 operator_lshift::op1_range (irange &r,
2754 tree type,
2755 const irange &lhs,
2756 const irange &op2,
2757 relation_trio) const
2759 if (lhs.undefined_p ())
2760 return false;
2762 if (!contains_zero_p (lhs))
2763 r.set_nonzero (type);
2764 else
2765 r.set_varying (type);
2767 wide_int shift;
2768 if (op2.singleton_p (shift))
2770 if (wi::lt_p (shift, 0, SIGNED))
2771 return false;
2772 if (wi::ge_p (shift, wi::uhwi (TYPE_PRECISION (type),
2773 TYPE_PRECISION (op2.type ())),
2774 UNSIGNED))
2775 return false;
2776 if (shift == 0)
2778 r.intersect (lhs);
2779 return true;
2782 // Work completely in unsigned mode to start.
2783 tree utype = type;
2784 int_range_max tmp_range;
2785 if (TYPE_SIGN (type) == SIGNED)
2787 int_range_max tmp = lhs;
2788 utype = unsigned_type_for (type);
2789 range_cast (tmp, utype);
2790 op_rshift.fold_range (tmp_range, utype, tmp, op2);
2792 else
2793 op_rshift.fold_range (tmp_range, utype, lhs, op2);
2795 // Start with ranges which can produce the LHS by right shifting the
2796 // result by the shift amount.
2797 // ie [0x08, 0xF0] = op1 << 2 will start with
2798 // [00001000, 11110000] = op1 << 2
2799 // [0x02, 0x3C] aka [00000010, 00111100]
2801 // Then create a range from the LB with the least significant upper bit
2802 // set, to the upper bound with all the bits set.
2803 // This would be [0x42, 0xFC] aka [01000010, 11111100].
2805 // Ideally we do this for each subrange, but just lump them all for now.
2806 unsigned low_bits = TYPE_PRECISION (utype) - shift.to_uhwi ();
2807 wide_int up_mask = wi::mask (low_bits, true, TYPE_PRECISION (utype));
2808 wide_int new_ub = wi::bit_or (up_mask, tmp_range.upper_bound ());
2809 wide_int new_lb = wi::set_bit (tmp_range.lower_bound (), low_bits);
2810 int_range<2> fill_range (utype, new_lb, new_ub);
2811 tmp_range.union_ (fill_range);
2813 if (utype != type)
2814 range_cast (tmp_range, type);
2816 r.intersect (tmp_range);
2817 return true;
2820 return !r.varying_p ();
2823 bool
2824 operator_rshift::op1_range (irange &r,
2825 tree type,
2826 const irange &lhs,
2827 const irange &op2,
2828 relation_trio) const
2830 if (lhs.undefined_p ())
2831 return false;
2832 wide_int shift;
2833 if (op2.singleton_p (shift))
2835 // Ignore nonsensical shifts.
2836 unsigned prec = TYPE_PRECISION (type);
2837 if (wi::ge_p (shift,
2838 wi::uhwi (prec, TYPE_PRECISION (op2.type ())),
2839 UNSIGNED))
2840 return false;
2841 if (shift == 0)
2843 r = lhs;
2844 return true;
2847 // Folding the original operation may discard some impossible
2848 // ranges from the LHS.
2849 int_range_max lhs_refined;
2850 op_rshift.fold_range (lhs_refined, type, int_range<1> (type), op2);
2851 lhs_refined.intersect (lhs);
2852 if (lhs_refined.undefined_p ())
2854 r.set_undefined ();
2855 return true;
2857 int_range_max shift_range (op2.type (), shift, shift);
2858 int_range_max lb, ub;
2859 op_lshift.fold_range (lb, type, lhs_refined, shift_range);
2860 // LHS
2861 // 0000 0111 = OP1 >> 3
2863 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
2864 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
2865 // right hand side (0x07).
2866 wide_int mask = wi::bit_not (wi::lshift (wi::minus_one (prec), shift));
2867 int_range_max mask_range (type,
2868 wi::zero (TYPE_PRECISION (type)),
2869 mask);
2870 op_plus.fold_range (ub, type, lb, mask_range);
2871 r = lb;
2872 r.union_ (ub);
2873 if (!contains_zero_p (lhs_refined))
2875 mask_range.invert ();
2876 r.intersect (mask_range);
2878 return true;
2880 return false;
2883 bool
2884 operator_rshift::wi_op_overflows (wide_int &res,
2885 tree type,
2886 const wide_int &w0,
2887 const wide_int &w1) const
2889 signop sign = TYPE_SIGN (type);
2890 if (wi::neg_p (w1))
2891 res = wi::lshift (w0, -w1);
2892 else
2894 // It's unclear from the C standard whether shifts can overflow.
2895 // The following code ignores overflow; perhaps a C standard
2896 // interpretation ruling is needed.
2897 res = wi::rshift (w0, w1, sign);
2899 return false;
2902 bool
2903 operator_rshift::fold_range (irange &r, tree type,
2904 const irange &op1,
2905 const irange &op2,
2906 relation_trio rel) const
2908 int_range_max shift;
2909 if (!get_shift_range (shift, type, op2))
2911 if (op2.undefined_p ())
2912 r.set_undefined ();
2913 else
2914 r.set_zero (type);
2915 return true;
2918 return range_operator::fold_range (r, type, op1, shift, rel);
2921 void
2922 operator_rshift::wi_fold (irange &r, tree type,
2923 const wide_int &lh_lb, const wide_int &lh_ub,
2924 const wide_int &rh_lb, const wide_int &rh_ub) const
2926 wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
2930 // Add a partial equivalence between the LHS and op1 for casts.
2932 relation_kind
2933 operator_cast::lhs_op1_relation (const irange &lhs,
2934 const irange &op1,
2935 const irange &op2 ATTRIBUTE_UNUSED,
2936 relation_kind) const
2938 if (lhs.undefined_p () || op1.undefined_p ())
2939 return VREL_VARYING;
2940 unsigned lhs_prec = TYPE_PRECISION (lhs.type ());
2941 unsigned op1_prec = TYPE_PRECISION (op1.type ());
2942 // If the result gets sign extended into a larger type check first if this
2943 // qualifies as a partial equivalence.
2944 if (TYPE_SIGN (op1.type ()) == SIGNED && lhs_prec > op1_prec)
2946 // If the result is sign extended, and the LHS is larger than op1,
2947 // check if op1's range can be negative as the sign extension will
2948 // cause the upper bits to be 1 instead of 0, invalidating the PE.
2949 int_range<3> negs = range_negatives (op1.type ());
2950 negs.intersect (op1);
2951 if (!negs.undefined_p ())
2952 return VREL_VARYING;
2955 unsigned prec = MIN (lhs_prec, op1_prec);
2956 return bits_to_pe (prec);
2959 // Return TRUE if casting from INNER to OUTER is a truncating cast.
2961 inline bool
2962 operator_cast::truncating_cast_p (const irange &inner,
2963 const irange &outer) const
2965 return TYPE_PRECISION (outer.type ()) < TYPE_PRECISION (inner.type ());
2968 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
2970 bool
2971 operator_cast::inside_domain_p (const wide_int &min,
2972 const wide_int &max,
2973 const irange &range) const
2975 wide_int domain_min = irange_val_min (range.type ());
2976 wide_int domain_max = irange_val_max (range.type ());
2977 signop domain_sign = TYPE_SIGN (range.type ());
2978 return (wi::le_p (min, domain_max, domain_sign)
2979 && wi::le_p (max, domain_max, domain_sign)
2980 && wi::ge_p (min, domain_min, domain_sign)
2981 && wi::ge_p (max, domain_min, domain_sign));
2985 // Helper for fold_range which works on one pair at a time.
2987 void
2988 operator_cast::fold_pair (irange &r, unsigned index,
2989 const irange &inner,
2990 const irange &outer) const
2992 tree inner_type = inner.type ();
2993 tree outer_type = outer.type ();
2994 signop inner_sign = TYPE_SIGN (inner_type);
2995 unsigned outer_prec = TYPE_PRECISION (outer_type);
2997 // Check to see if casting from INNER to OUTER is a conversion that
2998 // fits in the resulting OUTER type.
2999 wide_int inner_lb = inner.lower_bound (index);
3000 wide_int inner_ub = inner.upper_bound (index);
3001 if (truncating_cast_p (inner, outer))
3003 // We may be able to accommodate a truncating cast if the
3004 // resulting range can be represented in the target type...
3005 if (wi::rshift (wi::sub (inner_ub, inner_lb),
3006 wi::uhwi (outer_prec, TYPE_PRECISION (inner.type ())),
3007 inner_sign) != 0)
3009 r.set_varying (outer_type);
3010 return;
3013 // ...but we must still verify that the final range fits in the
3014 // domain. This catches -fstrict-enums restrictions where the domain
3015 // range is smaller than what fits in the underlying type.
3016 wide_int min = wide_int::from (inner_lb, outer_prec, inner_sign);
3017 wide_int max = wide_int::from (inner_ub, outer_prec, inner_sign);
3018 if (inside_domain_p (min, max, outer))
3019 create_possibly_reversed_range (r, outer_type, min, max);
3020 else
3021 r.set_varying (outer_type);
3025 bool
3026 operator_cast::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3027 const irange &inner,
3028 const irange &outer,
3029 relation_trio) const
3031 if (empty_range_varying (r, type, inner, outer))
3032 return true;
3034 gcc_checking_assert (outer.varying_p ());
3035 gcc_checking_assert (inner.num_pairs () > 0);
3037 // Avoid a temporary by folding the first pair directly into the result.
3038 fold_pair (r, 0, inner, outer);
3040 // Then process any additional pairs by unioning with their results.
3041 for (unsigned x = 1; x < inner.num_pairs (); ++x)
3043 int_range_max tmp;
3044 fold_pair (tmp, x, inner, outer);
3045 r.union_ (tmp);
3046 if (r.varying_p ())
3047 return true;
3050 update_bitmask (r, inner, outer);
3051 return true;
3054 void
3055 operator_cast::update_bitmask (irange &r, const irange &lh,
3056 const irange &rh) const
3058 update_known_bitmask (r, CONVERT_EXPR, lh, rh);
3061 bool
3062 operator_cast::op1_range (irange &r, tree type,
3063 const irange &lhs,
3064 const irange &op2,
3065 relation_trio) const
3067 if (lhs.undefined_p ())
3068 return false;
3069 tree lhs_type = lhs.type ();
3070 gcc_checking_assert (types_compatible_p (op2.type(), type));
3072 // If we are calculating a pointer, shortcut to what we really care about.
3073 if (POINTER_TYPE_P (type))
3075 // Conversions from other pointers or from a constant (including 0/NULL)
3076 // are straightforward.
3077 if (POINTER_TYPE_P (lhs.type ())
3078 || (lhs.singleton_p ()
3079 && TYPE_PRECISION (lhs.type ()) >= TYPE_PRECISION (type)))
3081 r = lhs;
3082 range_cast (r, type);
3084 else
3086 // If the LHS is not a pointer nor a singleton, then it is
3087 // either VARYING or non-zero.
3088 if (!lhs.undefined_p () && !contains_zero_p (lhs))
3089 r.set_nonzero (type);
3090 else
3091 r.set_varying (type);
3093 r.intersect (op2);
3094 return true;
3097 if (truncating_cast_p (op2, lhs))
3099 if (lhs.varying_p ())
3100 r.set_varying (type);
3101 else
3103 // We want to insert the LHS as an unsigned value since it
3104 // would not set the sign bit of the larger type.
3105 int_range_max converted_lhs = lhs;
3106 range_cast (converted_lhs, unsigned_type_for (lhs_type));
3107 range_cast (converted_lhs, type);
3108 // Start by building the positive signed outer range for the type.
3109 wide_int lim = wi::set_bit_in_zero (TYPE_PRECISION (lhs_type),
3110 TYPE_PRECISION (type));
3111 create_possibly_reversed_range (r, type, lim,
3112 wi::max_value (TYPE_PRECISION (type),
3113 SIGNED));
3114 // For the signed part, we need to simply union the 2 ranges now.
3115 r.union_ (converted_lhs);
3117 // Create maximal negative number outside of LHS bits.
3118 lim = wi::mask (TYPE_PRECISION (lhs_type), true,
3119 TYPE_PRECISION (type));
3120 // Add this to the unsigned LHS range(s).
3121 int_range_max lim_range (type, lim, lim);
3122 int_range_max lhs_neg;
3123 range_op_handler (PLUS_EXPR).fold_range (lhs_neg, type,
3124 converted_lhs, lim_range);
3125 // lhs_neg now has all the negative versions of the LHS.
3126 // Now union in all the values from SIGNED MIN (0x80000) to
3127 // lim-1 in order to fill in all the ranges with the upper
3128 // bits set.
3130 // PR 97317. If the lhs has only 1 bit less precision than the rhs,
3131 // we don't need to create a range from min to lim-1, as calculating
3132 // the negative range would trap trying to create [lim, lim - 1].
3133 wide_int min_val = wi::min_value (TYPE_PRECISION (type), SIGNED);
3134 if (lim != min_val)
3136 int_range_max neg (type,
3137 wi::min_value (TYPE_PRECISION (type),
3138 SIGNED),
3139 lim - 1);
3140 lhs_neg.union_ (neg);
3142 // And finally, munge the signed and unsigned portions.
3143 r.union_ (lhs_neg);
3145 // And intersect with any known value passed in the extra operand.
3146 r.intersect (op2);
3147 return true;
3150 int_range_max tmp;
3151 if (TYPE_PRECISION (lhs_type) == TYPE_PRECISION (type))
3152 tmp = lhs;
3153 else
3155 // The cast is not truncating, and the range is restricted to
3156 // the range of the RHS by this assignment.
3158 // Cast the range of the RHS to the type of the LHS.
3159 fold_range (tmp, lhs_type, int_range<1> (type), int_range<1> (lhs_type));
3160 // Intersecting this with the LHS range will produce the range,
3161 // which will be cast to the RHS type before returning.
3162 tmp.intersect (lhs);
3165 // Cast the calculated range to the type of the RHS.
3166 fold_range (r, type, tmp, int_range<1> (type));
3167 return true;
3171 class operator_logical_and : public range_operator
3173 using range_operator::fold_range;
3174 using range_operator::op1_range;
3175 using range_operator::op2_range;
3176 public:
3177 virtual bool fold_range (irange &r, tree type,
3178 const irange &lh,
3179 const irange &rh,
3180 relation_trio rel = TRIO_VARYING) const;
3181 virtual bool op1_range (irange &r, tree type,
3182 const irange &lhs,
3183 const irange &op2,
3184 relation_trio rel = TRIO_VARYING) const;
3185 virtual bool op2_range (irange &r, tree type,
3186 const irange &lhs,
3187 const irange &op1,
3188 relation_trio rel = TRIO_VARYING) const;
3189 // Check compatibility of all operands.
3190 bool operand_check_p (tree t1, tree t2, tree t3) const final override
3191 { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
3192 } op_logical_and;
3194 bool
3195 operator_logical_and::fold_range (irange &r, tree type,
3196 const irange &lh,
3197 const irange &rh,
3198 relation_trio) const
3200 if (empty_range_varying (r, type, lh, rh))
3201 return true;
3203 // Precision of LHS and both operands must match.
3204 if (TYPE_PRECISION (lh.type ()) != TYPE_PRECISION (type)
3205 || TYPE_PRECISION (type) != TYPE_PRECISION (rh.type ()))
3206 return false;
3208 // 0 && anything is 0.
3209 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
3210 || (wi::eq_p (rh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
3211 r = range_false (type);
3212 else if (contains_zero_p (lh) || contains_zero_p (rh))
3213 // To reach this point, there must be a logical 1 on each side, and
3214 // the only remaining question is whether there is a zero or not.
3215 r = range_true_and_false (type);
3216 else
3217 r = range_true (type);
3218 return true;
3221 bool
3222 operator_logical_and::op1_range (irange &r, tree type,
3223 const irange &lhs,
3224 const irange &op2 ATTRIBUTE_UNUSED,
3225 relation_trio) const
3227 switch (get_bool_state (r, lhs, type))
3229 case BRS_TRUE:
3230 // A true result means both sides of the AND must be true.
3231 r = range_true (type);
3232 break;
3233 default:
3234 // Any other result means only one side has to be false, the
3235 // other side can be anything. So we cannot be sure of any
3236 // result here.
3237 r = range_true_and_false (type);
3238 break;
3240 return true;
3243 bool
3244 operator_logical_and::op2_range (irange &r, tree type,
3245 const irange &lhs,
3246 const irange &op1,
3247 relation_trio) const
3249 return operator_logical_and::op1_range (r, type, lhs, op1);
3253 void
3254 operator_bitwise_and::update_bitmask (irange &r, const irange &lh,
3255 const irange &rh) const
3257 update_known_bitmask (r, BIT_AND_EXPR, lh, rh);
3260 // Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
3261 // by considering the number of leading redundant sign bit copies.
3262 // clrsb (X op Y) = min (clrsb (X), clrsb (Y)), so for example
3263 // [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
3264 static bool
3265 wi_optimize_signed_bitwise_op (irange &r, tree type,
3266 const wide_int &lh_lb, const wide_int &lh_ub,
3267 const wide_int &rh_lb, const wide_int &rh_ub)
3269 int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
3270 int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
3271 int new_clrsb = MIN (lh_clrsb, rh_clrsb);
3272 if (new_clrsb == 0)
3273 return false;
3274 int type_prec = TYPE_PRECISION (type);
3275 int rprec = (type_prec - new_clrsb) - 1;
3276 value_range_with_overflow (r, type,
3277 wi::mask (rprec, true, type_prec),
3278 wi::mask (rprec, false, type_prec));
3279 return true;
3282 // An AND of 8, 16, 32 or 64 bits can produce a partial equivalence between
3283 // the LHS and op1.
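// For example (illustrative): lhs = op1 & 0xff leaves the low 8 bits of the
// LHS equal to the low 8 bits of op1, a partial equivalence on those bits.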
3285 relation_kind
3286 operator_bitwise_and::lhs_op1_relation (const irange &lhs,
3287 const irange &op1,
3288 const irange &op2,
3289 relation_kind) const
3291 if (lhs.undefined_p () || op1.undefined_p () || op2.undefined_p ())
3292 return VREL_VARYING;
3293 if (!op2.singleton_p ())
3294 return VREL_VARYING;
3295 // If val == 0xff, 0xffff, 0xffffffff or 0xffffffffffffffff, return TRUE.
3296 int prec1 = TYPE_PRECISION (op1.type ());
3297 int prec2 = TYPE_PRECISION (op2.type ());
3298 int mask_prec = 0;
3299 wide_int mask = op2.lower_bound ();
3300 if (wi::eq_p (mask, wi::mask (8, false, prec2)))
3301 mask_prec = 8;
3302 else if (wi::eq_p (mask, wi::mask (16, false, prec2)))
3303 mask_prec = 16;
3304 else if (wi::eq_p (mask, wi::mask (32, false, prec2)))
3305 mask_prec = 32;
3306 else if (wi::eq_p (mask, wi::mask (64, false, prec2)))
3307 mask_prec = 64;
3308 return bits_to_pe (MIN (prec1, mask_prec));
3311 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
3312 // possible. Basically, see if we can optimize:
3314 // [LB, UB] op Z
3315 // into:
3316 // [LB op Z, UB op Z]
3318 // If the optimization was successful, accumulate the range in R and
3319 // return TRUE.
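// Illustrative example: [0x10, 0x1F] & 0xF0 clears the low 4 bits, and the
// bits above them are the same for every value in the range, so the result
// is simply [0x10 & 0xF0, 0x1F & 0xF0] = [0x10, 0x10].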
3321 static bool
3322 wi_optimize_and_or (irange &r,
3323 enum tree_code code,
3324 tree type,
3325 const wide_int &lh_lb, const wide_int &lh_ub,
3326 const wide_int &rh_lb, const wide_int &rh_ub)
3328 // Calculate the singleton mask among the ranges, if any.
3329 wide_int lower_bound, upper_bound, mask;
3330 if (wi::eq_p (rh_lb, rh_ub))
3332 mask = rh_lb;
3333 lower_bound = lh_lb;
3334 upper_bound = lh_ub;
3336 else if (wi::eq_p (lh_lb, lh_ub))
3338 mask = lh_lb;
3339 lower_bound = rh_lb;
3340 upper_bound = rh_ub;
3342 else
3343 return false;
3345 // If Z is a constant which (for op | its bitwise not) has n
3346 // consecutive least significant bits cleared followed by m consecutive
3347 // 1 bits set immediately above it, and either
3348 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
3350 // The least significant n bits of all the values in the range are
3351 // cleared or set, the m bits above it are preserved and any bits
3352 // above these are required to be the same for all values in the
3353 // range.
3354 wide_int w = mask;
3355 int m = 0, n = 0;
3356 if (code == BIT_IOR_EXPR)
3357 w = ~w;
3358 if (wi::eq_p (w, 0))
3359 n = w.get_precision ();
3360 else
3362 n = wi::ctz (w);
3363 w = ~(w | wi::mask (n, false, w.get_precision ()));
3364 if (wi::eq_p (w, 0))
3365 m = w.get_precision () - n;
3366 else
3367 m = wi::ctz (w) - n;
3369 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
3370 if ((new_mask & lower_bound) != (new_mask & upper_bound))
3371 return false;
3373 wide_int res_lb, res_ub;
3374 if (code == BIT_AND_EXPR)
3376 res_lb = wi::bit_and (lower_bound, mask);
3377 res_ub = wi::bit_and (upper_bound, mask);
3379 else if (code == BIT_IOR_EXPR)
3381 res_lb = wi::bit_or (lower_bound, mask);
3382 res_ub = wi::bit_or (upper_bound, mask);
3384 else
3385 gcc_unreachable ();
3386 value_range_with_overflow (r, type, res_lb, res_ub);
3388 // Furthermore, if the mask is non-zero, an IOR cannot contain zero.
3389 if (code == BIT_IOR_EXPR && wi::ne_p (mask, 0))
3391 int_range<2> tmp;
3392 tmp.set_nonzero (type);
3393 r.intersect (tmp);
3395 return true;
3398 // For range [LB, UB] compute two wide_int bit masks.
3400 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
3401 // for all numbers in the range the bit is 0, otherwise it might be 0
3402 // or 1.
3404 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
3405 // for all numbers in the range the bit is 1, otherwise it might be 0
3406 // or 1.
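// Illustrative example: for the range [4, 7] (binary 100 through 111),
// MAYBE_NONZERO is 111 and MUSTBE_NONZERO is 100: bit 2 is set in every
// value of the range, while bits 0 and 1 may be either 0 or 1.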
3408 void
3409 wi_set_zero_nonzero_bits (tree type,
3410 const wide_int &lb, const wide_int &ub,
3411 wide_int &maybe_nonzero,
3412 wide_int &mustbe_nonzero)
3414 signop sign = TYPE_SIGN (type);
3416 if (wi::eq_p (lb, ub))
3417 maybe_nonzero = mustbe_nonzero = lb;
3418 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
3420 wide_int xor_mask = lb ^ ub;
3421 maybe_nonzero = lb | ub;
3422 mustbe_nonzero = lb & ub;
3423 if (xor_mask != 0)
3425 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
3426 maybe_nonzero.get_precision ());
3427 maybe_nonzero = maybe_nonzero | mask;
3428 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
3431 else
3433 maybe_nonzero = wi::minus_one (lb.get_precision ());
3434 mustbe_nonzero = wi::zero (lb.get_precision ());
3438 void
3439 operator_bitwise_and::wi_fold (irange &r, tree type,
3440 const wide_int &lh_lb,
3441 const wide_int &lh_ub,
3442 const wide_int &rh_lb,
3443 const wide_int &rh_ub) const
3445 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3446 return;
3448 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3449 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3450 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3451 maybe_nonzero_lh, mustbe_nonzero_lh);
3452 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3453 maybe_nonzero_rh, mustbe_nonzero_rh);
3455 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
3456 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
3457 signop sign = TYPE_SIGN (type);
3458 unsigned prec = TYPE_PRECISION (type);
3459 // If both input ranges contain only negative values, we can
3460 // truncate the result range maximum to the minimum of the
3461 // input range maxima.
3462 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
3464 new_ub = wi::min (new_ub, lh_ub, sign);
3465 new_ub = wi::min (new_ub, rh_ub, sign);
3467 // If either input range contains only non-negative values
3468 // we can truncate the result range maximum to the respective
3469 // maximum of the input range.
3470 if (wi::ge_p (lh_lb, 0, sign))
3471 new_ub = wi::min (new_ub, lh_ub, sign);
3472 if (wi::ge_p (rh_lb, 0, sign))
3473 new_ub = wi::min (new_ub, rh_ub, sign);
3474 // PR68217: A signed value ANDed with a sign-bit constant should
3475 // result in [-INF, 0] instead of [-INF, INF].
3476 if (wi::gt_p (new_lb, new_ub, sign))
3478 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
3479 if (sign == SIGNED
3480 && ((wi::eq_p (lh_lb, lh_ub)
3481 && !wi::cmps (lh_lb, sign_bit))
3482 || (wi::eq_p (rh_lb, rh_ub)
3483 && !wi::cmps (rh_lb, sign_bit))))
3485 new_lb = wi::min_value (prec, sign);
3486 new_ub = wi::zero (prec);
3489 // If the limits got swapped around, return varying.
3490 if (wi::gt_p (new_lb, new_ub, sign))
3492 if (sign == SIGNED
3493 && wi_optimize_signed_bitwise_op (r, type,
3494 lh_lb, lh_ub,
3495 rh_lb, rh_ub))
3496 return;
3497 r.set_varying (type);
3499 else
3500 value_range_with_overflow (r, type, new_lb, new_ub);
3503 static void
3504 set_nonzero_range_from_mask (irange &r, tree type, const irange &lhs)
3506 if (lhs.undefined_p () || contains_zero_p (lhs))
3507 r.set_varying (type);
3508 else
3509 r.set_nonzero (type);
3512 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
3513 (otherwise return VAL). VAL and MASK must be zero-extended for
3514 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
3515 (to transform signed values into unsigned) and at the end xor
3516 SGNBIT back. */
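// Illustrative example: masked_increment (5, 0b1010, 0, prec) returns 8, the
// smallest value above 5 whose set bits form a subset of the mask (the
// candidates being 2, 8 and 10).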
3518 wide_int
3519 masked_increment (const wide_int &val_in, const wide_int &mask,
3520 const wide_int &sgnbit, unsigned int prec)
3522 wide_int bit = wi::one (prec), res;
3523 unsigned int i;
3525 wide_int val = val_in ^ sgnbit;
3526 for (i = 0; i < prec; i++, bit += bit)
3528 res = mask;
3529 if ((res & bit) == 0)
3530 continue;
3531 res = bit - 1;
3532 res = wi::bit_and_not (val + bit, res);
3533 res &= mask;
3534 if (wi::gtu_p (res, val))
3535 return res ^ sgnbit;
3537 return val ^ sgnbit;
3540 // This was shamelessly stolen from register_edge_assert_for_2 and
3541 // adjusted to work with iranges.
3543 void
3544 operator_bitwise_and::simple_op1_range_solver (irange &r, tree type,
3545 const irange &lhs,
3546 const irange &op2) const
3548 if (!op2.singleton_p ())
3550 set_nonzero_range_from_mask (r, type, lhs);
3551 return;
3553 unsigned int nprec = TYPE_PRECISION (type);
3554 wide_int cst2v = op2.lower_bound ();
3555 bool cst2n = wi::neg_p (cst2v, TYPE_SIGN (type));
3556 wide_int sgnbit;
3557 if (cst2n)
3558 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3559 else
3560 sgnbit = wi::zero (nprec);
3562 // Solve [lhs.lower_bound (), +INF] = x & MASK.
3564 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
3565 // maximum unsigned value is ~0. For signed comparison, if CST2
3566 // doesn't have the most significant bit set, handle it similarly. If
3567 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
3568 wide_int valv = lhs.lower_bound ();
3569 wide_int minv = valv & cst2v, maxv;
3570 bool we_know_nothing = false;
3571 if (minv != valv)
3573 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
3574 minv = masked_increment (valv, cst2v, sgnbit, nprec);
3575 if (minv == valv)
3577 // If we can't determine anything on this bound, fall
3578 // through and conservatively solve for the other end point.
3579 we_know_nothing = true;
3582 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3583 if (we_know_nothing)
3584 r.set_varying (type);
3585 else
3586 create_possibly_reversed_range (r, type, minv, maxv);
3588 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
3590 // Minimum unsigned value for <= is 0 and maximum unsigned value is
3591 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
3592 // VAL2 where
3593 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3594 // as maximum.
3595 // For signed comparison, if CST2 doesn't have most significant bit
3596 // set, handle it similarly. If CST2 has MSB set, the maximum is
3597 // the same and minimum is INT_MIN.
3598 valv = lhs.upper_bound ();
3599 minv = valv & cst2v;
3600 if (minv == valv)
3601 maxv = valv;
3602 else
3604 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3605 if (maxv == valv)
3607 // If we couldn't determine anything on either bound, return
3608 // undefined.
3609 if (we_know_nothing)
3610 r.set_undefined ();
3611 return;
3613 maxv -= 1;
3615 maxv |= ~cst2v;
3616 minv = sgnbit;
3617 int_range<2> upper_bits;
3618 create_possibly_reversed_range (upper_bits, type, minv, maxv);
3619 r.intersect (upper_bits);
3622 bool
3623 operator_bitwise_and::op1_range (irange &r, tree type,
3624 const irange &lhs,
3625 const irange &op2,
3626 relation_trio) const
3628 if (lhs.undefined_p ())
3629 return false;
3630 if (types_compatible_p (type, boolean_type_node))
3631 return op_logical_and.op1_range (r, type, lhs, op2);
3633 r.set_undefined ();
3634 for (unsigned i = 0; i < lhs.num_pairs (); ++i)
3636 int_range_max chunk (lhs.type (),
3637 lhs.lower_bound (i),
3638 lhs.upper_bound (i));
3639 int_range_max res;
3640 simple_op1_range_solver (res, type, chunk, op2);
3641 r.union_ (res);
3643 if (r.undefined_p ())
3644 set_nonzero_range_from_mask (r, type, lhs);
3646 // For MASK == op1 & MASK, all the bits in MASK must be set in op1.
3647 wide_int mask;
3648 if (lhs == op2 && lhs.singleton_p (mask))
3650 r.update_bitmask (irange_bitmask (mask, ~mask));
3651 return true;
3654 // For 0 = op1 & MASK, op1 is ~MASK.
3655 if (lhs.zero_p () && op2.singleton_p ())
3657 wide_int nz = wi::bit_not (op2.get_nonzero_bits ());
3658 int_range<2> tmp (type);
3659 tmp.set_nonzero_bits (nz);
3660 r.intersect (tmp);
3662 return true;
3665 bool
3666 operator_bitwise_and::op2_range (irange &r, tree type,
3667 const irange &lhs,
3668 const irange &op1,
3669 relation_trio) const
3671 return operator_bitwise_and::op1_range (r, type, lhs, op1);
3675 class operator_logical_or : public range_operator
3677 using range_operator::fold_range;
3678 using range_operator::op1_range;
3679 using range_operator::op2_range;
3680 public:
3681 virtual bool fold_range (irange &r, tree type,
3682 const irange &lh,
3683 const irange &rh,
3684 relation_trio rel = TRIO_VARYING) const;
3685 virtual bool op1_range (irange &r, tree type,
3686 const irange &lhs,
3687 const irange &op2,
3688 relation_trio rel = TRIO_VARYING) const;
3689 virtual bool op2_range (irange &r, tree type,
3690 const irange &lhs,
3691 const irange &op1,
3692 relation_trio rel = TRIO_VARYING) const;
3693 // Check compatibility of all operands.
3694 bool operand_check_p (tree t1, tree t2, tree t3) const final override
3695 { return range_compatible_p (t1, t2) && range_compatible_p (t1, t3); }
3696 } op_logical_or;
3698 bool
3699 operator_logical_or::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
3700 const irange &lh,
3701 const irange &rh,
3702 relation_trio) const
3704 if (empty_range_varying (r, type, lh, rh))
3705 return true;
3707 r = lh;
3708 r.union_ (rh);
3709 return true;
3712 bool
3713 operator_logical_or::op1_range (irange &r, tree type,
3714 const irange &lhs,
3715 const irange &op2 ATTRIBUTE_UNUSED,
3716 relation_trio) const
3718 switch (get_bool_state (r, lhs, type))
3720 case BRS_FALSE:
3721 // A false result means both sides of the OR must be false.
3722 r = range_false (type);
3723 break;
3724 default:
3725 // Any other result means only one side has to be true, the
3726 // other side can be anything. So we can't be sure of any result
3727 // here.
3728 r = range_true_and_false (type);
3729 break;
3731 return true;
3734 bool
3735 operator_logical_or::op2_range (irange &r, tree type,
3736 const irange &lhs,
3737 const irange &op1,
3738 relation_trio) const
3740 return operator_logical_or::op1_range (r, type, lhs, op1);
3744 void
3745 operator_bitwise_or::update_bitmask (irange &r, const irange &lh,
3746 const irange &rh) const
3748 update_known_bitmask (r, BIT_IOR_EXPR, lh, rh);
3751 void
3752 operator_bitwise_or::wi_fold (irange &r, tree type,
3753 const wide_int &lh_lb,
3754 const wide_int &lh_ub,
3755 const wide_int &rh_lb,
3756 const wide_int &rh_ub) const
3758 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
3759 return;
3761 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3762 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3763 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3764 maybe_nonzero_lh, mustbe_nonzero_lh);
3765 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3766 maybe_nonzero_rh, mustbe_nonzero_rh);
3767 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
3768 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
3769 signop sign = TYPE_SIGN (type);
3770 // If the input ranges contain only positive values we can
3771 // truncate the minimum of the result range to the maximum
3772 // of the input range minima.
3773 if (wi::ge_p (lh_lb, 0, sign)
3774 && wi::ge_p (rh_lb, 0, sign))
3776 new_lb = wi::max (new_lb, lh_lb, sign);
3777 new_lb = wi::max (new_lb, rh_lb, sign);
3779 // If either input range contains only negative values
3780 // we can truncate the minimum of the result range to the
3781 // respective input range minimum.
3782 if (wi::lt_p (lh_ub, 0, sign))
3783 new_lb = wi::max (new_lb, lh_lb, sign);
3784 if (wi::lt_p (rh_ub, 0, sign))
3785 new_lb = wi::max (new_lb, rh_lb, sign);
3786 // If the limits got swapped around, return a conservative range.
3787 if (wi::gt_p (new_lb, new_ub, sign))
3789 // Make sure that nonzero|X is nonzero.
3790 if (wi::gt_p (lh_lb, 0, sign)
3791 || wi::gt_p (rh_lb, 0, sign)
3792 || wi::lt_p (lh_ub, 0, sign)
3793 || wi::lt_p (rh_ub, 0, sign))
3794 r.set_nonzero (type);
3795 else if (sign == SIGNED
3796 && wi_optimize_signed_bitwise_op (r, type,
3797 lh_lb, lh_ub,
3798 rh_lb, rh_ub))
3799 return;
3800 else
3801 r.set_varying (type);
3802 return;
3804 value_range_with_overflow (r, type, new_lb, new_ub);
3807 bool
3808 operator_bitwise_or::op1_range (irange &r, tree type,
3809 const irange &lhs,
3810 const irange &op2,
3811 relation_trio) const
3813 if (lhs.undefined_p ())
3814 return false;
3815 // If this is really a logical wi_fold, call that.
3816 if (types_compatible_p (type, boolean_type_node))
3817 return op_logical_or.op1_range (r, type, lhs, op2);
3819 if (lhs.zero_p ())
3821 r.set_zero (type);
3822 return true;
3824 r.set_varying (type);
3825 return true;
3828 bool
3829 operator_bitwise_or::op2_range (irange &r, tree type,
3830 const irange &lhs,
3831 const irange &op1,
3832 relation_trio) const
3834 return operator_bitwise_or::op1_range (r, type, lhs, op1);
3837 void
3838 operator_bitwise_xor::update_bitmask (irange &r, const irange &lh,
3839 const irange &rh) const
3841 update_known_bitmask (r, BIT_XOR_EXPR, lh, rh);
3844 void
3845 operator_bitwise_xor::wi_fold (irange &r, tree type,
3846 const wide_int &lh_lb,
3847 const wide_int &lh_ub,
3848 const wide_int &rh_lb,
3849 const wide_int &rh_ub) const
3851 signop sign = TYPE_SIGN (type);
3852 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
3853 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
3854 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
3855 maybe_nonzero_lh, mustbe_nonzero_lh);
3856 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
3857 maybe_nonzero_rh, mustbe_nonzero_rh);
3859 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
3860 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
3861 wide_int result_one_bits
3862 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
3863 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
3864 wide_int new_ub = ~result_zero_bits;
3865 wide_int new_lb = result_one_bits;
3867 // If the range has all positive or all negative values, the result
3868 // is better than VARYING.
3869 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
3870 value_range_with_overflow (r, type, new_lb, new_ub);
3871 else if (sign == SIGNED
3872 && wi_optimize_signed_bitwise_op (r, type,
3873 lh_lb, lh_ub,
3874 rh_lb, rh_ub))
3875 ; /* Do nothing. */
3876 else
3877 r.set_varying (type);
3879 /* Furthermore, XOR is non-zero if its arguments can't be equal. */
3880 if (wi::lt_p (lh_ub, rh_lb, sign)
3881 || wi::lt_p (rh_ub, lh_lb, sign)
3882 || wi::ne_p (result_one_bits, 0))
3884 int_range<2> tmp;
3885 tmp.set_nonzero (type);
3886 r.intersect (tmp);
3890 bool
3891 operator_bitwise_xor::op1_op2_relation_effect (irange &lhs_range,
3892 tree type,
3893 const irange &,
3894 const irange &,
3895 relation_kind rel) const
3897 if (rel == VREL_VARYING)
3898 return false;
3900 int_range<2> rel_range;
3902 switch (rel)
3904 case VREL_EQ:
3905 rel_range.set_zero (type);
3906 break;
3907 case VREL_NE:
3908 rel_range.set_nonzero (type);
3909 break;
3910 default:
3911 return false;
3914 lhs_range.intersect (rel_range);
3915 return true;
3918 bool
3919 operator_bitwise_xor::op1_range (irange &r, tree type,
3920 const irange &lhs,
3921 const irange &op2,
3922 relation_trio) const
3924 if (lhs.undefined_p () || lhs.varying_p ())
3926 r = lhs;
3927 return true;
3929 if (types_compatible_p (type, boolean_type_node))
3931 switch (get_bool_state (r, lhs, type))
3933 case BRS_TRUE:
3934 if (op2.varying_p ())
3935 r.set_varying (type);
3936 else if (op2.zero_p ())
3937 r = range_true (type);
3938 // See get_bool_state for the rationale
3939 else if (op2.undefined_p () || contains_zero_p (op2))
3940 r = range_true_and_false (type);
3941 else
3942 r = range_false (type);
3943 break;
3944 case BRS_FALSE:
3945 r = op2;
3946 break;
3947 default:
3948 break;
3950 return true;
3952 r.set_varying (type);
3953 return true;
3956 bool
3957 operator_bitwise_xor::op2_range (irange &r, tree type,
3958 const irange &lhs,
3959 const irange &op1,
3960 relation_trio) const
3962 return operator_bitwise_xor::op1_range (r, type, lhs, op1);
3965 class operator_trunc_mod : public range_operator
3967 using range_operator::op1_range;
3968 using range_operator::op2_range;
3969 using range_operator::update_bitmask;
3970 public:
3971 virtual void wi_fold (irange &r, tree type,
3972 const wide_int &lh_lb,
3973 const wide_int &lh_ub,
3974 const wide_int &rh_lb,
3975 const wide_int &rh_ub) const;
3976 virtual bool op1_range (irange &r, tree type,
3977 const irange &lhs,
3978 const irange &op2,
3979 relation_trio) const;
3980 virtual bool op2_range (irange &r, tree type,
3981 const irange &lhs,
3982 const irange &op1,
3983 relation_trio) const;
3984 void update_bitmask (irange &r, const irange &lh, const irange &rh) const
3985 { update_known_bitmask (r, TRUNC_MOD_EXPR, lh, rh); }
3986 } op_trunc_mod;
3988 void
3989 operator_trunc_mod::wi_fold (irange &r, tree type,
3990 const wide_int &lh_lb,
3991 const wide_int &lh_ub,
3992 const wide_int &rh_lb,
3993 const wide_int &rh_ub) const
3995 wide_int new_lb, new_ub, tmp;
3996 signop sign = TYPE_SIGN (type);
3997 unsigned prec = TYPE_PRECISION (type);
3999 // Mod 0 is undefined.
4000 if (wi_zero_p (type, rh_lb, rh_ub))
4002 r.set_undefined ();
4003 return;
4006 // Check for constant and try to fold.
4007 if (lh_lb == lh_ub && rh_lb == rh_ub)
4009 wi::overflow_type ov = wi::OVF_NONE;
4010 tmp = wi::mod_trunc (lh_lb, rh_lb, sign, &ov);
4011 if (ov == wi::OVF_NONE)
4013 r = int_range<2> (type, tmp, tmp);
4014 return;
4018 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
4019 new_ub = rh_ub - 1;
4020 if (sign == SIGNED)
4022 tmp = -1 - rh_lb;
4023 new_ub = wi::smax (new_ub, tmp);
4026 if (sign == UNSIGNED)
4027 new_lb = wi::zero (prec);
4028 else
4030 new_lb = -new_ub;
4031 tmp = lh_lb;
4032 if (wi::gts_p (tmp, 0))
4033 tmp = wi::zero (prec);
4034 new_lb = wi::smax (new_lb, tmp);
4036 tmp = lh_ub;
4037 if (sign == SIGNED && wi::neg_p (tmp))
4038 tmp = wi::zero (prec);
4039 new_ub = wi::min (new_ub, tmp, sign);
4041 value_range_with_overflow (r, type, new_lb, new_ub);
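// Illustrative trace of the bounds above (not part of the original
// source), for a signed [12, 17] % [5, 5]:
//   new_ub = rh_ub - 1 = 4, then smax (4, -1 - 5) = 4
//   new_lb = -4; lh_lb = 12 > 0 clamps tmp to 0, so smax (-4, 0) = 0
//   lh_ub = 17 is non-negative, so new_ub = min (4, 17) = 4
// giving the result [0, 4], which covers every value of 12..17 mod 5.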
4044 bool
4045 operator_trunc_mod::op1_range (irange &r, tree type,
4046 const irange &lhs,
4047 const irange &,
4048 relation_trio) const
4050 if (lhs.undefined_p ())
4051 return false;
4052 // PR 91029.
4053 signop sign = TYPE_SIGN (type);
4054 unsigned prec = TYPE_PRECISION (type);
4055 // If (a % b) >= x && x > 0, then a >= x.
4056 if (wi::gt_p (lhs.lower_bound (), 0, sign))
4058 r.set (type, lhs.lower_bound (), wi::max_value (prec, sign));
4059 return true;
4061 // If (a % b) <= x && x < 0, then a <= x.
4062 if (wi::lt_p (lhs.upper_bound (), 0, sign))
4064 r.set (type, wi::min_value (prec, sign), lhs.upper_bound ());
4065 return true;
4067 return false;
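// Illustrative example (not part of the original source): if the LHS is
// known to be [3, 10], then (a % b) >= 3 with 3 > 0 implies a >= 3, so
// op1 is narrowed to [3, MAX] for the type.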
4070 bool
4071 operator_trunc_mod::op2_range (irange &r, tree type,
4072 const irange &lhs,
4073 const irange &,
4074 relation_trio) const
4076 if (lhs.undefined_p ())
4077 return false;
4078 // PR 91029.
4079 signop sign = TYPE_SIGN (type);
4080 unsigned prec = TYPE_PRECISION (type);
4081 // If (a % b) >= x && x > 0, then b is in ~[-x, x] for signed
4082 // or b > x for unsigned.
4083 if (wi::gt_p (lhs.lower_bound (), 0, sign))
4085 if (sign == SIGNED)
4086 r.set (type, wi::neg (lhs.lower_bound ()),
4087 lhs.lower_bound (), VR_ANTI_RANGE);
4088 else if (wi::lt_p (lhs.lower_bound (), wi::max_value (prec, sign),
4089 sign))
4090 r.set (type, lhs.lower_bound () + 1, wi::max_value (prec, sign));
4091 else
4092 return false;
4093 return true;
4095 // If (a % b) <= x && x < 0, then b is in ~[x, -x].
4096 if (wi::lt_p (lhs.upper_bound (), 0, sign))
4098 if (wi::gt_p (lhs.upper_bound (), wi::min_value (prec, sign), sign))
4099 r.set (type, lhs.upper_bound (),
4100 wi::neg (lhs.upper_bound ()), VR_ANTI_RANGE);
4101 else
4102 return false;
4103 return true;
4105 return false;
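// Illustrative example for the op2_range deduction above (not part of the
// original source): with a signed LHS of [3, 10], |a % b| < |b| forces
// |b| > 3, so op2 becomes the anti-range ~[-3, 3]; for an unsigned LHS
// it becomes [4, MAX].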
4109 class operator_logical_not : public range_operator
4111 using range_operator::fold_range;
4112 using range_operator::op1_range;
4113 public:
4114 virtual bool fold_range (irange &r, tree type,
4115 const irange &lh,
4116 const irange &rh,
4117 relation_trio rel = TRIO_VARYING) const;
4118 virtual bool op1_range (irange &r, tree type,
4119 const irange &lhs,
4120 const irange &op2,
4121 relation_trio rel = TRIO_VARYING) const;
4122 // Check compatibility of LHS and op1.
4123 bool operand_check_p (tree t1, tree t2, tree) const final override
4124 { return range_compatible_p (t1, t2); }
4125 } op_logical_not;
4127 // Folding a logical NOT, oddly enough, involves doing nothing on the
4128 // forward pass through.  During the initial walk backwards, the
4129 // logical NOT already reversed the desired outcome, so on the way
4130 // forward all we do is pass the range through unchanged.
4132 // b_2 = x_1 < 20
4133 // b_3 = !b_2
4134 // if (b_3)
4135 // to determine the TRUE branch, walking backward
4136 // if (b_3) if ([1,1])
4137 // b_3 = !b_2 [1,1] = ![0,0]
4138 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
4139 // which is the result we are looking for, so just pass it through.
4141 bool
4142 operator_logical_not::fold_range (irange &r, tree type,
4143 const irange &lh,
4144 const irange &rh ATTRIBUTE_UNUSED,
4145 relation_trio) const
4147 if (empty_range_varying (r, type, lh, rh))
4148 return true;
4150 r = lh;
4151 if (!lh.varying_p () && !lh.undefined_p ())
4152 r.invert ();
4154 return true;
4157 bool
4158 operator_logical_not::op1_range (irange &r,
4159 tree type,
4160 const irange &lhs,
4161 const irange &op2,
4162 relation_trio) const
4164 // Logical NOT is involutory... do it again.
4165 return fold_range (r, type, lhs, op2);
4168 bool
4169 operator_bitwise_not::fold_range (irange &r, tree type,
4170 const irange &lh,
4171 const irange &rh,
4172 relation_trio) const
4174 if (empty_range_varying (r, type, lh, rh))
4175 return true;
4177 if (types_compatible_p (type, boolean_type_node))
4178 return op_logical_not.fold_range (r, type, lh, rh);
4180 // ~X is simply -1 - X.
4181 int_range<1> minusone (type, wi::minus_one (TYPE_PRECISION (type)),
4182 wi::minus_one (TYPE_PRECISION (type)));
4183 return range_op_handler (MINUS_EXPR).fold_range (r, type, minusone, lh);
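// Illustrative example (not part of the original source): for X in
// [5, 10], ~X = -1 - X, so folding MINUS_EXPR with [-1, -1] and [5, 10]
// yields [-11, -6].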
4186 bool
4187 operator_bitwise_not::op1_range (irange &r, tree type,
4188 const irange &lhs,
4189 const irange &op2,
4190 relation_trio) const
4192 if (lhs.undefined_p ())
4193 return false;
4194 if (types_compatible_p (type, boolean_type_node))
4195 return op_logical_not.op1_range (r, type, lhs, op2);
4197 // ~X is -1 - X and since bitwise NOT is involutory... do it again.
4198 return fold_range (r, type, lhs, op2);
4201 void
4202 operator_bitwise_not::update_bitmask (irange &r, const irange &lh,
4203 const irange &rh) const
4205 update_known_bitmask (r, BIT_NOT_EXPR, lh, rh);
4209 bool
4210 operator_cst::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4211 const irange &lh,
4212 const irange &rh ATTRIBUTE_UNUSED,
4213 relation_trio) const
4215 r = lh;
4216 return true;
4220 // Determine if there is a relationship between LHS and OP1.
4222 relation_kind
4223 operator_identity::lhs_op1_relation (const irange &lhs,
4224 const irange &op1 ATTRIBUTE_UNUSED,
4225 const irange &op2 ATTRIBUTE_UNUSED,
4226 relation_kind) const
4228 if (lhs.undefined_p ())
4229 return VREL_VARYING;
4230 // Simply a copy, so they are equivalent.
4231 return VREL_EQ;
4234 bool
4235 operator_identity::fold_range (irange &r, tree type ATTRIBUTE_UNUSED,
4236 const irange &lh,
4237 const irange &rh ATTRIBUTE_UNUSED,
4238 relation_trio) const
4240 r = lh;
4241 return true;
4244 bool
4245 operator_identity::op1_range (irange &r, tree type ATTRIBUTE_UNUSED,
4246 const irange &lhs,
4247 const irange &op2 ATTRIBUTE_UNUSED,
4248 relation_trio) const
4250 r = lhs;
4251 return true;
4255 class operator_unknown : public range_operator
4257 using range_operator::fold_range;
4258 public:
4259 virtual bool fold_range (irange &r, tree type,
4260 const irange &op1,
4261 const irange &op2,
4262 relation_trio rel = TRIO_VARYING) const;
4263 } op_unknown;
4265 bool
4266 operator_unknown::fold_range (irange &r, tree type,
4267 const irange &lh ATTRIBUTE_UNUSED,
4268 const irange &rh ATTRIBUTE_UNUSED,
4269 relation_trio) const
4271 r.set_varying (type);
4272 return true;
4276 void
4277 operator_abs::wi_fold (irange &r, tree type,
4278 const wide_int &lh_lb, const wide_int &lh_ub,
4279 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4280 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4282 wide_int min, max;
4283 signop sign = TYPE_SIGN (type);
4284 unsigned prec = TYPE_PRECISION (type);
4286 // Pass through LH for the easy cases.
4287 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
4289 r = int_range<1> (type, lh_lb, lh_ub);
4290 return;
4293 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
4294 // a useful range.
4295 wide_int min_value = wi::min_value (prec, sign);
4296 wide_int max_value = wi::max_value (prec, sign);
4297 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
4299 r.set_varying (type);
4300 return;
4303 // ABS_EXPR may flip the range around, if the original range
4304 // included negative values.
4305 if (wi::eq_p (lh_lb, min_value))
4307 // ABS ([-MIN, -MIN]) isn't representable, but we have traditionally
4308 // returned [-MIN,-MIN] so this preserves that behavior. PR37078
4309 if (wi::eq_p (lh_ub, min_value))
4311 r = int_range<1> (type, min_value, min_value);
4312 return;
4314 min = max_value;
4316 else
4317 min = wi::abs (lh_lb);
4319 if (wi::eq_p (lh_ub, min_value))
4320 max = max_value;
4321 else
4322 max = wi::abs (lh_ub);
4324 // If the range contains zero then we know that the minimum value in the
4325 // range will be zero.
4326 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
4328 if (wi::gt_p (min, max, sign))
4329 max = min;
4330 min = wi::zero (prec);
4332 else
4334 // If the range was reversed, swap MIN and MAX.
4335 if (wi::gt_p (min, max, sign))
4336 std::swap (min, max);
4339 // If the new range has its limits swapped around (MIN > MAX), then
4340 // the operation caused one of them to wrap around. The only thing
4341 // we know is that the result is positive.
4342 if (wi::gt_p (min, max, sign))
4344 min = wi::zero (prec);
4345 max = max_value;
4347 r = int_range<1> (type, min, max);
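// Illustrative trace of the logic above (not part of the original
// source), for a signed ABS ([-7, 3]):
//   min = |lh_lb| = 7, max = |lh_ub| = 3
//   the range contains zero and min > max, so max becomes 7 and min
//   becomes 0, giving the expected result [0, 7].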
4350 bool
4351 operator_abs::op1_range (irange &r, tree type,
4352 const irange &lhs,
4353 const irange &op2,
4354 relation_trio) const
4356 if (empty_range_varying (r, type, lhs, op2))
4357 return true;
4358 if (TYPE_UNSIGNED (type))
4360 r = lhs;
4361 return true;
4363 // Start with the positives because negatives are an impossible result.
4364 int_range_max positives = range_positives (type);
4365 positives.intersect (lhs);
4366 r = positives;
4367 // Then add the negative of each pair:
4368 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
4369 for (unsigned i = 0; i < positives.num_pairs (); ++i)
4370 r.union_ (int_range<1> (type,
4371 -positives.upper_bound (i),
4372 -positives.lower_bound (i)));
4373 // With flag_wrapv, -TYPE_MIN_VALUE = TYPE_MIN_VALUE which is
4374 // unrepresentable. Add -TYPE_MIN_VALUE in this case.
4375 wide_int min_value = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4376 wide_int lb = lhs.lower_bound ();
4377 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lb, min_value))
4378 r.union_ (int_range<2> (type, lb, lb));
4379 return true;
4382 void
4383 operator_abs::update_bitmask (irange &r, const irange &lh,
4384 const irange &rh) const
4386 update_known_bitmask (r, ABS_EXPR, lh, rh);
4389 class operator_absu : public range_operator
4391 using range_operator::update_bitmask;
4392 public:
4393 virtual void wi_fold (irange &r, tree type,
4394 const wide_int &lh_lb, const wide_int &lh_ub,
4395 const wide_int &rh_lb, const wide_int &rh_ub) const;
4396 virtual void update_bitmask (irange &r, const irange &lh,
4397 const irange &rh) const final override;
4398 } op_absu;
4400 void
4401 operator_absu::wi_fold (irange &r, tree type,
4402 const wide_int &lh_lb, const wide_int &lh_ub,
4403 const wide_int &rh_lb ATTRIBUTE_UNUSED,
4404 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
4406 wide_int new_lb, new_ub;
4408 // Pass through VR0 for the easy cases.
4409 if (wi::ges_p (lh_lb, 0))
4411 new_lb = lh_lb;
4412 new_ub = lh_ub;
4414 else
4416 new_lb = wi::abs (lh_lb);
4417 new_ub = wi::abs (lh_ub);
4419 // If the range contains zero then we know that the minimum
4420 // value in the range will be zero.
4421 if (wi::ges_p (lh_ub, 0))
4423 if (wi::gtu_p (new_lb, new_ub))
4424 new_ub = new_lb;
4425 new_lb = wi::zero (TYPE_PRECISION (type));
4427 else
4428 std::swap (new_lb, new_ub);
4431 gcc_checking_assert (TYPE_UNSIGNED (type));
4432 r = int_range<1> (type, new_lb, new_ub);
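// Illustrative example (not part of the original source): ABSU of a
// signed char [-5, 3] takes the else path, giving new_lb = 5, new_ub = 3;
// since lh_ub >= 0 and new_lb > new_ub, new_ub becomes 5 and new_lb
// becomes 0, producing the unsigned result [0, 5].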
4435 void
4436 operator_absu::update_bitmask (irange &r, const irange &lh,
4437 const irange &rh) const
4439 update_known_bitmask (r, ABSU_EXPR, lh, rh);
4443 bool
4444 operator_negate::fold_range (irange &r, tree type,
4445 const irange &lh,
4446 const irange &rh,
4447 relation_trio) const
4449 if (empty_range_varying (r, type, lh, rh))
4450 return true;
4452 // -X is simply 0 - X.
4453 int_range<1> zero;
4454 zero.set_zero (type);
4455 return range_op_handler (MINUS_EXPR).fold_range (r, type, zero, lh);
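// Illustrative example (not part of the original source): negating
// [3, 7] folds 0 - [3, 7] via MINUS_EXPR, producing [-7, -3].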
4458 bool
4459 operator_negate::op1_range (irange &r, tree type,
4460 const irange &lhs,
4461 const irange &op2,
4462 relation_trio) const
4464 // NEGATE is involutory.
4465 return fold_range (r, type, lhs, op2);
4469 bool
4470 operator_addr_expr::fold_range (irange &r, tree type,
4471 const irange &lh,
4472 const irange &rh,
4473 relation_trio) const
4475 if (empty_range_varying (r, type, lh, rh))
4476 return true;
4478 // Return a non-null pointer of the LHS type (passed in op2).
4479 if (lh.zero_p ())
4480 r.set_zero (type);
4481 else if (lh.undefined_p () || contains_zero_p (lh))
4482 r.set_varying (type);
4483 else
4484 r.set_nonzero (type);
4485 return true;
4488 bool
4489 operator_addr_expr::op1_range (irange &r, tree type,
4490 const irange &lhs,
4491 const irange &op2,
4492 relation_trio) const
4494 if (empty_range_varying (r, type, lhs, op2))
4495 return true;
4497 // Return a non-null pointer of the LHS type (passed in op2), but only
4498 // if we can't overflow; otherwise a non-zero offset could wrap to zero.
4499 // See PR 111009.
4500 if (!lhs.undefined_p () && !contains_zero_p (lhs) && TYPE_OVERFLOW_UNDEFINED (type))
4501 r.set_nonzero (type);
4502 else
4503 r.set_varying (type);
4504 return true;
4507 // Initialize the integral operators in the primary table.
4509 void
4510 range_op_table::initialize_integral_ops ()
4512 set (TRUNC_DIV_EXPR, op_trunc_div);
4513 set (FLOOR_DIV_EXPR, op_floor_div);
4514 set (ROUND_DIV_EXPR, op_round_div);
4515 set (CEIL_DIV_EXPR, op_ceil_div);
4516 set (EXACT_DIV_EXPR, op_exact_div);
4517 set (LSHIFT_EXPR, op_lshift);
4518 set (RSHIFT_EXPR, op_rshift);
4519 set (TRUTH_AND_EXPR, op_logical_and);
4520 set (TRUTH_OR_EXPR, op_logical_or);
4521 set (TRUNC_MOD_EXPR, op_trunc_mod);
4522 set (TRUTH_NOT_EXPR, op_logical_not);
4523 set (IMAGPART_EXPR, op_unknown);
4524 set (REALPART_EXPR, op_unknown);
4525 set (ABSU_EXPR, op_absu);
4526 set (OP_WIDEN_MULT_SIGNED, op_widen_mult_signed);
4527 set (OP_WIDEN_MULT_UNSIGNED, op_widen_mult_unsigned);
4528 set (OP_WIDEN_PLUS_SIGNED, op_widen_plus_signed);
4529 set (OP_WIDEN_PLUS_UNSIGNED, op_widen_plus_unsigned);
4533 bool
4534 operator_plus::overflow_free_p (const irange &lh, const irange &rh,
4535 relation_trio) const
4537 if (lh.undefined_p () || rh.undefined_p ())
4538 return false;
4540 tree type = lh.type ();
4541 if (TYPE_OVERFLOW_UNDEFINED (type))
4542 return true;
4544 wi::overflow_type ovf;
4545 signop sgn = TYPE_SIGN (type);
4546 wide_int wmax0 = lh.upper_bound ();
4547 wide_int wmax1 = rh.upper_bound ();
4548 wi::add (wmax0, wmax1, sgn, &ovf);
4549 if (ovf != wi::OVF_NONE)
4550 return false;
4552 if (TYPE_UNSIGNED (type))
4553 return true;
4555 wide_int wmin0 = lh.lower_bound ();
4556 wide_int wmin1 = rh.lower_bound ();
4557 wi::add (wmin0, wmin1, sgn, &ovf);
4558 if (ovf != wi::OVF_NONE)
4559 return false;
4561 return true;
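// Illustrative example (not part of the original source): for unsigned
// char operands [10, 20] and [30, 40], the largest sum 20 + 40 = 60 fits
// in 8 bits, so the addition can never overflow and this returns true;
// for [200, 210] and [100, 110] the sum 210 + 110 would wrap, so it
// returns false.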
4564 bool
4565 operator_minus::overflow_free_p (const irange &lh, const irange &rh,
4566 relation_trio) const
4568 if (lh.undefined_p () || rh.undefined_p ())
4569 return false;
4571 tree type = lh.type ();
4572 if (TYPE_OVERFLOW_UNDEFINED (type))
4573 return true;
4575 wi::overflow_type ovf;
4576 signop sgn = TYPE_SIGN (type);
4577 wide_int wmin0 = lh.lower_bound ();
4578 wide_int wmax1 = rh.upper_bound ();
4579 wi::sub (wmin0, wmax1, sgn, &ovf);
4580 if (ovf != wi::OVF_NONE)
4581 return false;
4583 if (TYPE_UNSIGNED (type))
4584 return true;
4586 wide_int wmax0 = lh.upper_bound ();
4587 wide_int wmin1 = rh.lower_bound ();
4588 wi::sub (wmax0, wmin1, sgn, &ovf);
4589 if (ovf != wi::OVF_NONE)
4590 return false;
4592 return true;
4595 bool
4596 operator_mult::overflow_free_p (const irange &lh, const irange &rh,
4597 relation_trio) const
4599 if (lh.undefined_p () || rh.undefined_p ())
4600 return false;
4602 tree type = lh.type ();
4603 if (TYPE_OVERFLOW_UNDEFINED (type))
4604 return true;
4606 wi::overflow_type ovf;
4607 signop sgn = TYPE_SIGN (type);
4608 wide_int wmax0 = lh.upper_bound ();
4609 wide_int wmax1 = rh.upper_bound ();
4610 wi::mul (wmax0, wmax1, sgn, &ovf);
4611 if (ovf != wi::OVF_NONE)
4612 return false;
4614 if (TYPE_UNSIGNED (type))
4615 return true;
4617 wide_int wmin0 = lh.lower_bound ();
4618 wide_int wmin1 = rh.lower_bound ();
4619 wi::mul (wmin0, wmin1, sgn, &ovf);
4620 if (ovf != wi::OVF_NONE)
4621 return false;
4623 wi::mul (wmin0, wmax1, sgn, &ovf);
4624 if (ovf != wi::OVF_NONE)
4625 return false;
4627 wi::mul (wmax0, wmin1, sgn, &ovf);
4628 if (ovf != wi::OVF_NONE)
4629 return false;
4631 return true;
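// Illustrative example (not part of the original source): for signed char
// operands [-3, 5] and [-4, 6], the four corner products 30, 12, -18 and
// -20 all fit in the type, so the multiplication is overflow-free.  Note
// that when signed overflow is undefined the early return above already
// answers true; the corner products only matter under wrapping semantics.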
4634 #if CHECKING_P
4635 #include "selftest.h"
4637 namespace selftest
4639 #define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
4640 #define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
4641 #define INT16(x) wi::shwi ((x), TYPE_PRECISION (short_integer_type_node))
4642 #define UINT16(x) wi::uhwi ((x), TYPE_PRECISION (short_unsigned_type_node))
4643 #define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))
4644 #define UCHAR(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_char_type_node))
4646 static void
4647 range_op_cast_tests ()
4649 int_range<2> r0, r1, r2, rold;
4650 r0.set_varying (integer_type_node);
4651 wide_int maxint = r0.upper_bound ();
4653 // If a range is in any way outside of the range of the type being
4654 // converted to, default to the full range of the new type.
4655 r0.set_varying (short_integer_type_node);
4656 wide_int minshort = r0.lower_bound ();
4657 wide_int maxshort = r0.upper_bound ();
4658 if (TYPE_PRECISION (integer_type_node)
4659 > TYPE_PRECISION (short_integer_type_node))
4661 r1 = int_range<1> (integer_type_node,
4662 wi::zero (TYPE_PRECISION (integer_type_node)),
4663 maxint);
4664 range_cast (r1, short_integer_type_node);
4665 ASSERT_TRUE (r1.lower_bound () == minshort
4666 && r1.upper_bound() == maxshort);
4669 // (unsigned char)[-5,-1] => [251,255].
4670 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (-1));
4671 range_cast (r0, unsigned_char_type_node);
4672 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node,
4673 UCHAR (251), UCHAR (255)));
4674 range_cast (r0, signed_char_type_node);
4675 ASSERT_TRUE (r0 == rold);
4677 // (signed char)[15, 150] => [-128,-106][15,127].
4678 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (15), UCHAR (150));
4679 range_cast (r0, signed_char_type_node);
4680 r1 = int_range<1> (signed_char_type_node, SCHAR (15), SCHAR (127));
4681 r2 = int_range<1> (signed_char_type_node, SCHAR (-128), SCHAR (-106));
4682 r1.union_ (r2);
4683 ASSERT_TRUE (r1 == r0);
4684 range_cast (r0, unsigned_char_type_node);
4685 ASSERT_TRUE (r0 == rold);
4687 // (unsigned char)[-5, 5] => [0,5][251,255].
4688 r0 = rold = int_range<1> (signed_char_type_node, SCHAR (-5), SCHAR (5));
4689 range_cast (r0, unsigned_char_type_node);
4690 r1 = int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255));
4691 r2 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4692 r1.union_ (r2);
4693 ASSERT_TRUE (r0 == r1);
4694 range_cast (r0, signed_char_type_node);
4695 ASSERT_TRUE (r0 == rold);
4697 // (unsigned char)[-5,5] => [0,5][251,255].
4698 r0 = int_range<1> (integer_type_node, INT (-5), INT (5));
4699 range_cast (r0, unsigned_char_type_node);
4700 r1 = int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (5));
4701 r1.union_ (int_range<1> (unsigned_char_type_node, UCHAR (251), UCHAR (255)));
4702 ASSERT_TRUE (r0 == r1);
4704 // (unsigned char)[5U,1974U] => [0,255].
4705 r0 = int_range<1> (unsigned_type_node, UINT (5), UINT (1974));
4706 range_cast (r0, unsigned_char_type_node);
4707 ASSERT_TRUE (r0 == int_range<1> (unsigned_char_type_node, UCHAR (0), UCHAR (255)));
4708 range_cast (r0, integer_type_node);
4709 // Going to a wider type should not sign extend.
4710 ASSERT_TRUE (r0 == int_range<1> (integer_type_node, INT (0), INT (255)));
4712 // (unsigned char)[-350,15] => [0,255].
4713 r0 = int_range<1> (integer_type_node, INT (-350), INT (15));
4714 range_cast (r0, unsigned_char_type_node);
4715 ASSERT_TRUE (r0 == (int_range<1>
4716 (unsigned_char_type_node,
4717 min_limit (unsigned_char_type_node),
4718 max_limit (unsigned_char_type_node))));
4720 // Casting [-120,20] from signed char to unsigned short.
4721 // => [0, 20][0xff88, 0xffff].
4722 r0 = int_range<1> (signed_char_type_node, SCHAR (-120), SCHAR (20));
4723 range_cast (r0, short_unsigned_type_node);
4724 r1 = int_range<1> (short_unsigned_type_node, UINT16 (0), UINT16 (20));
4725 r2 = int_range<1> (short_unsigned_type_node,
4726 UINT16 (0xff88), UINT16 (0xffff));
4727 r1.union_ (r2);
4728 ASSERT_TRUE (r0 == r1);
4729 // A truncating cast back to signed char will work because [-120, 20]
4730 // is representable in signed char.
4731 range_cast (r0, signed_char_type_node);
4732 ASSERT_TRUE (r0 == int_range<1> (signed_char_type_node,
4733 SCHAR (-120), SCHAR (20)));
4735 // unsigned char -> signed short
4736 // (signed short)[(unsigned char)25, (unsigned char)250]
4737 // => [(signed short)25, (signed short)250]
4738 r0 = rold = int_range<1> (unsigned_char_type_node, UCHAR (25), UCHAR (250));
4739 range_cast (r0, short_integer_type_node);
4740 r1 = int_range<1> (short_integer_type_node, INT16 (25), INT16 (250));
4741 ASSERT_TRUE (r0 == r1);
4742 range_cast (r0, unsigned_char_type_node);
4743 ASSERT_TRUE (r0 == rold);
4745 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
4746 r0 = int_range<1> (long_long_integer_type_node,
4747 min_limit (long_long_integer_type_node),
4748 max_limit (long_long_integer_type_node));
4749 range_cast (r0, short_unsigned_type_node);
4750 r1 = int_range<1> (short_unsigned_type_node,
4751 min_limit (short_unsigned_type_node),
4752 max_limit (short_unsigned_type_node));
4753 ASSERT_TRUE (r0 == r1);
4755 // Casting NONZERO to a narrower type will wrap/overflow so
4756 // it's just the entire range for the narrower type.
4758 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
4759 // is outside of the range of a smaller range, return the full
4760 // smaller range.
4761 if (TYPE_PRECISION (integer_type_node)
4762 > TYPE_PRECISION (short_integer_type_node))
4764 r0.set_nonzero (integer_type_node);
4765 range_cast (r0, short_integer_type_node);
4766 r1 = int_range<1> (short_integer_type_node,
4767 min_limit (short_integer_type_node),
4768 max_limit (short_integer_type_node));
4769 ASSERT_TRUE (r0 == r1);
4772 // Casting NONZERO from a narrower signed to a wider signed.
4774 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
4775 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
4776 r0.set_nonzero (short_integer_type_node);
4777 range_cast (r0, integer_type_node);
4778 r1 = int_range<1> (integer_type_node, INT (-32768), INT (-1));
4779 r2 = int_range<1> (integer_type_node, INT (1), INT (32767));
4780 r1.union_ (r2);
4781 ASSERT_TRUE (r0 == r1);
4784 static void
4785 range_op_lshift_tests ()
4787 // Test that 0x808.... & 0x8.... still contains 0x8....
4788 // for a large set of numbers.
4790 int_range_max res;
4791 tree big_type = long_long_unsigned_type_node;
4792 unsigned big_prec = TYPE_PRECISION (big_type);
4793 // big_num = 0x808,0000,0000,0000
4794 wide_int big_num = wi::lshift (wi::uhwi (0x808, big_prec),
4795 wi::uhwi (48, big_prec));
4796 op_bitwise_and.fold_range (res, big_type,
4797 int_range <1> (big_type),
4798 int_range <1> (big_type, big_num, big_num));
4799 // val = 0x8,0000,0000,0000
4800 wide_int val = wi::lshift (wi::uhwi (8, big_prec),
4801 wi::uhwi (48, big_prec));
4802 ASSERT_TRUE (res.contains_p (val));
4805 if (TYPE_PRECISION (unsigned_type_node) > 31)
4807 // unsigned VARYING = op1 << 1 should be VARYING.
4808 int_range<2> lhs (unsigned_type_node);
4809 int_range<2> shift (unsigned_type_node, INT (1), INT (1));
4810 int_range_max op1;
4811 op_lshift.op1_range (op1, unsigned_type_node, lhs, shift);
4812 ASSERT_TRUE (op1.varying_p ());
4814 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4815 int_range<2> zero (unsigned_type_node, UINT (0), UINT (0));
4816 op_lshift.op1_range (op1, unsigned_type_node, zero, shift);
4817 ASSERT_TRUE (op1.num_pairs () == 2);
4818 // Remove the [0,0] range.
4819 op1.intersect (zero);
4820 ASSERT_TRUE (op1.num_pairs () == 1);
4821 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4822 // which should result in [0,0].
4823 int_range_max result;
4824 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4825 ASSERT_TRUE (result == zero);
4827 // signed VARYING = op1 << 1 should be VARYING.
4828 if (TYPE_PRECISION (integer_type_node) > 31)
4830 // signed VARYING = op1 << 1 should be VARYING.
4831 int_range<2> lhs (integer_type_node);
4832 int_range<2> shift (integer_type_node, INT (1), INT (1));
4833 int_range_max op1;
4834 op_lshift.op1_range (op1, integer_type_node, lhs, shift);
4835 ASSERT_TRUE (op1.varying_p ());
4837 // 0 = op1 << 1 should be [0,0], [0x80000000, 0x80000000].
4838 int_range<2> zero (integer_type_node, INT (0), INT (0));
4839 op_lshift.op1_range (op1, integer_type_node, zero, shift);
4840 ASSERT_TRUE (op1.num_pairs () == 2);
4841 // Remove the [0,0] range.
4842 op1.intersect (zero);
4843 ASSERT_TRUE (op1.num_pairs () == 1);
4844 // op1 << 1 should be [0x80000000,0x80000000] << 1,
4845 // which should result in [0,0].
4846 int_range_max result;
4847 op_lshift.fold_range (result, unsigned_type_node, op1, shift);
4848 ASSERT_TRUE (result == zero);
4852 static void
4853 range_op_rshift_tests ()
4855 // unsigned: [3, MAX] = OP1 >> 1
4857 int_range_max lhs (unsigned_type_node,
4858 UINT (3), max_limit (unsigned_type_node));
4859 int_range_max one (unsigned_type_node,
4860 wi::one (TYPE_PRECISION (unsigned_type_node)),
4861 wi::one (TYPE_PRECISION (unsigned_type_node)));
4862 int_range_max op1;
4863 op_rshift.op1_range (op1, unsigned_type_node, lhs, one);
4864 ASSERT_FALSE (op1.contains_p (UINT (3)));
4867 // signed: [3, MAX] = OP1 >> 1
4869 int_range_max lhs (integer_type_node,
4870 INT (3), max_limit (integer_type_node));
4871 int_range_max one (integer_type_node, INT (1), INT (1));
4872 int_range_max op1;
4873 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4874 ASSERT_FALSE (op1.contains_p (INT (-2)));
4877 // This is impossible, so OP1 should be [].
4878 // signed: [MIN, MIN] = OP1 >> 1
4880 int_range_max lhs (integer_type_node,
4881 min_limit (integer_type_node),
4882 min_limit (integer_type_node));
4883 int_range_max one (integer_type_node, INT (1), INT (1));
4884 int_range_max op1;
4885 op_rshift.op1_range (op1, integer_type_node, lhs, one);
4886 ASSERT_TRUE (op1.undefined_p ());
4889 // signed: ~[-1] = OP1 >> 31
4890 if (TYPE_PRECISION (integer_type_node) > 31)
4892 int_range_max lhs (integer_type_node, INT (-1), INT (-1), VR_ANTI_RANGE);
4893 int_range_max shift (integer_type_node, INT (31), INT (31));
4894 int_range_max op1;
4895 op_rshift.op1_range (op1, integer_type_node, lhs, shift);
4896 int_range_max negatives = range_negatives (integer_type_node);
4897 negatives.intersect (op1);
4898 ASSERT_TRUE (negatives.undefined_p ());
4902 static void
4903 range_op_bitwise_and_tests ()
4905 int_range_max res;
4906 wide_int min = min_limit (integer_type_node);
4907 wide_int max = max_limit (integer_type_node);
4908 wide_int tiny = wi::add (min, wi::one (TYPE_PRECISION (integer_type_node)));
4909 int_range_max i1 (integer_type_node, tiny, max);
4910 int_range_max i2 (integer_type_node, INT (255), INT (255));
4912 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
4913 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4914 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4916 // VARYING = OP1 & 255: OP1 is VARYING
4917 i1 = int_range<1> (integer_type_node);
4918 op_bitwise_and.op1_range (res, integer_type_node, i1, i2);
4919 ASSERT_TRUE (res == int_range<1> (integer_type_node));
4921 // For 0 = x & MASK, x is ~MASK.
4923 int_range<2> zero (integer_type_node, INT (0), INT (0));
4924 int_range<2> mask = int_range<2> (integer_type_node, INT (7), INT (7));
4925 op_bitwise_and.op1_range (res, integer_type_node, zero, mask);
4926 wide_int inv = wi::shwi (~7U, TYPE_PRECISION (integer_type_node));
4927 ASSERT_TRUE (res.get_nonzero_bits () == inv);
4930 // (NONZERO | X) is nonzero.
4931 i1.set_nonzero (integer_type_node);
4932 i2.set_varying (integer_type_node);
4933 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4934 ASSERT_TRUE (res.nonzero_p ());
4936 // (NEGATIVE | X) is nonzero.
4937 i1 = int_range<1> (integer_type_node, INT (-5), INT (-3));
4938 i2.set_varying (integer_type_node);
4939 op_bitwise_or.fold_range (res, integer_type_node, i1, i2);
4940 ASSERT_FALSE (res.contains_p (INT (0)));
4943 static void
4944 range_relational_tests ()
4946 int_range<2> lhs (unsigned_char_type_node);
4947 int_range<2> op1 (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4948 int_range<2> op2 (unsigned_char_type_node, UCHAR (20), UCHAR (20));
4950 // Never wrapping additions mean LHS > OP1.
4951 relation_kind code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4952 ASSERT_TRUE (code == VREL_GT);
4954 // Most wrapping additions mean nothing...
4955 op1 = int_range<2> (unsigned_char_type_node, UCHAR (8), UCHAR (10));
4956 op2 = int_range<2> (unsigned_char_type_node, UCHAR (0), UCHAR (255));
4957 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4958 ASSERT_TRUE (code == VREL_VARYING);
4960 // However, always wrapping additions mean LHS < OP1.
4961 op1 = int_range<2> (unsigned_char_type_node, UCHAR (1), UCHAR (255));
4962 op2 = int_range<2> (unsigned_char_type_node, UCHAR (255), UCHAR (255));
4963 code = op_plus.lhs_op1_relation (lhs, op1, op2, VREL_VARYING);
4964 ASSERT_TRUE (code == VREL_LT);
4967 void
4968 range_op_tests ()
4970 range_op_rshift_tests ();
4971 range_op_lshift_tests ();
4972 range_op_bitwise_and_tests ();
4973 range_op_cast_tests ();
4974 range_relational_tests ();
4976 extern void range_op_float_tests ();
4977 range_op_float_tests ();
4980 } // namespace selftest
4982 #endif // CHECKING_P