1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "diagnostic-core.h"
31 #include "stor-layout.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "intl.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-niter.h"
40 #include "tree-ssa-loop.h"
41 #include "cfgloop.h"
42 #include "tree-chrec.h"
43 #include "tree-scalar-evolution.h"
44 #include "params.h"
47 /* The maximum number of dominator BBs we search for conditions
48 of loop header copies we use for simplifying a conditional
49 expression. */
50 #define MAX_DOMINATORS_TO_WALK 8
54 /* Analysis of number of iterations of an affine exit test.  */
58 /* Bounds on some value, BELOW <= X <= UP. */
60 struct bounds
61 {
62 mpz_t below, up;
63 };
66 /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
68 static void
69 split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
71 tree type = TREE_TYPE (expr);
72 tree op0, op1;
73 bool negate = false;
75 *var = expr;
76 mpz_set_ui (offset, 0);
78 switch (TREE_CODE (expr))
80 case MINUS_EXPR:
81 negate = true;
82 /* Fallthru. */
84 case PLUS_EXPR:
85 case POINTER_PLUS_EXPR:
86 op0 = TREE_OPERAND (expr, 0);
87 op1 = TREE_OPERAND (expr, 1);
89 if (TREE_CODE (op1) != INTEGER_CST)
90 break;
92 *var = op0;
93 /* Always sign extend the offset. */
94 wi::to_mpz (op1, offset, SIGNED);
95 if (negate)
96 mpz_neg (offset, offset);
97 break;
99 case INTEGER_CST:
100 *var = build_int_cst_type (type, 0);
101 wi::to_mpz (expr, offset, TYPE_SIGN (type));
102 break;
104 default:
105 break;
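/* For illustration (hypothetical SSA names): given EXPR = i_4 + 7, this
   stores *VAR = i_4 and OFFSET = 7; given EXPR = i_4 - 5 (a MINUS_EXPR),
   it stores *VAR = i_4 and OFFSET = -5, since the offset of a MINUS_EXPR
   is negated; given EXPR = 42, it stores *VAR = 0 and OFFSET = 42.  */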
109 /* From condition C0 CMP C1 derives information regarding the value range
110 of VAR, which is of TYPE. Results are stored in to BELOW and UP. */
112 static void
113 refine_value_range_using_guard (tree type, tree var,
114 tree c0, enum tree_code cmp, tree c1,
115 mpz_t below, mpz_t up)
117 tree varc0, varc1, ctype;
118 mpz_t offc0, offc1;
119 mpz_t mint, maxt, minc1, maxc1;
120 wide_int minv, maxv;
121 bool no_wrap = nowrap_type_p (type);
122 bool c0_ok, c1_ok;
123 signop sgn = TYPE_SIGN (type);
125 switch (cmp)
127 case LT_EXPR:
128 case LE_EXPR:
129 case GT_EXPR:
130 case GE_EXPR:
131 STRIP_SIGN_NOPS (c0);
132 STRIP_SIGN_NOPS (c1);
133 ctype = TREE_TYPE (c0);
134 if (!useless_type_conversion_p (ctype, type))
135 return;
137 break;
139 case EQ_EXPR:
140 /* We could derive quite precise information from EQ_EXPR, however,
141 such a guard is unlikely to appear, so we do not bother with
142 handling it. */
143 return;
145 case NE_EXPR:
146 /* NE_EXPR comparisons do not contain much useful information,
147 except for the case of comparing with bounds. */
148 if (TREE_CODE (c1) != INTEGER_CST
149 || !INTEGRAL_TYPE_P (type))
150 return;
152 /* Ensure that the condition speaks about an expression in the same
153 type as VAR. */
154 ctype = TREE_TYPE (c0);
155 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
156 return;
157 c0 = fold_convert (type, c0);
158 c1 = fold_convert (type, c1);
160 if (operand_equal_p (var, c0, 0))
162 mpz_t valc1;
164 /* Case of comparing VAR with its below/up bounds. */
165 mpz_init (valc1);
166 wi::to_mpz (c1, valc1, TYPE_SIGN (type));
167 if (mpz_cmp (valc1, below) == 0)
168 cmp = GT_EXPR;
169 if (mpz_cmp (valc1, up) == 0)
170 cmp = LT_EXPR;
172 mpz_clear (valc1);
174 else
176 /* Case of comparing with the bounds of the type. */
177 wide_int min = wi::min_value (type);
178 wide_int max = wi::max_value (type);
180 if (wi::eq_p (c1, min))
181 cmp = GT_EXPR;
182 if (wi::eq_p (c1, max))
183 cmp = LT_EXPR;
186 /* Quick return if no useful information. */
187 if (cmp == NE_EXPR)
188 return;
190 break;
192 default:
193 return;
196 mpz_init (offc0);
197 mpz_init (offc1);
198 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
199 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
201 /* We are only interested in comparisons of expressions based on VAR. */
202 if (operand_equal_p (var, varc1, 0))
204 std::swap (varc0, varc1);
205 mpz_swap (offc0, offc1);
206 cmp = swap_tree_comparison (cmp);
208 else if (!operand_equal_p (var, varc0, 0))
210 mpz_clear (offc0);
211 mpz_clear (offc1);
212 return;
215 mpz_init (mint);
216 mpz_init (maxt);
217 get_type_static_bounds (type, mint, maxt);
218 mpz_init (minc1);
219 mpz_init (maxc1);
220 /* Setup range information for varc1. */
221 if (integer_zerop (varc1))
223 wi::to_mpz (integer_zero_node, minc1, TYPE_SIGN (type));
224 wi::to_mpz (integer_zero_node, maxc1, TYPE_SIGN (type));
226 else if (TREE_CODE (varc1) == SSA_NAME
227 && INTEGRAL_TYPE_P (type)
228 && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
230 gcc_assert (wi::le_p (minv, maxv, sgn));
231 wi::to_mpz (minv, minc1, sgn);
232 wi::to_mpz (maxv, maxc1, sgn);
234 else
236 mpz_set (minc1, mint);
237 mpz_set (maxc1, maxt);
240 /* Compute valid range information for varc1 + offc1. Note nothing
241 useful can be derived if it overflows or underflows. Overflow or
242 underflow could happen when:
244 offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
245 offc1 < 0 && varc1 + offc1 < MIN_VAL (type). */
246 mpz_add (minc1, minc1, offc1);
247 mpz_add (maxc1, maxc1, offc1);
248 c1_ok = (no_wrap
249 || mpz_sgn (offc1) == 0
250 || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
251 || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
252 if (!c1_ok)
253 goto end;
255 if (mpz_cmp (minc1, mint) < 0)
256 mpz_set (minc1, mint);
257 if (mpz_cmp (maxc1, maxt) > 0)
258 mpz_set (maxc1, maxt);
260 if (cmp == LT_EXPR)
262 cmp = LE_EXPR;
263 mpz_sub_ui (maxc1, maxc1, 1);
265 if (cmp == GT_EXPR)
267 cmp = GE_EXPR;
268 mpz_add_ui (minc1, minc1, 1);
271 /* Compute range information for varc0. If there is no overflow,
272 the condition implied that
274 (varc0) cmp (varc1 + offc1 - offc0)
276 We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
277 or the below bound if cmp is GE_EXPR.
279 To prove there is no overflow/underflow, we need to check the
280 four cases below:
281 1) cmp == LE_EXPR && offc0 > 0
283 (varc0 + offc0) doesn't overflow
284 && (varc1 + offc1 - offc0) doesn't underflow
286 2) cmp == LE_EXPR && offc0 < 0
288 (varc0 + offc0) doesn't underflow
289 && (varc1 + offc1 - offc0) doesn't overflow
291 In this case, (varc0 + offc0) will never underflow if we can
292 prove (varc1 + offc1 - offc0) doesn't overflow.
294 3) cmp == GE_EXPR && offc0 < 0
296 (varc0 + offc0) doesn't underflow
297 && (varc1 + offc1 - offc0) doesn't overflow
299 4) cmp == GE_EXPR && offc0 > 0
301 (varc0 + offc0) doesn't overflow
302 && (varc1 + offc1 - offc0) doesn't underflow
304 In this case, (varc0 + offc0) will never overflow if we can
305 prove (varc1 + offc1 - offc0) doesn't underflow.
307 Note we only handle cases 2 and 4 in the code below. */
309 mpz_sub (minc1, minc1, offc0);
310 mpz_sub (maxc1, maxc1, offc0);
311 c0_ok = (no_wrap
312 || mpz_sgn (offc0) == 0
313 || (cmp == LE_EXPR
314 && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
315 || (cmp == GE_EXPR
316 && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
317 if (!c0_ok)
318 goto end;
320 if (cmp == LE_EXPR)
322 if (mpz_cmp (up, maxc1) > 0)
323 mpz_set (up, maxc1);
325 else
327 if (mpz_cmp (below, minc1) < 0)
328 mpz_set (below, minc1);
331 end:
332 mpz_clear (mint);
333 mpz_clear (maxt);
334 mpz_clear (minc1);
335 mpz_clear (maxc1);
336 mpz_clear (offc0);
337 mpz_clear (offc1);
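/* A hypothetical example: assume VAR = i_1 currently has BELOW = 0 and
   UP = 100, and the guard is i_1 < n_2 where VRP knows n_2 lies in
   [0, 50].  Then varc1 = n_2 with offc1 = 0, LT_EXPR is strengthened to
   LE_EXPR with maxc1 = 49, and UP is refined from 100 down to 49.  */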
340 /* Stores estimate on the minimum/maximum value of the expression VAR + OFF
341 in TYPE to MIN and MAX. */
343 static void
344 determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
345 mpz_t min, mpz_t max)
347 int cnt = 0;
348 mpz_t minm, maxm;
349 basic_block bb;
350 wide_int minv, maxv;
351 enum value_range_type rtype = VR_VARYING;
353 /* If the expression is a constant, we know its value exactly. */
354 if (integer_zerop (var))
356 mpz_set (min, off);
357 mpz_set (max, off);
358 return;
361 get_type_static_bounds (type, min, max);
363 /* See if we have some range info from VRP. */
364 if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
366 edge e = loop_preheader_edge (loop);
367 signop sgn = TYPE_SIGN (type);
368 gphi_iterator gsi;
370 /* Either for VAR itself... */
371 rtype = get_range_info (var, &minv, &maxv);
372 /* Or for PHI results in loop->header where VAR is used as
373 PHI argument from the loop preheader edge. */
374 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
376 gphi *phi = gsi.phi ();
377 wide_int minc, maxc;
378 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
379 && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
380 == VR_RANGE))
382 if (rtype != VR_RANGE)
384 rtype = VR_RANGE;
385 minv = minc;
386 maxv = maxc;
388 else
390 minv = wi::max (minv, minc, sgn);
391 maxv = wi::min (maxv, maxc, sgn);
392 /* If the PHI result range is inconsistent with
393 the VAR range, give up on looking at the PHI
394 results. This can happen if VR_UNDEFINED is
395 involved. */
396 if (wi::gt_p (minv, maxv, sgn))
398 rtype = get_range_info (var, &minv, &maxv);
399 break;
404 mpz_init (minm);
405 mpz_init (maxm);
406 if (rtype != VR_RANGE)
408 mpz_set (minm, min);
409 mpz_set (maxm, max);
411 else
413 gcc_assert (wi::le_p (minv, maxv, sgn));
414 wi::to_mpz (minv, minm, sgn);
415 wi::to_mpz (maxv, maxm, sgn);
417 /* Now walk the dominators of the loop header and use the entry
418 guards to refine the estimates. */
419 for (bb = loop->header;
420 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
421 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
423 edge e;
424 tree c0, c1;
425 gimple *cond;
426 enum tree_code cmp;
428 if (!single_pred_p (bb))
429 continue;
430 e = single_pred_edge (bb);
432 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
433 continue;
435 cond = last_stmt (e->src);
436 c0 = gimple_cond_lhs (cond);
437 cmp = gimple_cond_code (cond);
438 c1 = gimple_cond_rhs (cond);
440 if (e->flags & EDGE_FALSE_VALUE)
441 cmp = invert_tree_comparison (cmp, false);
443 refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
444 ++cnt;
447 mpz_add (minm, minm, off);
448 mpz_add (maxm, maxm, off);
449 /* If the computation may not wrap or off is zero, then this
450 is always fine. If off is negative and minv + off isn't
451 smaller than type's minimum, or off is positive and
452 maxv + off isn't bigger than type's maximum, use the more
453 precise range too. */
454 if (nowrap_type_p (type)
455 || mpz_sgn (off) == 0
456 || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
457 || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
459 mpz_set (min, minm);
460 mpz_set (max, maxm);
461 mpz_clear (minm);
462 mpz_clear (maxm);
463 return;
465 mpz_clear (minm);
466 mpz_clear (maxm);
469 /* If the computation may wrap, we know nothing about the value, except for
470 the range of the type. */
471 if (!nowrap_type_p (type))
472 return;
474 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
475 add it to MIN, otherwise to MAX. */
476 if (mpz_sgn (off) < 0)
477 mpz_add (max, max, off);
478 else
479 mpz_add (min, min, off);
482 /* Stores the bounds on the difference of the values of the expressions
483 (var + X) and (var + Y), computed in TYPE, to BNDS. */
485 static void
486 bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
487 bounds *bnds)
489 int rel = mpz_cmp (x, y);
490 bool may_wrap = !nowrap_type_p (type);
491 mpz_t m;
493 /* If X == Y, then the expressions are always equal.
494 If X > Y, there are the following possibilities:
495 a) neither of var + X and var + Y overflow or underflow, or both of
496 them do. Then their difference is X - Y.
497 b) var + X overflows, and var + Y does not. Then the values of the
498 expressions are var + X - M and var + Y, where M is the range of
499 the type, and their difference is X - Y - M.
500 c) var + Y underflows and var + X does not. Then the values are
501 var + X and var + Y + M, and their difference again is X - Y - M.
502 Therefore, if the arithmetic in the type does not overflow, then the
503 bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
504 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
505 (X - Y, X - Y + M). */
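/* For instance (hypothetical values), in an 8-bit unsigned type we have
   M = 256; with X = 200 and Y = 10 the difference starts as
   X - Y = 190, and if the type may wrap the bounds become
   (190 - 256, 190) = (-66, 190).  */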
507 if (rel == 0)
509 mpz_set_ui (bnds->below, 0);
510 mpz_set_ui (bnds->up, 0);
511 return;
514 mpz_init (m);
515 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
516 mpz_add_ui (m, m, 1);
517 mpz_sub (bnds->up, x, y);
518 mpz_set (bnds->below, bnds->up);
520 if (may_wrap)
522 if (rel > 0)
523 mpz_sub (bnds->below, bnds->below, m);
524 else
525 mpz_add (bnds->up, bnds->up, m);
528 mpz_clear (m);
531 /* From condition C0 CMP C1 derives information regarding the
532 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
533 and stores it to BNDS. */
535 static void
536 refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
537 tree vary, mpz_t offy,
538 tree c0, enum tree_code cmp, tree c1,
539 bounds *bnds)
541 tree varc0, varc1, ctype;
542 mpz_t offc0, offc1, loffx, loffy, bnd;
543 bool lbound = false;
544 bool no_wrap = nowrap_type_p (type);
545 bool x_ok, y_ok;
547 switch (cmp)
549 case LT_EXPR:
550 case LE_EXPR:
551 case GT_EXPR:
552 case GE_EXPR:
553 STRIP_SIGN_NOPS (c0);
554 STRIP_SIGN_NOPS (c1);
555 ctype = TREE_TYPE (c0);
556 if (!useless_type_conversion_p (ctype, type))
557 return;
559 break;
561 case EQ_EXPR:
562 /* We could derive quite precise information from EQ_EXPR, however, such
563 a guard is unlikely to appear, so we do not bother with handling
564 it. */
565 return;
567 case NE_EXPR:
568 /* NE_EXPR comparisons do not contain much useful information, except for
569 the special case of comparing with the bounds of the type. */
570 if (TREE_CODE (c1) != INTEGER_CST
571 || !INTEGRAL_TYPE_P (type))
572 return;
574 /* Ensure that the condition speaks about an expression in the same type
575 as X and Y. */
576 ctype = TREE_TYPE (c0);
577 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
578 return;
579 c0 = fold_convert (type, c0);
580 c1 = fold_convert (type, c1);
582 if (TYPE_MIN_VALUE (type)
583 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
585 cmp = GT_EXPR;
586 break;
588 if (TYPE_MAX_VALUE (type)
589 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
591 cmp = LT_EXPR;
592 break;
595 return;
596 default:
597 return;
600 mpz_init (offc0);
601 mpz_init (offc1);
602 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
603 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
605 /* We are only interested in comparisons of expressions based on VARX and
606 VARY. TODO -- we might also be able to derive some bounds from
607 expressions containing just one of the variables. */
609 if (operand_equal_p (varx, varc1, 0))
611 std::swap (varc0, varc1);
612 mpz_swap (offc0, offc1);
613 cmp = swap_tree_comparison (cmp);
616 if (!operand_equal_p (varx, varc0, 0)
617 || !operand_equal_p (vary, varc1, 0))
618 goto end;
620 mpz_init_set (loffx, offx);
621 mpz_init_set (loffy, offy);
623 if (cmp == GT_EXPR || cmp == GE_EXPR)
625 std::swap (varx, vary);
626 mpz_swap (offc0, offc1);
627 mpz_swap (loffx, loffy);
628 cmp = swap_tree_comparison (cmp);
629 lbound = true;
632 /* If there is no overflow, the condition implies that
634 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
636 The overflows and underflows may complicate things a bit; each
637 overflow decreases the appropriate offset by M, and underflow
638 increases it by M. The above inequality would not necessarily be
639 true if
641 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
642 VARX + OFFC0 overflows, but VARX + OFFX does not.
643 This may only happen if OFFX < OFFC0.
644 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
645 VARY + OFFC1 underflows and VARY + OFFY does not.
646 This may only happen if OFFY > OFFC1. */
648 if (no_wrap)
650 x_ok = true;
651 y_ok = true;
653 else
655 x_ok = (integer_zerop (varx)
656 || mpz_cmp (loffx, offc0) >= 0);
657 y_ok = (integer_zerop (vary)
658 || mpz_cmp (loffy, offc1) <= 0);
661 if (x_ok && y_ok)
663 mpz_init (bnd);
664 mpz_sub (bnd, loffx, loffy);
665 mpz_add (bnd, bnd, offc1);
666 mpz_sub (bnd, bnd, offc0);
668 if (cmp == LT_EXPR)
669 mpz_sub_ui (bnd, bnd, 1);
671 if (lbound)
673 mpz_neg (bnd, bnd);
674 if (mpz_cmp (bnds->below, bnd) < 0)
675 mpz_set (bnds->below, bnd);
677 else
679 if (mpz_cmp (bnd, bnds->up) < 0)
680 mpz_set (bnds->up, bnd);
682 mpz_clear (bnd);
685 mpz_clear (loffx);
686 mpz_clear (loffy);
687 end:
688 mpz_clear (offc0);
689 mpz_clear (offc1);
692 /* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
693 The subtraction is considered to be performed in arbitrary precision,
694 without overflows.
696 We do not attempt to be too clever regarding the value ranges of X and
697 Y; most of the time, they are just integers or ssa names offsetted by
698 integer. However, we try to use the information contained in the
699 comparisons before the loop (usually created by loop header copying). */
701 static void
702 bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
704 tree type = TREE_TYPE (x);
705 tree varx, vary;
706 mpz_t offx, offy;
707 mpz_t minx, maxx, miny, maxy;
708 int cnt = 0;
709 edge e;
710 basic_block bb;
711 tree c0, c1;
712 gimple *cond;
713 enum tree_code cmp;
715 /* Get rid of unnecessary casts, but preserve the value of
716 the expressions. */
717 STRIP_SIGN_NOPS (x);
718 STRIP_SIGN_NOPS (y);
720 mpz_init (bnds->below);
721 mpz_init (bnds->up);
722 mpz_init (offx);
723 mpz_init (offy);
724 split_to_var_and_offset (x, &varx, offx);
725 split_to_var_and_offset (y, &vary, offy);
727 if (!integer_zerop (varx)
728 && operand_equal_p (varx, vary, 0))
730 /* Special case VARX == VARY -- we just need to compare the
731 offsets. Matters are a bit more complicated when the
732 addition of the offsets may wrap.
733 bound_difference_of_offsetted_base (type, offx, offy, bnds);
735 else
737 /* Otherwise, use the value ranges to determine the initial
738 estimates on below and up. */
739 mpz_init (minx);
740 mpz_init (maxx);
741 mpz_init (miny);
742 mpz_init (maxy);
743 determine_value_range (loop, type, varx, offx, minx, maxx);
744 determine_value_range (loop, type, vary, offy, miny, maxy);
746 mpz_sub (bnds->below, minx, maxy);
747 mpz_sub (bnds->up, maxx, miny);
748 mpz_clear (minx);
749 mpz_clear (maxx);
750 mpz_clear (miny);
751 mpz_clear (maxy);
754 /* If both X and Y are constants, we cannot get any more precise. */
755 if (integer_zerop (varx) && integer_zerop (vary))
756 goto end;
758 /* Now walk the dominators of the loop header and use the entry
759 guards to refine the estimates. */
760 for (bb = loop->header;
761 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
762 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
764 if (!single_pred_p (bb))
765 continue;
766 e = single_pred_edge (bb);
768 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
769 continue;
771 cond = last_stmt (e->src);
772 c0 = gimple_cond_lhs (cond);
773 cmp = gimple_cond_code (cond);
774 c1 = gimple_cond_rhs (cond);
776 if (e->flags & EDGE_FALSE_VALUE)
777 cmp = invert_tree_comparison (cmp, false);
779 refine_bounds_using_guard (type, varx, offx, vary, offy,
780 c0, cmp, c1, bnds);
781 ++cnt;
784 end:
785 mpz_clear (offx);
786 mpz_clear (offy);
789 /* Update the bounds in BNDS that restrict the value of X to the bounds
790 that restrict the value of X + DELTA. X can be obtained as a
791 difference of two values in TYPE. */
793 static void
794 bounds_add (bounds *bnds, const widest_int &delta, tree type)
796 mpz_t mdelta, max;
798 mpz_init (mdelta);
799 wi::to_mpz (delta, mdelta, SIGNED);
801 mpz_init (max);
802 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
804 mpz_add (bnds->up, bnds->up, mdelta);
805 mpz_add (bnds->below, bnds->below, mdelta);
807 if (mpz_cmp (bnds->up, max) > 0)
808 mpz_set (bnds->up, max);
810 mpz_neg (max, max);
811 if (mpz_cmp (bnds->below, max) < 0)
812 mpz_set (bnds->below, max);
814 mpz_clear (mdelta);
815 mpz_clear (max);
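/* E.g. (hypothetical values): with BNDS = [0, 10] and DELTA = 3 in an
   8-bit type, the bounds become [3, 13]; both already lie within
   [-255, 255], so no clamping against the type range is needed.  */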
818 /* Update the bounds in BNDS that restrict the value of X to the bounds
819 that restrict the value of -X. */
821 static void
822 bounds_negate (bounds *bnds)
824 mpz_t tmp;
826 mpz_init_set (tmp, bnds->up);
827 mpz_neg (bnds->up, bnds->below);
828 mpz_neg (bnds->below, tmp);
829 mpz_clear (tmp);
832 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
834 static tree
835 inverse (tree x, tree mask)
837 tree type = TREE_TYPE (x);
838 tree rslt;
839 unsigned ctr = tree_floor_log2 (mask);
841 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
843 unsigned HOST_WIDE_INT ix;
844 unsigned HOST_WIDE_INT imask;
845 unsigned HOST_WIDE_INT irslt = 1;
847 gcc_assert (cst_and_fits_in_hwi (x));
848 gcc_assert (cst_and_fits_in_hwi (mask));
850 ix = int_cst_value (x);
851 imask = int_cst_value (mask);
853 for (; ctr; ctr--)
855 irslt *= ix;
856 ix *= ix;
858 irslt &= imask;
860 rslt = build_int_cst_type (type, irslt);
862 else
864 rslt = build_int_cst (type, 1);
865 for (; ctr; ctr--)
867 rslt = int_const_binop (MULT_EXPR, rslt, x);
868 x = int_const_binop (MULT_EXPR, x, x);
870 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
873 return rslt;
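/* A worked example: for MASK = 15 (i.e. s = 4) and X = 3, CTR =
   tree_floor_log2 (15) = 3, so the loop computes 3^(2^3 - 1) = 3^7 =
   2187, and 2187 & 15 = 11; indeed 3 * 11 = 33 == 1 (mod 16).  */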
876 /* Derives the upper bound BND on the number of executions of loop with exit
877 condition S * i <> C. If NO_OVERFLOW is true, then the control variable of
878 the loop does not overflow. EXIT_MUST_BE_TAKEN is true if we are guaranteed
879 that the loop ends through this exit, i.e., the induction variable
880 eventually reaches the value of C.
882 The value C is equal to final - base, where final and base are the final and
883 initial value of the actual induction variable in the analysed loop. BNDS
884 bounds the value of this difference when computed in signed type with
885 unbounded range, while the computation of C is performed in an unsigned
886 type with the range matching the range of the type of the induction variable.
887 In particular, BNDS.up contains an upper bound on C in the following cases:
888 -- if the iv must reach its final value without overflow, i.e., if
889 NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
890 -- if final >= base, which we know to hold when BNDS.below >= 0. */
892 static void
893 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
894 bounds *bnds, bool exit_must_be_taken)
896 widest_int max;
897 mpz_t d;
898 tree type = TREE_TYPE (c);
899 bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
900 || mpz_sgn (bnds->below) >= 0);
902 if (integer_onep (s)
903 || (TREE_CODE (c) == INTEGER_CST
904 && TREE_CODE (s) == INTEGER_CST
905 && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
906 || (TYPE_OVERFLOW_UNDEFINED (type)
907 && multiple_of_p (type, c, s)))
909 /* If C is an exact multiple of S, then its value will be reached before
910 the induction variable overflows (unless the loop is exited in some
911 other way before). Note that the actual induction variable in the
912 loop (which ranges from base to final instead of from 0 to C) may
913 overflow, in which case BNDS.up will not be giving a correct upper
914 bound on C; thus, BNDS_U_VALID had to be computed in advance. */
915 no_overflow = true;
916 exit_must_be_taken = true;
919 /* If the induction variable can overflow, the number of iterations is at
920 most the period of the control variable (or infinite, but in that case
921 the whole # of iterations analysis will fail). */
922 if (!no_overflow)
924 max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
925 wi::to_mpz (max, bnd, UNSIGNED);
926 return;
929 /* Now we know that the induction variable does not overflow, so the loop
930 iterates at most (range of type / S) times. */
931 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
933 /* If the induction variable is guaranteed to reach the value of C before
934 overflow, ... */
935 if (exit_must_be_taken)
937 /* ... then we can strengthen this to C / S, and possibly we can use
938 the upper bound on C given by BNDS. */
939 if (TREE_CODE (c) == INTEGER_CST)
940 wi::to_mpz (c, bnd, UNSIGNED);
941 else if (bnds_u_valid)
942 mpz_set (bnd, bnds->up);
945 mpz_init (d);
946 wi::to_mpz (s, d, UNSIGNED);
947 mpz_fdiv_q (bnd, bnd, d);
948 mpz_clear (d);
951 /* Determines number of iterations of loop whose ending condition
952 is IV <> FINAL. TYPE is the type of the iv. The number of
953 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
954 we know that the exit must be taken eventually, i.e., that the IV
955 eventually reaches the value FINAL (we derived this earlier, and possibly set
956 NITER->assumptions to make sure this is the case). BNDS contains the
957 bounds on the difference FINAL - IV->base. */
959 static bool
960 number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
961 tree final, struct tree_niter_desc *niter,
962 bool exit_must_be_taken, bounds *bnds)
964 tree niter_type = unsigned_type_for (type);
965 tree s, c, d, bits, assumption, tmp, bound;
966 mpz_t max;
968 niter->control = *iv;
969 niter->bound = final;
970 niter->cmp = NE_EXPR;
972 /* Rearrange the terms so that we get inequality S * i <> C, with S
973 positive. Also cast everything to the unsigned type. If IV does
974 not overflow, BNDS bounds the value of C. Also, this is the
975 case if the computation |FINAL - IV->base| does not overflow, i.e.,
976 if BNDS->below in the result is nonnegative. */
977 if (tree_int_cst_sign_bit (iv->step))
979 s = fold_convert (niter_type,
980 fold_build1 (NEGATE_EXPR, type, iv->step));
981 c = fold_build2 (MINUS_EXPR, niter_type,
982 fold_convert (niter_type, iv->base),
983 fold_convert (niter_type, final));
984 bounds_negate (bnds);
986 else
988 s = fold_convert (niter_type, iv->step);
989 c = fold_build2 (MINUS_EXPR, niter_type,
990 fold_convert (niter_type, final),
991 fold_convert (niter_type, iv->base));
994 mpz_init (max);
995 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
996 exit_must_be_taken);
997 niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
998 TYPE_SIGN (niter_type));
999 mpz_clear (max);
1001 /* Compute no-overflow information for the control iv. This can be
1002 proven when the two conditions below are satisfied:
1004 1) IV evaluates toward FINAL at beginning, i.e:
1005 base <= FINAL ; step > 0
1006 base >= FINAL ; step < 0
1008 2) |FINAL - base| is an exact multiple of step.
1010 Unfortunately, it's hard to prove above conditions after pass loop-ch
1011 because loop with exit condition (IV != FINAL) usually will be guarded
1012 by initial-condition (IV.base - IV.step != FINAL). In this case, we
1013 can alternatively try to prove the conditions below:
1015 1') IV evaluates toward FINAL at beginning, i.e:
1016 new_base = base - step < FINAL ; step > 0
1017 && base - step doesn't underflow
1018 new_base = base - step > FINAL ; step < 0
1019 && base - step doesn't overflow
1021 2') |FINAL - new_base| is an exact multiple of step.
1023 Please refer to PR34114 as an example of loop-ch's impact, also refer
1024 to PR72817 as an example why condition 2') is necessary.
1026 Note that for NE_EXPR, base equal to FINAL is a special case, in
1027 which the loop exits immediately and the iv does not overflow. */
1028 if (!niter->control.no_overflow
1029 && (integer_onep (s) || multiple_of_p (type, c, s)))
1031 tree t, cond, new_c, relaxed_cond = boolean_false_node;
1033 if (tree_int_cst_sign_bit (iv->step))
1035 cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
1036 if (TREE_CODE (type) == INTEGER_TYPE)
1038 /* Only when base - step doesn't overflow. */
1039 t = TYPE_MAX_VALUE (type);
1040 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1041 t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
1042 if (integer_nonzerop (t))
1044 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1045 new_c = fold_build2 (MINUS_EXPR, niter_type,
1046 fold_convert (niter_type, t),
1047 fold_convert (niter_type, final));
1048 if (multiple_of_p (type, new_c, s))
1049 relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
1050 t, final);
1054 else
1056 cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
1057 if (TREE_CODE (type) == INTEGER_TYPE)
1059 /* Only when base - step doesn't underflow. */
1060 t = TYPE_MIN_VALUE (type);
1061 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1062 t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
1063 if (integer_nonzerop (t))
1065 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1066 new_c = fold_build2 (MINUS_EXPR, niter_type,
1067 fold_convert (niter_type, final),
1068 fold_convert (niter_type, t));
1069 if (multiple_of_p (type, new_c, s))
1070 relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
1071 t, final);
1076 t = simplify_using_initial_conditions (loop, cond);
1077 if (!t || !integer_onep (t))
1078 t = simplify_using_initial_conditions (loop, relaxed_cond);
1080 if (t && integer_onep (t))
1081 niter->control.no_overflow = true;
1084 /* First the trivial cases -- when the step is 1. */
1085 if (integer_onep (s))
1087 niter->niter = c;
1088 return true;
1090 if (niter->control.no_overflow && multiple_of_p (type, c, s))
1092 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
1093 return true;
1096 /* Let gcd (step, size of mode) = d. If d does not divide c, the loop
1097 is infinite. Otherwise, the number of iterations is
1098 (inverse(s/d) * (c/d)) mod (size of mode/d). */
1099 bits = num_ending_zeros (s);
1100 bound = build_low_bits_mask (niter_type,
1101 (TYPE_PRECISION (niter_type)
1102 - tree_to_uhwi (bits)));
1104 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
1105 build_int_cst (niter_type, 1), bits);
1106 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
1108 if (!exit_must_be_taken)
1110 /* If we cannot assume that the exit is taken eventually, record the
1111 assumptions for divisibility of c. */
1112 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
1113 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
1114 assumption, build_int_cst (niter_type, 0));
1115 if (!integer_nonzerop (assumption))
1116 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1117 niter->assumptions, assumption);
1120 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
1121 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
1122 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
1123 return true;
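/* A worked example (hypothetical loop, 8-bit unsigned arithmetic):
   for (i = 0; i != 30; i += 6) gives s = 6, c = 30; bits = 1, so
   d = 2, bound = 127, s becomes 3 and c becomes 15.  inverse (3, 127)
   = 43 since 3 * 43 = 129 == 1 (mod 128), and (15 * 43) & 127 =
   645 & 127 = 5, the correct iteration count (i = 0, 6, 12, 18, 24).  */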
1126 /* Checks whether we can determine the final value of the control variable
1127 of the loop with ending condition IV0 < IV1 (computed in TYPE).
1128 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
1129 of the step. The assumptions necessary to ensure that the computation
1130 of the final value does not overflow are recorded in NITER. If we
1131 find the final value, we adjust DELTA and return TRUE. Otherwise
1132 we return false. BNDS bounds the value of IV1->base - IV0->base,
1133 and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
1134 true if we know that the exit must be taken eventually. */
1136 static bool
1137 number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
1138 struct tree_niter_desc *niter,
1139 tree *delta, tree step,
1140 bool exit_must_be_taken, bounds *bnds)
1142 tree niter_type = TREE_TYPE (step);
1143 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
1144 tree tmod;
1145 tree assumption = boolean_true_node, bound;
1146 tree type1 = (POINTER_TYPE_P (type)) ? sizetype : type;
1148 if (TREE_CODE (mod) != INTEGER_CST)
1149 return false;
1150 if (integer_nonzerop (mod))
1151 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
1152 tmod = fold_convert (type1, mod);
1154 /* If the induction variable does not overflow and the exit is taken,
1155 then the computation of the final value does not overflow. There
1156 are three cases:
1157 1) The new final value is equal to the current one.
1158 2) The induction variable has pointer type, as the code cannot rely
1159 on the object to which the pointer points being placed at the
1160 end of the address space (and more pragmatically,
1161 TYPE_{MIN,MAX}_VALUE is not defined for pointers).
1162 3) EXIT_MUST_BE_TAKEN is true, note it implies that the induction
1163 variable does not overflow. */
1164 if (!integer_zerop (mod) && !POINTER_TYPE_P (type) && !exit_must_be_taken)
1166 if (integer_nonzerop (iv0->step))
1168 /* The final value of the iv is iv1->base + MOD, assuming
1169 that this computation does not overflow, and that
1170 iv0->base <= iv1->base + MOD. */
1171 bound = fold_build2 (MINUS_EXPR, type1,
1172 TYPE_MAX_VALUE (type1), tmod);
1173 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1174 iv1->base, bound);
1176 else
1178 /* The final value of the iv is iv0->base - MOD, assuming
1179 that this computation does not overflow, and that
1180 iv0->base - MOD <= iv1->base. */
1181 bound = fold_build2 (PLUS_EXPR, type1,
1182 TYPE_MIN_VALUE (type1), tmod);
1183 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1184 iv0->base, bound);
1186 if (integer_zerop (assumption))
1187 return false;
1188 else if (!integer_nonzerop (assumption))
1189 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1190 niter->assumptions, assumption);
1193 /* Since we are transforming LT to NE and DELTA is constant, there
1194 is no need to compute may_be_zero because this loop must roll. */
1196 bounds_add (bnds, wi::to_widest (mod), type);
1197 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
1198 return true;
1201 /* Add assertions to NITER that ensure that the control variable of the loop
1202 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
1203 are TYPE. Returns false if we can prove that there is an overflow, true
1204 otherwise. STEP is the absolute value of the step. */
1206 static bool
1207 assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1208 struct tree_niter_desc *niter, tree step)
1210 tree bound, d, assumption, diff;
1211 tree niter_type = TREE_TYPE (step);
1213 if (integer_nonzerop (iv0->step))
1215 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
1216 if (iv0->no_overflow)
1217 return true;
1219 /* If iv0->base is a constant, we can determine the last value before
1220 overflow precisely; otherwise we conservatively assume
1221 MAX - STEP + 1. */
1223 if (TREE_CODE (iv0->base) == INTEGER_CST)
1225 d = fold_build2 (MINUS_EXPR, niter_type,
1226 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
1227 fold_convert (niter_type, iv0->base));
1228 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1230 else
1231 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1232 build_int_cst (niter_type, 1));
1233 bound = fold_build2 (MINUS_EXPR, type,
1234 TYPE_MAX_VALUE (type), fold_convert (type, diff));
1235 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1236 iv1->base, bound);
1238 else
1240 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
1241 if (iv1->no_overflow)
1242 return true;
1244 if (TREE_CODE (iv1->base) == INTEGER_CST)
1246 d = fold_build2 (MINUS_EXPR, niter_type,
1247 fold_convert (niter_type, iv1->base),
1248 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
1249 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1251 else
1252 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1253 build_int_cst (niter_type, 1));
1254 bound = fold_build2 (PLUS_EXPR, type,
1255 TYPE_MIN_VALUE (type), fold_convert (type, diff));
1256 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1257 iv0->base, bound);
1260 if (integer_zerop (assumption))
1261 return false;
1262 if (!integer_nonzerop (assumption))
1263 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1264 niter->assumptions, assumption);
1266 iv0->no_overflow = true;
1267 iv1->no_overflow = true;
1268 return true;
1271 /* Add an assumption to NITER that a loop whose ending condition
1272 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
1273 bounds the value of IV1->base - IV0->base. */
1275 static void
1276 assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1277 struct tree_niter_desc *niter, bounds *bnds)
1279 tree assumption = boolean_true_node, bound, diff;
1280 tree mbz, mbzl, mbzr, type1;
1281 bool rolls_p, no_overflow_p;
1282 widest_int dstep;
1283 mpz_t mstep, max;
1285 /* We are going to compute the number of iterations as
1286 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
1287 variant of TYPE. This formula only works if
1289 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
1291 (where MAX is the maximum value of the unsigned variant of TYPE, and
1292 the computations in this formula are performed in full precision,
1293 i.e., without overflows).
1295 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
1296 we have a condition of the form iv0->base - step < iv1->base before the loop,
1297 and for loops iv0->base < iv1->base - step * i the condition
1298 iv0->base < iv1->base + step, due to loop header copying, which enables us
1299 to prove the lower bound.
1301 The upper bound is more complicated. Unless the expressions for initial
1302 and final value themselves contain enough information, we usually cannot
1303 derive it from the context. */
1305 /* First check whether the answer does not follow from the bounds we gathered
1306 before. */
1307 if (integer_nonzerop (iv0->step))
1308 dstep = wi::to_widest (iv0->step);
1309 else
1311 dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
1312 dstep = -dstep;
1315 mpz_init (mstep);
1316 wi::to_mpz (dstep, mstep, UNSIGNED);
1317 mpz_neg (mstep, mstep);
1318 mpz_add_ui (mstep, mstep, 1);
1320 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
1322 mpz_init (max);
1323 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
1324 mpz_add (max, max, mstep);
1325 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
1326 /* For pointers, only values lying inside a single object
1327 can be compared or manipulated by pointer arithmetics.
1328 Gcc in general does not allow or handle objects larger
1329 than half of the address space, hence the upper bound
1330 is satisfied for pointers. */
1331 || POINTER_TYPE_P (type));
1332 mpz_clear (mstep);
1333 mpz_clear (max);
1335 if (rolls_p && no_overflow_p)
1336 return;
1338 type1 = type;
1339 if (POINTER_TYPE_P (type))
1340 type1 = sizetype;
1342 /* Now the hard part; we must formulate the assumption(s) as expressions, and
1343 we must be careful not to introduce overflow. */
1345 if (integer_nonzerop (iv0->step))
1347 diff = fold_build2 (MINUS_EXPR, type1,
1348 iv0->step, build_int_cst (type1, 1));
1350 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
1351 0 address never belongs to any object, we can assume this for
1352 pointers. */
1353 if (!POINTER_TYPE_P (type))
1355 bound = fold_build2 (PLUS_EXPR, type1,
1356 TYPE_MIN_VALUE (type), diff);
1357 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1358 iv0->base, bound);
1361 /* And then we can compute iv0->base - diff, and compare it with
1362 iv1->base. */
1363 mbzl = fold_build2 (MINUS_EXPR, type1,
1364 fold_convert (type1, iv0->base), diff);
1365 mbzr = fold_convert (type1, iv1->base);
1367 else
1369 diff = fold_build2 (PLUS_EXPR, type1,
1370 iv1->step, build_int_cst (type1, 1));
1372 if (!POINTER_TYPE_P (type))
1374 bound = fold_build2 (PLUS_EXPR, type1,
1375 TYPE_MAX_VALUE (type), diff);
1376 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1377 iv1->base, bound);
1380 mbzl = fold_convert (type1, iv0->base);
1381 mbzr = fold_build2 (MINUS_EXPR, type1,
1382 fold_convert (type1, iv1->base), diff);
1385 if (!integer_nonzerop (assumption))
1386 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1387 niter->assumptions, assumption);
1388 if (!rolls_p)
1390 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
1391 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1392 niter->may_be_zero, mbz);
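/* For example (hypothetical ivs): with iv0 = {b_1, +4} and an invariant
   bound n_2, DIFF = 3 and the recorded may_be_zero condition is
   b_1 - 3 > n_2, i.e. exactly the case where iv1->base - iv0->base
   < -step + 1 and the (delta + step - 1) / step formula would be
   wrong because the loop does not roll.  */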
1396 /* Determines number of iterations of loop whose ending condition
1397 is IV0 < IV1. TYPE is the type of the iv. The number of
1398 iterations is stored to NITER. BNDS bounds the difference
1399 IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
1400 that the exit must be taken eventually. */
1402 static bool
1403 number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
1404 affine_iv *iv1, struct tree_niter_desc *niter,
1405 bool exit_must_be_taken, bounds *bnds)
1407 tree niter_type = unsigned_type_for (type);
1408 tree delta, step, s;
1409 mpz_t mstep, tmp;
1411 if (integer_nonzerop (iv0->step))
1413 niter->control = *iv0;
1414 niter->cmp = LT_EXPR;
1415 niter->bound = iv1->base;
1417 else
1419 niter->control = *iv1;
1420 niter->cmp = GT_EXPR;
1421 niter->bound = iv0->base;
1424 delta = fold_build2 (MINUS_EXPR, niter_type,
1425 fold_convert (niter_type, iv1->base),
1426 fold_convert (niter_type, iv0->base));
1428 /* First handle the special case that the step is +-1. */
1429 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1430 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
1432 /* for (i = iv0->base; i < iv1->base; i++)
1436 for (i = iv1->base; i > iv0->base; i--).
1438 In both cases # of iterations is iv1->base - iv0->base, assuming that
1439 iv1->base >= iv0->base.
1441 First try to derive a lower bound on the value of
1442 iv1->base - iv0->base, computed in full precision. If the difference
1443 is nonnegative, we are done, otherwise we must record the
1444 condition. */
1446 if (mpz_sgn (bnds->below) < 0)
1447 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1448 iv1->base, iv0->base);
1449 niter->niter = delta;
1450 niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
1451 TYPE_SIGN (niter_type));
1452 niter->control.no_overflow = true;
1453 return true;
1456 if (integer_nonzerop (iv0->step))
1457 step = fold_convert (niter_type, iv0->step);
1458 else
1459 step = fold_convert (niter_type,
1460 fold_build1 (NEGATE_EXPR, type, iv1->step));
1462 /* If we can determine the final value of the control iv exactly, we can
1463 transform the condition to != comparison. In particular, this will be
1464 the case if DELTA is constant. */
1465 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1466 exit_must_be_taken, bnds))
1468 affine_iv zps;
1470 zps.base = build_int_cst (niter_type, 0);
1471 zps.step = step;
1472 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1473 zps does not overflow. */
1474 zps.no_overflow = true;
1476 return number_of_iterations_ne (loop, type, &zps,
1477 delta, niter, true, bnds);
1480 /* Make sure that the control iv does not overflow. */
1481 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1482 return false;
1484 /* We determine the number of iterations as (delta + step - 1) / step. For
1485 this to work, we must know that iv1->base >= iv0->base - step + 1,
1486 otherwise the loop does not roll. */
1487 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1489 s = fold_build2 (MINUS_EXPR, niter_type,
1490 step, build_int_cst (niter_type, 1));
1491 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1492 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1494 mpz_init (mstep);
1495 mpz_init (tmp);
1496 wi::to_mpz (step, mstep, UNSIGNED);
1497 mpz_add (tmp, bnds->up, mstep);
1498 mpz_sub_ui (tmp, tmp, 1);
1499 mpz_fdiv_q (tmp, tmp, mstep);
1500 niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
1501 TYPE_SIGN (niter_type));
1502 mpz_clear (mstep);
1503 mpz_clear (tmp);
1505 return true;
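/* E.g. (hypothetical values): for a loop i = a_1; i < n_2; i += 3 this
   yields niter = (n_2 - a_1 + 2) / 3 in the unsigned type; if at run
   time n_2 - a_1 = 10, that is (10 + 2) / 3 = 4 iterations
   (i = a_1, a_1 + 3, a_1 + 6, a_1 + 9).  */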
1508 /* Determines number of iterations of loop whose ending condition
1509 is IV0 <= IV1. TYPE is the type of the iv. The number of
1510 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
1511 we know that this condition must eventually become false (we derived this
1512 earlier, and possibly set NITER->assumptions to make sure this
1513 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1515 static bool
1516 number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
1517 affine_iv *iv1, struct tree_niter_desc *niter,
1518 bool exit_must_be_taken, bounds *bnds)
1520 tree assumption;
1521 tree type1 = type;
1522 if (POINTER_TYPE_P (type))
1523 type1 = sizetype;
1525 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1526 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1527 value of the type. This we must know anyway, since if it is
1528 equal to this value, the loop rolls forever. We do not check
1529 this condition for pointer type ivs, as the code cannot rely on
1530 the object to which the pointer points being placed at the end of
1531 the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1532 not defined for pointers). */
1534 if (!exit_must_be_taken && !POINTER_TYPE_P (type))
1536 if (integer_nonzerop (iv0->step))
1537 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1538 iv1->base, TYPE_MAX_VALUE (type));
1539 else
1540 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1541 iv0->base, TYPE_MIN_VALUE (type));
1543 if (integer_zerop (assumption))
1544 return false;
1545 if (!integer_nonzerop (assumption))
1546 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1547 niter->assumptions, assumption);
1550 if (integer_nonzerop (iv0->step))
1552 if (POINTER_TYPE_P (type))
1553 iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
1554 else
1555 iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1556 build_int_cst (type1, 1));
1558 else if (POINTER_TYPE_P (type))
1559 iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
1560 else
1561 iv0->base = fold_build2 (MINUS_EXPR, type1,
1562 iv0->base, build_int_cst (type1, 1));
1564 bounds_add (bnds, 1, type1);
1566 return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
1567 bnds);
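/* E.g.: an exit test i <= n_1 with step 1 is rewritten as
   i < n_1 + 1 under the assumption n_1 != TYPE_MAX_VALUE; for
   unsigned char i = 0 and n_1 = 200 this gives 201 iterations.  */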
1570 /* Dumps description of affine induction variable IV to FILE. */
1572 static void
1573 dump_affine_iv (FILE *file, affine_iv *iv)
1575 if (!integer_zerop (iv->step))
1576 fprintf (file, "[");
1578 print_generic_expr (file, iv->base, TDF_SLIM);
1580 if (!integer_zerop (iv->step))
1582 fprintf (file, ", + , ");
1583 print_generic_expr (file, iv->step, TDF_SLIM);
1584 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1588 /* Determine the number of iterations according to condition (for staying
1589 inside loop) which compares two induction variables using comparison
1590 operator CODE. The induction variable on left side of the comparison
1591 is IV0, the right-hand side is IV1. Both induction variables must have
1592 type TYPE, which must be an integer or pointer type. The steps of the
1593 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1595 LOOP is the loop whose number of iterations we are determining.
1597 ONLY_EXIT is true if we are sure this is the only way the loop could be
1598 exited (including possibly non-returning function calls, exceptions, etc.)
1599 -- in this case we can use the information whether the control induction
1600 variables can overflow or not in a more efficient way.
1602 if EVERY_ITERATION is true, we know the test is executed on every iteration.
1604 The results (number of iterations and assumptions as described in
1605 comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
1606 Returns false if it fails to determine number of iterations, true if it
1607 was determined (possibly with some assumptions). */
1609 static bool
1610 number_of_iterations_cond (struct loop *loop,
1611 tree type, affine_iv *iv0, enum tree_code code,
1612 affine_iv *iv1, struct tree_niter_desc *niter,
1613 bool only_exit, bool every_iteration)
1615 bool exit_must_be_taken = false, ret;
1616 bounds bnds;
1618 /* If the test is not executed every iteration, wrapping may make the test
1619 pass again.
1620 TODO: the overflow case can be still used as unreliable estimate of upper
1621 bound. But we have no API to pass it down to number of iterations code
1622 and, at present, it will not use it anyway. */
1623 if (!every_iteration
1624 && (!iv0->no_overflow || !iv1->no_overflow
1625 || code == NE_EXPR || code == EQ_EXPR))
1626 return false;
1628 /* The meaning of these assumptions is this:
1629 if !assumptions
1630 then the rest of information does not have to be valid
1631 if may_be_zero then the loop does not roll, even if
1632 niter != 0. */
1633 niter->assumptions = boolean_true_node;
1634 niter->may_be_zero = boolean_false_node;
1635 niter->niter = NULL_TREE;
1636 niter->max = 0;
1637 niter->bound = NULL_TREE;
1638 niter->cmp = ERROR_MARK;
1640 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1641 the control variable is on lhs. */
1642 if (code == GE_EXPR || code == GT_EXPR
1643 || (code == NE_EXPR && integer_zerop (iv0->step)))
1645 std::swap (iv0, iv1);
1646 code = swap_tree_comparison (code);
1649 if (POINTER_TYPE_P (type))
1651 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1652 to the same object. If they do, the control variable cannot wrap
1653 (as wrapping around the bounds of memory will never return a pointer
1654 that would be guaranteed to point to the same object, even if we
1655 avoid undefined behavior by casting to size_t and back). */
1656 iv0->no_overflow = true;
1657 iv1->no_overflow = true;
1660 /* If the control induction variable does not overflow and the only exit
1661 from the loop is the one that we analyze, we know it must be taken
1662 eventually. */
1663 if (only_exit)
1665 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1666 exit_must_be_taken = true;
1667 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1668 exit_must_be_taken = true;
1671 /* We can handle cases which neither of the sides of the comparison is
1672 invariant:
1674 {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
1675 as if:
1676 {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}
1678 provided that either of the conditions below is satisfied:
1680 a) the test is NE_EXPR;
1681 b) iv0.step - iv1.step is positive integer.
1683 This rarely occurs in practice, but it is simple enough to manage. */
1684 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1686 tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1687 tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
1688 iv0->step, iv1->step);
1690 /* No need to check the sign of the new step since the code below
1691 takes care of this well. */
1692 if (code != NE_EXPR && TREE_CODE (step) != INTEGER_CST)
1693 return false;
1695 iv0->step = step;
1696 if (!POINTER_TYPE_P (type))
1697 iv0->no_overflow = false;
1699 iv1->step = build_int_cst (step_type, 0);
1700 iv1->no_overflow = true;
1703 /* If the result of the comparison is a constant, the loop is weird. More
1704 precise handling would be possible, but the situation is not common enough
1705 to waste time on it. */
1706 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1707 return false;
1709 /* Ignore loops of while (i-- < 10) type. */
1710 if (code != NE_EXPR)
1712 if (iv0->step && tree_int_cst_sign_bit (iv0->step))
1713 return false;
1715 if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
1716 return false;
1719 /* If the loop exits immediately, there is nothing to do. */
1720 tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1721 if (tem && integer_zerop (tem))
1723 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1724 niter->max = 0;
1725 return true;
1728 /* OK, now we know we have a sensible loop. Handle several cases, depending
1729 on what comparison operator is used. */
1730 bound_difference (loop, iv1->base, iv0->base, &bnds);
1732 if (dump_file && (dump_flags & TDF_DETAILS))
1734 fprintf (dump_file,
1735 "Analyzing # of iterations of loop %d\n", loop->num);
1737 fprintf (dump_file, " exit condition ");
1738 dump_affine_iv (dump_file, iv0);
1739 fprintf (dump_file, " %s ",
1740 code == NE_EXPR ? "!="
1741 : code == LT_EXPR ? "<"
1742 : "<=");
1743 dump_affine_iv (dump_file, iv1);
1744 fprintf (dump_file, "\n");
1746 fprintf (dump_file, " bounds on difference of bases: ");
1747 mpz_out_str (dump_file, 10, bnds.below);
1748 fprintf (dump_file, " ... ");
1749 mpz_out_str (dump_file, 10, bnds.up);
1750 fprintf (dump_file, "\n");
1753 switch (code)
1755 case NE_EXPR:
1756 gcc_assert (integer_zerop (iv1->step));
1757 ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
1758 exit_must_be_taken, &bnds);
1759 break;
1761 case LT_EXPR:
1762 ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1763 exit_must_be_taken, &bnds);
1764 break;
1766 case LE_EXPR:
1767 ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1768 exit_must_be_taken, &bnds);
1769 break;
1771 default:
1772 gcc_unreachable ();
1775 mpz_clear (bnds.up);
1776 mpz_clear (bnds.below);
1778 if (dump_file && (dump_flags & TDF_DETAILS))
1780 if (ret)
1782 fprintf (dump_file, " result:\n");
1783 if (!integer_nonzerop (niter->assumptions))
1785 fprintf (dump_file, " under assumptions ");
1786 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1787 fprintf (dump_file, "\n");
1790 if (!integer_zerop (niter->may_be_zero))
1792 fprintf (dump_file, " zero if ");
1793 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1794 fprintf (dump_file, "\n");
1797 fprintf (dump_file, " # of iterations ");
1798 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1799 fprintf (dump_file, ", bounded by ");
1800 print_decu (niter->max, dump_file);
1801 fprintf (dump_file, "\n");
1803 else
1804 fprintf (dump_file, " failed\n\n");
1806 return ret;
1811 /* Substitute NEW_TREE for OLD in EXPR and fold the result. */
1811 static tree
1812 simplify_replace_tree (tree expr, tree old, tree new_tree)
1814 unsigned i, n;
1815 tree ret = NULL_TREE, e, se;
1817 if (!expr)
1818 return NULL_TREE;
1820 /* Do not bother to replace constants. */
1821 if (CONSTANT_CLASS_P (old))
1822 return expr;
1824 if (expr == old
1825 || operand_equal_p (expr, old, 0))
1826 return unshare_expr (new_tree);
1828 if (!EXPR_P (expr))
1829 return expr;
1831 n = TREE_OPERAND_LENGTH (expr);
1832 for (i = 0; i < n; i++)
1834 e = TREE_OPERAND (expr, i);
1835 se = simplify_replace_tree (e, old, new_tree);
1836 if (e == se)
1837 continue;
1839 if (!ret)
1840 ret = copy_node (expr);
1842 TREE_OPERAND (ret, i) = se;
1845 return (ret ? fold (ret) : expr);
1848 /* Expand definitions of ssa names in EXPR as long as they are simple
1849 enough, and return the new expression. If STOP is specified, stop
1850 expanding if EXPR equals it. */
1852 tree
1853 expand_simple_operations (tree expr, tree stop)
1855 unsigned i, n;
1856 tree ret = NULL_TREE, e, ee, e1;
1857 enum tree_code code;
1858 gimple *stmt;
1860 if (expr == NULL_TREE)
1861 return expr;
1863 if (is_gimple_min_invariant (expr))
1864 return expr;
1866 code = TREE_CODE (expr);
1867 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1869 n = TREE_OPERAND_LENGTH (expr);
1870 for (i = 0; i < n; i++)
1872 e = TREE_OPERAND (expr, i);
1873 ee = expand_simple_operations (e, stop);
1874 if (e == ee)
1875 continue;
1877 if (!ret)
1878 ret = copy_node (expr);
1880 TREE_OPERAND (ret, i) = ee;
1883 if (!ret)
1884 return expr;
1886 fold_defer_overflow_warnings ();
1887 ret = fold (ret);
1888 fold_undefer_and_ignore_overflow_warnings ();
1889 return ret;
1892 /* Stop if it's not an SSA name, or it's the one we don't want to expand. */
1893 if (TREE_CODE (expr) != SSA_NAME || expr == stop)
1894 return expr;
1896 stmt = SSA_NAME_DEF_STMT (expr);
1897 if (gimple_code (stmt) == GIMPLE_PHI)
1899 basic_block src, dest;
1901 if (gimple_phi_num_args (stmt) != 1)
1902 return expr;
1903 e = PHI_ARG_DEF (stmt, 0);
1905 /* Avoid propagating through loop exit phi nodes, which
1906 could break loop-closed SSA form restrictions. */
1907 dest = gimple_bb (stmt);
1908 src = single_pred (dest);
1909 if (TREE_CODE (e) == SSA_NAME
1910 && src->loop_father != dest->loop_father)
1911 return expr;
1913 return expand_simple_operations (e, stop);
1915 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1916 return expr;
1918 /* Avoid expanding to expressions that contain SSA names that need
1919 to take part in abnormal coalescing. */
1920 ssa_op_iter iter;
1921 FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
1922 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
1923 return expr;
1925 e = gimple_assign_rhs1 (stmt);
1926 code = gimple_assign_rhs_code (stmt);
1927 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1929 if (is_gimple_min_invariant (e))
1930 return e;
1932 if (code == SSA_NAME)
1933 return expand_simple_operations (e, stop);
1935 return expr;
1938 switch (code)
1940 CASE_CONVERT:
1941 /* Casts are simple. */
1942 ee = expand_simple_operations (e, stop);
1943 return fold_build1 (code, TREE_TYPE (expr), ee);
1945 case PLUS_EXPR:
1946 case MINUS_EXPR:
1947 if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
1948 && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
1949 return expr;
1950 /* Fallthru. */
1951 case POINTER_PLUS_EXPR:
1952 /* And increments and decrements by a constant are simple. */
1953 e1 = gimple_assign_rhs2 (stmt);
1954 if (!is_gimple_min_invariant (e1))
1955 return expr;
1957 ee = expand_simple_operations (e, stop);
1958 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
1960 default:
1961 return expr;
1965 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1966 expression (or EXPR unchanged, if no simplification was possible). */
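/* A small illustration with assumed values: for COND = (n_1 == 100)
   and EXPR = (n_1 > 0), substituting 100 for n_1 in EXPR via
   simplify_replace_tree yields 100 > 0, which folds to true, so
   EXPR simplifies to boolean_true_node.  */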
1968 static tree
1969 tree_simplify_using_condition_1 (tree cond, tree expr)
1971 bool changed;
1972 tree e, e0, e1, e2, notcond;
1973 enum tree_code code = TREE_CODE (expr);
1975 if (code == INTEGER_CST)
1976 return expr;
1978 if (code == TRUTH_OR_EXPR
1979 || code == TRUTH_AND_EXPR
1980 || code == COND_EXPR)
1982 changed = false;
1984 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
1985 if (TREE_OPERAND (expr, 0) != e0)
1986 changed = true;
1988 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
1989 if (TREE_OPERAND (expr, 1) != e1)
1990 changed = true;
1992 if (code == COND_EXPR)
1994 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
1995 if (TREE_OPERAND (expr, 2) != e2)
1996 changed = true;
1998 else
1999 e2 = NULL_TREE;
2001 if (changed)
2003 if (code == COND_EXPR)
2004 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2005 else
2006 expr = fold_build2 (code, boolean_type_node, e0, e1);
2009 return expr;
2012 /* In case COND is an equality, we may be able to simplify EXPR by copy/constant
2013 propagation, and vice versa. Fold does not handle this, since it is
2014 considered too expensive. */
2015 if (TREE_CODE (cond) == EQ_EXPR)
2017 e0 = TREE_OPERAND (cond, 0);
2018 e1 = TREE_OPERAND (cond, 1);
2020 /* We know that e0 == e1. Check whether we can simplify EXPR
2021 using this fact. */
2022 e = simplify_replace_tree (expr, e0, e1);
2023 if (integer_zerop (e) || integer_nonzerop (e))
2024 return e;
2026 e = simplify_replace_tree (expr, e1, e0);
2027 if (integer_zerop (e) || integer_nonzerop (e))
2028 return e;
2030 if (TREE_CODE (expr) == EQ_EXPR)
2032 e0 = TREE_OPERAND (expr, 0);
2033 e1 = TREE_OPERAND (expr, 1);
2035 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
2036 e = simplify_replace_tree (cond, e0, e1);
2037 if (integer_zerop (e))
2038 return e;
2039 e = simplify_replace_tree (cond, e1, e0);
2040 if (integer_zerop (e))
2041 return e;
2043 if (TREE_CODE (expr) == NE_EXPR)
2045 e0 = TREE_OPERAND (expr, 0);
2046 e1 = TREE_OPERAND (expr, 1);
2048 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2049 e = simplify_replace_tree (cond, e0, e1);
2050 if (integer_zerop (e))
2051 return boolean_true_node;
2052 e = simplify_replace_tree (cond, e1, e0);
2053 if (integer_zerop (e))
2054 return boolean_true_node;
2057 /* Check whether COND ==> EXPR. */
2058 notcond = invert_truthvalue (cond);
2059 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
2060 if (e && integer_nonzerop (e))
2061 return e;
2063 /* Check whether COND ==> not EXPR. */
2064 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
2065 if (e && integer_zerop (e))
2066 return e;
2068 return expr;
2071 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2072 expression (or EXPR unchanged, if no simplification was possible).
2073 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2074 of simple operations in definitions of ssa names in COND are expanded,
2075 so that things like casts or incrementing the value of the bound before
2076 the loop do not cause us to fail. */
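/* For instance, under an assumed IL where the bound is incremented
   before the loop as n_2 = m_1 + 1, a guard COND = (n_2 > 1) is first
   expanded to m_1 + 1 > 1, so it can be used to simplify an EXPR
   written in terms of m_1.  */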
2078 static tree
2079 tree_simplify_using_condition (tree cond, tree expr)
2081 cond = expand_simple_operations (cond);
2083 return tree_simplify_using_condition_1 (cond, expr);
2086 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2087 Returns the simplified expression (or EXPR unchanged, if no
2088 simplification was possible). */
2090 tree
2091 simplify_using_initial_conditions (struct loop *loop, tree expr)
2093 edge e;
2094 basic_block bb;
2095 gimple *stmt;
2096 tree cond, expanded, backup;
2097 int cnt = 0;
2099 if (TREE_CODE (expr) == INTEGER_CST)
2100 return expr;
2102 backup = expanded = expand_simple_operations (expr);
2104 /* Limit walking the dominators to avoid quadratic behavior in
2105 the number of BBs times the number of loops in degenerate
2106 cases. */
2107 for (bb = loop->header;
2108 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2109 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2111 if (!single_pred_p (bb))
2112 continue;
2113 e = single_pred_edge (bb);
2115 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2116 continue;
2118 stmt = last_stmt (e->src);
2119 cond = fold_build2 (gimple_cond_code (stmt),
2120 boolean_type_node,
2121 gimple_cond_lhs (stmt),
2122 gimple_cond_rhs (stmt));
2123 if (e->flags & EDGE_FALSE_VALUE)
2124 cond = invert_truthvalue (cond);
2125 expanded = tree_simplify_using_condition (cond, expanded);
2126 /* Stop if EXPR was simplified to a constant value. */
2127 if (expanded
2128 && (integer_zerop (expanded) || integer_nonzerop (expanded)))
2129 return expanded;
2131 ++cnt;
2134 /* Return the original expression if no simplification is done. */
2135 return operand_equal_p (backup, expanded, 0) ? expr : expanded;
2138 /* Tries to simplify EXPR using the evolutions of the loop invariants
2139 in the superloops of LOOP. Returns the simplified expression
2140 (or EXPR unchanged, if no simplification was possible). */
2142 static tree
2143 simplify_using_outer_evolutions (struct loop *loop, tree expr)
2145 enum tree_code code = TREE_CODE (expr);
2146 bool changed;
2147 tree e, e0, e1, e2;
2149 if (is_gimple_min_invariant (expr))
2150 return expr;
2152 if (code == TRUTH_OR_EXPR
2153 || code == TRUTH_AND_EXPR
2154 || code == COND_EXPR)
2156 changed = false;
2158 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2159 if (TREE_OPERAND (expr, 0) != e0)
2160 changed = true;
2162 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2163 if (TREE_OPERAND (expr, 1) != e1)
2164 changed = true;
2166 if (code == COND_EXPR)
2168 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2169 if (TREE_OPERAND (expr, 2) != e2)
2170 changed = true;
2172 else
2173 e2 = NULL_TREE;
2175 if (changed)
2177 if (code == COND_EXPR)
2178 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2179 else
2180 expr = fold_build2 (code, boolean_type_node, e0, e1);
2183 return expr;
2186 e = instantiate_parameters (loop, expr);
2187 if (is_gimple_min_invariant (e))
2188 return e;
2190 return expr;
2193 /* Returns true if EXIT is the only possible exit from LOOP. */
2195 bool
2196 loop_only_exit_p (const struct loop *loop, const_edge exit)
2198 basic_block *body;
2199 gimple_stmt_iterator bsi;
2200 unsigned i;
2202 if (exit != single_exit (loop))
2203 return false;
2205 body = get_loop_body (loop);
2206 for (i = 0; i < loop->num_nodes; i++)
2208 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2209 if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
2211 free (body);
2212 return false;
2216 free (body);
2217 return true;
2220 /* Stores description of number of iterations of LOOP derived from
2221 EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
2222 information could be derived (and fields of NITER have meaning described
2223 in comments at struct tree_niter_desc declaration), false otherwise.
2224 When EVERY_ITERATION is true, only tests that are known to be executed
2225 every iteration are considered (i.e. only a test that alone bounds the loop).
2226 If AT_STMT is not NULL, this function stores LOOP's condition statement in
2227 it when returning true. */
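/* A minimal example of what is computed here, for illustration only:
   for a loop

     for (i = 0; i < n; i++)
       ...

   whose exit edge is taken when i >= n, the condition for staying in
   the loop is i < n with iv0 = {0, +, 1} and iv1 = {n, +, 0}, and the
   analysis roughly yields niter->niter = (unsigned) n, with
   niter->may_be_zero covering the case n <= 0.  */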
2229 bool
2230 number_of_iterations_exit_assumptions (struct loop *loop, edge exit,
2231 struct tree_niter_desc *niter,
2232 gcond **at_stmt, bool every_iteration)
2234 gimple *last;
2235 gcond *stmt;
2236 tree type;
2237 tree op0, op1;
2238 enum tree_code code;
2239 affine_iv iv0, iv1;
2240 bool safe;
2242 /* Nothing to analyze if the loop is known to be infinite. */
2243 if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
2244 return false;
2246 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2248 if (every_iteration && !safe)
2249 return false;
2251 niter->assumptions = boolean_false_node;
2252 niter->control.base = NULL_TREE;
2253 niter->control.step = NULL_TREE;
2254 niter->control.no_overflow = false;
2255 last = last_stmt (exit->src);
2256 if (!last)
2257 return false;
2258 stmt = dyn_cast <gcond *> (last);
2259 if (!stmt)
2260 return false;
2262 /* We want the condition for staying inside the loop. */
2263 code = gimple_cond_code (stmt);
2264 if (exit->flags & EDGE_TRUE_VALUE)
2265 code = invert_tree_comparison (code, false);
2267 switch (code)
2269 case GT_EXPR:
2270 case GE_EXPR:
2271 case LT_EXPR:
2272 case LE_EXPR:
2273 case NE_EXPR:
2274 break;
2276 default:
2277 return false;
2280 op0 = gimple_cond_lhs (stmt);
2281 op1 = gimple_cond_rhs (stmt);
2282 type = TREE_TYPE (op0);
2284 if (TREE_CODE (type) != INTEGER_TYPE
2285 && !POINTER_TYPE_P (type))
2286 return false;
2288 tree iv0_niters = NULL_TREE;
2289 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2290 op0, &iv0, &iv0_niters, false))
2291 return false;
2292 tree iv1_niters = NULL_TREE;
2293 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2294 op1, &iv1, &iv1_niters, false))
2295 return false;
2296 /* Give up on the complicated case. */
2297 if (iv0_niters && iv1_niters)
2298 return false;
2300 /* We don't want to see undefined signed overflow warnings while
2301 computing the number of iterations. */
2302 fold_defer_overflow_warnings ();
2304 iv0.base = expand_simple_operations (iv0.base);
2305 iv1.base = expand_simple_operations (iv1.base);
2306 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2307 loop_only_exit_p (loop, exit), safe))
2309 fold_undefer_and_ignore_overflow_warnings ();
2310 return false;
2313 /* Incorporate additional assumption implied by control iv. */
2314 tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
2315 if (iv_niters)
2317 tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
2318 fold_convert (TREE_TYPE (niter->niter),
2319 iv_niters));
2321 if (!integer_nonzerop (assumption))
2322 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2323 niter->assumptions, assumption);
2325 /* Refine upper bound if possible. */
2326 if (TREE_CODE (iv_niters) == INTEGER_CST
2327 && niter->max > wi::to_widest (iv_niters))
2328 niter->max = wi::to_widest (iv_niters);
2331 /* There are no assumptions if the loop is known to be finite. */
2332 if (!integer_zerop (niter->assumptions)
2333 && loop_constraint_set_p (loop, LOOP_C_FINITE))
2334 niter->assumptions = boolean_true_node;
2336 if (optimize >= 3)
2338 niter->assumptions = simplify_using_outer_evolutions (loop,
2339 niter->assumptions);
2340 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2341 niter->may_be_zero);
2342 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2345 niter->assumptions
2346 = simplify_using_initial_conditions (loop,
2347 niter->assumptions);
2348 niter->may_be_zero
2349 = simplify_using_initial_conditions (loop,
2350 niter->may_be_zero);
2352 fold_undefer_and_ignore_overflow_warnings ();
2354 /* If NITER has simplified into a constant, update MAX. */
2355 if (TREE_CODE (niter->niter) == INTEGER_CST)
2356 niter->max = wi::to_widest (niter->niter);
2358 if (at_stmt)
2359 *at_stmt = stmt;
2361 return (!integer_zerop (niter->assumptions));
2364 /* Like number_of_iterations_exit_assumptions, but return TRUE only if
2365 the niter information holds unconditionally. */
2367 bool
2368 number_of_iterations_exit (struct loop *loop, edge exit,
2369 struct tree_niter_desc *niter,
2370 bool warn, bool every_iteration)
2372 gcond *stmt;
2373 if (!number_of_iterations_exit_assumptions (loop, exit, niter,
2374 &stmt, every_iteration))
2375 return false;
2377 if (integer_nonzerop (niter->assumptions))
2378 return true;
2380 if (warn)
2381 warning_at (gimple_location_safe (stmt),
2382 OPT_Wunsafe_loop_optimizations,
2383 "missed loop optimization, the loop counter may overflow");
2385 return false;
2388 /* Try to determine the number of iterations of LOOP. If we succeed,
2389 the expression giving the number of iterations is returned and *EXIT is
2390 set to the edge from which the information is obtained. Otherwise
2391 chrec_dont_know is returned. */
2393 tree
2394 find_loop_niter (struct loop *loop, edge *exit)
2396 unsigned i;
2397 vec<edge> exits = get_loop_exit_edges (loop);
2398 edge ex;
2399 tree niter = NULL_TREE, aniter;
2400 struct tree_niter_desc desc;
2402 *exit = NULL;
2403 FOR_EACH_VEC_ELT (exits, i, ex)
2405 if (!number_of_iterations_exit (loop, ex, &desc, false))
2406 continue;
2408 if (integer_nonzerop (desc.may_be_zero))
2410 /* We exit in the first iteration through this exit.
2411 We won't find anything better. */
2412 niter = build_int_cst (unsigned_type_node, 0);
2413 *exit = ex;
2414 break;
2417 if (!integer_zerop (desc.may_be_zero))
2418 continue;
2420 aniter = desc.niter;
2422 if (!niter)
2424 /* Nothing recorded yet. */
2425 niter = aniter;
2426 *exit = ex;
2427 continue;
2430 /* Prefer constants, the lower the better. */
2431 if (TREE_CODE (aniter) != INTEGER_CST)
2432 continue;
2434 if (TREE_CODE (niter) != INTEGER_CST)
2436 niter = aniter;
2437 *exit = ex;
2438 continue;
2441 if (tree_int_cst_lt (aniter, niter))
2443 niter = aniter;
2444 *exit = ex;
2445 continue;
2448 exits.release ();
2450 return niter ? niter : chrec_dont_know;
2453 /* Return true if the loop is known to have a bounded number of iterations. */
2455 bool
2456 finite_loop_p (struct loop *loop)
2458 widest_int nit;
2459 int flags;
2461 flags = flags_from_decl_or_type (current_function_decl);
2462 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2464 if (dump_file && (dump_flags & TDF_DETAILS))
2465 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2466 loop->num);
2467 return true;
2470 if (loop->any_upper_bound
2471 || max_loop_iterations (loop, &nit))
2473 if (dump_file && (dump_flags & TDF_DETAILS))
2474 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2475 loop->num);
2476 return true;
2478 return false;
2483 Analysis of a number of iterations of a loop by a brute-force evaluation.
2487 /* Bound on the number of iterations we try to evaluate. */
2489 #define MAX_ITERATIONS_TO_TRACK \
2490 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2492 /* Returns the loop phi node of LOOP such that the SSA name X is derived from
2493 its result by a chain of operations in which all but exactly one of the
2494 operands are constants. */
2496 static gphi *
2497 chain_of_csts_start (struct loop *loop, tree x)
2499 gimple *stmt = SSA_NAME_DEF_STMT (x);
2500 tree use;
2501 basic_block bb = gimple_bb (stmt);
2502 enum tree_code code;
2504 if (!bb
2505 || !flow_bb_inside_loop_p (loop, bb))
2506 return NULL;
2508 if (gimple_code (stmt) == GIMPLE_PHI)
2510 if (bb == loop->header)
2511 return as_a <gphi *> (stmt);
2513 return NULL;
2516 if (gimple_code (stmt) != GIMPLE_ASSIGN
2517 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2518 return NULL;
2520 code = gimple_assign_rhs_code (stmt);
2521 if (gimple_references_memory_p (stmt)
2522 || TREE_CODE_CLASS (code) == tcc_reference
2523 || (code == ADDR_EXPR
2524 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2525 return NULL;
2527 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2528 if (use == NULL_TREE)
2529 return NULL;
2531 return chain_of_csts_start (loop, use);
2534 /* Determines whether the expression X is derived from a result of a phi node
2535 in header of LOOP such that
2537 * the derivation of X consists only of operations with constants
2538 * the initial value of the phi node is constant
2539 * the value of the phi node in the next iteration can be derived from the
2540 value in the current iteration by a chain of operations with constants,
2541 or is also a constant
2543 If such phi node exists, it is returned, otherwise NULL is returned. */
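/* Example over hypothetical IL: for the header phi

     i_1 = PHI <0 (preheader), i_2 (latch)>
     i_2 = i_1 + 3;

   get_base_for returns the phi for both X = i_1 and X = i_2: the
   initial value 0 is constant and the latch value i_2 is derived from
   the phi result by a single operation with a constant.  */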
2545 static gphi *
2546 get_base_for (struct loop *loop, tree x)
2548 gphi *phi;
2549 tree init, next;
2551 if (is_gimple_min_invariant (x))
2552 return NULL;
2554 phi = chain_of_csts_start (loop, x);
2555 if (!phi)
2556 return NULL;
2558 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2559 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2561 if (!is_gimple_min_invariant (init))
2562 return NULL;
2564 if (TREE_CODE (next) == SSA_NAME
2565 && chain_of_csts_start (loop, next) != phi)
2566 return NULL;
2568 return phi;
2571 /* Given an expression X, then
2573 * if X is NULL_TREE, we return the constant BASE.
2574 * if X is a constant, we return the constant X.
2575 * otherwise X is an SSA name, whose value in the considered loop is derived
2576 by a chain of operations with constants from a result of a phi node in
2577 the header of the loop. Then we return the value of X when the value of the
2578 result of this phi node is given by the constant BASE. */
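/* Continuing the hypothetical sketch above: with i_2 = i_1 + 3 and
   i_1 defined by the header phi, get_val_for (i_2, 0) walks to the
   phi, substitutes BASE = 0 for its result and returns
   fold (0 + 3), i.e. 3.  */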
2580 static tree
2581 get_val_for (tree x, tree base)
2583 gimple *stmt;
2585 gcc_checking_assert (is_gimple_min_invariant (base));
2587 if (!x)
2588 return base;
2589 else if (is_gimple_min_invariant (x))
2590 return x;
2592 stmt = SSA_NAME_DEF_STMT (x);
2593 if (gimple_code (stmt) == GIMPLE_PHI)
2594 return base;
2596 gcc_checking_assert (is_gimple_assign (stmt));
2598 /* STMT must be either an assignment of a single SSA name or an
2599 expression involving an SSA name and a constant. Try to fold that
2600 expression using the value for the SSA name. */
2601 if (gimple_assign_ssa_name_copy_p (stmt))
2602 return get_val_for (gimple_assign_rhs1 (stmt), base);
2603 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2604 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2605 return fold_build1 (gimple_assign_rhs_code (stmt),
2606 gimple_expr_type (stmt),
2607 get_val_for (gimple_assign_rhs1 (stmt), base));
2608 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2610 tree rhs1 = gimple_assign_rhs1 (stmt);
2611 tree rhs2 = gimple_assign_rhs2 (stmt);
2612 if (TREE_CODE (rhs1) == SSA_NAME)
2613 rhs1 = get_val_for (rhs1, base);
2614 else if (TREE_CODE (rhs2) == SSA_NAME)
2615 rhs2 = get_val_for (rhs2, base);
2616 else
2617 gcc_unreachable ();
2618 return fold_build2 (gimple_assign_rhs_code (stmt),
2619 gimple_expr_type (stmt), rhs1, rhs2);
2621 else
2622 gcc_unreachable ();
2626 /* Tries to count the number of iterations of LOOP till it exits by EXIT
2627 by brute force -- i.e. by determining the value of the operands of the
2628 condition at EXIT in the first few iterations of the loop (assuming that
2629 these values are constant) and determining the first iteration in which the
2630 condition is not satisfied. Returns the constant giving the number
2631 of the iterations of LOOP if successful, chrec_dont_know otherwise. */
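/* A worked example under assumed source code: for

     for (i = 0; i != 9; i += 3)
       ...

   the values of i at the exit test are 0, 3, 6, 9; the condition for
   staying in the loop first fails at its fourth evaluation, so the
   function returns the constant 3.  */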
2633 tree
2634 loop_niter_by_eval (struct loop *loop, edge exit)
2636 tree acnd;
2637 tree op[2], val[2], next[2], aval[2];
2638 gphi *phi;
2639 gimple *cond;
2640 unsigned i, j;
2641 enum tree_code cmp;
2643 cond = last_stmt (exit->src);
2644 if (!cond || gimple_code (cond) != GIMPLE_COND)
2645 return chrec_dont_know;
2647 cmp = gimple_cond_code (cond);
2648 if (exit->flags & EDGE_TRUE_VALUE)
2649 cmp = invert_tree_comparison (cmp, false);
2651 switch (cmp)
2653 case EQ_EXPR:
2654 case NE_EXPR:
2655 case GT_EXPR:
2656 case GE_EXPR:
2657 case LT_EXPR:
2658 case LE_EXPR:
2659 op[0] = gimple_cond_lhs (cond);
2660 op[1] = gimple_cond_rhs (cond);
2661 break;
2663 default:
2664 return chrec_dont_know;
2667 for (j = 0; j < 2; j++)
2669 if (is_gimple_min_invariant (op[j]))
2671 val[j] = op[j];
2672 next[j] = NULL_TREE;
2673 op[j] = NULL_TREE;
2675 else
2677 phi = get_base_for (loop, op[j]);
2678 if (!phi)
2679 return chrec_dont_know;
2680 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2681 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2685 /* Don't issue signed overflow warnings. */
2686 fold_defer_overflow_warnings ();
2688 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2690 for (j = 0; j < 2; j++)
2691 aval[j] = get_val_for (op[j], val[j]);
2693 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2694 if (acnd && integer_zerop (acnd))
2696 fold_undefer_and_ignore_overflow_warnings ();
2697 if (dump_file && (dump_flags & TDF_DETAILS))
2698 fprintf (dump_file,
2699 "Proved that loop %d iterates %d times using brute force.\n",
2700 loop->num, i);
2701 return build_int_cst (unsigned_type_node, i);
2704 for (j = 0; j < 2; j++)
2706 aval[j] = val[j];
2707 val[j] = get_val_for (next[j], val[j]);
2708 if (!is_gimple_min_invariant (val[j]))
2710 fold_undefer_and_ignore_overflow_warnings ();
2711 return chrec_dont_know;
2715 /* If the next iteration would use the same base values
2716 as the current one, there is no point looping further,
2717 all following iterations will be the same as this one. */
2718 if (val[0] == aval[0] && val[1] == aval[1])
2719 break;
2722 fold_undefer_and_ignore_overflow_warnings ();
2724 return chrec_dont_know;
2727 /* Finds the exit of LOOP through which the loop exits after a constant
2728 number of iterations and stores the exit edge to *EXIT. The constant
2729 giving the number of iterations of LOOP is returned. The number of
2730 iterations is determined using loop_niter_by_eval (i.e. by brute force
2731 evaluation). If we are unable to find an exit for which loop_niter_by_eval
2732 determines the number of iterations, chrec_dont_know is returned. */
2734 tree
2735 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2737 unsigned i;
2738 vec<edge> exits = get_loop_exit_edges (loop);
2739 edge ex;
2740 tree niter = NULL_TREE, aniter;
2742 *exit = NULL;
2744 /* Loops with multiple exits are expensive to handle and less important. */
2745 if (!flag_expensive_optimizations
2746 && exits.length () > 1)
2748 exits.release ();
2749 return chrec_dont_know;
2752 FOR_EACH_VEC_ELT (exits, i, ex)
2754 if (!just_once_each_iteration_p (loop, ex->src))
2755 continue;
2757 aniter = loop_niter_by_eval (loop, ex);
2758 if (chrec_contains_undetermined (aniter))
2759 continue;
2761 if (niter
2762 && !tree_int_cst_lt (aniter, niter))
2763 continue;
2765 niter = aniter;
2766 *exit = ex;
2768 exits.release ();
2770 return niter ? niter : chrec_dont_know;
2775 Analysis of upper bounds on number of iterations of a loop.
2779 static widest_int derive_constant_upper_bound_ops (tree, tree,
2780 enum tree_code, tree);
2782 /* Returns a constant upper bound on the value of the right-hand side of
2783 an assignment statement STMT. */
2785 static widest_int
2786 derive_constant_upper_bound_assign (gimple *stmt)
2788 enum tree_code code = gimple_assign_rhs_code (stmt);
2789 tree op0 = gimple_assign_rhs1 (stmt);
2790 tree op1 = gimple_assign_rhs2 (stmt);
2792 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2793 op0, code, op1);
2796 /* Returns a constant upper bound on the value of expression VAL. VAL
2797 is considered to be unsigned. If its type is signed, its value must
2798 be nonnegative. */
2800 static widest_int
2801 derive_constant_upper_bound (tree val)
2803 enum tree_code code;
2804 tree op0, op1, op2;
2806 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
2807 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2810 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2811 whose type is TYPE. The expression is considered to be unsigned. If
2812 its type is signed, its value must be nonnegative. */
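/* A small worked case with assumed values: for an unsigned
   VAL = (x_1 & 15) + 3, the BIT_AND_EXPR case bounds x_1 & 15 by 15
   and the PLUS_EXPR handling then yields 15 + 3 = 18, provided 18
   still fits in the type; otherwise the type's maximum value is
   returned.  */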
2814 static widest_int
2815 derive_constant_upper_bound_ops (tree type, tree op0,
2816 enum tree_code code, tree op1)
2818 tree subtype, maxt;
2819 widest_int bnd, max, cst;
2820 gimple *stmt;
2822 if (INTEGRAL_TYPE_P (type))
2823 maxt = TYPE_MAX_VALUE (type);
2824 else
2825 maxt = upper_bound_in_type (type, type);
2827 max = wi::to_widest (maxt);
2829 switch (code)
2831 case INTEGER_CST:
2832 return wi::to_widest (op0);
2834 CASE_CONVERT:
2835 subtype = TREE_TYPE (op0);
2836 if (!TYPE_UNSIGNED (subtype)
2837 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2838 that OP0 is nonnegative. */
2839 && TYPE_UNSIGNED (type)
2840 && !tree_expr_nonnegative_p (op0))
2842 /* If we cannot prove that the casted expression is nonnegative,
2843 we cannot establish more useful upper bound than the precision
2844 of the type gives us. */
2845 return max;
2848 /* We now know that op0 is a nonnegative value. Try deriving an upper
2849 bound for it. */
2850 bnd = derive_constant_upper_bound (op0);
2852 /* If the bound does not fit in TYPE, max. value of TYPE could be
2853 attained. */
2854 if (wi::ltu_p (max, bnd))
2855 return max;
2857 return bnd;
2859 case PLUS_EXPR:
2860 case POINTER_PLUS_EXPR:
2861 case MINUS_EXPR:
2862 if (TREE_CODE (op1) != INTEGER_CST
2863 || !tree_expr_nonnegative_p (op0))
2864 return max;
2866 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
2867 choose the most logical way to treat this constant regardless
2868 of the signedness of the type. */
2869 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
2870 if (code != MINUS_EXPR)
2871 cst = -cst;
2873 bnd = derive_constant_upper_bound (op0);
2875 if (wi::neg_p (cst))
2877 cst = -cst;
2878 /* Avoid CST == 0x80000... */
2879 if (wi::neg_p (cst))
2880 return max;
2882 /* OP0 + CST. We need to check that
2883 BND <= MAX (type) - CST. */
2885 widest_int mmax = max - cst;
2886 if (wi::leu_p (bnd, mmax))
2887 return bnd + cst;
2889 return max;
2891 else
2893 /* OP0 - CST, where CST >= 0.
2895 If TYPE is signed, we have already verified that OP0 >= 0, and we
2896 know that the result is nonnegative. This implies that
2897 VAL <= BND - CST.
2899 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2900 otherwise the operation underflows.
2903 /* This should only happen if the type is unsigned; however, for
2904 buggy programs that use overflowing signed arithmetic even with
2905 -fno-wrapv, this condition may also be true for signed values. */
2906 if (wi::ltu_p (bnd, cst))
2907 return max;
2909 if (TYPE_UNSIGNED (type))
2911 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2912 wide_int_to_tree (type, cst));
2913 if (!tem || !integer_nonzerop (tem))
2914 return max;
2917 bnd -= cst;
2920 return bnd;
2922 case FLOOR_DIV_EXPR:
2923 case EXACT_DIV_EXPR:
2924 if (TREE_CODE (op1) != INTEGER_CST
2925 || tree_int_cst_sign_bit (op1))
2926 return max;
2928 bnd = derive_constant_upper_bound (op0);
2929 return wi::udiv_floor (bnd, wi::to_widest (op1));
2931 case BIT_AND_EXPR:
2932 if (TREE_CODE (op1) != INTEGER_CST
2933 || tree_int_cst_sign_bit (op1))
2934 return max;
2935 return wi::to_widest (op1);
2937 case SSA_NAME:
2938 stmt = SSA_NAME_DEF_STMT (op0);
2939 if (gimple_code (stmt) != GIMPLE_ASSIGN
2940 || gimple_assign_lhs (stmt) != op0)
2941 return max;
2942 return derive_constant_upper_bound_assign (stmt);
2944 default:
2945 return max;
2949 /* Emit a -Waggressive-loop-optimizations warning if needed. */
2951 static void
2952 do_warn_aggressive_loop_optimizations (struct loop *loop,
2953 widest_int i_bound, gimple *stmt)
2955 /* Don't warn if the loop doesn't have a known constant bound. */
2956 if (!loop->nb_iterations
2957 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
2958 || !warn_aggressive_loop_optimizations
2959 /* To avoid warning multiple times for the same loop,
2960 only start warning when we preserve loops. */
2961 || (cfun->curr_properties & PROP_loops) == 0
2962 /* Only warn once per loop. */
2963 || loop->warned_aggressive_loop_optimizations
2964 /* Only warn if undefined behavior gives us lower estimate than the
2965 known constant bound. */
2966 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
2967 /* And undefined behavior happens unconditionally. */
2968 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
2969 return;
2971 edge e = single_exit (loop);
2972 if (e == NULL)
2973 return;
2975 gimple *estmt = last_stmt (e->src);
2976 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2977 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
2978 ? UNSIGNED : SIGNED);
2979 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
2980 "iteration %s invokes undefined behavior", buf))
2981 inform (gimple_location (estmt), "within this loop");
2982 loop->warned_aggressive_loop_optimizations = true;
2985 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
2986 is true if the loop is exited immediately after STMT, and this exit
2987 is taken at the latest when STMT is executed BOUND + 1 times.
2988 REALISTIC is true if BOUND is expected to be close to the real number
2989 of iterations. UPPER is true if we are sure the loop iterates at most
2990 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
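/* For example, in an assumed situation where AT_STMT is the exit test
   known to be evaluated at most 100 + 1 times: BOUND is the tree 100,
   I_BOUND the widest_int 100 and IS_EXIT is true, so the latch is
   recorded to run at most 100 times; for a non-exit statement the
   recorded bound would be I_BOUND + 1.  */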
2992 static void
2993 record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
2994 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
2996 widest_int delta;
2998 if (dump_file && (dump_flags & TDF_DETAILS))
3000 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
3001 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
3002 fprintf (dump_file, " is %sexecuted at most ",
3003 upper ? "" : "probably ");
3004 print_generic_expr (dump_file, bound, TDF_SLIM);
3005 fprintf (dump_file, " (bounded by ");
3006 print_decu (i_bound, dump_file);
3007 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
3010 /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
3011 real number of iterations. */
3012 if (TREE_CODE (bound) != INTEGER_CST)
3013 realistic = false;
3014 else
3015 gcc_checking_assert (i_bound == wi::to_widest (bound));
3017 /* If we have a guaranteed upper bound, record it in the appropriate
3018 list, unless this is an !is_exit bound (i.e. undefined behavior in
3019 at_stmt) in a loop with known constant number of iterations. */
3020 if (upper
3021 && (is_exit
3022 || loop->nb_iterations == NULL_TREE
3023 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
3025 struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
3027 elt->bound = i_bound;
3028 elt->stmt = at_stmt;
3029 elt->is_exit = is_exit;
3030 elt->next = loop->bounds;
3031 loop->bounds = elt;
3034 /* Only if the statement is executed on every path to the loop latch can we
3035 directly infer the upper bound on the # of iterations of the loop. */
3036 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
3037 upper = false;
3039 /* Update the number of iteration estimates according to the bound.
3040 If at_stmt is an exit then the loop latch is executed at most BOUND times,
3041 otherwise it can be executed BOUND + 1 times. We will lower the estimate
3042 later if such a statement must be executed on the last iteration. */
3043 if (is_exit)
3044 delta = 0;
3045 else
3046 delta = 1;
3047 widest_int new_i_bound = i_bound + delta;
3049 /* If an overflow occurred, ignore the result. */
3050 if (wi::ltu_p (new_i_bound, delta))
3051 return;
3053 if (upper && !is_exit)
3054 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3055 record_niter_bound (loop, new_i_bound, realistic, upper);
3058 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3059 and doesn't overflow. */
3061 static void
3062 record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
3064 struct control_iv *iv;
3066 if (!niter->control.base || !niter->control.step)
3067 return;
3069 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3070 return;
3072 iv = ggc_alloc<control_iv> ();
3073 iv->base = niter->control.base;
3074 iv->step = niter->control.step;
3075 iv->next = loop->control_ivs;
3076 loop->control_ivs = iv;
3078 return;
3081 /* This function returns TRUE if the conditions below are satisfied:
3082 1) VAR is an SSA variable.
3083 2) VAR is an IV:{base, step} in its defining loop.
3084 3) The IV doesn't overflow.
3085 4) Both base and step are integer constants.
3086 5) Base is the MIN/MAX value, depending on IS_MIN.
3087 The value of base is stored to INIT correspondingly. */
3089 static bool
3090 get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
3092 if (TREE_CODE (var) != SSA_NAME)
3093 return false;
3095 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
3096 struct loop *loop = loop_containing_stmt (def_stmt);
3098 if (loop == NULL)
3099 return false;
3101 affine_iv iv;
3102 if (!simple_iv (loop, loop, var, &iv, false))
3103 return false;
3105 if (!iv.no_overflow)
3106 return false;
3108 if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
3109 return false;
3111 if (is_min == tree_int_cst_sign_bit (iv.step))
3112 return false;
3114 *init = iv.base;
3115 return true;
3118 /* Record the estimate on number of iterations of LOOP based on the fact that
3119 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3120 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3121 estimated number of iterations is expected to be close to the real one.
3122 UPPER is true if we are sure the induction variable does not wrap. */
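/* As an illustration with hypothetical numbers: for an IV
   0 + 4 * iteration of unsigned type evaluated in STMT with the range
   <0, 40>, the value would leave the range after (40 - 0) / 4
   executions, so STMT runs at most 10 + 1 times and record_estimate
   is invoked with that bound.  */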
3124 static void
3125 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
3126 tree low, tree high, bool realistic, bool upper)
3128 tree niter_bound, extreme, delta;
3129 tree type = TREE_TYPE (base), unsigned_type;
3130 tree orig_base = base;
3132 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3133 return;
3135 if (dump_file && (dump_flags & TDF_DETAILS))
3137 fprintf (dump_file, "Induction variable (");
3138 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3139 fprintf (dump_file, ") ");
3140 print_generic_expr (dump_file, base, TDF_SLIM);
3141 fprintf (dump_file, " + ");
3142 print_generic_expr (dump_file, step, TDF_SLIM);
3143 fprintf (dump_file, " * iteration does not wrap in statement ");
3144 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3145 fprintf (dump_file, " in loop %d.\n", loop->num);
3148 unsigned_type = unsigned_type_for (type);
3149 base = fold_convert (unsigned_type, base);
3150 step = fold_convert (unsigned_type, step);
3152 if (tree_int_cst_sign_bit (step))
3154 wide_int min, max;
3155 extreme = fold_convert (unsigned_type, low);
3156 if (TREE_CODE (orig_base) == SSA_NAME
3157 && TREE_CODE (high) == INTEGER_CST
3158 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3159 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3160 || get_cst_init_from_scev (orig_base, &max, false))
3161 && wi::gts_p (high, max))
3162 base = wide_int_to_tree (unsigned_type, max);
3163 else if (TREE_CODE (base) != INTEGER_CST
3164 && dominated_by_p (CDI_DOMINATORS,
3165 loop->latch, gimple_bb (stmt)))
3166 base = fold_convert (unsigned_type, high);
3167 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3168 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3170 else
3172 wide_int min, max;
3173 extreme = fold_convert (unsigned_type, high);
3174 if (TREE_CODE (orig_base) == SSA_NAME
3175 && TREE_CODE (low) == INTEGER_CST
3176 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3177 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3178 || get_cst_init_from_scev (orig_base, &min, true))
3179 && wi::gts_p (min, low))
3180 base = wide_int_to_tree (unsigned_type, min);
3181 else if (TREE_CODE (base) != INTEGER_CST
3182 && dominated_by_p (CDI_DOMINATORS,
3183 loop->latch, gimple_bb (stmt)))
3184 base = fold_convert (unsigned_type, low);
3185 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3188 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3189 would get out of the range. */
3190 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3191 widest_int max = derive_constant_upper_bound (niter_bound);
3192 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3195 /* Determine information about the number of iterations of a LOOP from the index
3196 IDX of a data reference accessed in STMT. RELIABLE is true if STMT is
3197 guaranteed to be executed in every iteration of LOOP. Callback for
3198 for_each_index. */
3200 struct ilb_data
3202 struct loop *loop;
3203 gimple *stmt;
3206 static bool
3207 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3209 struct ilb_data *data = (struct ilb_data *) dta;
3210 tree ev, init, step;
3211 tree low, high, type, next;
3212 bool sign, upper = true, at_end = false;
3213 struct loop *loop = data->loop;
3215 if (TREE_CODE (base) != ARRAY_REF)
3216 return true;
3218 /* For arrays at the end of the structure, we are not guaranteed that they
3219 do not really extend over their declared size. However, for arrays of
3220 size greater than one, this is unlikely to be intended. */
3221 if (array_at_struct_end_p (base))
3223 at_end = true;
3224 upper = false;
3227 struct loop *dloop = loop_containing_stmt (data->stmt);
3228 if (!dloop)
3229 return true;
3231 ev = analyze_scalar_evolution (dloop, *idx);
3232 ev = instantiate_parameters (loop, ev);
3233 init = initial_condition (ev);
3234 step = evolution_part_in_loop_num (ev, loop->num);
3236 if (!init
3237 || !step
3238 || TREE_CODE (step) != INTEGER_CST
3239 || integer_zerop (step)
3240 || tree_contains_chrecs (init, NULL)
3241 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3242 return true;
3244 low = array_ref_low_bound (base);
3245 high = array_ref_up_bound (base);
3247 /* The case of nonconstant bounds could be handled, but it would be
3248 complicated. */
3249 if (TREE_CODE (low) != INTEGER_CST
3250 || !high
3251 || TREE_CODE (high) != INTEGER_CST)
3252 return true;
3253 sign = tree_int_cst_sign_bit (step);
3254 type = TREE_TYPE (step);
3256 /* The array of length 1 at the end of a structure most likely extends
3257 beyond its bounds. */
3258 if (at_end
3259 && operand_equal_p (low, high, 0))
3260 return true;
3262 /* In case the relevant bound of the array does not fit in type, or
3263 it does, but bound + step (in type) still belongs to the range of the
3264 array, the index may wrap and still stay within the range of the array
3265 (consider e.g. if the array is indexed by the full range of
3266 unsigned char).
3268 To make things simpler, we require both bounds to fit into type, although
3269 there are cases where this would not be strictly necessary. */
3270 if (!int_fits_type_p (high, type)
3271 || !int_fits_type_p (low, type))
3272 return true;
3273 low = fold_convert (type, low);
3274 high = fold_convert (type, high);
3276 if (sign)
3277 next = fold_binary (PLUS_EXPR, type, low, step);
3278 else
3279 next = fold_binary (PLUS_EXPR, type, high, step);
3281 if (tree_int_cst_compare (low, next) <= 0
3282 && tree_int_cst_compare (next, high) <= 0)
3283 return true;
3285 /* If the access is not executed on every iteration, we must ensure that overflow
3286 may not make the access valid later. */
3287 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3288 && scev_probably_wraps_p (NULL_TREE,
3289 initial_condition_in_loop_num (ev, loop->num),
3290 step, data->stmt, loop, true))
3291 upper = false;
3293 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
3294 return true;
3297 /* Determine information about the number of iterations of a LOOP from the
3298 bounds of arrays in the data reference REF accessed in STMT. RELIABLE is
3299 true if STMT is guaranteed to be executed in every iteration of LOOP. */
3301 static void
3302 infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
3304 struct ilb_data data;
3306 data.loop = loop;
3307 data.stmt = stmt;
3308 for_each_index (&ref, idx_infer_loop_bounds, &data);
3311 /* Determine information about the number of iterations of a LOOP from the way
3312 arrays are used in STMT. RELIABLE is true if STMT is guaranteed to be
3313 executed in every iteration of LOOP. */
3315 static void
3316 infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
3318 if (is_gimple_assign (stmt))
3320 tree op0 = gimple_assign_lhs (stmt);
3321 tree op1 = gimple_assign_rhs1 (stmt);
3323 /* For each memory access, analyze its access function
3324 and record a bound on the loop iteration domain. */
3325 if (REFERENCE_CLASS_P (op0))
3326 infer_loop_bounds_from_ref (loop, stmt, op0);
3328 if (REFERENCE_CLASS_P (op1))
3329 infer_loop_bounds_from_ref (loop, stmt, op1);
3331 else if (is_gimple_call (stmt))
3333 tree arg, lhs;
3334 unsigned i, n = gimple_call_num_args (stmt);
3336 lhs = gimple_call_lhs (stmt);
3337 if (lhs && REFERENCE_CLASS_P (lhs))
3338 infer_loop_bounds_from_ref (loop, stmt, lhs);
3340 for (i = 0; i < n; i++)
3342 arg = gimple_call_arg (stmt, i);
3343 if (REFERENCE_CLASS_P (arg))
3344 infer_loop_bounds_from_ref (loop, stmt, arg);
3349 /* Determine information about the number of iterations of a LOOP from the
3350 fact that pointer arithmetic in STMT does not overflow. */
3352 static void
3353 infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
3355 tree def, base, step, scev, type, low, high;
3356 tree var, ptr;
3358 if (!is_gimple_assign (stmt)
3359 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3360 return;
3362 def = gimple_assign_lhs (stmt);
3363 if (TREE_CODE (def) != SSA_NAME)
3364 return;
3366 type = TREE_TYPE (def);
3367 if (!nowrap_type_p (type))
3368 return;
3370 ptr = gimple_assign_rhs1 (stmt);
3371 if (!expr_invariant_in_loop_p (loop, ptr))
3372 return;
3374 var = gimple_assign_rhs2 (stmt);
3375 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3376 return;
3378 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3379 if (chrec_contains_undetermined (scev))
3380 return;
3382 base = initial_condition_in_loop_num (scev, loop->num);
3383 step = evolution_part_in_loop_num (scev, loop->num);
3385 if (!base || !step
3386 || TREE_CODE (step) != INTEGER_CST
3387 || tree_contains_chrecs (base, NULL)
3388 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3389 return;
3391 low = lower_bound_in_type (type, type);
3392 high = upper_bound_in_type (type, type);
3394 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3395 produce a NULL pointer. The contrary would mean NULL points to an object,
3396 while NULL is supposed to compare unequal with the address of all objects.
3397 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3398 NULL pointer since that would mean wrapping, which we assume here not to
3399 happen. So, we can exclude NULL from the valid range of pointer
3400 arithmetic. */
3401 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3402 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3404 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3407 /* Determine information about the number of iterations of a LOOP from the
3408 fact that signed arithmetic in STMT does not overflow. */
3410 static void
3411 infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
3413 tree def, base, step, scev, type, low, high;
3415 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3416 return;
3418 def = gimple_assign_lhs (stmt);
3420 if (TREE_CODE (def) != SSA_NAME)
3421 return;
3423 type = TREE_TYPE (def);
3424 if (!INTEGRAL_TYPE_P (type)
3425 || !TYPE_OVERFLOW_UNDEFINED (type))
3426 return;
3428 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3429 if (chrec_contains_undetermined (scev))
3430 return;
3432 base = initial_condition_in_loop_num (scev, loop->num);
3433 step = evolution_part_in_loop_num (scev, loop->num);
3435 if (!base || !step
3436 || TREE_CODE (step) != INTEGER_CST
3437 || tree_contains_chrecs (base, NULL)
3438 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3439 return;
3441 low = lower_bound_in_type (type, type);
3442 high = upper_bound_in_type (type, type);
3444 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3447 /* The following analyzers extract information on the bounds
3448 of LOOP from the following undefined behaviors:
3450 - data references should not access elements over the statically
3451 allocated size,
3453 - signed variables should not overflow when flag_wrapv is not set.
3456 static void
3457 infer_loop_bounds_from_undefined (struct loop *loop)
3459 unsigned i;
3460 basic_block *bbs;
3461 gimple_stmt_iterator bsi;
3462 basic_block bb;
3463 bool reliable;
3465 bbs = get_loop_body (loop);
3467 for (i = 0; i < loop->num_nodes; i++)
3469 bb = bbs[i];
3471 /* If BB is not executed in each iteration of the loop, we cannot
3472 use the operations in it to infer a reliable upper bound on the
3473 # of iterations of the loop. However, we can use it as a guess.
3474 Reliable guesses come only from array bounds. */
3475 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3477 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3479 gimple *stmt = gsi_stmt (bsi);
3481 infer_loop_bounds_from_array (loop, stmt);
3483 if (reliable)
3485 infer_loop_bounds_from_signedness (loop, stmt);
3486 infer_loop_bounds_from_pointer_arith (loop, stmt);
3492 free (bbs);
3495 /* Compare wide ints, callback for qsort. */
3497 static int
3498 wide_int_cmp (const void *p1, const void *p2)
3500 const widest_int *d1 = (const widest_int *) p1;
3501 const widest_int *d2 = (const widest_int *) p2;
3502 return wi::cmpu (*d1, *d2);
3505 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3506 Lookup by binary search. */
3508 static int
3509 bound_index (vec<widest_int> bounds, const widest_int &bound)
3511 unsigned int end = bounds.length ();
3512 unsigned int begin = 0;
3514 /* Find a matching index by means of a binary search. */
3515 while (begin != end)
3517 unsigned int middle = (begin + end) / 2;
3518 widest_int index = bounds[middle];
3520 if (index == bound)
3521 return middle;
3522 else if (wi::ltu_p (index, bound))
3523 begin = middle + 1;
3524 else
3525 end = middle;
3527 gcc_unreachable ();
3530 /* We recorded loop bounds only for statements dominating loop latch (and thus
3531 executed each loop iteration). If there are any bounds on statements not
3532 dominating the loop latch we can improve the estimate by walking the loop
3533 body and seeing if every path from loop header to loop latch contains
3534 some bounded statement. */
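/* Sketch of the idea on a hypothetical CFG: if the loop body forks
   into two branches and each contains a bounding statement (say with
   bounds 5 and 7), then every header-to-latch path is bounded and the
   walk below records the largest of the per-path smallest bounds,
   here 7, even though neither statement dominates the latch.  */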
3536 static void
3537 discover_iteration_bound_by_body_walk (struct loop *loop)
3539 struct nb_iter_bound *elt;
3540 auto_vec<widest_int> bounds;
3541 vec<vec<basic_block> > queues = vNULL;
3542 vec<basic_block> queue = vNULL;
3543 ptrdiff_t queue_index;
3544 ptrdiff_t latch_index = 0;
3546 /* Discover what bounds may interest us. */
3547 for (elt = loop->bounds; elt; elt = elt->next)
3549 widest_int bound = elt->bound;
3551 /* An exit terminates the loop at a given iteration, while a non-exit produces
3552 an undefined effect on the next iteration. */
3553 if (!elt->is_exit)
3555 bound += 1;
3556 /* If an overflow occurred, ignore the result. */
3557 if (bound == 0)
3558 continue;
3561 if (!loop->any_upper_bound
3562 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3563 bounds.safe_push (bound);
3566 /* Exit early if there is nothing to do. */
3567 if (!bounds.exists ())
3568 return;
3570 if (dump_file && (dump_flags & TDF_DETAILS))
3571 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3573 /* Sort the bounds in increasing order. */
3574 bounds.qsort (wide_int_cmp);
3576 /* For every basic block record the lowest bound that is guaranteed to
3577 terminate the loop. */
3579 hash_map<basic_block, ptrdiff_t> bb_bounds;
3580 for (elt = loop->bounds; elt; elt = elt->next)
3582 widest_int bound = elt->bound;
3583 if (!elt->is_exit)
3585 bound += 1;
3586 /* If an overflow occurred, ignore the result. */
3587 if (bound == 0)
3588 continue;
3591 if (!loop->any_upper_bound
3592 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3594 ptrdiff_t index = bound_index (bounds, bound);
3595 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3596 if (!entry)
3597 bb_bounds.put (gimple_bb (elt->stmt), index);
3598 else if ((ptrdiff_t)*entry > index)
3599 *entry = index;
3603 hash_map<basic_block, ptrdiff_t> block_priority;
3605 /* Perform shortest path discovery loop->header ... loop->latch.
3607 The "distance" is given by the smallest loop bound of any basic block
3608 present in the path, and we look for the path with the largest such
3609 smallest bound.
3611 To avoid the need for a Fibonacci heap on double ints we simply compress
3612 the double ints into indexes into the BOUNDS array and then represent the
3613 queue as arrays of queues for every index.
3614 An index of BOUNDS.length () means that the execution of the given BB has
3615 no bound determined.
3617 VISITED is a pointer map translating a basic block into the smallest index
3618 it was inserted into the priority queue with. */
3619 latch_index = -1;
3621 /* Start walk in loop header with index set to infinite bound. */
3622 queue_index = bounds.length ();
3623 queues.safe_grow_cleared (queue_index + 1);
3624 queue.safe_push (loop->header);
3625 queues[queue_index] = queue;
3626 block_priority.put (loop->header, queue_index);
3628 for (; queue_index >= 0; queue_index--)
3630 if (latch_index < queue_index)
3632 while (queues[queue_index].length ())
3634 basic_block bb;
3635 ptrdiff_t bound_index = queue_index;
3636 edge e;
3637 edge_iterator ei;
3639 queue = queues[queue_index];
3640 bb = queue.pop ();
3642 /* OK, we later inserted the BB with lower priority, skip it. */
3643 if (*block_priority.get (bb) > queue_index)
3644 continue;
3646 /* See if we can improve the bound. */
3647 ptrdiff_t *entry = bb_bounds.get (bb);
3648 if (entry && *entry < bound_index)
3649 bound_index = *entry;
3651 /* Insert successors into the queue, watch for the latch edge
3652 and record the greatest index we saw. */
3653 FOR_EACH_EDGE (e, ei, bb->succs)
3655 bool insert = false;
3657 if (loop_exit_edge_p (loop, e))
3658 continue;
3660 if (e == loop_latch_edge (loop)
3661 && latch_index < bound_index)
3662 latch_index = bound_index;
3663 else if (!(entry = block_priority.get (e->dest)))
3665 insert = true;
3666 block_priority.put (e->dest, bound_index);
3668 else if (*entry < bound_index)
3670 insert = true;
3671 *entry = bound_index;
3674 if (insert)
3675 queues[bound_index].safe_push (e->dest);
3679 queues[queue_index].release ();
3682 gcc_assert (latch_index >= 0);
3683 if ((unsigned)latch_index < bounds.length ())
3685 if (dump_file && (dump_flags & TDF_DETAILS))
3687 fprintf (dump_file, "Found better loop bound ");
3688 print_decu (bounds[latch_index], dump_file);
3689 fprintf (dump_file, "\n");
3691 record_niter_bound (loop, bounds[latch_index], false, true);
3694 queues.release ();
3697 /* See if every path across the loop goes through a statement that is known
3698 not to execute at the last iteration. In that case we can decrease the
3699 iteration count by 1. */
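/* An illustrative scenario on an assumed CFG: if both branches of the
   only if-else inside the loop contain a statement with undefined
   effect on iteration N (e.g. an out-of-bounds array store), then no
   path reaches the latch on iteration N and the upper bound can be
   lowered from N to N - 1.  */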
3701 static void
3702 maybe_lower_iteration_bound (struct loop *loop)
3704 hash_set<gimple *> *not_executed_last_iteration = NULL;
3705 struct nb_iter_bound *elt;
3706 bool found_exit = false;
3707 auto_vec<basic_block> queue;
3708 bitmap visited;
3710 /* Collect all statements with interesting (i.e. lower than
3711 nb_iterations_upper_bound) bound on them.
3713 TODO: Due to the way record_estimate chooses estimates to store, the bounds
3714 will always be nb_iterations_upper_bound-1. We can change this to also
3715 record statements not dominating the loop latch and update the walk below
3716 to the shortest path algorithm. */
3717 for (elt = loop->bounds; elt; elt = elt->next)
3719 if (!elt->is_exit
3720 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
3722 if (!not_executed_last_iteration)
3723 not_executed_last_iteration = new hash_set<gimple *>;
3724 not_executed_last_iteration->add (elt->stmt);
3727 if (!not_executed_last_iteration)
3728 return;
3730 /* Start DFS walk in the loop header and see if we can reach the
3731 loop latch or any of the exits (including statements with side
3732 effects that may terminate the loop otherwise) without visiting
3733 any of the statements known to have undefined effect on the last
3734 iteration. */
3735 queue.safe_push (loop->header);
3736 visited = BITMAP_ALLOC (NULL);
3737 bitmap_set_bit (visited, loop->header->index);
3738 found_exit = false;
3742 basic_block bb = queue.pop ();
3743 gimple_stmt_iterator gsi;
3744 bool stmt_found = false;
3746 /* Look for possible exits and statements bounding the execution. */
3747 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3749 gimple *stmt = gsi_stmt (gsi);
3750 if (not_executed_last_iteration->contains (stmt))
3752 stmt_found = true;
3753 break;
3755 if (gimple_has_side_effects (stmt))
3757 found_exit = true;
3758 break;
3761 if (found_exit)
3762 break;
3764 /* If no bounding statement is found, continue the walk. */
3765 if (!stmt_found)
3767 edge e;
3768 edge_iterator ei;
3770 FOR_EACH_EDGE (e, ei, bb->succs)
3772 if (loop_exit_edge_p (loop, e)
3773 || e == loop_latch_edge (loop))
3775 found_exit = true;
3776 break;
3778 if (bitmap_set_bit (visited, e->dest->index))
3779 queue.safe_push (e->dest);
3783 while (queue.length () && !found_exit);
3785 /* If every path through the loop reaches a bounding statement before an
3786 exit, then we know the last iteration of the loop will have undefined
3787 effect and we can decrease the number of iterations. */
3789 if (!found_exit)
3791 if (dump_file && (dump_flags & TDF_DETAILS))
3792 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
3793 "undefined statement must be executed at the last iteration.\n");
3794 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
3795 false, true);
3798 BITMAP_FREE (visited);
3799 delete not_executed_last_iteration;
3802 /* Records estimates on the numbers of iterations of LOOP. Estimates derived
3803 from undefined behavior are used only with -faggressive-loop-optimizations. */
3805 void
3806 estimate_numbers_of_iterations (struct loop *loop)
3808 vec<edge> exits;
3809 tree niter, type;
3810 unsigned i;
3811 struct tree_niter_desc niter_desc;
3812 edge ex;
3813 widest_int bound;
3814 edge likely_exit;
3816 /* Give up if we have already tried to compute an estimate. */
3817 if (loop->estimate_state != EST_NOT_COMPUTED)
3818 return;
3820 loop->estimate_state = EST_AVAILABLE;
3822 /* If we have a measured profile, use it to estimate the number of
3823 iterations. Normally this is recorded by branch_prob right after
3824 reading the profile. In case we however found a new loop, record the
3825 information here.
3827 Explicitly check for profile status so we do not report
3828 wrong prediction hitrates for guessed loop iterations heuristics.
3829 Do not recompute already recorded bounds - we ought to be better on
3830 updating iteration bounds than updating profile in general and thus
3831 recomputing iteration bounds later in the compilation process will just
3832 introduce random roundoff errors. */
3833 if (!loop->any_estimate
3834 && loop->header->count > 0)
3836 gcov_type nit = expected_loop_iterations_unbounded (loop);
3837 bound = gcov_type_to_wide_int (nit);
3838 record_niter_bound (loop, bound, true, false);
3841 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
3842 to be constant, we avoid bounds implied by undefined behavior and instead
3843 diagnose those loops with -Waggressive-loop-optimizations. */
3844 number_of_latch_executions (loop);
3846 exits = get_loop_exit_edges (loop);
3847 likely_exit = single_likely_exit (loop);
3848 FOR_EACH_VEC_ELT (exits, i, ex)
3850 if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
3851 continue;
3853 niter = niter_desc.niter;
3854 type = TREE_TYPE (niter);
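  /* If the exit may be taken before the first iteration, fold that into
     the recorded estimate: the iteration count is zero when MAY_BE_ZERO
     holds.  */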
3855 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
3856 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
3857 build_int_cst (type, 0),
3858 niter);
3859 record_estimate (loop, niter, niter_desc.max,
3860 last_stmt (ex->src),
3861 true, ex == likely_exit, true);
3862 record_control_iv (loop, &niter_desc);
3864 exits.release ();
3866 if (flag_aggressive_loop_optimizations)
3867 infer_loop_bounds_from_undefined (loop);
3869 discover_iteration_bound_by_body_walk (loop);
3871 maybe_lower_iteration_bound (loop);
3873 /* If we know the exact number of iterations of this loop, try not
3874 to break code with undefined behavior by not recording a smaller
3875 maximum number of iterations. */
3876 if (loop->nb_iterations
3877 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
3879 loop->any_upper_bound = true;
3880 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
3884 /* Sets NIT to the estimated number of executions of the latch of the
3885 LOOP. If we have no reliable estimate, the function returns false,
3886 otherwise it returns true. */
3889 bool
3890 estimated_loop_iterations (struct loop *loop, widest_int *nit)
3892 /* When SCEV information is available, try to update loop iterations
3893 estimate. Otherwise just return whatever we recorded earlier. */
3894 if (scev_initialized_p ())
3895 estimate_numbers_of_iterations (loop);
3897 return (get_estimated_loop_iterations (loop, nit));
3900 /* Similar to estimated_loop_iterations, but returns the estimate only
3901 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3902 on the number of iterations of LOOP could not be derived, returns -1. */
3904 HOST_WIDE_INT
3905 estimated_loop_iterations_int (struct loop *loop)
3907 widest_int nit;
3908 HOST_WIDE_INT hwi_nit;
3910 if (!estimated_loop_iterations (loop, &nit))
3911 return -1;
3913 if (!wi::fits_shwi_p (nit))
3914 return -1;
3915 hwi_nit = nit.to_shwi ();
3917 return hwi_nit < 0 ? -1 : hwi_nit;
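/* A typical use (an illustrative sketch, not a call site from this file):

     HOST_WIDE_INT est = estimated_loop_iterations_int (loop);
     if (est >= 0 && est < 4)
       return;   -- e.g. skip a transform on a loop that rarely iterates

   Note that the -1 return value folds together "no estimate" and
   "estimate does not fit".  */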
3921 /* Sets NIT to an upper bound for the maximum number of executions of the
3922 latch of the LOOP. If we have no reliable estimate, the function returns
3923 false, otherwise returns true. */
3925 bool
3926 max_loop_iterations (struct loop *loop, widest_int *nit)
3928 /* When SCEV information is available, try to update loop iterations
3929 estimate. Otherwise just return whatever we recorded earlier. */
3930 if (scev_initialized_p ())
3931 estimate_numbers_of_iterations (loop);
3933 return get_max_loop_iterations (loop, nit);
3936 /* Similar to max_loop_iterations, but returns the estimate only
3937 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3938 on the number of iterations of LOOP could not be derived, returns -1. */
3940 HOST_WIDE_INT
3941 max_loop_iterations_int (struct loop *loop)
3943 widest_int nit;
3944 HOST_WIDE_INT hwi_nit;
3946 if (!max_loop_iterations (loop, &nit))
3947 return -1;
3949 if (!wi::fits_shwi_p (nit))
3950 return -1;
3951 hwi_nit = nit.to_shwi ();
3953 return hwi_nit < 0 ? -1 : hwi_nit;
3956 /* Sets NIT to a likely upper bound for the maximum number of executions of the
3957 latch of the LOOP. If we have no reliable estimate, the function returns
3958 false, otherwise returns true. */
3960 bool
3961 likely_max_loop_iterations (struct loop *loop, widest_int *nit)
3963 /* When SCEV information is available, try to update loop iterations
3964 estimate. Otherwise just return whatever we recorded earlier. */
3965 if (scev_initialized_p ())
3966 estimate_numbers_of_iterations (loop);
3968 return get_likely_max_loop_iterations (loop, nit);
3971 /* Similar to likely_max_loop_iterations, but returns the estimate only
3972 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3973 on the number of iterations of LOOP could not be derived, returns -1. */
3975 HOST_WIDE_INT
3976 likely_max_loop_iterations_int (struct loop *loop)
3978 widest_int nit;
3979 HOST_WIDE_INT hwi_nit;
3981 if (!likely_max_loop_iterations (loop, &nit))
3982 return -1;
3984 if (!wi::fits_shwi_p (nit))
3985 return -1;
3986 hwi_nit = nit.to_shwi ();
3988 return hwi_nit < 0 ? -1 : hwi_nit;
3991 /* Returns an estimate for the number of executions of statements
3992 in the LOOP. For statements before the loop exit, this exceeds
3993 the number of executions of the latch by one. */
3995 HOST_WIDE_INT
3996 estimated_stmt_executions_int (struct loop *loop)
3998 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
3999 HOST_WIDE_INT snit;
4001 if (nit == -1)
4002 return -1;
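  /* Compute NIT + 1 in unsigned arithmetic so the addition cannot trigger
     signed overflow; if the result wraps around, SNIT becomes negative and
     -1 is returned below.  */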
4004 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
4006 /* If the computation overflows, return -1. */
4007 return snit < 0 ? -1 : snit;
4010 /* Sets NIT to the maximum number of executions of the latch of the
4011 LOOP, plus one. If we have no reliable estimate, the function returns
4012 false, otherwise returns true. */
4014 bool
4015 max_stmt_executions (struct loop *loop, widest_int *nit)
4017 widest_int nit_minus_one;
4019 if (!max_loop_iterations (loop, nit))
4020 return false;
4022 nit_minus_one = *nit;
4024 *nit += 1;
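  /* The addition above is done in the fixed widest_int precision, so it can
     wrap; the unsigned comparison below fails exactly in that case, making
     us report that no reliable bound is available.  */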
4026 return wi::gtu_p (*nit, nit_minus_one);
4029 /* Sets NIT to the likely maximum number of executions of the latch of the
4030 LOOP, plus one. If we have no likely estimate, the function returns
4031 false, otherwise returns true. */
4033 bool
4034 likely_max_stmt_executions (struct loop *loop, widest_int *nit)
4036 widest_int nit_minus_one;
4038 if (!likely_max_loop_iterations (loop, nit))
4039 return false;
4041 nit_minus_one = *nit;
4043 *nit += 1;
4045 return wi::gtu_p (*nit, nit_minus_one);
4048 /* Sets NIT to the estimated number of executions of the latch of the
4049 LOOP, plus one. If we have no reliable estimate, the function returns
4050 false, otherwise returns true. */
4052 bool
4053 estimated_stmt_executions (struct loop *loop, widest_int *nit)
4055 widest_int nit_minus_one;
4057 if (!estimated_loop_iterations (loop, nit))
4058 return false;
4060 nit_minus_one = *nit;
4062 *nit += 1;
4064 return wi::gtu_p (*nit, nit_minus_one);
4067 /* Records estimates on numbers of iterations of loops. */
4069 void
4070 estimate_numbers_of_iterations (function *fn)
4072 struct loop *loop;
4074 /* We don't want to issue signed overflow warnings while getting
4075 loop iteration estimates. */
4076 fold_defer_overflow_warnings ();
4078 FOR_EACH_LOOP_FN (fn, loop, 0)
4079 estimate_numbers_of_iterations (loop);
4081 fold_undefer_and_ignore_overflow_warnings ();
4084 /* Returns true if statement S1 dominates statement S2. */
4086 bool
4087 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
4089 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
4091 if (!bb1
4092 || s1 == s2)
4093 return true;
4095 if (bb1 == bb2)
4097 gimple_stmt_iterator bsi;
4099 if (gimple_code (s2) == GIMPLE_PHI)
4100 return false;
4102 if (gimple_code (s1) == GIMPLE_PHI)
4103 return true;
4105 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4106 if (gsi_stmt (bsi) == s1)
4107 return true;
4109 return false;
4112 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4115 /* Returns true when we can prove that the number of executions of
4116 STMT in the loop is at most NITER, according to the bound on
4117 the number of executions of the statement NITER_BOUND->stmt recorded in
4118 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
4120 ??? This code can become quite a CPU hog - we can have many bounds,
4121 and large basic blocks force stmt_dominates_stmt_p to be queried
4122 many times on a large basic block, so the whole thing is O(n^2)
4123 per scev_probably_wraps_p invocation (which can be done n times).
4125 It would make more sense (and give better answers) to remember BB
4126 bounds computed by discover_iteration_bound_by_body_walk. */
4128 static bool
4129 n_of_executions_at_most (gimple *stmt,
4130 struct nb_iter_bound *niter_bound,
4131 tree niter)
4133 widest_int bound = niter_bound->bound;
4134 tree nit_type = TREE_TYPE (niter), e;
4135 enum tree_code cmp;
4137 gcc_assert (TYPE_UNSIGNED (nit_type));
4139 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4140 the number of iterations is small. */
4141 if (!wi::fits_to_tree_p (bound, nit_type))
4142 return false;
4144 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4145 times. This means that:
4147 -- if NITER_BOUND->is_exit is true, then everything after
4148 it is executed at most NITER_BOUND->bound times.
4150 -- If NITER_BOUND->is_exit is false, and if we can prove that when STMT
4151 is executed, NITER_BOUND->stmt is executed as well in the same
4152 iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
4154 If we can determine that NITER_BOUND->stmt is always executed
4155 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4156 We conclude that NITER_BOUND->stmt is always executed after STMT if
4157 both statements belong to the same basic block, STMT comes before
4158 NITER_BOUND->stmt, and there are no statements with side effects in between. */
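  /* A worked instance (illustrative numbers): if NITER_BOUND->bound is 10
     and NITER_BOUND->stmt is an exit test dominating STMT, then STMT runs
     at most 10 times, which is proved below by checking NITER >= 10.  */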
4160 if (niter_bound->is_exit)
4162 if (stmt == niter_bound->stmt
4163 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4164 return false;
4165 cmp = GE_EXPR;
4167 else
4169 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4171 gimple_stmt_iterator bsi;
4172 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4173 || gimple_code (stmt) == GIMPLE_PHI
4174 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4175 return false;
4177 /* By stmt_dominates_stmt_p we already know that STMT appears
4178 before NITER_BOUND->STMT. We still need to test that the loop
4179 cannot be terminated by a side effect in between. */
4180 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4181 gsi_next (&bsi))
4182 if (gimple_has_side_effects (gsi_stmt (bsi)))
4183 return false;
4184 bound += 1;
4185 if (bound == 0
4186 || !wi::fits_to_tree_p (bound, nit_type))
4187 return false;
4189 cmp = GT_EXPR;
4192 e = fold_binary (cmp, boolean_type_node,
4193 niter, wide_int_to_tree (nit_type, bound));
4194 return e && integer_nonzerop (e);
4197 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
4199 bool
4200 nowrap_type_p (tree type)
4202 if (ANY_INTEGRAL_TYPE_P (type)
4203 && TYPE_OVERFLOW_UNDEFINED (type))
4204 return true;
4206 if (POINTER_TYPE_P (type))
4207 return true;
4209 return false;
4212 /* Return true if we can prove LOOP is exited before evolution of induction
4213 variable {BASE, STEP} overflows with respect to its type bound. */
4215 static bool
4216 loop_exits_before_overflow (tree base, tree step,
4217 gimple *at_stmt, struct loop *loop)
4219 widest_int niter;
4220 struct control_iv *civ;
4221 struct nb_iter_bound *bound;
4222 tree e, delta, step_abs, unsigned_base;
4223 tree type = TREE_TYPE (step);
4224 tree unsigned_type, valid_niter;
4226 /* Don't issue signed overflow warnings. */
4227 fold_defer_overflow_warnings ();
4229 /* Compute the number of iterations before we reach the bound of the
4230 type, and verify that the loop is exited before this occurs. */
4231 unsigned_type = unsigned_type_for (type);
4232 unsigned_base = fold_convert (unsigned_type, base);
4234 if (tree_int_cst_sign_bit (step))
4236 tree extreme = fold_convert (unsigned_type,
4237 lower_bound_in_type (type, type));
4238 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4239 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4240 fold_convert (unsigned_type, step));
4242 else
4244 tree extreme = fold_convert (unsigned_type,
4245 upper_bound_in_type (type, type));
4246 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4247 step_abs = fold_convert (unsigned_type, step);
4250 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
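  /* Illustrative numbers: for an 8-bit unsigned IV with BASE = 250 and
     STEP = 2, DELTA = 255 - 250 = 5 and VALID_NITER = 5 / 2 = 2, i.e. the
     IV may be stepped at most twice before it would wrap.  */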
4252 estimate_numbers_of_iterations (loop);
4254 if (max_loop_iterations (loop, &niter)
4255 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4256 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4257 wide_int_to_tree (TREE_TYPE (valid_niter),
4258 niter))) != NULL
4259 && integer_nonzerop (e))
4261 fold_undefer_and_ignore_overflow_warnings ();
4262 return true;
4264 if (at_stmt)
4265 for (bound = loop->bounds; bound; bound = bound->next)
4267 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4269 fold_undefer_and_ignore_overflow_warnings ();
4270 return true;
4273 fold_undefer_and_ignore_overflow_warnings ();
4275 /* Try to prove the loop is exited before {base, step} overflows, with the
4276 help of the analyzed loop control IVs. This is done only for IVs with a
4277 constant step because otherwise we don't have the information. */
4278 if (TREE_CODE (step) == INTEGER_CST)
4280 for (civ = loop->control_ivs; civ; civ = civ->next)
4282 enum tree_code code;
4283 tree civ_type = TREE_TYPE (civ->step);
4285 /* We have to consider the type difference because operand_equal_p
4286 ignores that for constants. */
4287 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4288 || element_precision (type) != element_precision (civ_type))
4289 continue;
4291 /* Only consider control IVs with the same step. */
4292 if (!operand_equal_p (step, civ->step, 0))
4293 continue;
4295 /* Done proving if this is a no-overflow control IV. */
4296 if (operand_equal_p (base, civ->base, 0))
4297 return true;
4299 /* The control IV is recorded after expanding simple operations,
4300 so here we expand the base and compare it too. */
4301 tree expanded_base = expand_simple_operations (base);
4302 if (operand_equal_p (expanded_base, civ->base, 0))
4303 return true;
4305 /* If this is a before stepping control IV, in other words, we have
4307 {civ_base, step} = {base + step, step}
4309 Because civ {base + step, step} doesn't overflow during loop
4310 iterations, {base, step} will not overflow if we can prove the
4311 operation "base + step" does not overflow. Specifically, we try
4312 to prove that the conditions below are satisfied:
4314 base <= UPPER_BOUND (type) - step ;;step > 0
4315 base >= LOWER_BOUND (type) - step ;;step < 0
4317 by proving the reverse conditions are false using the loop's initial
4318 condition. */
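  /* An illustrative instance: for a 32-bit signed IV with STEP = 4, the
     code below tries to show that base > INT_MAX - 4 contradicts the
     loop's initial condition.  */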
4319 if (POINTER_TYPE_P (TREE_TYPE (base)))
4320 code = POINTER_PLUS_EXPR;
4321 else
4322 code = PLUS_EXPR;
4324 tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4325 tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
4326 expanded_base, step);
4327 if (operand_equal_p (stepped, civ->base, 0)
4328 || operand_equal_p (expanded_stepped, civ->base, 0))
4330 tree extreme;
4332 if (tree_int_cst_sign_bit (step))
4334 code = LT_EXPR;
4335 extreme = lower_bound_in_type (type, type);
4337 else
4339 code = GT_EXPR;
4340 extreme = upper_bound_in_type (type, type);
4342 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4343 e = fold_build2 (code, boolean_type_node, base, extreme);
4344 e = simplify_using_initial_conditions (loop, e);
4345 if (integer_zerop (e))
4346 return true;
4351 return false;
4354 /* VAR is a scev variable whose evolution part is the constant STEP; this
4355 function proves that VAR can't overflow by using value range info. If
4356 VAR's value range is [MIN, MAX], non-overflow can be proven by:
4357 MAX + step doesn't overflow ; if step > 0
4359 MIN + step doesn't underflow ; if step < 0.
4361 We can only do this if VAR is computed in every loop iteration, i.e., VAR's
4362 definition has to dominate the loop latch. Consider the example below:
4365 unsigned int i;
4367 <bb 3>:
4369 <bb 4>:
4370 # RANGE [0, 4294967294] NONZERO 65535
4371 # i_21 = PHI <0(3), i_18(9)>
4372 if (i_21 != 0)
4373 goto <bb 6>;
4374 else
4375 goto <bb 8>;
4377 <bb 6>:
4378 # RANGE [0, 65533] NONZERO 65535
4379 _6 = i_21 + 4294967295;
4380 # RANGE [0, 65533] NONZERO 65535
4381 _7 = (long unsigned int) _6;
4382 # RANGE [0, 524264] NONZERO 524280
4383 _8 = _7 * 8;
4384 # PT = nonlocal escaped
4385 _9 = a_14 + _8;
4386 *_9 = 0;
4388 <bb 8>:
4389 # RANGE [1, 65535] NONZERO 65535
4390 i_18 = i_21 + 1;
4391 if (i_18 >= 65535)
4392 goto <bb 10>;
4393 else
4394 goto <bb 9>;
4396 <bb 9>:
4397 goto <bb 4>;
4399 <bb 10>:
4400 return;
4403 VAR _6 doesn't overflow only under the pre-condition (i_21 != 0), so here
4404 we can't use _6 to prove no-overflow for _7. In fact, var _7 takes the
4405 value sequence (4294967295, 0, 1, ..., 65533) during the loop's lifetime,
4406 rather than (4294967295, 4294967296, ...). */
4408 static bool
4409 scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
4411 tree type;
4412 wide_int minv, maxv, diff, step_wi;
4413 enum value_range_type rtype;
4415 if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
4416 return false;
4418 /* Check that VAR is evaluated in every loop iteration. It's not the case
4419 if VAR is a default definition or does not dominate the loop's latch. */
4420 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
4421 if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
4422 return false;
4424 rtype = get_range_info (var, &minv, &maxv);
4425 if (rtype != VR_RANGE)
4426 return false;
4428 /* VAR is a scev whose evolution part is STEP and whose value range info
4429 is [MIN, MAX]; we can prove that it does not overflow by the conditions:
4431 type_MAX - MAX >= step ; if step > 0
4432 MIN - type_MIN >= |step| ; if step < 0.
4434 Otherwise VAR would take a value outside of its range, which cannot happen. */
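  /* Illustrative numbers: for a 16-bit unsigned VAR with range [0, 65533]
     and STEP = 1, type_MAX - MAX = 65535 - 65533 = 2 >= 1, so VAR cannot
     wrap.  */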
4435 step_wi = step;
4436 type = TREE_TYPE (var);
4437 if (tree_int_cst_sign_bit (step))
4439 diff = lower_bound_in_type (type, type);
4440 diff = minv - diff;
4441 step_wi = - step_wi;
4443 else
4445 diff = upper_bound_in_type (type, type);
4446 diff = diff - maxv;
4449 return (wi::geu_p (diff, step_wi));
4452 /* Return false only when the induction variable BASE + STEP * I is
4453 known to not overflow: i.e. when the number of iterations is small
4454 enough with respect to the step and initial condition in order to
4455 keep the evolution confined in TYPE's bounds. Return true when the
4456 iv is known to overflow or when the property is not computable.
4458 USE_OVERFLOW_SEMANTICS is true if this function should assume that
4459 the rules for overflow of the given language apply (e.g., that signed
4460 arithmetic in C does not overflow).
4462 If VAR is an SSA name, this function also returns false if VAR can
4463 be proven not to overflow using value range info. */
4465 bool
4466 scev_probably_wraps_p (tree var, tree base, tree step,
4467 gimple *at_stmt, struct loop *loop,
4468 bool use_overflow_semantics)
4470 /* FIXME: We really need something like
4471 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4473 We used to test for the following situation that frequently appears
4474 during address arithmetic:
4476 D.1621_13 = (long unsigned intD.4) D.1620_12;
4477 D.1622_14 = D.1621_13 * 8;
4478 D.1623_15 = (doubleD.29 *) D.1622_14;
4480 And derived that the sequence corresponding to D_14
4481 can be proved to not wrap because it is used for computing a
4482 memory access; however, this is not really the case -- for example,
4483 if D_12 = (unsigned char) [254,+,1], then D_14 has values
4484 2032, 2040, 0, 8, ..., but the code is still legal. */
4486 if (chrec_contains_undetermined (base)
4487 || chrec_contains_undetermined (step))
4488 return true;
4490 if (integer_zerop (step))
4491 return false;
4493 /* If we can use the fact that signed and pointer arithmetic does not
4494 wrap, we are done. */
4495 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4496 return false;
4498 /* To be able to use estimates on number of iterations of the loop,
4499 we must have an upper bound on the absolute value of the step. */
4500 if (TREE_CODE (step) != INTEGER_CST)
4501 return true;
4503 /* Check if VAR can be proven not to overflow using value range info. */
4504 if (var && TREE_CODE (var) == SSA_NAME
4505 && scev_var_range_cant_overflow (var, step, loop))
4506 return false;
4508 if (loop_exits_before_overflow (base, step, at_stmt, loop))
4509 return false;
4511 /* At this point we still don't have a proof that the iv does not
4512 overflow: give up. */
4513 return true;
4516 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
4518 void
4519 free_numbers_of_iterations_estimates (struct loop *loop)
4521 struct control_iv *civ;
4522 struct nb_iter_bound *bound;
4524 loop->nb_iterations = NULL;
4525 loop->estimate_state = EST_NOT_COMPUTED;
4526 for (bound = loop->bounds; bound;)
4528 struct nb_iter_bound *next = bound->next;
4529 ggc_free (bound);
4530 bound = next;
4532 loop->bounds = NULL;
4534 for (civ = loop->control_ivs; civ;)
4536 struct control_iv *next = civ->next;
4537 ggc_free (civ);
4538 civ = next;
4540 loop->control_ivs = NULL;
4543 /* Frees the information on upper bounds on numbers of iterations of loops. */
4545 void
4546 free_numbers_of_iterations_estimates (function *fn)
4548 struct loop *loop;
4550 FOR_EACH_LOOP_FN (fn, loop, 0)
4551 free_numbers_of_iterations_estimates (loop);
4554 /* Substitute value VAL for ssa name NAME inside expressions held
4555 at LOOP. */
4557 void
4558 substitute_in_loop_info (struct loop *loop, tree name, tree val)
4560 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);