/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "intl.h"
#include "tree-flow.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "ggc.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-data-ref.h"
#include "params.h"
#include "flags.h"
#include "diagnostic-core.h"
#include "tree-inline.h"

#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)

/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

typedef struct
{
  mpz_t below, up;
} bounds;

/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  double_int off;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
	break;

      *var = op0;
      /* Always sign extend the offset.  */
      off = tree_to_double_int (op1);
      off = off.sext (TYPE_PRECISION (type));
      mpz_set_double_int (offset, off, false);
      if (negate)
	mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      off = tree_to_double_int (expr);
      mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
      break;

    default:
      break;
    }
}
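
/* Example (with a hypothetical SSA name n_5): for EXPR = n_5 + 7 this
   yields VAR = n_5 and OFFSET = 7; for EXPR = n_5 - 7 the MINUS_EXPR case
   negates the sign-extended offset, yielding OFFSET = -7; and for the
   constant EXPR = 42, VAR is the constant 0 and OFFSET = 42.  */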

/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (tree type, tree var, mpz_t off,
		       mpz_t min, mpz_t max)
{
  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  get_type_static_bounds (type, min, max);
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}
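
/* Example: for TYPE = signed char (assuming signed overflow is undefined,
   so the type is treated as non-wrapping), a non-constant VAR and OFF = 3,
   we start from the static bounds [-128, 127]; since VAR + 3 cannot wrap,
   the minimum may be tightened to -125, giving [-125, 127].  */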

/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
				    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
	  them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
	  expressions are var + X - M and var + Y, where M is the range of
	  the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Then their difference
	  again is X - Y - M.
     Therefore, if the arithmetic in TYPE does not overflow, the bounds are
     (X - Y, X - Y); otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  mpz_set_double_int (m, double_int::mask (TYPE_PRECISION (type)), true);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
	mpz_sub (bnds->below, bnds->below, m);
      else
	mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
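
/* Example: for TYPE = unsigned char (M = 256, wrapping arithmetic) and
   offsets X = 100, Y = 10, the initial bounds are (90, 90); since the
   additions may wrap (case b above, var + 100 overflowing while var + 10
   does not), the lower bound is decreased by M, giving (-166, 90).  */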

/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
			   tree vary, mpz_t offy,
			   tree c0, enum tree_code cmp, tree c1,
			   bounds *bnds)
{
  tree varc0, varc1, tmp, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR; however,
	 such a guard is unlikely to appear, so we do not bother with
	 handling it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except
	 for the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same
	 type as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
	  && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
	{
	  cmp = GT_EXPR;
	  break;
	}
      if (TYPE_MAX_VALUE (type)
	  && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
	{
	  cmp = LT_EXPR;
	  break;
	}

      return;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      tmp = varc0; varc0 = varc1; varc1 = tmp;
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      tmp = varx; varx = vary; vary = tmp;
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
	VARX + OFFC0 overflows, but VARX + OFFX does not.
	This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
	VARY + OFFC1 underflows and VARY + OFFY does not.
	This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
	      || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
	      || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
	mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
	{
	  mpz_neg (bnd, bnd);
	  if (mpz_cmp (bnds->below, bnd) < 0)
	    mpz_set (bnds->below, bnd);
	}
      else
	{
	  if (mpz_cmp (bnd, bnds->up) < 0)
	    mpz_set (bnds->up, bnd);
	}
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}

/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   an integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
	 offsets.  Matters are a bit more complicated if the
	 addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
	 estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (type, varx, offx, minx, maxx);
      determine_value_range (type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
	cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
				 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, double_int delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  mpz_set_double_int (mdelta, delta, false);

  mpz_init (max);
  mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}

/* Returns the inverse of X modulo 2^s, where MASK = 2^s - 1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
	{
	  irslt *= ix;
	  ix *= ix;
	}
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
	{
	  rslt = int_const_binop (MULT_EXPR, rslt, x);
	  x = int_const_binop (MULT_EXPR, x, x);
	}
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
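
/* A worked example of the squaring scheme above: the loop computes
   x^(1 + 2 + ... + 2^(ctr-1)) = x^(2^ctr - 1), and for MASK = 2^s - 1
   we have ctr = s - 1.  Since the multiplicative group modulo 2^s has
   order 2^(s-1), for odd x

     x * x^(2^(s-1) - 1) = x^(2^(s-1)) == 1 (mod 2^s).

   Concretely, for x = 3 and MASK = 15 (s = 4, ctr = 3) the iterations
   produce irslt = 3, 27, 2187; 2187 & 15 = 11, and indeed
   3 * 11 = 33 == 1 (mod 16).  */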

/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable ever
   reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
			     bounds *bnds, bool exit_must_be_taken)
{
  double_int max;
  mpz_t d;
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
		       || mpz_sgn (bnds->below) >= 0);

  if (multiple_of_p (TREE_TYPE (c), c, s))
    {
      /* If C is an exact multiple of S, then its value will be reached before
	 the induction variable overflows (unless the loop is exited in some
	 other way before).  Note that the actual induction variable in the
	 loop (which ranges from base to final instead of from 0 to C) may
	 overflow, in which case BNDS.up will not be giving a correct upper
	 bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      max = double_int::mask (TYPE_PRECISION (TREE_TYPE (c))
			      - tree_low_cst (num_ending_zeros (s), 1));
      mpz_set_double_int (bnd, max, true);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  mpz_set_double_int (bnd, double_int::mask (TYPE_PRECISION (TREE_TYPE (c))),
		      true);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
	 the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
	mpz_set_double_int (bnd, tree_to_double_int (c), true);
      else if (bnds_u_valid)
	mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  mpz_set_double_int (d, tree_to_double_int (s), true);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}

/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   ever reaches the value FINAL (we derived this earlier, and possibly set
   NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (tree type, affine_iv *iv, tree final,
			 struct tree_niter_desc *niter, bool exit_must_be_taken,
			 bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
			fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv->base),
		       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, final),
		       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
			       exit_must_be_taken);
  niter->max = mpz_get_double_int (niter_type, max, false);
  mpz_clear (max);

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
			       (TYPE_PRECISION (niter_type)
				- tree_low_cst (bits, 1)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
			       build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
	 assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
				assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
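
/* A worked example, with an 8-bit NITER_TYPE: for the exit test
   6 * i != 10 (s = 6, c = 10), we get bits = 1, d = 2, s = 3 and
   bound = 127.  c is divisible by d, so c becomes 10 / 2 = 5; the
   inverse of 3 modulo 128 is 43, and niter->niter = (5 * 43) & 127 = 87.
   Indeed, 87 * 6 = 522 == 10 (mod 256).  */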

/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
			       struct tree_niter_desc *niter,
			       tree *delta, tree step,
			       bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  mpz_set_double_int (mmod, tree_to_double_int (mod), true);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points
     being placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
      (iv0->no_overflow && integer_nonzerop (iv0->step))
      || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (MINUS_EXPR, type1,
			       TYPE_MAX_VALUE (type1), tmod);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build_pointer_plus (iv1->base, tmod));
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build2 (PLUS_EXPR, type1,
					   iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type1), tmod);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build_pointer_plus (iv0->base,
						       fold_build1 (NEGATE_EXPR,
								    type1,
								    tmod)),
			      iv1->base);
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build2 (MINUS_EXPR, type1,
					   iv0->base, tmod),
			      iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions,
				      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
				      niter->may_be_zero,
				      noloop);
  bounds_add (bnds, tree_to_double_int (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}
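
/* A worked example: for for (i = 0; i < 10; i += 3) we have *DELTA = 10
   and STEP = 3.  MOD = 10 mod 3 = 1 and is then replaced by
   STEP - MOD = 2, so the final value is adjusted from 10 to 12 and
   *DELTA becomes 12.  The caller can then treat the exit test as
   3 * i != 12, which is taken after exactly 12 / 3 = 4 iterations.  */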

/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
	return true;

      /* If iv0->base is a constant, we can determine the last value before
	 overflow precisely; otherwise we conservatively assume
	 MAX - STEP + 1.  */
      if (TREE_CODE (iv0->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, TYPE_MAX_VALUE (type)),
			   fold_convert (niter_type, iv0->base));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
			   TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
				iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
	return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, iv1->base),
			   fold_convert (niter_type, TYPE_MIN_VALUE (type)));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
			   TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
				iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}
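
/* A worked example: for for (i = 250; i < n; i += 4) with i of type
   unsigned char, d = 255 - 250 = 5 and diff = 5 mod 4 = 1, so the last
   value of i reachable without wrapping is 254 = 255 - 1, and the
   recorded assumption is n <= 254.  */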

/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  double_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables
     us to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */

  /* First check whether the answer does not follow from the bounds we
     gathered before.  */
  if (integer_nonzerop (iv0->step))
    dstep = tree_to_double_int (iv0->step);
  else
    {
      dstep = tree_to_double_int (iv1->step).sext (TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  mpz_set_double_int (mstep, dstep, true);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
		   /* For pointers, only values lying inside a single object
		      can be compared or manipulated by pointer arithmetics.
		      Gcc in general does not allow or handle objects larger
		      than half of the address space, hence the upper bound
		      is satisfied for pointers.  */
		   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions,
     and we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
			  iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
	 the 0 address never belongs to any object, we can assume this for
	 pointers.  */
      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type), diff);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	}

      /* And then we can compute iv0->base - diff, and compare it with
	 iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
			  iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MAX_VALUE (type), diff);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	}

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);
  if (!rolls_p)
    {
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
					niter->may_be_zero, mbz);
    }
}
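
/* A note on the -step + 1 lower bound above: it is what makes the
   (delta + step - 1) / step formula degrade gracefully when the loop
   does not roll.  E.g., with step = 4 and a true difference D in
   [-3, 0], the unsigned computation of delta + 3 wraps back to
   D + 3 in [0, 3], so the division yields 0 iterations; for D < -3
   the result would be a huge bogus count, which is why MAY_BE_ZERO
   must be recorded in that case.  */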

/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

static bool
number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
			 struct tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv1->base),
		       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

	 or

	 for (i = iv1->base; i > iv0->base; i--).

	 In both cases # of iterations is iv1->base - iv0->base, assuming that
	 iv1->base >= iv0->base.

	 First try to derive a lower bound on the value of
	 iv1->base - iv0->base, computed in full precision.  If the difference
	 is nonnegative, we are done, otherwise we must record the
	 condition.  */

      if (mpz_sgn (bnds->below) < 0)
	niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
					  iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = mpz_get_double_int (niter_type, bnds->up, false);
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
			 fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
				     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
	 zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
		   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  mpz_set_double_int (mstep, tree_to_double_int (step), true);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = mpz_get_double_int (niter_type, tmp, false);
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
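
/* A worked example of the generic path (when the != transformation above
   does not apply): for for (i = p; i < q; i += 3) with unknown p and q,
   the result is niter = (delta + 2) / 3 where delta = q - p; e.g.,
   delta = 10 gives (10 + 2) / 3 = 4 iterations (i = p, p+3, p+6, p+9).  */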

/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
			 struct tree_niter_desc *niter, bool exit_must_be_taken,
			 bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv1->base, TYPE_MAX_VALUE (type));
      else
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
	return false;
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
	iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
	iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
				 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
			     iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, double_int_one, type1);

  return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
				  bnds);
}
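
/* A worked example: for (i = 0; i <= n; i++) is rewritten to
   i < n + 1.  Unless the exit is otherwise known to be taken, this
   requires the assumption n != TYPE_MAX -- if n were TYPE_MAX, the
   original <= condition could never become false and the loop would
   roll forever.  */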

/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}

/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1.  Both induction variables must have
   type TYPE, which must be an integer or pointer type.  The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   If EVERY_ITERATION is true, we know the test is executed on every
   iteration.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */

static bool
number_of_iterations_cond (struct loop *loop,
			   tree type, affine_iv *iv0, enum tree_code code,
			   affine_iv *iv1, struct tree_niter_desc *niter,
			   bool only_exit, bool every_iteration)
{
  bool exit_must_be_taken = false, ret;
  bounds bnds;

  /* If the test is not executed every iteration, wrapping may make the
     test pass again.
     TODO: the overflow case can still be used as an unreliable estimate
     of the upper bound.  But we have no API to pass it down to the number
     of iterations code and, at present, it would not use it anyway.  */
  if (!every_iteration
      && (!iv0->no_overflow || !iv1->no_overflow
	  || code == NE_EXPR || code == EQ_EXPR))
    return false;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->max = double_int_zero;

  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
    {
      SWAP (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
	 to the same object.  If they do, the control variable cannot wrap
	 (as wrap around the bounds of memory will never return a pointer
	 that would be guaranteed to point to the same object, even if we
	 avoid undefined behavior by casting to size_t and back).  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow and the only exit
     from the loop is the one that we analyze, we know it must be taken
     eventually.  */
  if (only_exit)
    {
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
	exit_must_be_taken = true;
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
	exit_must_be_taken = true;
    }

  /* We can handle the case when neither of the sides of the comparison is
     invariant, provided that the test is NE_EXPR.  This rarely occurs in
     practice, but it is simple enough to manage.  */
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
      if (code != NE_EXPR)
	return false;

      iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
					   iv0->step, iv1->step);
      iv0->no_overflow = false;
      iv1->step = build_int_cst (step_type, 0);
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common
     enough to waste time on it.  */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
    return false;

  /* Ignore loops of while (i-- < 10) type.  */
  if (code != NE_EXPR)
    {
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
	return false;

      if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
	return false;
    }

  /* If the loop exits immediately, there is nothing to do.  */
  if (integer_zerop (fold_build2 (code, boolean_type_node,
				  iv0->base, iv1->base)))
    {
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      niter->max = double_int_zero;
      return true;
    }

  /* OK, now we know we have a sensible loop.  Handle several cases,
     depending on what comparison operator is used.  */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
	       "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, "  exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
	       code == NE_EXPR ? "!="
	       : code == LT_EXPR ? "<"
	       : "<=");
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "  bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
    }

  switch (code)
    {
    case NE_EXPR:
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
				     exit_must_be_taken, &bnds);
      break;

    case LT_EXPR:
      ret = number_of_iterations_lt (type, iv0, iv1, niter,
				     exit_must_be_taken, &bnds);
      break;

    case LE_EXPR:
      ret = number_of_iterations_le (type, iv0, iv1, niter,
				     exit_must_be_taken, &bnds);
      break;

    default:
      gcc_unreachable ();
    }

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (ret)
	{
	  fprintf (dump_file, "  result:\n");
	  if (!integer_nonzerop (niter->assumptions))
	    {
	      fprintf (dump_file, "    under assumptions ");
	      print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }

	  if (!integer_zerop (niter->may_be_zero))
	    {
	      fprintf (dump_file, "    zero if ");
	      print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }

	  fprintf (dump_file, "    # of iterations ");
	  print_generic_expr (dump_file, niter->niter, TDF_SLIM);
	  fprintf (dump_file, ", bounded by ");
	  dump_double_int (dump_file, niter->max, true);
	  fprintf (dump_file, "\n");
	}
      else
	fprintf (dump_file, "  failed\n\n");
    }
  return ret;
}

/* Substitutes NEW_TREE for OLD in EXPR and folds the result.  */

static tree
simplify_replace_tree (tree expr, tree old, tree new_tree)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  /* Do not bother to replace constants.  */
  if (CONSTANT_CLASS_P (old))
    return expr;

  if (expr == old
      || operand_equal_p (expr, old, 0))
    return unshare_expr (new_tree);

  if (!EXPR_P (expr))
    return expr;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new_tree);
      if (e == se)
	continue;

      if (!ret)
	ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? fold (ret) : expr);
}

/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  */

tree
expand_simple_operations (tree expr)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, e1;
  enum tree_code code;
  gimple stmt;

  if (expr == NULL_TREE)
    return expr;

  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
	{
	  e = TREE_OPERAND (expr, i);
	  ee = expand_simple_operations (e);
	  if (e == ee)
	    continue;

	  if (!ret)
	    ret = copy_node (expr);

	  TREE_OPERAND (ret, i) = ee;
	}

      if (!ret)
	return expr;

      fold_defer_overflow_warnings ();
      ret = fold (ret);
      fold_undefer_and_ignore_overflow_warnings ();
      return ret;
    }

  if (TREE_CODE (expr) != SSA_NAME)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      basic_block src, dest;

      if (gimple_phi_num_args (stmt) != 1)
	return expr;
      e = PHI_ARG_DEF (stmt, 0);

      /* Avoid propagating through loop exit phi nodes, which
	 could break loop-closed SSA form restrictions.  */
      dest = gimple_bb (stmt);
      src = single_pred (dest);
      if (TREE_CODE (e) == SSA_NAME
	  && src->loop_father != dest->loop_father)
	return expr;

      return expand_simple_operations (e);
    }
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return expr;

  e = gimple_assign_rhs1 (stmt);
  code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    {
      if (is_gimple_min_invariant (e))
	return e;

      if (code == SSA_NAME)
	return expand_simple_operations (e);

      return expr;
    }

  switch (code)
    {
    CASE_CONVERT:
      /* Casts are simple.  */
      ee = expand_simple_operations (e);
      return fold_build1 (code, TREE_TYPE (expr), ee);

    case PLUS_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      /* And increments and decrements by a constant are simple.  */
      e1 = gimple_assign_rhs2 (stmt);
      if (!is_gimple_min_invariant (e1))
	return expr;

      ee = expand_simple_operations (e);
      return fold_build2 (code, TREE_TYPE (expr), ee, e1);

    default:
      return expr;
    }
}

/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).  */

static tree
tree_simplify_using_condition_1 (tree cond, tree expr)
{
  bool changed;
  tree e, te, e0, e1, e2, notcond;
  enum tree_code code = TREE_CODE (expr);

  if (code == INTEGER_CST)
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
	changed = true;

      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
	changed = true;

      if (code == COND_EXPR)
	{
	  e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
	  if (TREE_OPERAND (expr, 2) != e2)
	    changed = true;
	}
      else
	e2 = NULL_TREE;

      if (changed)
	{
	  if (code == COND_EXPR)
	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
	  else
	    expr = fold_build2 (code, boolean_type_node, e0, e1);
	}

      return expr;
    }

  /* In case COND is equality, we may be able to simplify EXPR by
     copy/constant propagation, and vice versa.  Fold does not handle this,
     since it is considered too expensive.  */
  if (TREE_CODE (cond) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (cond, 0);
      e1 = TREE_OPERAND (cond, 1);

      /* We know that e0 == e1.  Check whether we cannot simplify expr
	 using this fact.  */
      e = simplify_replace_tree (expr, e0, e1);
      if (integer_zerop (e) || integer_nonzerop (e))
	return e;

      e = simplify_replace_tree (expr, e1, e0);
      if (integer_zerop (e) || integer_nonzerop (e))
	return e;
    }
  if (TREE_CODE (expr) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
	return e;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
	return e;
    }
  if (TREE_CODE (expr) == NE_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
	return boolean_true_node;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
	return boolean_true_node;
    }

  te = expand_simple_operations (expr);

  /* Check whether COND ==> EXPR.  */
  notcond = invert_truthvalue (cond);
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
  if (e && integer_nonzerop (e))
    return e;

  /* Check whether COND ==> not EXPR.  */
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
  if (e && integer_zerop (e))
    return e;

  return expr;
}
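
/* A worked example of the EQ_EXPR branch above (with a hypothetical SSA
   name n_1): for COND = (n_1 == 10) and EXPR = (n_1 + 1 > 5),
   substituting 10 for n_1 folds EXPR to (11 > 5), i.e., boolean true,
   which is then returned.  */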

/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
   of simple operations in definitions of ssa names in COND are expanded,
   so that things like casts or incrementing the value of the bound before
   the loop do not cause us to fail.  */

static tree
tree_simplify_using_condition (tree cond, tree expr)
{
  cond = expand_simple_operations (cond);

  return tree_simplify_using_condition_1 (cond, expr);
}

/* Tries to simplify EXPR using the conditions on entry to LOOP.
   Returns the simplified expression (or EXPR unchanged, if no
   simplification was possible).  */

static tree
simplify_using_initial_conditions (struct loop *loop, tree expr)
{
  edge e;
  basic_block bb;
  gimple stmt;
  tree cond;
  int cnt = 0;

  if (TREE_CODE (expr) == INTEGER_CST)
    return expr;

  /* Limit walking the dominators to avoid quadraticness in
     the number of BBs times the number of loops in degenerate
     cases.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      stmt = last_stmt (e->src);
      cond = fold_build2 (gimple_cond_code (stmt),
			  boolean_type_node,
			  gimple_cond_lhs (stmt),
			  gimple_cond_rhs (stmt));
      if (e->flags & EDGE_FALSE_VALUE)
	cond = invert_truthvalue (cond);
      expr = tree_simplify_using_condition (cond, expr);
      ++cnt;
    }

  return expr;
}

/* Tries to simplify EXPR using the evolutions of the loop invariants
   in the superloops of LOOP.  Returns the simplified expression
   (or EXPR unchanged, if no simplification was possible).  */

static tree
simplify_using_outer_evolutions (struct loop *loop, tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  bool changed;
  tree e, e0, e1, e2;

  if (is_gimple_min_invariant (expr))
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
	changed = true;

      e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
	changed = true;

      if (code == COND_EXPR)
	{
	  e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
	  if (TREE_OPERAND (expr, 2) != e2)
	    changed = true;
	}
      else
	e2 = NULL_TREE;

      if (changed)
	{
	  if (code == COND_EXPR)
	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
	  else
	    expr = fold_build2 (code, boolean_type_node, e0, e1);
	}

      return expr;
    }

  e = instantiate_parameters (loop, expr);
  if (is_gimple_min_invariant (e))
    return e;

  return expr;
}

/* Returns true if EXIT is the only possible exit from LOOP.  */

bool
loop_only_exit_p (const struct loop *loop, const_edge exit)
{
  basic_block *body;
  gimple_stmt_iterator bsi;
  unsigned i;
  gimple call;

  if (exit != single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  call = gsi_stmt (bsi);
	  if (gimple_code (call) != GIMPLE_CALL)
	    continue;

	  if (gimple_has_side_effects (call))
	    {
	      free (body);
	      return false;
	    }
	}
    }

  free (body);
  return true;
}
1800 /* Stores description of number of iterations of LOOP derived from
1801 EXIT (an exit edge of the LOOP) in NITER. Returns true if some
1802 useful information could be derived (and fields of NITER has
1803 meaning described in comments at struct tree_niter_desc
1804 declaration), false otherwise. If WARN is true and
1805 -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
1806 potentially unsafe assumptions.
1807 When EVERY_ITERATION is true, only tests that are known to be executed
1808 every iteration are considered (i.e. only test that alone bounds the loop).
1811 bool
1812 number_of_iterations_exit (struct loop *loop, edge exit,
1813 struct tree_niter_desc *niter,
1814 bool warn, bool every_iteration)
1816 gimple stmt;
1817 tree type;
1818 tree op0, op1;
1819 enum tree_code code;
1820 affine_iv iv0, iv1;
1821 bool safe;
1823 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
1825 if (every_iteration && !safe)
1826 return false;
1828 niter->assumptions = boolean_false_node;
1829 stmt = last_stmt (exit->src);
1830 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1831 return false;
1833 /* We want the condition for staying inside loop. */
1834 code = gimple_cond_code (stmt);
1835 if (exit->flags & EDGE_TRUE_VALUE)
1836 code = invert_tree_comparison (code, false);
1838 switch (code)
1840 case GT_EXPR:
1841 case GE_EXPR:
1842 case LT_EXPR:
1843 case LE_EXPR:
1844 case NE_EXPR:
1845 break;
1847 default:
1848 return false;
1851 op0 = gimple_cond_lhs (stmt);
1852 op1 = gimple_cond_rhs (stmt);
1853 type = TREE_TYPE (op0);
1855 if (TREE_CODE (type) != INTEGER_TYPE
1856 && !POINTER_TYPE_P (type))
1857 return false;
1859 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
1860 return false;
1861 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
1862 return false;
1864 /* We don't want to see undefined signed overflow warnings while
1865 computing the number of iterations. */
1866 fold_defer_overflow_warnings ();
1868 iv0.base = expand_simple_operations (iv0.base);
1869 iv1.base = expand_simple_operations (iv1.base);
1870 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
1871 loop_only_exit_p (loop, exit), safe))
1873 fold_undefer_and_ignore_overflow_warnings ();
1874 return false;
1877 if (optimize >= 3)
1879 niter->assumptions = simplify_using_outer_evolutions (loop,
1880 niter->assumptions);
1881 niter->may_be_zero = simplify_using_outer_evolutions (loop,
1882 niter->may_be_zero);
1883 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
1886 niter->assumptions
1887 = simplify_using_initial_conditions (loop,
1888 niter->assumptions);
1889 niter->may_be_zero
1890 = simplify_using_initial_conditions (loop,
1891 niter->may_be_zero);
1893 fold_undefer_and_ignore_overflow_warnings ();
1895 /* If NITER has simplified into a constant, update MAX. */
1896 if (TREE_CODE (niter->niter) == INTEGER_CST)
1897 niter->max = tree_to_double_int (niter->niter);
1899 if (integer_onep (niter->assumptions))
1900 return true;
1902 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
1903 But if we can prove that there is overflow or some other source of weird
1904 behavior, ignore the loop even with -funsafe-loop-optimizations. */
1905 if (integer_zerop (niter->assumptions) || !single_exit (loop))
1906 return false;
1908 if (flag_unsafe_loop_optimizations)
1909 niter->assumptions = boolean_true_node;
1911 if (warn)
1913 const char *wording;
1914 location_t loc = gimple_location (stmt);
1916 /* We can provide a more specific warning if one of the operands is
1917 constant and the other advances by +1 or -1. */
1918 if (!integer_zerop (iv1.step)
1919 ? (integer_zerop (iv0.step)
1920 && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
1921 : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
1922 wording =
1923 flag_unsafe_loop_optimizations
1924 ? N_("assuming that the loop is not infinite")
1925 : N_("cannot optimize possibly infinite loops");
1926 else
1927 wording =
1928 flag_unsafe_loop_optimizations
1929 ? N_("assuming that the loop counter does not overflow")
1930 : N_("cannot optimize loop, the loop counter may overflow");
1932 warning_at ((LOCATION_LINE (loc) > 0) ? loc : input_location,
1933 OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
1936 return flag_unsafe_loop_optimizations;
1939 /* Try to determine the number of iterations of LOOP. If we succeed,
1940 an expression giving the number of iterations is returned and *EXIT is
1941 set to the edge from which the information is obtained. Otherwise
1942 chrec_dont_know is returned. */
1944 tree
1945 find_loop_niter (struct loop *loop, edge *exit)
1947 unsigned i;
1948 vec<edge> exits = get_loop_exit_edges (loop);
1949 edge ex;
1950 tree niter = NULL_TREE, aniter;
1951 struct tree_niter_desc desc;
1953 *exit = NULL;
1954 FOR_EACH_VEC_ELT (exits, i, ex)
1956 if (!number_of_iterations_exit (loop, ex, &desc, false))
1957 continue;
1959 if (integer_nonzerop (desc.may_be_zero))
1961 /* We exit in the first iteration through this exit.
1962 We won't find anything better. */
1963 niter = build_int_cst (unsigned_type_node, 0);
1964 *exit = ex;
1965 break;
1968 if (!integer_zerop (desc.may_be_zero))
1969 continue;
1971 aniter = desc.niter;
1973 if (!niter)
1975 /* Nothing recorded yet. */
1976 niter = aniter;
1977 *exit = ex;
1978 continue;
1981 /* Prefer constants, the lower the better. */
1982 if (TREE_CODE (aniter) != INTEGER_CST)
1983 continue;
1985 if (TREE_CODE (niter) != INTEGER_CST)
1987 niter = aniter;
1988 *exit = ex;
1989 continue;
1992 if (tree_int_cst_lt (aniter, niter))
1994 niter = aniter;
1995 *exit = ex;
1996 continue;
1999 exits.release ();
2001 return niter ? niter : chrec_dont_know;
2004 /* Return true if loop is known to have a bounded number of iterations. */
2006 bool
2007 finite_loop_p (struct loop *loop)
2009 double_int nit;
2010 int flags;
2012 if (flag_unsafe_loop_optimizations)
2013 return true;
2014 flags = flags_from_decl_or_type (current_function_decl);
2015 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2017 if (dump_file && (dump_flags & TDF_DETAILS))
2018 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2019 loop->num);
2020 return true;
2023 if (loop->any_upper_bound
2024 || max_loop_iterations (loop, &nit))
2026 if (dump_file && (dump_flags & TDF_DETAILS))
2027 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2028 loop->num);
2029 return true;
2031 return false;
2036 Analysis of the number of iterations of a loop by a brute-force evaluation.
2040 /* Bound on the number of iterations we try to evaluate. */
2042 #define MAX_ITERATIONS_TO_TRACK \
2043 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2045 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2046 result by a chain of operations such that all but exactly one of their
2047 operands are constants. */
2049 static gimple
2050 chain_of_csts_start (struct loop *loop, tree x)
2052 gimple stmt = SSA_NAME_DEF_STMT (x);
2053 tree use;
2054 basic_block bb = gimple_bb (stmt);
2055 enum tree_code code;
2057 if (!bb
2058 || !flow_bb_inside_loop_p (loop, bb))
2059 return NULL;
2061 if (gimple_code (stmt) == GIMPLE_PHI)
2063 if (bb == loop->header)
2064 return stmt;
2066 return NULL;
2069 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2070 return NULL;
2072 code = gimple_assign_rhs_code (stmt);
2073 if (gimple_references_memory_p (stmt)
2074 || TREE_CODE_CLASS (code) == tcc_reference
2075 || (code == ADDR_EXPR
2076 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2077 return NULL;
2079 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2080 if (use == NULL_TREE)
2081 return NULL;
2083 return chain_of_csts_start (loop, use);
2086 /* Determines whether the expression X is derived from a result of a phi node
2087 in header of LOOP such that
2089 * the derivation of X consists only of operations with constants
2090 * the initial value of the phi node is constant
2091 * the value of the phi node in the next iteration can be derived from the
2092 value in the current iteration by a chain of operations with constants.
2094 If such phi node exists, it is returned, otherwise NULL is returned. */
2096 static gimple
2097 get_base_for (struct loop *loop, tree x)
2099 gimple phi;
2100 tree init, next;
2102 if (is_gimple_min_invariant (x))
2103 return NULL;
2105 phi = chain_of_csts_start (loop, x);
2106 if (!phi)
2107 return NULL;
2109 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2110 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2112 if (TREE_CODE (next) != SSA_NAME)
2113 return NULL;
2115 if (!is_gimple_min_invariant (init))
2116 return NULL;
2118 if (chain_of_csts_start (loop, next) != phi)
2119 return NULL;
2121 return phi;
2124 /* Given an expression X, then
2126 * if X is NULL_TREE, we return the constant BASE.
2127 * otherwise X is an SSA name, whose value in the considered loop is derived
2128 by a chain of operations with constants from the result of a phi node in
2129 the header of the loop. Then we return the value of X when the value of
2130 the result of this phi node is given by the constant BASE. */
2132 static tree
2133 get_val_for (tree x, tree base)
2135 gimple stmt;
2137 gcc_assert (is_gimple_min_invariant (base));
2139 if (!x)
2140 return base;
2142 stmt = SSA_NAME_DEF_STMT (x);
2143 if (gimple_code (stmt) == GIMPLE_PHI)
2144 return base;
2146 gcc_assert (is_gimple_assign (stmt));
2148 /* STMT must be either an assignment of a single SSA name or an
2149 expression involving an SSA name and a constant. Try to fold that
2150 expression using the value for the SSA name. */
2151 if (gimple_assign_ssa_name_copy_p (stmt))
2152 return get_val_for (gimple_assign_rhs1 (stmt), base);
2153 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2154 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2156 return fold_build1 (gimple_assign_rhs_code (stmt),
2157 gimple_expr_type (stmt),
2158 get_val_for (gimple_assign_rhs1 (stmt), base));
2160 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2162 tree rhs1 = gimple_assign_rhs1 (stmt);
2163 tree rhs2 = gimple_assign_rhs2 (stmt);
2164 if (TREE_CODE (rhs1) == SSA_NAME)
2165 rhs1 = get_val_for (rhs1, base);
2166 else if (TREE_CODE (rhs2) == SSA_NAME)
2167 rhs2 = get_val_for (rhs2, base);
2168 else
2169 gcc_unreachable ();
2170 return fold_build2 (gimple_assign_rhs_code (stmt),
2171 gimple_expr_type (stmt), rhs1, rhs2);
2173 else
2174 gcc_unreachable ();
2178 /* Tries to count the number of iterations of LOOP till it exits by EXIT
2179 by brute force -- i.e. by determining the value of the operands of the
2180 condition at EXIT in first few iterations of the loop (assuming that
2181 these values are constant) and determining the first iteration in which
2182 the condition is not satisfied. Returns the constant giving the number
2183 of the iterations of LOOP if successful, chrec_dont_know otherwise. */
2185 tree
2186 loop_niter_by_eval (struct loop *loop, edge exit)
2188 tree acnd;
2189 tree op[2], val[2], next[2], aval[2];
2190 gimple phi, cond;
2191 unsigned i, j;
2192 enum tree_code cmp;
2194 cond = last_stmt (exit->src);
2195 if (!cond || gimple_code (cond) != GIMPLE_COND)
2196 return chrec_dont_know;
2198 cmp = gimple_cond_code (cond);
2199 if (exit->flags & EDGE_TRUE_VALUE)
2200 cmp = invert_tree_comparison (cmp, false);
2202 switch (cmp)
2204 case EQ_EXPR:
2205 case NE_EXPR:
2206 case GT_EXPR:
2207 case GE_EXPR:
2208 case LT_EXPR:
2209 case LE_EXPR:
2210 op[0] = gimple_cond_lhs (cond);
2211 op[1] = gimple_cond_rhs (cond);
2212 break;
2214 default:
2215 return chrec_dont_know;
2218 for (j = 0; j < 2; j++)
2220 if (is_gimple_min_invariant (op[j]))
2222 val[j] = op[j];
2223 next[j] = NULL_TREE;
2224 op[j] = NULL_TREE;
2226 else
2228 phi = get_base_for (loop, op[j]);
2229 if (!phi)
2230 return chrec_dont_know;
2231 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2232 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2236 /* Don't issue signed overflow warnings. */
2237 fold_defer_overflow_warnings ();
2239 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2241 for (j = 0; j < 2; j++)
2242 aval[j] = get_val_for (op[j], val[j]);
2244 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2245 if (acnd && integer_zerop (acnd))
2247 fold_undefer_and_ignore_overflow_warnings ();
2248 if (dump_file && (dump_flags & TDF_DETAILS))
2249 fprintf (dump_file,
2250 "Proved that loop %d iterates %d times using brute force.\n",
2251 loop->num, i);
2252 return build_int_cst (unsigned_type_node, i);
2255 for (j = 0; j < 2; j++)
2257 val[j] = get_val_for (next[j], val[j]);
2258 if (!is_gimple_min_invariant (val[j]))
2260 fold_undefer_and_ignore_overflow_warnings ();
2261 return chrec_dont_know;
2266 fold_undefer_and_ignore_overflow_warnings ();
2268 return chrec_dont_know;
2271 /* Finds the exit of LOOP through which the loop exits after a constant
2272 number of iterations and stores the exit edge to *EXIT. The constant
2273 giving the number of iterations of LOOP is returned. The number of
2274 iterations is determined using loop_niter_by_eval (i.e. by brute force
2275 evaluation). If we are unable to find an exit for which loop_niter_by_eval
2276 determines the number of iterations, chrec_dont_know is returned. */
2278 tree
2279 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2281 unsigned i;
2282 vec<edge> exits = get_loop_exit_edges (loop);
2283 edge ex;
2284 tree niter = NULL_TREE, aniter;
2286 *exit = NULL;
2288 /* Loops with multiple exits are expensive to handle and less important. */
2289 if (!flag_expensive_optimizations
2290 && exits.length () > 1)
2292 exits.release ();
2293 return chrec_dont_know;
2296 FOR_EACH_VEC_ELT (exits, i, ex)
2298 if (!just_once_each_iteration_p (loop, ex->src))
2299 continue;
2301 aniter = loop_niter_by_eval (loop, ex);
2302 if (chrec_contains_undetermined (aniter))
2303 continue;
2305 if (niter
2306 && !tree_int_cst_lt (aniter, niter))
2307 continue;
2309 niter = aniter;
2310 *exit = ex;
2312 exits.release ();
2314 return niter ? niter : chrec_dont_know;
2319 Analysis of upper bounds on the number of iterations of a loop.
2323 static double_int derive_constant_upper_bound_ops (tree, tree,
2324 enum tree_code, tree);
2326 /* Returns a constant upper bound on the value of the right-hand side of
2327 an assignment statement STMT. */
2329 static double_int
2330 derive_constant_upper_bound_assign (gimple stmt)
2332 enum tree_code code = gimple_assign_rhs_code (stmt);
2333 tree op0 = gimple_assign_rhs1 (stmt);
2334 tree op1 = gimple_assign_rhs2 (stmt);
2336 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2337 op0, code, op1);
2340 /* Returns a constant upper bound on the value of expression VAL. VAL
2341 is considered to be unsigned. If its type is signed, its value must
2342 be nonnegative. */
2344 static double_int
2345 derive_constant_upper_bound (tree val)
2347 enum tree_code code;
2348 tree op0, op1;
2350 extract_ops_from_tree (val, &code, &op0, &op1);
2351 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2354 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2355 whose type is TYPE. The expression is considered to be unsigned. If
2356 its type is signed, its value must be nonnegative. */
2358 static double_int
2359 derive_constant_upper_bound_ops (tree type, tree op0,
2360 enum tree_code code, tree op1)
2362 tree subtype, maxt;
2363 double_int bnd, max, mmax, cst;
2364 gimple stmt;
2366 if (INTEGRAL_TYPE_P (type))
2367 maxt = TYPE_MAX_VALUE (type);
2368 else
2369 maxt = upper_bound_in_type (type, type);
2371 max = tree_to_double_int (maxt);
2373 switch (code)
2375 case INTEGER_CST:
2376 return tree_to_double_int (op0);
2378 CASE_CONVERT:
2379 subtype = TREE_TYPE (op0);
2380 if (!TYPE_UNSIGNED (subtype)
2381 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2382 that OP0 is nonnegative. */
2383 && TYPE_UNSIGNED (type)
2384 && !tree_expr_nonnegative_p (op0))
2386 /* If we cannot prove that the casted expression is nonnegative,
2387 we cannot establish more useful upper bound than the precision
2388 of the type gives us. */
2389 return max;
2392 /* We now know that op0 is a nonnegative value. Try deriving an upper
2393 bound for it. */
2394 bnd = derive_constant_upper_bound (op0);
2396 /* If the bound does not fit in TYPE, max. value of TYPE could be
2397 attained. */
2398 if (max.ult (bnd))
2399 return max;
2401 return bnd;
2403 case PLUS_EXPR:
2404 case POINTER_PLUS_EXPR:
2405 case MINUS_EXPR:
2406 if (TREE_CODE (op1) != INTEGER_CST
2407 || !tree_expr_nonnegative_p (op0))
2408 return max;
2410 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
2411 choose the most logical way to treat this constant regardless
2412 of the signedness of the type. */
2413 cst = tree_to_double_int (op1);
2414 cst = cst.sext (TYPE_PRECISION (type));
2415 if (code != MINUS_EXPR)
2416 cst = -cst;
2418 bnd = derive_constant_upper_bound (op0);
2420 if (cst.is_negative ())
2422 cst = -cst;
2423 /* Avoid CST == 0x80000... */
2424 if (cst.is_negative ())
2425 return max;
2427 /* OP0 + CST. We need to check that
2428 BND <= MAX (type) - CST. */
2430 mmax = max - cst;
2431 if (bnd.ugt (mmax))
2432 return max;
2434 return bnd + cst;
2436 else
2438 /* OP0 - CST, where CST >= 0.
2440 If TYPE is signed, we have already verified that OP0 >= 0, and we
2441 know that the result is nonnegative. This implies that
2442 VAL <= BND - CST.
2444 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2445 otherwise the operation underflows.
2448 /* This should only happen if the type is unsigned; however, for
2449 buggy programs that use overflowing signed arithmetic even with
2450 -fno-wrapv, this condition may also be true for signed values. */
2451 if (bnd.ult (cst))
2452 return max;
2454 if (TYPE_UNSIGNED (type))
2456 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2457 double_int_to_tree (type, cst));
2458 if (!tem || integer_nonzerop (tem))
2459 return max;
2462 bnd -= cst;
2465 return bnd;
2467 case FLOOR_DIV_EXPR:
2468 case EXACT_DIV_EXPR:
2469 if (TREE_CODE (op1) != INTEGER_CST
2470 || tree_int_cst_sign_bit (op1))
2471 return max;
2473 bnd = derive_constant_upper_bound (op0);
2474 return bnd.udiv (tree_to_double_int (op1), FLOOR_DIV_EXPR);
2476 case BIT_AND_EXPR:
2477 if (TREE_CODE (op1) != INTEGER_CST
2478 || tree_int_cst_sign_bit (op1))
2479 return max;
2480 return tree_to_double_int (op1);
2482 case SSA_NAME:
2483 stmt = SSA_NAME_DEF_STMT (op0);
2484 if (gimple_code (stmt) != GIMPLE_ASSIGN
2485 || gimple_assign_lhs (stmt) != op0)
2486 return max;
2487 return derive_constant_upper_bound_assign (stmt);
2489 default:
2490 return max;
2494 /* Records that every statement in LOOP is executed I_BOUND times.
2495 REALISTIC is true if I_BOUND is expected to be close to the real number
2496 of iterations. UPPER is true if we are sure the loop iterates at most
2497 I_BOUND times. */
2499 void
2500 record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
2501 bool upper)
2503 /* Update the bounds only when there is no previous estimation, or when the
2504 current estimation is smaller. */
2505 if (upper
2506 && (!loop->any_upper_bound
2507 || i_bound.ult (loop->nb_iterations_upper_bound)))
2509 loop->any_upper_bound = true;
2510 loop->nb_iterations_upper_bound = i_bound;
2512 if (realistic
2513 && (!loop->any_estimate
2514 || i_bound.ult (loop->nb_iterations_estimate)))
2516 loop->any_estimate = true;
2517 loop->nb_iterations_estimate = i_bound;
2520 /* If an upper bound is smaller than the realistic estimate of the
2521 number of iterations, use the upper bound instead. */
2522 if (loop->any_upper_bound
2523 && loop->any_estimate
2524 && loop->nb_iterations_upper_bound.ult (loop->nb_iterations_estimate))
2525 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
2528 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
2529 is true if the loop is exited immediately after STMT, and this exit
2530 is taken at the latest when STMT is executed BOUND + 1 times.
2531 REALISTIC is true if BOUND is expected to be close to the real number
2532 of iterations. UPPER is true if we are sure the loop iterates at most
2533 BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */
2535 static void
2536 record_estimate (struct loop *loop, tree bound, double_int i_bound,
2537 gimple at_stmt, bool is_exit, bool realistic, bool upper)
2539 double_int delta;
2541 if (dump_file && (dump_flags & TDF_DETAILS))
2543 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2544 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
2545 fprintf (dump_file, " is %sexecuted at most ",
2546 upper ? "" : "probably ");
2547 print_generic_expr (dump_file, bound, TDF_SLIM);
2548 fprintf (dump_file, " (bounded by ");
2549 dump_double_int (dump_file, i_bound, true);
2550 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2553 /* If the I_BOUND is just an estimate of BOUND, it is rarely close to the
2554 real number of iterations. */
2555 if (TREE_CODE (bound) != INTEGER_CST)
2556 realistic = false;
2557 else
2558 gcc_checking_assert (i_bound == tree_to_double_int (bound));
2559 if (!upper && !realistic)
2560 return;
2562 /* If we have a guaranteed upper bound, record it in the appropriate
2563 list. */
2564 if (upper)
2566 struct nb_iter_bound *elt = ggc_alloc_nb_iter_bound ();
2568 elt->bound = i_bound;
2569 elt->stmt = at_stmt;
2570 elt->is_exit = is_exit;
2571 elt->next = loop->bounds;
2572 loop->bounds = elt;
2575 /* Only if the statement is executed on every path to the loop latch can we
2576 directly infer the upper bound on the # of iterations of the loop. */
2577 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
2578 return;
2580 /* Update the number of iteration estimates according to the bound.
2581 If at_stmt is an exit then the loop latch is executed at most BOUND times,
2582 otherwise it can be executed BOUND + 1 times. We will lower the estimate
2583 later if such a statement must be executed on the last iteration. */
2584 if (is_exit)
2585 delta = double_int_zero;
2586 else
2587 delta = double_int_one;
2588 i_bound += delta;
2590 /* If an overflow occurred, ignore the result. */
2591 if (i_bound.ult (delta))
2592 return;
2594 record_niter_bound (loop, i_bound, realistic, upper);
2597 /* Record the estimate on number of iterations of LOOP based on the fact that
2598 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
2599 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
2600 estimated number of iterations is expected to be close to the real one.
2601 UPPER is true if we are sure the induction variable does not wrap. */
2603 static void
2604 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
2605 tree low, tree high, bool realistic, bool upper)
2607 tree niter_bound, extreme, delta;
2608 tree type = TREE_TYPE (base), unsigned_type;
2609 double_int max;
2611 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
2612 return;
2614 if (dump_file && (dump_flags & TDF_DETAILS))
2616 fprintf (dump_file, "Induction variable (");
2617 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
2618 fprintf (dump_file, ") ");
2619 print_generic_expr (dump_file, base, TDF_SLIM);
2620 fprintf (dump_file, " + ");
2621 print_generic_expr (dump_file, step, TDF_SLIM);
2622 fprintf (dump_file, " * iteration does not wrap in statement ");
2623 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2624 fprintf (dump_file, " in loop %d.\n", loop->num);
2627 unsigned_type = unsigned_type_for (type);
2628 base = fold_convert (unsigned_type, base);
2629 step = fold_convert (unsigned_type, step);
2631 if (tree_int_cst_sign_bit (step))
2633 extreme = fold_convert (unsigned_type, low);
2634 if (TREE_CODE (base) != INTEGER_CST)
2635 base = fold_convert (unsigned_type, high);
2636 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
2637 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
2639 else
2641 extreme = fold_convert (unsigned_type, high);
2642 if (TREE_CODE (base) != INTEGER_CST)
2643 base = fold_convert (unsigned_type, low);
2644 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
2647 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
2648 would get out of the range. */
2649 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
2650 max = derive_constant_upper_bound (niter_bound);
2651 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
2654 /* Determine information about number of iterations of a LOOP from the index
2655 IDX of a data reference accessed in STMT. Whether the inferred bound is
2656 reliable is decided from the position of STMT within LOOP. Callback for
2657 for_each_index. */
2659 struct ilb_data
2661 struct loop *loop;
2662 gimple stmt;
2665 static bool
2666 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
2668 struct ilb_data *data = (struct ilb_data *) dta;
2669 tree ev, init, step;
2670 tree low, high, type, next;
2671 bool sign, upper = true, at_end = false;
2672 struct loop *loop = data->loop;
2673 bool reliable = true;
2675 if (TREE_CODE (base) != ARRAY_REF)
2676 return true;
2678 /* For arrays at the end of the structure, we are not guaranteed that they
2679 do not really extend over their declared size. However, for arrays of
2680 size greater than one, this is unlikely to be intended. */
2681 if (array_at_struct_end_p (base))
2683 at_end = true;
2684 upper = false;
2687 struct loop *dloop = loop_containing_stmt (data->stmt);
2688 if (!dloop)
2689 return true;
2691 ev = analyze_scalar_evolution (dloop, *idx);
2692 ev = instantiate_parameters (loop, ev);
2693 init = initial_condition (ev);
2694 step = evolution_part_in_loop_num (ev, loop->num);
2696 if (!init
2697 || !step
2698 || TREE_CODE (step) != INTEGER_CST
2699 || integer_zerop (step)
2700 || tree_contains_chrecs (init, NULL)
2701 || chrec_contains_symbols_defined_in_loop (init, loop->num))
2702 return true;
2704 low = array_ref_low_bound (base);
2705 high = array_ref_up_bound (base);
2707 /* The case of nonconstant bounds could be handled, but it would be
2708 complicated. */
2709 if (TREE_CODE (low) != INTEGER_CST
2710 || !high
2711 || TREE_CODE (high) != INTEGER_CST)
2712 return true;
2713 sign = tree_int_cst_sign_bit (step);
2714 type = TREE_TYPE (step);
2716 /* The array of length 1 at the end of a structure most likely extends
2717 beyond its bounds. */
2718 if (at_end
2719 && operand_equal_p (low, high, 0))
2720 return true;
2722 /* In case the relevant bound of the array does not fit in type, or
2723 it does, but bound + step (in type) still belongs to the range of the
2724 array, the index may wrap and still stay within the range of the array
2725 (consider e.g. if the array is indexed by the full range of
2726 unsigned char).
2728 To make things simpler, we require both bounds to fit into type, although
2729 there are cases where this would not be strictly necessary. */
2730 if (!int_fits_type_p (high, type)
2731 || !int_fits_type_p (low, type))
2732 return true;
2733 low = fold_convert (type, low);
2734 high = fold_convert (type, high);
2736 if (sign)
2737 next = fold_binary (PLUS_EXPR, type, low, step);
2738 else
2739 next = fold_binary (PLUS_EXPR, type, high, step);
2741 if (tree_int_cst_compare (low, next) <= 0
2742 && tree_int_cst_compare (next, high) <= 0)
2743 return true;
2745 /* If the access is not executed on every iteration, we must ensure that
2746 overflow does not make the access valid later. */
2747 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
2748 && scev_probably_wraps_p (initial_condition_in_loop_num (ev, loop->num),
2749 step, data->stmt, loop, true))
2750 reliable = false;
2752 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, reliable, upper);
2753 return true;
2756 /* Determine information about number of iterations of a LOOP from the bounds
2757 of arrays in the data reference REF accessed in STMT. */
2760 static void
2761 infer_loop_bounds_from_ref (struct loop *loop, gimple stmt, tree ref)
2763 struct ilb_data data;
2765 data.loop = loop;
2766 data.stmt = stmt;
2767 for_each_index (&ref, idx_infer_loop_bounds, &data);
2770 /* Determine information about number of iterations of a LOOP from the way
2771 arrays are used in STMT. */
2774 static void
2775 infer_loop_bounds_from_array (struct loop *loop, gimple stmt)
2777 if (is_gimple_assign (stmt))
2779 tree op0 = gimple_assign_lhs (stmt);
2780 tree op1 = gimple_assign_rhs1 (stmt);
2782 /* For each memory access, analyze its access function
2783 and record a bound on the loop iteration domain. */
2784 if (REFERENCE_CLASS_P (op0))
2785 infer_loop_bounds_from_ref (loop, stmt, op0);
2787 if (REFERENCE_CLASS_P (op1))
2788 infer_loop_bounds_from_ref (loop, stmt, op1);
2790 else if (is_gimple_call (stmt))
2792 tree arg, lhs;
2793 unsigned i, n = gimple_call_num_args (stmt);
2795 lhs = gimple_call_lhs (stmt);
2796 if (lhs && REFERENCE_CLASS_P (lhs))
2797 infer_loop_bounds_from_ref (loop, stmt, lhs);
2799 for (i = 0; i < n; i++)
2801 arg = gimple_call_arg (stmt, i);
2802 if (REFERENCE_CLASS_P (arg))
2803 infer_loop_bounds_from_ref (loop, stmt, arg);
2808 /* Determine information about number of iterations of a LOOP from the fact
2809 that pointer arithmetic in STMT does not overflow. */
2811 static void
2812 infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple stmt)
2814 tree def, base, step, scev, type, low, high;
2815 tree var, ptr;
2817 if (!is_gimple_assign (stmt)
2818 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
2819 return;
2821 def = gimple_assign_lhs (stmt);
2822 if (TREE_CODE (def) != SSA_NAME)
2823 return;
2825 type = TREE_TYPE (def);
2826 if (!nowrap_type_p (type))
2827 return;
2829 ptr = gimple_assign_rhs1 (stmt);
2830 if (!expr_invariant_in_loop_p (loop, ptr))
2831 return;
2833 var = gimple_assign_rhs2 (stmt);
2834 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
2835 return;
2837 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2838 if (chrec_contains_undetermined (scev))
2839 return;
2841 base = initial_condition_in_loop_num (scev, loop->num);
2842 step = evolution_part_in_loop_num (scev, loop->num);
2844 if (!base || !step
2845 || TREE_CODE (step) != INTEGER_CST
2846 || tree_contains_chrecs (base, NULL)
2847 || chrec_contains_symbols_defined_in_loop (base, loop->num))
2848 return;
2850 low = lower_bound_in_type (type, type);
2851 high = upper_bound_in_type (type, type);
2853 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
2854 produce a NULL pointer. The contrary would mean NULL points to an object,
2855 while NULL is supposed to compare unequal with the address of all objects.
2856 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
2857 NULL pointer since that would mean wrapping, which we assume here not to
2858 happen. So, we can exclude NULL from the valid range of pointer
2859 arithmetic. */
2860 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
2861 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
2863 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
2866 /* Determine information about number of iterations of a LOOP from the fact
2867 that signed arithmetic in STMT does not overflow. */
2869 static void
2870 infer_loop_bounds_from_signedness (struct loop *loop, gimple stmt)
2872 tree def, base, step, scev, type, low, high;
2874 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2875 return;
2877 def = gimple_assign_lhs (stmt);
2879 if (TREE_CODE (def) != SSA_NAME)
2880 return;
2882 type = TREE_TYPE (def);
2883 if (!INTEGRAL_TYPE_P (type)
2884 || !TYPE_OVERFLOW_UNDEFINED (type))
2885 return;
2887 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2888 if (chrec_contains_undetermined (scev))
2889 return;
2891 base = initial_condition_in_loop_num (scev, loop->num);
2892 step = evolution_part_in_loop_num (scev, loop->num);
2894 if (!base || !step
2895 || TREE_CODE (step) != INTEGER_CST
2896 || tree_contains_chrecs (base, NULL)
2897 || chrec_contains_symbols_defined_in_loop (base, loop->num))
2898 return;
2900 low = lower_bound_in_type (type, type);
2901 high = upper_bound_in_type (type, type);
2903 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
2906 /* The following analyzers extract information on the bounds
2907 of LOOP from the following undefined behaviors:
2909 - data references should not access elements over the statically
2910 allocated size,
2912 - signed variables should not overflow when flag_wrapv is not set.
2915 static void
2916 infer_loop_bounds_from_undefined (struct loop *loop)
2918 unsigned i;
2919 basic_block *bbs;
2920 gimple_stmt_iterator bsi;
2921 basic_block bb;
2922 bool reliable;
2924 bbs = get_loop_body (loop);
2926 for (i = 0; i < loop->num_nodes; i++)
2928 bb = bbs[i];
2930 /* If BB is not executed in each iteration of the loop, we cannot
2931 use the operations in it to infer reliable upper bound on the
2932 # of iterations of the loop. However, we can use it as a guess.
2933 Reliable guesses come only from array bounds. */
2934 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
2936 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2938 gimple stmt = gsi_stmt (bsi);
2940 infer_loop_bounds_from_array (loop, stmt);
2942 if (reliable)
2944 infer_loop_bounds_from_signedness (loop, stmt);
2945 infer_loop_bounds_from_pointer_arith (loop, stmt);
2951 free (bbs);
2954 /* Converts VAL to double_int. */
2956 static double_int
2957 gcov_type_to_double_int (gcov_type val)
2959 double_int ret;
2961 ret.low = (unsigned HOST_WIDE_INT) val;
2962 /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
2963 the size of type. */
2964 val >>= HOST_BITS_PER_WIDE_INT - 1;
2965 val >>= 1;
2966 ret.high = (unsigned HOST_WIDE_INT) val;
2968 return ret;
2971 /* Compare double ints, callback for qsort. */
2974 double_int_cmp (const void *p1, const void *p2)
2976 const double_int *d1 = (const double_int *)p1;
2977 const double_int *d2 = (const double_int *)p2;
2978 if (*d1 == *d2)
2979 return 0;
2980 if (d1->ult (*d2))
2981 return -1;
2982 return 1;
2985 /* Return index of BOUND in BOUNDS array sorted in increasing order.
2986 Lookup by binary search. */
2989 bound_index (vec<double_int> bounds, double_int bound)
2991 unsigned int end = bounds.length ();
2992 unsigned int begin = 0;
2994 /* Find a matching index by means of a binary search. */
2995 while (begin != end)
2997 unsigned int middle = (begin + end) / 2;
2998 double_int index = bounds[middle];
3000 if (index == bound)
3001 return middle;
3002 else if (index.ult (bound))
3003 begin = middle + 1;
3004 else
3005 end = middle;
3007 gcc_unreachable ();
3010 /* Used to hold vector of queues of basic blocks below. */
3011 typedef vec<basic_block> bb_queue;
3013 /* We recorded loop bounds only for statements dominating the loop latch (and
3014 thus executed in each loop iteration). If there are any bounds on statements not
3015 dominating the loop latch we can improve the estimate by walking the loop
3016 body and seeing if every path from loop header to loop latch contains
3017 some bounded statement. */
3019 static void
3020 discover_iteration_bound_by_body_walk (struct loop *loop)
3022 pointer_map_t *bb_bounds;
3023 struct nb_iter_bound *elt;
3024 vec<double_int> bounds = vNULL;
3025 vec<bb_queue> queues = vNULL;
3026 bb_queue queue = bb_queue();
3027 ptrdiff_t queue_index;
3028 ptrdiff_t latch_index = 0;
3029 pointer_map_t *block_priority;
3031 /* Discover what bounds may interest us. */
3032 for (elt = loop->bounds; elt; elt = elt->next)
3034 double_int bound = elt->bound;
3036 /* An exit terminates the loop at a given iteration, while a non-exit
3037 produces an undefined effect in the next iteration. */
3038 if (!elt->is_exit)
3040 bound += double_int_one;
3041 /* If an overflow occurred, ignore the result. */
3042 if (bound.is_zero ())
3043 continue;
3046 if (!loop->any_upper_bound
3047 || bound.ult (loop->nb_iterations_upper_bound))
3048 bounds.safe_push (bound);
3051 /* Exit early if there is nothing to do. */
3052 if (!bounds.exists ())
3053 return;
3055 if (dump_file && (dump_flags & TDF_DETAILS))
3056 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3058 /* Sort the bounds in decreasing order. */
3059 qsort (bounds.address (), bounds.length (),
3060 sizeof (double_int), double_int_cmp);
3062 /* For every basic block record the lowest bound that is guaranteed to
3063 terminate the loop. */
3065 bb_bounds = pointer_map_create ();
3066 for (elt = loop->bounds; elt; elt = elt->next)
3068 double_int bound = elt->bound;
3069 if (!elt->is_exit)
3071 bound += double_int_one;
3072 /* If an overflow occurred, ignore the result. */
3073 if (bound.is_zero ())
3074 continue;
3077 if (!loop->any_upper_bound
3078 || bound.ult (loop->nb_iterations_upper_bound))
3080 ptrdiff_t index = bound_index (bounds, bound);
3081 void **entry = pointer_map_contains (bb_bounds,
3082 gimple_bb (elt->stmt));
3083 if (!entry)
3084 *pointer_map_insert (bb_bounds,
3085 gimple_bb (elt->stmt)) = (void *)index;
3086 else if ((ptrdiff_t)*entry > index)
3087 *entry = (void *)index;
3091 block_priority = pointer_map_create ();
3093 /* Perform shortest path discovery loop->header ... loop->latch.
3095 The "distance" is given by the smallest loop bound of a basic block
3096 present in the path, and we look for the path with the largest smallest
3097 bound on it.
3099 To avoid the need for a Fibonacci heap on double ints we simply compress
3100 double ints into indexes to the BOUNDS array and then represent the queue
3101 as arrays of queues for every index.
3102 Index of BOUNDS.length() means that the execution of a given BB has
3103 no bounds determined.
3105 VISITED is a pointer map translating basic block into smallest index
3106 it was inserted into the priority queue with. */
3107 latch_index = -1;
3109 /* Start walk in loop header with index set to infinite bound. */
3110 queue_index = bounds.length ();
3111 queues.safe_grow_cleared (queue_index + 1);
3112 queue.safe_push (loop->header);
3113 queues[queue_index] = queue;
3114 *pointer_map_insert (block_priority, loop->header) = (void *)queue_index;
3116 for (; queue_index >= 0; queue_index--)
3118 if (latch_index < queue_index)
3120 while (queues[queue_index].length ())
3122 basic_block bb;
3123 ptrdiff_t bound_index = queue_index;
3124 void **entry;
3125 edge e;
3126 edge_iterator ei;
3128 queue = queues[queue_index];
3129 bb = queue.pop ();
3131 /* OK, we later re-inserted the BB with a higher priority, so skip this stale entry. */
3132 if ((ptrdiff_t)*pointer_map_contains (block_priority, bb) > queue_index)
3133 continue;
3135 /* See if we can improve the bound. */
3136 entry = pointer_map_contains (bb_bounds, bb);
3137 if (entry && (ptrdiff_t)*entry < bound_index)
3138 bound_index = (ptrdiff_t)*entry;
3140 /* Insert successors into the queue, watch for the latch edge
3141 and record greatest index we saw. */
3142 FOR_EACH_EDGE (e, ei, bb->succs)
3144 bool insert = false;
3145 void **entry;
3147 if (loop_exit_edge_p (loop, e))
3148 continue;
3150 if (e == loop_latch_edge (loop)
3151 && latch_index < bound_index)
3152 latch_index = bound_index;
3153 else if (!(entry = pointer_map_contains (block_priority, e->dest)))
3155 insert = true;
3156 *pointer_map_insert (block_priority, e->dest) = (void *)bound_index;
3158 else if ((ptrdiff_t)*entry < bound_index)
3160 insert = true;
3161 *entry = (void *)bound_index;
3164 if (insert)
3166 bb_queue queue2 = queues[bound_index];
3167 queue2.safe_push (e->dest);
3168 queues[bound_index] = queue2;
3173 else
3174 queues[queue_index].release ();
3177 gcc_assert (latch_index >= 0);
3178 if ((unsigned)latch_index < bounds.length ())
3180 if (dump_file && (dump_flags & TDF_DETAILS))
3182 fprintf (dump_file, "Found better loop bound ");
3183 dump_double_int (dump_file, bounds[latch_index], true);
3184 fprintf (dump_file, "\n");
3186 record_niter_bound (loop, bounds[latch_index], false, true);
3189 queues.release ();
3190 pointer_map_destroy (bb_bounds);
3191 pointer_map_destroy (block_priority);
3194 /* See if every path across the loop goes through a statement that is known
3195 to not execute at the last iteration. In that case we can decrease the
3196 iteration count by 1. */
3198 static void
3199 maybe_lower_iteration_bound (struct loop *loop)
3201 pointer_set_t *not_executed_last_iteration = NULL;
3202 struct nb_iter_bound *elt;
3203 bool found_exit = false;
3204 vec<basic_block> queue = vNULL;
3205 bitmap visited;
3207 /* Collect all statements with interesting (i.e. lower than
3208 nb_iterations_upper_bound) bound on them.
3210 TODO: Due to the way record_estimate chooses estimates to store, the bounds
3211 will always be nb_iterations_upper_bound-1. We can change this to also
3212 record statements not dominating the loop latch and update the walk below
3213 to the shortest path algorithm. */
3214 for (elt = loop->bounds; elt; elt = elt->next)
3216 if (!elt->is_exit
3217 && elt->bound.ult (loop->nb_iterations_upper_bound))
3219 if (!not_executed_last_iteration)
3220 not_executed_last_iteration = pointer_set_create ();
3221 pointer_set_insert (not_executed_last_iteration, elt->stmt);
3224 if (!not_executed_last_iteration)
3225 return;
3227 /* Start DFS walk in the loop header and see if we can reach the
3228 loop latch or any of the exits (including statements with side
3229 effects that may terminate the loop otherwise) without visiting
3230 any of the statements known to have undefined effect on the last
3231 iteration. */
3232 queue.safe_push (loop->header);
3233 visited = BITMAP_ALLOC (NULL);
3234 bitmap_set_bit (visited, loop->header->index);
3235 found_exit = false;
3239 basic_block bb = queue.pop ();
3240 gimple_stmt_iterator gsi;
3241 bool stmt_found = false;
3243 /* Look for possible exits and statements bounding the execution. */
3244 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3246 gimple stmt = gsi_stmt (gsi);
3247 if (pointer_set_contains (not_executed_last_iteration, stmt))
3249 stmt_found = true;
3250 break;
3252 if (gimple_has_side_effects (stmt))
3254 found_exit = true;
3255 break;
3258 if (found_exit)
3259 break;
3261 /* If no bounding statement is found, continue the walk. */
3262 if (!stmt_found)
3264 edge e;
3265 edge_iterator ei;
3267 FOR_EACH_EDGE (e, ei, bb->succs)
3269 if (loop_exit_edge_p (loop, e)
3270 || e == loop_latch_edge (loop))
3272 found_exit = true;
3273 break;
3275 if (bitmap_set_bit (visited, e->dest->index))
3276 queue.safe_push (e->dest);
3280 while (queue.length () && !found_exit);
3282 /* If every path through the loop reaches a bounding statement before an
3283 exit, then we know the last iteration of the loop will have undefined
3284 effect and we can decrease the number of iterations. */
3286 if (!found_exit)
3288 if (dump_file && (dump_flags & TDF_DETAILS))
3289 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
3290 "undefined statement must be executed at the last iteration.\n");
3291 record_niter_bound (loop, loop->nb_iterations_upper_bound - double_int_one,
3292 false, true);
3294 BITMAP_FREE (visited);
3295 queue.release ();
3298 /* Records estimates on numbers of iterations of LOOP, including estimates
3299 derived from undefined behavior. */
3301 void
3302 estimate_numbers_of_iterations_loop (struct loop *loop)
3304 vec<edge> exits;
3305 tree niter, type;
3306 unsigned i;
3307 struct tree_niter_desc niter_desc;
3308 edge ex;
3309 double_int bound;
3310 edge likely_exit;
3312 /* Give up if we already have tried to compute an estimation. */
3313 if (loop->estimate_state != EST_NOT_COMPUTED)
3314 return;
3316 loop->estimate_state = EST_AVAILABLE;
3317 /* Force estimate computation but leave any existing upper bound in place. */
3318 loop->any_estimate = false;
3320 exits = get_loop_exit_edges (loop);
3321 likely_exit = single_likely_exit (loop);
3322 FOR_EACH_VEC_ELT (exits, i, ex)
3324 if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
3325 continue;
3327 niter = niter_desc.niter;
3328 type = TREE_TYPE (niter);
3329 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
3330 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
3331 build_int_cst (type, 0),
3332 niter);
3333 record_estimate (loop, niter, niter_desc.max,
3334 last_stmt (ex->src),
3335 true, ex == likely_exit, true);
3337 exits.release ();
3339 infer_loop_bounds_from_undefined (loop);
3341 discover_iteration_bound_by_body_walk (loop);
3343 maybe_lower_iteration_bound (loop);
3345 /* If we have a measured profile, use it to estimate the number of
3346 iterations. */
3347 if (loop->header->count != 0)
3349 gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
3350 bound = gcov_type_to_double_int (nit);
3351 record_niter_bound (loop, bound, true, false);
3355 /* Sets NIT to the estimated number of executions of the latch of the
3356 LOOP. If we have no reliable estimate, the function returns false,
3357 otherwise returns true. */
3360 bool
3361 estimated_loop_iterations (struct loop *loop, double_int *nit)
3363 /* When SCEV information is available, try to update loop iterations
3364 estimate. Otherwise just return whatever we recorded earlier. */
3365 if (scev_initialized_p ())
3366 estimate_numbers_of_iterations_loop (loop);
3368 /* Even if the bound is not recorded, possibly we can derive one from
3369 the profile. */
3370 if (!loop->any_estimate)
3372 if (loop->header->count)
3374 *nit = gcov_type_to_double_int
3375 (expected_loop_iterations_unbounded (loop) + 1);
3376 return true;
3378 return false;
3381 *nit = loop->nb_iterations_estimate;
3382 return true;
3385 /* Sets NIT to an upper bound for the maximum number of executions of the
3386 latch of the LOOP. If we have no reliable estimate, the function returns
3387 false, otherwise returns true. */
3389 bool
3390 max_loop_iterations (struct loop *loop, double_int *nit)
3392 /* When SCEV information is available, try to update loop iterations
3393 estimate. Otherwise just return whatever we recorded earlier. */
3394 if (scev_initialized_p ())
3395 estimate_numbers_of_iterations_loop (loop);
3396 if (!loop->any_upper_bound)
3397 return false;
3399 *nit = loop->nb_iterations_upper_bound;
3400 return true;
3403 /* Similar to estimated_loop_iterations, but returns the estimate only
3404 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3405 on the number of iterations of LOOP could not be derived, returns -1. */
3407 HOST_WIDE_INT
3408 estimated_loop_iterations_int (struct loop *loop)
3410 double_int nit;
3411 HOST_WIDE_INT hwi_nit;
3413 if (!estimated_loop_iterations (loop, &nit))
3414 return -1;
3416 if (!nit.fits_shwi ())
3417 return -1;
3418 hwi_nit = nit.to_shwi ();
3420 return hwi_nit < 0 ? -1 : hwi_nit;
3423 /* Similar to max_loop_iterations, but returns the estimate only
3424 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3425 on the number of iterations of LOOP could not be derived, returns -1. */
3427 HOST_WIDE_INT
3428 max_loop_iterations_int (struct loop *loop)
3430 double_int nit;
3431 HOST_WIDE_INT hwi_nit;
3433 if (!max_loop_iterations (loop, &nit))
3434 return -1;
3436 if (!nit.fits_shwi ())
3437 return -1;
3438 hwi_nit = nit.to_shwi ();
3440 return hwi_nit < 0 ? -1 : hwi_nit;
3443 /* Returns an upper bound on the number of executions of statements
3444 in the LOOP. For statements before the loop exit, this exceeds
3445 the number of executions of the latch by one. */
3447 HOST_WIDE_INT
3448 max_stmt_executions_int (struct loop *loop)
3450 HOST_WIDE_INT nit = max_loop_iterations_int (loop);
3451 HOST_WIDE_INT snit;
3453 if (nit == -1)
3454 return -1;
3456 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3458 /* If the computation overflows, return -1. */
3459 return snit < 0 ? -1 : snit;
3462 /* Returns an estimate for the number of executions of statements
3463 in the LOOP. For statements before the loop exit, this exceeds
3464 the number of executions of the latch by one. */
3466 HOST_WIDE_INT
3467 estimated_stmt_executions_int (struct loop *loop)
3469 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
3470 HOST_WIDE_INT snit;
3472 if (nit == -1)
3473 return -1;
3475 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3477 /* If the computation overflows, return -1. */
3478 return snit < 0 ? -1 : snit;
3481 /* Sets NIT to the estimated maximum number of executions of the latch of the
3482 LOOP, plus one. If we have no reliable estimate, the function returns
3483 false, otherwise returns true. */
3485 bool
3486 max_stmt_executions (struct loop *loop, double_int *nit)
3488 double_int nit_minus_one;
3490 if (!max_loop_iterations (loop, nit))
3491 return false;
3493 nit_minus_one = *nit;
3495 *nit += double_int_one;
3497 return (*nit).ugt (nit_minus_one);
3500 /* Sets NIT to the estimated number of executions of the latch of the
3501 LOOP, plus one. If we have no reliable estimate, the function returns
3502 false, otherwise returns true. */
3504 bool
3505 estimated_stmt_executions (struct loop *loop, double_int *nit)
3507 double_int nit_minus_one;
3509 if (!estimated_loop_iterations (loop, nit))
3510 return false;
3512 nit_minus_one = *nit;
3514 *nit += double_int_one;
3516 return (*nit).ugt (nit_minus_one);
3519 /* Records estimates on numbers of iterations of loops. */
3521 void
3522 estimate_numbers_of_iterations (void)
3524 loop_iterator li;
3525 struct loop *loop;
3527 /* We don't want to issue signed overflow warnings while getting
3528 loop iteration estimates. */
3529 fold_defer_overflow_warnings ();
3531 FOR_EACH_LOOP (li, loop, 0)
3533 estimate_numbers_of_iterations_loop (loop);
3536 fold_undefer_and_ignore_overflow_warnings ();
3539 /* Returns true if statement S1 dominates statement S2. */
3541 bool
3542 stmt_dominates_stmt_p (gimple s1, gimple s2)
3544 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
3546 if (!bb1
3547 || s1 == s2)
3548 return true;
3550 if (bb1 == bb2)
3552 gimple_stmt_iterator bsi;
3554 if (gimple_code (s2) == GIMPLE_PHI)
3555 return false;
3557 if (gimple_code (s1) == GIMPLE_PHI)
3558 return true;
3560 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
3561 if (gsi_stmt (bsi) == s1)
3562 return true;
3564 return false;
3567 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
3570 /* Returns true when we can prove that the number of executions of
3571 STMT in the loop is at most NITER, according to the bound on
3572 the number of executions of the statement NITER_BOUND->stmt recorded in
3573 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
3575 ??? This code can become quite a CPU hog - we can have many bounds,
3576 and large basic blocks forcing stmt_dominates_stmt_p to be queried
3577 many times on large basic blocks, so the whole thing is O(n^2)
3578 for scev_probably_wraps_p invocation (that can be done n times).
3580 It would make more sense (and give better answers) to remember BB
3581 bounds computed by discover_iteration_bound_by_body_walk. */
3583 static bool
3584 n_of_executions_at_most (gimple stmt,
3585 struct nb_iter_bound *niter_bound,
3586 tree niter)
3588 double_int bound = niter_bound->bound;
3589 tree nit_type = TREE_TYPE (niter), e;
3590 enum tree_code cmp;
3592 gcc_assert (TYPE_UNSIGNED (nit_type));
3594 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
3595 the number of iterations is small. */
3596 if (!double_int_fits_to_tree_p (nit_type, bound))
3597 return false;
3599 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
3600 times. This means that:
3602 -- if NITER_BOUND->is_exit is true, then everything after
3603 it is executed at most NITER_BOUND->bound times.
3605 -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
3606 is executed, then NITER_BOUND->stmt is executed as well in the same
3607 iteration then STMT is executed at most NITER_BOUND->bound + 1 times.
3609 If we can determine that NITER_BOUND->stmt is always executed
3610 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
3611 We conclude the latter if both statements belong to the same
3612 basic block, STMT is before NITER_BOUND->stmt, and there are no
3613 statements with side effects in between. */
3615 if (niter_bound->is_exit)
3617 if (stmt == niter_bound->stmt
3618 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
3619 return false;
3620 cmp = GE_EXPR;
3622 else
3624 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
3626 gimple_stmt_iterator bsi;
3627 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
3628 || gimple_code (stmt) == GIMPLE_PHI
3629 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
3630 return false;
3632 /* By stmt_dominates_stmt_p we already know that STMT appears
3633 before NITER_BOUND->STMT. Still need to test that the loop
3634 cannot be terminated by a side effect in between. */
3635 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
3636 gsi_next (&bsi))
3637 if (gimple_has_side_effects (gsi_stmt (bsi)))
3638 return false;
3639 bound += double_int_one;
3640 if (bound.is_zero ()
3641 || !double_int_fits_to_tree_p (nit_type, bound))
3642 return false;
3644 cmp = GT_EXPR;
3647 e = fold_binary (cmp, boolean_type_node,
3648 niter, double_int_to_tree (nit_type, bound));
3649 return e && integer_nonzerop (e);
3652 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
3654 bool
3655 nowrap_type_p (tree type)
3657 if (INTEGRAL_TYPE_P (type)
3658 && TYPE_OVERFLOW_UNDEFINED (type))
3659 return true;
3661 if (POINTER_TYPE_P (type))
3662 return true;
3664 return false;
3667 /* Return false only when the induction variable BASE + STEP * I is
3668 known to not overflow: i.e. when the number of iterations is small
3669 enough with respect to the step and initial condition in order to
3670 keep the evolution confined in TYPE's bounds. Return true when the
3671 iv is known to overflow or when the property is not computable.
3673 USE_OVERFLOW_SEMANTICS is true if this function should assume that
3674 the rules for overflow of the given language apply (e.g., that signed
3675 arithmetic in C does not overflow). */
3677 bool
3678 scev_probably_wraps_p (tree base, tree step,
3679 gimple at_stmt, struct loop *loop,
3680 bool use_overflow_semantics)
3682 tree delta, step_abs;
3683 tree unsigned_type, valid_niter;
3684 tree type = TREE_TYPE (step);
3685 tree e;
3686 double_int niter;
3687 struct nb_iter_bound *bound;
3689 /* FIXME: We really need something like
3690 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
3692 We used to test for the following situation that frequently appears
3693 during address arithmetic:
3695 D.1621_13 = (long unsigned intD.4) D.1620_12;
3696 D.1622_14 = D.1621_13 * 8;
3697 D.1623_15 = (doubleD.29 *) D.1622_14;
3699 And derived that the sequence corresponding to D_14
3700 can be proved to not wrap because it is used for computing a
3701 memory access; however, this is not really the case -- for example,
3702 if D_12 = (unsigned char) [254,+,1], then D_14 has values
3703 2032, 2040, 0, 8, ..., but the code is still legal. */
3705 if (chrec_contains_undetermined (base)
3706 || chrec_contains_undetermined (step))
3707 return true;
3709 if (integer_zerop (step))
3710 return false;
3712 /* If we can use the fact that signed and pointer arithmetic does not
3713 wrap, we are done. */
3714 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
3715 return false;
3717 /* To be able to use estimates on number of iterations of the loop,
3718 we must have an upper bound on the absolute value of the step. */
3719 if (TREE_CODE (step) != INTEGER_CST)
3720 return true;
3722 /* Don't issue signed overflow warnings. */
3723 fold_defer_overflow_warnings ();
3725 /* Otherwise, compute the number of iterations before we reach the
3726 bound of the type, and verify that the loop is exited before this
3727 occurs. */
3728 unsigned_type = unsigned_type_for (type);
3729 base = fold_convert (unsigned_type, base);
3731 if (tree_int_cst_sign_bit (step))
3733 tree extreme = fold_convert (unsigned_type,
3734 lower_bound_in_type (type, type));
3735 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3736 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
3737 fold_convert (unsigned_type, step));
3739 else
3741 tree extreme = fold_convert (unsigned_type,
3742 upper_bound_in_type (type, type));
3743 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3744 step_abs = fold_convert (unsigned_type, step);
3747 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
3749 estimate_numbers_of_iterations_loop (loop);
3751 if (max_loop_iterations (loop, &niter)
3752 && double_int_fits_to_tree_p (TREE_TYPE (valid_niter), niter)
3753 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
3754 double_int_to_tree (TREE_TYPE (valid_niter),
3755 niter))) != NULL
3756 && integer_nonzerop (e))
3758 fold_undefer_and_ignore_overflow_warnings ();
3759 return false;
3761 if (at_stmt)
3762 for (bound = loop->bounds; bound; bound = bound->next)
3764 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
3766 fold_undefer_and_ignore_overflow_warnings ();
3767 return false;
3771 fold_undefer_and_ignore_overflow_warnings ();
3773 /* At this point we still don't have a proof that the iv does not
3774 overflow: give up. */
3775 return true;
3778 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
3780 void
3781 free_numbers_of_iterations_estimates_loop (struct loop *loop)
3783 struct nb_iter_bound *bound, *next;
3785 loop->nb_iterations = NULL;
3786 loop->estimate_state = EST_NOT_COMPUTED;
3787 for (bound = loop->bounds; bound; bound = next)
3789 next = bound->next;
3790 ggc_free (bound);
3793 loop->bounds = NULL;
3796 /* Frees the information on upper bounds on numbers of iterations of loops. */
3798 void
3799 free_numbers_of_iterations_estimates (void)
3801 loop_iterator li;
3802 struct loop *loop;
3804 FOR_EACH_LOOP (li, loop, 0)
3806 free_numbers_of_iterations_estimates_loop (loop);
3810 /* Substitute value VAL for ssa name NAME inside expressions held
3811 at LOOP. */
3813 void
3814 substitute_in_loop_info (struct loop *loop, tree name, tree val)
3816 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);