gcc/tree-ssa-loop-niter.c
1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "basic-block.h"
29 #include "output.h"
30 #include "diagnostic.h"
31 #include "intl.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "tree-pass.h"
36 #include "ggc.h"
37 #include "tree-chrec.h"
38 #include "tree-scalar-evolution.h"
39 #include "tree-data-ref.h"
40 #include "params.h"
41 #include "flags.h"
42 #include "toplev.h"
43 #include "tree-inline.h"
44 #include "gmp.h"
46 #define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
48 /* The maximum number of dominator BBs we search for conditions
49 of loop header copies we use for simplifying a conditional
50 expression. */
51 #define MAX_DOMINATORS_TO_WALK 8
55 /* Analysis of number of iterations of an affine exit test.  */
59 /* Bounds on some value, BELOW <= X <= UP. */
61 typedef struct
63 mpz_t below, up;
64 } bounds;
67 /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
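/* A small illustration (with hypothetical SSA names): for EXPR = a_1 + 7 this
   sets *VAR = a_1 and OFFSET = 7; for EXPR = a_1 - 2 the constant operand is
   sign extended and negated, giving OFFSET = -2; for a plain constant
   EXPR = 10, *VAR becomes the zero constant and OFFSET = 10.  */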
69 static void
70 split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
72 tree type = TREE_TYPE (expr);
73 tree op0, op1;
74 double_int off;
75 bool negate = false;
77 *var = expr;
78 mpz_set_ui (offset, 0);
80 switch (TREE_CODE (expr))
82 case MINUS_EXPR:
83 negate = true;
84 /* Fallthru. */
86 case PLUS_EXPR:
87 case POINTER_PLUS_EXPR:
88 op0 = TREE_OPERAND (expr, 0);
89 op1 = TREE_OPERAND (expr, 1);
91 if (TREE_CODE (op1) != INTEGER_CST)
92 break;
94 *var = op0;
95 /* Always sign extend the offset. */
96 off = double_int_sext (tree_to_double_int (op1),
97 TYPE_PRECISION (type));
/* For MINUS_EXPR the constant operand is subtracted, so negate the offset.  */
if (negate)
off = double_int_neg (off);
98 mpz_set_double_int (offset, off, false);
99 break;
101 case INTEGER_CST:
102 *var = build_int_cst_type (type, 0);
103 off = tree_to_double_int (expr);
104 mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
105 break;
107 default:
108 break;
112 /* Stores estimate on the minimum/maximum value of the expression VAR + OFF
113 in TYPE to MIN and MAX. */
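/* For illustration: if the static bounds of TYPE are [0, 255] and the
   addition is known not to wrap, OFF = 5 yields the range [5, 255] and
   OFF = -3 yields [0, 252]; if the addition may wrap, only the static
   bounds [0, 255] are returned.  */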
115 static void
116 determine_value_range (tree type, tree var, mpz_t off,
117 mpz_t min, mpz_t max)
119 /* If the expression is a constant, we know its value exactly. */
120 if (integer_zerop (var))
122 mpz_set (min, off);
123 mpz_set (max, off);
124 return;
127 /* If the computation may wrap, we know nothing about the value, except for
128 the range of the type. */
129 get_type_static_bounds (type, min, max);
130 if (!nowrap_type_p (type))
131 return;
133 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
134 add it to MIN, otherwise to MAX. */
135 if (mpz_sgn (off) < 0)
136 mpz_add (max, max, off);
137 else
138 mpz_add (min, min, off);
141 /* Stores the bounds on the difference of the values of the expressions
142 (var + X) and (var + Y), computed in TYPE, to BNDS. */
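/* A worked example, assuming an 8-bit type: for X = 10 and Y = 3 the bounds
   start as (7, 7); if the arithmetic in the type may wrap, M = 256 and the
   lower bound drops to 7 - 256 = -249, giving (-249, 7).  */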
144 static void
145 bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
146 bounds *bnds)
148 int rel = mpz_cmp (x, y);
149 bool may_wrap = !nowrap_type_p (type);
150 mpz_t m;
152 /* If X == Y, then the expressions are always equal.
153 If X > Y, there are the following possibilities:
154 a) neither of var + X and var + Y overflow or underflow, or both of
155 them do. Then their difference is X - Y.
156 b) var + X overflows, and var + Y does not. Then the values of the
157 expressions are var + X - M and var + Y, where M is the range of
158 the type, and their difference is X - Y - M.
159 c) var + Y underflows and var + X does not. Then the values are
160 var + X and var + Y + M, and their difference again is X - Y - M.
161 Therefore, if the arithmetic in TYPE does not overflow, then the
162 bounds are (X - Y, X - Y); otherwise they are (X - Y - M, X - Y).
163 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
164 (X - Y, X - Y + M). */
166 if (rel == 0)
168 mpz_set_ui (bnds->below, 0);
169 mpz_set_ui (bnds->up, 0);
170 return;
173 mpz_init (m);
174 mpz_set_double_int (m, double_int_mask (TYPE_PRECISION (type)), true);
175 mpz_add_ui (m, m, 1);
176 mpz_sub (bnds->up, x, y);
177 mpz_set (bnds->below, bnds->up);
179 if (may_wrap)
181 if (rel > 0)
182 mpz_sub (bnds->below, bnds->below, m);
183 else
184 mpz_add (bnds->up, bnds->up, m);
187 mpz_clear (m);
190 /* From condition C0 CMP C1 derives information regarding the
191 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
192 and stores it to BNDS. */
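/* For instance (hypothetical SSA names): when bounding (i_1 + 0) - (n_2 + 0)
   and the guard is i_1 < n_2, the derived bound is 0 - 1 = -1, so BNDS->up is
   lowered to -1, i.e. the guard implies i_1 - n_2 <= -1.  */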
194 static void
195 refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
196 tree vary, mpz_t offy,
197 tree c0, enum tree_code cmp, tree c1,
198 bounds *bnds)
200 tree varc0, varc1, tmp, ctype;
201 mpz_t offc0, offc1, loffx, loffy, bnd;
202 bool lbound = false;
203 bool no_wrap = nowrap_type_p (type);
204 bool x_ok, y_ok;
206 switch (cmp)
208 case LT_EXPR:
209 case LE_EXPR:
210 case GT_EXPR:
211 case GE_EXPR:
212 STRIP_SIGN_NOPS (c0);
213 STRIP_SIGN_NOPS (c1);
214 ctype = TREE_TYPE (c0);
215 if (!useless_type_conversion_p (ctype, type))
216 return;
218 break;
220 case EQ_EXPR:
221 /* We could derive quite precise information from EQ_EXPR, however, such
222 a guard is unlikely to appear, so we do not bother with handling
223 it. */
224 return;
226 case NE_EXPR:
227 /* NE_EXPR comparisons do not contain much useful information, except for
228 the special case of comparing with the bounds of the type. */
229 if (TREE_CODE (c1) != INTEGER_CST
230 || !INTEGRAL_TYPE_P (type))
231 return;
233 /* Ensure that the condition speaks about an expression in the same type
234 as X and Y. */
235 ctype = TREE_TYPE (c0);
236 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
237 return;
238 c0 = fold_convert (type, c0);
239 c1 = fold_convert (type, c1);
241 if (TYPE_MIN_VALUE (type)
242 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
244 cmp = GT_EXPR;
245 break;
247 if (TYPE_MAX_VALUE (type)
248 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
250 cmp = LT_EXPR;
251 break;
254 return;
255 default:
256 return;
259 mpz_init (offc0);
260 mpz_init (offc1);
261 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
262 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
264 /* We are only interested in comparisons of expressions based on VARX and
265 VARY. TODO -- we might also be able to derive some bounds from
266 expressions containing just one of the variables. */
268 if (operand_equal_p (varx, varc1, 0))
270 tmp = varc0; varc0 = varc1; varc1 = tmp;
271 mpz_swap (offc0, offc1);
272 cmp = swap_tree_comparison (cmp);
275 if (!operand_equal_p (varx, varc0, 0)
276 || !operand_equal_p (vary, varc1, 0))
277 goto end;
279 mpz_init_set (loffx, offx);
280 mpz_init_set (loffy, offy);
282 if (cmp == GT_EXPR || cmp == GE_EXPR)
284 tmp = varx; varx = vary; vary = tmp;
285 mpz_swap (offc0, offc1);
286 mpz_swap (loffx, loffy);
287 cmp = swap_tree_comparison (cmp);
288 lbound = true;
291 /* If there is no overflow, the condition implies that
293 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
295 The overflows and underflows may complicate things a bit; each
296 overflow decreases the appropriate offset by M, and underflow
297 increases it by M. The above inequality would not necessarily be
298 true if
300 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
301 VARX + OFFC0 overflows, but VARX + OFFX does not.
302 This may only happen if OFFX < OFFC0.
303 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
304 VARY + OFFC1 underflows and VARY + OFFY does not.
305 This may only happen if OFFY > OFFC1. */
307 if (no_wrap)
309 x_ok = true;
310 y_ok = true;
312 else
314 x_ok = (integer_zerop (varx)
315 || mpz_cmp (loffx, offc0) >= 0);
316 y_ok = (integer_zerop (vary)
317 || mpz_cmp (loffy, offc1) <= 0);
320 if (x_ok && y_ok)
322 mpz_init (bnd);
323 mpz_sub (bnd, loffx, loffy);
324 mpz_add (bnd, bnd, offc1);
325 mpz_sub (bnd, bnd, offc0);
327 if (cmp == LT_EXPR)
328 mpz_sub_ui (bnd, bnd, 1);
330 if (lbound)
332 mpz_neg (bnd, bnd);
333 if (mpz_cmp (bnds->below, bnd) < 0)
334 mpz_set (bnds->below, bnd);
336 else
338 if (mpz_cmp (bnd, bnds->up) < 0)
339 mpz_set (bnds->up, bnd);
341 mpz_clear (bnd);
344 mpz_clear (loffx);
345 mpz_clear (loffy);
346 end:
347 mpz_clear (offc0);
348 mpz_clear (offc1);
351 /* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
352 The subtraction is considered to be performed in arbitrary precision,
353 without overflows.
355 We do not attempt to be too clever regarding the value ranges of X and
356 Y; most of the time, they are just integers or ssa names offset by an
357 integer constant. However, we try to use the information contained in the
358 comparisons before the loop (usually created by loop header copying). */
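/* Example of the intended use (hypothetical SSA names): for X = n_2 and Y = 0
   in an unsigned 8-bit type, the value ranges give the initial bounds
   [0, 255]; a dominating entry guard n_2 > 10 then refines them to
   [11, 255].  */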
360 static void
361 bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
363 tree type = TREE_TYPE (x);
364 tree varx, vary;
365 mpz_t offx, offy;
366 mpz_t minx, maxx, miny, maxy;
367 int cnt = 0;
368 edge e;
369 basic_block bb;
370 tree cond, c0, c1;
371 enum tree_code cmp;
373 /* Get rid of unnecessary casts, but preserve the value of
374 the expressions. */
375 STRIP_SIGN_NOPS (x);
376 STRIP_SIGN_NOPS (y);
378 mpz_init (bnds->below);
379 mpz_init (bnds->up);
380 mpz_init (offx);
381 mpz_init (offy);
382 split_to_var_and_offset (x, &varx, offx);
383 split_to_var_and_offset (y, &vary, offy);
385 if (!integer_zerop (varx)
386 && operand_equal_p (varx, vary, 0))
388 /* Special case VARX == VARY -- we just need to compare the
389 offsets. Matters are a bit more complicated in case the
390 addition of the offsets may wrap. */
391 bound_difference_of_offsetted_base (type, offx, offy, bnds);
393 else
395 /* Otherwise, use the value ranges to determine the initial
396 estimates on below and up. */
397 mpz_init (minx);
398 mpz_init (maxx);
399 mpz_init (miny);
400 mpz_init (maxy);
401 determine_value_range (type, varx, offx, minx, maxx);
402 determine_value_range (type, vary, offy, miny, maxy);
404 mpz_sub (bnds->below, minx, maxy);
405 mpz_sub (bnds->up, maxx, miny);
406 mpz_clear (minx);
407 mpz_clear (maxx);
408 mpz_clear (miny);
409 mpz_clear (maxy);
412 /* If both X and Y are constants, we cannot get any more precise. */
413 if (integer_zerop (varx) && integer_zerop (vary))
414 goto end;
416 /* Now walk the dominators of the loop header and use the entry
417 guards to refine the estimates. */
418 for (bb = loop->header;
419 bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
420 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
422 if (!single_pred_p (bb))
423 continue;
424 e = single_pred_edge (bb);
426 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
427 continue;
429 cond = COND_EXPR_COND (last_stmt (e->src));
430 if (!COMPARISON_CLASS_P (cond))
431 continue;
432 c0 = TREE_OPERAND (cond, 0);
433 cmp = TREE_CODE (cond);
434 c1 = TREE_OPERAND (cond, 1);
436 if (e->flags & EDGE_FALSE_VALUE)
437 cmp = invert_tree_comparison (cmp, false);
439 refine_bounds_using_guard (type, varx, offx, vary, offy,
440 c0, cmp, c1, bnds);
441 ++cnt;
444 end:
445 mpz_clear (offx);
446 mpz_clear (offy);
449 /* Update the bounds in BNDS that restrict the value of X to the bounds
450 that restrict the value of X + DELTA. X can be obtained as a
451 difference of two values in TYPE. */
453 static void
454 bounds_add (bounds *bnds, double_int delta, tree type)
456 mpz_t mdelta, max;
458 mpz_init (mdelta);
459 mpz_set_double_int (mdelta, delta, false);
461 mpz_init (max);
462 mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
464 mpz_add (bnds->up, bnds->up, mdelta);
465 mpz_add (bnds->below, bnds->below, mdelta);
467 if (mpz_cmp (bnds->up, max) > 0)
468 mpz_set (bnds->up, max);
470 mpz_neg (max, max);
471 if (mpz_cmp (bnds->below, max) < 0)
472 mpz_set (bnds->below, max);
474 mpz_clear (mdelta);
475 mpz_clear (max);
478 /* Update the bounds in BNDS that restrict the value of X to the bounds
479 that restrict the value of -X. */
481 static void
482 bounds_negate (bounds *bnds)
484 mpz_t tmp;
486 mpz_init_set (tmp, bnds->up);
487 mpz_neg (bnds->up, bnds->below);
488 mpz_neg (bnds->below, tmp);
489 mpz_clear (tmp);
492 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
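/* E.g. inverse (3, 15) computes 3**7 = 2187 and masks it to 2187 & 15 = 11,
   and indeed 3 * 11 = 33 == 1 (mod 16).  The loop relies on the fact that
   x**(2**(s-1) - 1) is the inverse of an odd X modulo 2**s.  */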
494 static tree
495 inverse (tree x, tree mask)
497 tree type = TREE_TYPE (x);
498 tree rslt;
499 unsigned ctr = tree_floor_log2 (mask);
501 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
503 unsigned HOST_WIDE_INT ix;
504 unsigned HOST_WIDE_INT imask;
505 unsigned HOST_WIDE_INT irslt = 1;
507 gcc_assert (cst_and_fits_in_hwi (x));
508 gcc_assert (cst_and_fits_in_hwi (mask));
510 ix = int_cst_value (x);
511 imask = int_cst_value (mask);
513 for (; ctr; ctr--)
515 irslt *= ix;
516 ix *= ix;
518 irslt &= imask;
520 rslt = build_int_cst_type (type, irslt);
522 else
524 rslt = build_int_cst (type, 1);
525 for (; ctr; ctr--)
527 rslt = int_const_binop (MULT_EXPR, rslt, x, 0);
528 x = int_const_binop (MULT_EXPR, x, x, 0);
530 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask, 0);
533 return rslt;
536 /* Derives the upper bound BND on the number of executions of loop with exit
537 condition S * i <> C, assuming that the loop is not infinite. If
538 NO_OVERFLOW is true, then the control variable of the loop does not
539 overflow. If NO_OVERFLOW is true or BNDS.below >= 0, then BNDS.up
540 contains the upper bound on the value of C. */
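/* For instance, with S = 4 in a 16-bit unsigned type: if the iv may overflow
   and C is not known to be a multiple of S, the bound is the period
   2**14 - 1 = 16383; if the iv does not overflow and BNDS->up is 100, the
   bound is 100 / 4 = 25.  */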
542 static void
543 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
544 bounds *bnds)
546 double_int max;
547 mpz_t d;
549 /* If the control variable does not overflow, the number of iterations is
550 at most c / s. Otherwise it is at most the period of the control
551 variable. */
552 if (!no_overflow && !multiple_of_p (TREE_TYPE (c), c, s))
554 max = double_int_mask (TYPE_PRECISION (TREE_TYPE (c))
555 - tree_low_cst (num_ending_zeros (s), 1));
556 mpz_set_double_int (bnd, max, true);
557 return;
560 /* Determine the upper bound on C. */
561 if (no_overflow || mpz_sgn (bnds->below) >= 0)
562 mpz_set (bnd, bnds->up);
563 else if (TREE_CODE (c) == INTEGER_CST)
564 mpz_set_double_int (bnd, tree_to_double_int (c), true);
565 else
566 mpz_set_double_int (bnd, double_int_mask (TYPE_PRECISION (TREE_TYPE (c))),
567 true);
569 mpz_init (d);
570 mpz_set_double_int (d, tree_to_double_int (s), true);
571 mpz_fdiv_q (bnd, bnd, d);
572 mpz_clear (d);
575 /* Determines number of iterations of loop whose ending condition
576 is IV <> FINAL. TYPE is the type of the iv. The number of
577 iterations is stored to NITER. NEVER_INFINITE is true if
578 we know that the exit must be taken eventually, i.e., that the IV
579 ever reaches the value FINAL (we derived this earlier, and possibly set
580 NITER->assumptions to make sure this is the case). BNDS contains the
581 bounds on the difference FINAL - IV->base. */
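/* A worked example, assuming an 8-bit unsigned type: for IV = {base 0, +3}
   and FINAL = 30 we get S = 3, C = 30, D = 1 and BOUND = 255; inverse (3, 255)
   is 171 (3 * 171 = 513 == 1 mod 256), and (30 * 171) & 255 = 10, the number
   of iterations needed to step from 0 to 30 by 3.  */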
583 static bool
584 number_of_iterations_ne (tree type, affine_iv *iv, tree final,
585 struct tree_niter_desc *niter, bool never_infinite,
586 bounds *bnds)
588 tree niter_type = unsigned_type_for (type);
589 tree s, c, d, bits, assumption, tmp, bound;
590 mpz_t max;
592 niter->control = *iv;
593 niter->bound = final;
594 niter->cmp = NE_EXPR;
596 /* Rearrange the terms so that we get inequality S * i <> C, with S
597 positive. Also cast everything to the unsigned type. If IV does
598 not overflow, BNDS bounds the value of C. Also, this is the
599 case if the computation |FINAL - IV->base| does not overflow, i.e.,
600 if BNDS->below in the result is nonnegative. */
601 if (tree_int_cst_sign_bit (iv->step))
603 s = fold_convert (niter_type,
604 fold_build1 (NEGATE_EXPR, type, iv->step));
605 c = fold_build2 (MINUS_EXPR, niter_type,
606 fold_convert (niter_type, iv->base),
607 fold_convert (niter_type, final));
608 bounds_negate (bnds);
610 else
612 s = fold_convert (niter_type, iv->step);
613 c = fold_build2 (MINUS_EXPR, niter_type,
614 fold_convert (niter_type, final),
615 fold_convert (niter_type, iv->base));
618 mpz_init (max);
619 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds);
620 niter->max = mpz_get_double_int (niter_type, max, false);
621 mpz_clear (max);
623 /* First the trivial cases -- when the step is 1. */
624 if (integer_onep (s))
626 niter->niter = c;
627 return true;
630 /* Let gcd (step, size of mode) = d. If d does not divide c, the loop
631 is infinite. Otherwise, the number of iterations is
632 (inverse(s/d) * (c/d)) mod (size of mode/d). */
633 bits = num_ending_zeros (s);
634 bound = build_low_bits_mask (niter_type,
635 (TYPE_PRECISION (niter_type)
636 - tree_low_cst (bits, 1)));
638 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
639 build_int_cst (niter_type, 1), bits);
640 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
642 if (!never_infinite)
644 /* If we cannot assume that the loop is not infinite, record the
645 assumptions for divisibility of c. */
646 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
647 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
648 assumption, build_int_cst (niter_type, 0));
649 if (!integer_nonzerop (assumption))
650 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
651 niter->assumptions, assumption);
654 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
655 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
656 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
657 return true;
660 /* Checks whether we can determine the final value of the control variable
661 of the loop with ending condition IV0 < IV1 (computed in TYPE).
662 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
663 of the step. The assumptions necessary to ensure that the computation
664 of the final value does not overflow are recorded in NITER. If we
665 find the final value, we adjust DELTA and return TRUE. Otherwise
666 we return false. BNDS bounds the value of IV1->base - IV0->base,
667 and will be updated by the same amount as DELTA. */
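/* For example, for IV0 = {base 0, +4} and IV1 = {base 10, step 0}: DELTA = 10,
   MOD = 10 mod 4 = 2 is replaced by 4 - 2 = 2, the final value of the control
   variable becomes 10 + 2 = 12 and DELTA is adjusted to 12, so the derived
   != exit gives 12 / 4 = 3 iterations.  */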
669 static bool
670 number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
671 struct tree_niter_desc *niter,
672 tree *delta, tree step,
673 bounds *bnds)
675 tree niter_type = TREE_TYPE (step);
676 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
677 tree tmod;
678 mpz_t mmod;
679 tree assumption = boolean_true_node, bound, noloop;
680 bool ret = false;
681 tree type1 = type;
682 if (POINTER_TYPE_P (type))
683 type1 = sizetype;
685 if (TREE_CODE (mod) != INTEGER_CST)
686 return false;
687 if (integer_nonzerop (mod))
688 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
689 tmod = fold_convert (type1, mod);
691 mpz_init (mmod);
692 mpz_set_double_int (mmod, tree_to_double_int (mod), true);
693 mpz_neg (mmod, mmod);
695 if (integer_nonzerop (iv0->step))
697 /* The final value of the iv is iv1->base + MOD, assuming that this
698 computation does not overflow, and that
699 iv0->base <= iv1->base + MOD. */
700 if (!iv1->no_overflow && !integer_zerop (mod))
702 bound = fold_build2 (MINUS_EXPR, type,
703 TYPE_MAX_VALUE (type1), tmod);
704 assumption = fold_build2 (LE_EXPR, boolean_type_node,
705 iv1->base, bound);
706 if (integer_zerop (assumption))
707 goto end;
709 if (mpz_cmp (mmod, bnds->below) < 0)
710 noloop = boolean_false_node;
711 else
712 noloop = fold_build2 (GT_EXPR, boolean_type_node,
713 iv0->base,
714 fold_build2 (PLUS_EXPR, type1,
715 iv1->base, tmod));
717 else
719 /* The final value of the iv is iv0->base - MOD, assuming that this
720 computation does not overflow, and that
721 iv0->base - MOD <= iv1->base. */
722 if (!iv0->no_overflow && !integer_zerop (mod))
724 bound = fold_build2 (PLUS_EXPR, type1,
725 TYPE_MIN_VALUE (type1), tmod);
726 assumption = fold_build2 (GE_EXPR, boolean_type_node,
727 iv0->base, bound);
728 if (integer_zerop (assumption))
729 goto end;
731 if (mpz_cmp (mmod, bnds->below) < 0)
732 noloop = boolean_false_node;
733 else
734 noloop = fold_build2 (GT_EXPR, boolean_type_node,
735 fold_build2 (MINUS_EXPR, type1,
736 iv0->base, tmod),
737 iv1->base);
740 if (!integer_nonzerop (assumption))
741 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
742 niter->assumptions,
743 assumption);
744 if (!integer_zerop (noloop))
745 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
746 niter->may_be_zero,
747 noloop);
748 bounds_add (bnds, tree_to_double_int (mod), type);
749 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
751 ret = true;
752 end:
753 mpz_clear (mmod);
754 return ret;
757 /* Add assertions to NITER that ensure that the control variable of the loop
758 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
759 are TYPE. Returns false if we can prove that there is an overflow, true
760 otherwise. STEP is the absolute value of the step. */
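/* For instance, in an 8-bit unsigned type with IV0 = {base 2, +5} and
   IV1 = {base n_1, step 0} (n_1 hypothetical): D = 255 - 2 = 253,
   DIFF = 253 mod 5 = 3, BOUND = 255 - 3 = 252, and the recorded assumption is
   n_1 <= 252, i.e. the exit is taken before the control variable would step
   past 252 and wrap.  */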
762 static bool
763 assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
764 struct tree_niter_desc *niter, tree step)
766 tree bound, d, assumption, diff;
767 tree niter_type = TREE_TYPE (step);
769 if (integer_nonzerop (iv0->step))
771 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
772 if (iv0->no_overflow)
773 return true;
775 /* If iv0->base is a constant, we can determine the last value before
776 overflow precisely; otherwise we conservatively assume
777 MAX - STEP + 1. */
779 if (TREE_CODE (iv0->base) == INTEGER_CST)
781 d = fold_build2 (MINUS_EXPR, niter_type,
782 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
783 fold_convert (niter_type, iv0->base));
784 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
786 else
787 diff = fold_build2 (MINUS_EXPR, niter_type, step,
788 build_int_cst (niter_type, 1));
789 bound = fold_build2 (MINUS_EXPR, type,
790 TYPE_MAX_VALUE (type), fold_convert (type, diff));
791 assumption = fold_build2 (LE_EXPR, boolean_type_node,
792 iv1->base, bound);
794 else
796 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
797 if (iv1->no_overflow)
798 return true;
800 if (TREE_CODE (iv1->base) == INTEGER_CST)
802 d = fold_build2 (MINUS_EXPR, niter_type,
803 fold_convert (niter_type, iv1->base),
804 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
805 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
807 else
808 diff = fold_build2 (MINUS_EXPR, niter_type, step,
809 build_int_cst (niter_type, 1));
810 bound = fold_build2 (PLUS_EXPR, type,
811 TYPE_MIN_VALUE (type), fold_convert (type, diff));
812 assumption = fold_build2 (GE_EXPR, boolean_type_node,
813 iv0->base, bound);
816 if (integer_zerop (assumption))
817 return false;
818 if (!integer_nonzerop (assumption))
819 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
820 niter->assumptions, assumption);
822 iv0->no_overflow = true;
823 iv1->no_overflow = true;
824 return true;
827 /* Add an assumption to NITER that a loop whose ending condition
828 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
829 bounds the value of IV1->base - IV0->base. */
831 static void
832 assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
833 struct tree_niter_desc *niter, bounds *bnds)
835 tree assumption = boolean_true_node, bound, diff;
836 tree mbz, mbzl, mbzr, type1;
837 bool rolls_p, no_overflow_p;
838 double_int dstep;
839 mpz_t mstep, max;
841 /* We are going to compute the number of iterations as
842 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
843 variant of TYPE. This formula only works if
845 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
847 (where MAX is the maximum value of the unsigned variant of TYPE, and
848 the computations in this formula are performed in full precision
849 (without overflows)).
851 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
852 we have a condition of form iv0->base - step < iv1->base before the loop,
853 and for loops iv0->base < iv1->base - step * i the condition
854 iv0->base < iv1->base + step, due to loop header copying, which enables us
855 to prove the lower bound.
857 The upper bound is more complicated. Unless the expressions for initial
858 and final value themselves contain enough information, we usually cannot
859 derive it from the context. */
861 /* First check whether the answer does not follow from the bounds we gathered
862 before. */
863 if (integer_nonzerop (iv0->step))
864 dstep = tree_to_double_int (iv0->step);
865 else
867 dstep = double_int_sext (tree_to_double_int (iv1->step),
868 TYPE_PRECISION (type));
869 dstep = double_int_neg (dstep);
872 mpz_init (mstep);
873 mpz_set_double_int (mstep, dstep, true);
874 mpz_neg (mstep, mstep);
875 mpz_add_ui (mstep, mstep, 1);
877 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
879 mpz_init (max);
880 mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
881 mpz_add (max, max, mstep);
882 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
883 /* For pointers, only values lying inside a single object
884 can be compared or manipulated by pointer arithmetic.
885 GCC in general does not allow or handle objects larger
886 than half of the address space, hence the upper bound
887 is satisfied for pointers. */
888 || POINTER_TYPE_P (type));
889 mpz_clear (mstep);
890 mpz_clear (max);
892 if (rolls_p && no_overflow_p)
893 return;
895 type1 = type;
896 if (POINTER_TYPE_P (type))
897 type1 = sizetype;
899 /* Now the hard part; we must formulate the assumption(s) as expressions, and
900 we must be careful not to introduce overflow. */
902 if (integer_nonzerop (iv0->step))
904 diff = fold_build2 (MINUS_EXPR, type1,
905 iv0->step, build_int_cst (type1, 1));
907 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
908 the address 0 never belongs to any object, we can assume this for
909 pointers. */
910 if (!POINTER_TYPE_P (type))
912 bound = fold_build2 (PLUS_EXPR, type1,
913 TYPE_MIN_VALUE (type), diff);
914 assumption = fold_build2 (GE_EXPR, boolean_type_node,
915 iv0->base, bound);
918 /* And then we can compute iv0->base - diff, and compare it with
919 iv1->base. */
920 mbzl = fold_build2 (MINUS_EXPR, type1,
921 fold_convert (type1, iv0->base), diff);
922 mbzr = fold_convert (type1, iv1->base);
924 else
926 diff = fold_build2 (PLUS_EXPR, type1,
927 iv1->step, build_int_cst (type1, 1));
929 if (!POINTER_TYPE_P (type))
931 bound = fold_build2 (PLUS_EXPR, type1,
932 TYPE_MAX_VALUE (type), diff);
933 assumption = fold_build2 (LE_EXPR, boolean_type_node,
934 iv1->base, bound);
937 mbzl = fold_convert (type1, iv0->base);
938 mbzr = fold_build2 (MINUS_EXPR, type1,
939 fold_convert (type1, iv1->base), diff);
942 if (!integer_nonzerop (assumption))
943 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
944 niter->assumptions, assumption);
945 if (!rolls_p)
947 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
948 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
949 niter->may_be_zero, mbz);
953 /* Determines number of iterations of loop whose ending condition
954 is IV0 < IV1. TYPE is the type of the iv. The number of
955 iterations is stored to NITER. BNDS bounds the difference
956 IV1->base - IV0->base. */
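/* In the simplest case, e.g. IV0 = {base 0, +1} and IV1 = {base n_1, step 0}
   (n_1 hypothetical), the step is 1 and NITER->niter is simply n_1 - 0
   computed in the unsigned type, with the may_be_zero condition n_1 < 0
   recorded only if BNDS->below may be negative.  */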
958 static bool
959 number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
960 struct tree_niter_desc *niter,
961 bool never_infinite ATTRIBUTE_UNUSED,
962 bounds *bnds)
964 tree niter_type = unsigned_type_for (type);
965 tree delta, step, s;
966 mpz_t mstep, tmp;
968 if (integer_nonzerop (iv0->step))
970 niter->control = *iv0;
971 niter->cmp = LT_EXPR;
972 niter->bound = iv1->base;
974 else
976 niter->control = *iv1;
977 niter->cmp = GT_EXPR;
978 niter->bound = iv0->base;
981 delta = fold_build2 (MINUS_EXPR, niter_type,
982 fold_convert (niter_type, iv1->base),
983 fold_convert (niter_type, iv0->base));
985 /* First handle the special case that the step is +-1. */
986 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
987 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
989 /* for (i = iv0->base; i < iv1->base; i++)
993 for (i = iv1->base; i > iv0->base; i--).
995 In both cases # of iterations is iv1->base - iv0->base, assuming that
996 iv1->base >= iv0->base.
998 First try to derive a lower bound on the value of
999 iv1->base - iv0->base, computed in full precision. If the difference
1000 is nonnegative, we are done, otherwise we must record the
1001 condition. */
1003 if (mpz_sgn (bnds->below) < 0)
1004 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1005 iv1->base, iv0->base);
1006 niter->niter = delta;
1007 niter->max = mpz_get_double_int (niter_type, bnds->up, false);
1008 return true;
1011 if (integer_nonzerop (iv0->step))
1012 step = fold_convert (niter_type, iv0->step);
1013 else
1014 step = fold_convert (niter_type,
1015 fold_build1 (NEGATE_EXPR, type, iv1->step));
1017 /* If we can determine the final value of the control iv exactly, we can
1018 transform the condition to != comparison. In particular, this will be
1019 the case if DELTA is constant. */
1020 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1021 bnds))
1023 affine_iv zps;
1025 zps.base = build_int_cst (niter_type, 0);
1026 zps.step = step;
1027 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1028 zps does not overflow. */
1029 zps.no_overflow = true;
1031 return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
1034 /* Make sure that the control iv does not overflow. */
1035 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1036 return false;
1038 /* We determine the number of iterations as (delta + step - 1) / step. For
1039 this to work, we must know that iv1->base >= iv0->base - step + 1,
1040 otherwise the loop does not roll. */
1041 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1043 s = fold_build2 (MINUS_EXPR, niter_type,
1044 step, build_int_cst (niter_type, 1));
1045 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1046 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1048 mpz_init (mstep);
1049 mpz_init (tmp);
1050 mpz_set_double_int (mstep, tree_to_double_int (step), true);
1051 mpz_add (tmp, bnds->up, mstep);
1052 mpz_sub_ui (tmp, tmp, 1);
1053 mpz_fdiv_q (tmp, tmp, mstep);
1054 niter->max = mpz_get_double_int (niter_type, tmp, false);
1055 mpz_clear (mstep);
1056 mpz_clear (tmp);
1058 return true;
1061 /* Determines number of iterations of loop whose ending condition
1062 is IV0 <= IV1. TYPE is the type of the iv. The number of
1063 iterations is stored to NITER. NEVER_INFINITE is true if
1064 we know that this condition must eventually become false (we derived this
1065 earlier, and possibly set NITER->assumptions to make sure this
1066 is the case). BNDS bounds the difference IV1->base - IV0->base. */
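/* E.g. an exit condition i <= n_1 with IV0 = {base 0, +1} (n_1 hypothetical)
   is rewritten as i < n_1 + 1; unless NEVER_INFINITE is set, the assumption
   n_1 != TYPE_MAX is recorded first, since for n_1 == TYPE_MAX the loop would
   roll forever.  */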
1068 static bool
1069 number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
1070 struct tree_niter_desc *niter, bool never_infinite,
1071 bounds *bnds)
1073 tree assumption;
1074 tree type1 = type;
1075 if (POINTER_TYPE_P (type))
1076 type1 = sizetype;
1078 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1079 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1080 value of the type. This we must know anyway, since if it is
1081 equal to this value, the loop rolls forever. */
1083 if (!never_infinite)
1085 if (integer_nonzerop (iv0->step))
1086 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1087 iv1->base, TYPE_MAX_VALUE (type1));
1088 else
1089 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1090 iv0->base, TYPE_MIN_VALUE (type1));
1092 if (integer_zerop (assumption))
1093 return false;
1094 if (!integer_nonzerop (assumption))
1095 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1096 niter->assumptions, assumption);
1099 if (integer_nonzerop (iv0->step))
1100 iv1->base = fold_build2 (PLUS_EXPR, type1,
1101 iv1->base, build_int_cst (type1, 1));
1102 else
1103 iv0->base = fold_build2 (MINUS_EXPR, type1,
1104 iv0->base, build_int_cst (type1, 1));
1106 bounds_add (bnds, double_int_one, type1);
1108 return number_of_iterations_lt (type, iv0, iv1, niter, never_infinite, bnds);
1111 /* Dumps description of affine induction variable IV to FILE. */
1113 static void
1114 dump_affine_iv (FILE *file, affine_iv *iv)
1116 if (!integer_zerop (iv->step))
1117 fprintf (file, "[");
1119 print_generic_expr (dump_file, iv->base, TDF_SLIM);
1121 if (!integer_zerop (iv->step))
1123 fprintf (file, ", + , ");
1124 print_generic_expr (dump_file, iv->step, TDF_SLIM);
1125 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1129 /* Determine the number of iterations according to condition (for staying
1130 inside loop) which compares two induction variables using comparison
1131 operator CODE. The induction variable on the left-hand side of the comparison
1132 is IV0, the one on the right-hand side is IV1. Both induction variables must have
1133 type TYPE, which must be an integer or pointer type. The steps of the
1134 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1136 LOOP is the loop whose number of iterations we are determining.
1138 ONLY_EXIT is true if we are sure this is the only way the loop could be
1139 exited (including possibly non-returning function calls, exceptions, etc.)
1140 -- in this case we can use the information whether the control induction
1141 variables can overflow or not in a more efficient way.
1143 The results (number of iterations and assumptions as described in
1144 comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
1145 Returns false if it fails to determine number of iterations, true if it
1146 was determined (possibly with some assumptions). */
1148 static bool
1149 number_of_iterations_cond (struct loop *loop,
1150 tree type, affine_iv *iv0, enum tree_code code,
1151 affine_iv *iv1, struct tree_niter_desc *niter,
1152 bool only_exit)
1154 bool never_infinite, ret;
1155 bounds bnds;
1157 /* The meaning of these assumptions is this:
1158 if !assumptions
1159 then the rest of information does not have to be valid
1160 if may_be_zero then the loop does not roll, even if
1161 niter != 0. */
1162 niter->assumptions = boolean_true_node;
1163 niter->may_be_zero = boolean_false_node;
1164 niter->niter = NULL_TREE;
1165 niter->max = double_int_zero;
1167 niter->bound = NULL_TREE;
1168 niter->cmp = ERROR_MARK;
1170 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1171 the control variable is on lhs. */
1172 if (code == GE_EXPR || code == GT_EXPR
1173 || (code == NE_EXPR && integer_zerop (iv0->step)))
1175 SWAP (iv0, iv1);
1176 code = swap_tree_comparison (code);
1179 if (!only_exit)
1181 /* If this is not the only possible exit from the loop, the information
1182 that the induction variables cannot overflow as derived from
1183 signedness analysis cannot be relied upon. We use them e.g. in the
1184 following way: given loop for (i = 0; i <= n; i++), if i is
1185 signed, it cannot overflow, thus this loop is equivalent to
1186 for (i = 0; i < n + 1; i++); however, if n == MAX, but the loop
1187 is exited in some other way before i overflows, this transformation
1188 is incorrect (the new loop exits immediately). */
1189 iv0->no_overflow = false;
1190 iv1->no_overflow = false;
1193 if (POINTER_TYPE_P (type))
1195 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1196 to the same object. If they do, the control variable cannot wrap
1197 (as wrap around the bounds of memory will never return a pointer
1198 that would be guaranteed to point to the same object, even if we
1199 avoid undefined behavior by casting to size_t and back). The
1200 restrictions on pointer arithmetics and comparisons of pointers
1201 ensure that using the no-overflow assumptions is correct in this
1202 case even if ONLY_EXIT is false. */
1203 iv0->no_overflow = true;
1204 iv1->no_overflow = true;
1207 /* If the control induction variable does not overflow, the loop obviously
1208 cannot be infinite. */
1209 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1210 never_infinite = true;
1211 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1212 never_infinite = true;
1213 else
1214 never_infinite = false;
1216 /* We can handle the case when neither of the sides of the comparison is
1217 invariant, provided that the test is NE_EXPR. This rarely occurs in
1218 practice, but it is simple enough to manage. */
1219 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1221 if (code != NE_EXPR)
1222 return false;
1224 iv0->step = fold_binary_to_constant (MINUS_EXPR, type,
1225 iv0->step, iv1->step);
1226 iv0->no_overflow = false;
1227 iv1->step = build_int_cst (type, 0);
1228 iv1->no_overflow = true;
1231 /* If the result of the comparison is a constant, the loop is weird. More
1232 precise handling would be possible, but the situation is not common enough
1233 to waste time on it. */
1234 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1235 return false;
1237 /* Ignore loops of while (i-- < 10) type. */
1238 if (code != NE_EXPR)
1240 if (iv0->step && tree_int_cst_sign_bit (iv0->step))
1241 return false;
1243 if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
1244 return false;
1247 /* If the loop exits immediately, there is nothing to do. */
1248 if (integer_zerop (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
1250 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1251 niter->max = double_int_zero;
1252 return true;
1255 /* OK, now we know we have a sensible loop. Handle several cases, depending
1256 on what comparison operator is used. */
1257 bound_difference (loop, iv1->base, iv0->base, &bnds);
1259 if (dump_file && (dump_flags & TDF_DETAILS))
1261 fprintf (dump_file,
1262 "Analyzing # of iterations of loop %d\n", loop->num);
1264 fprintf (dump_file, " exit condition ");
1265 dump_affine_iv (dump_file, iv0);
1266 fprintf (dump_file, " %s ",
1267 code == NE_EXPR ? "!="
1268 : code == LT_EXPR ? "<"
1269 : "<=");
1270 dump_affine_iv (dump_file, iv1);
1271 fprintf (dump_file, "\n");
1273 fprintf (dump_file, " bounds on difference of bases: ");
1274 mpz_out_str (dump_file, 10, bnds.below);
1275 fprintf (dump_file, " ... ");
1276 mpz_out_str (dump_file, 10, bnds.up);
1277 fprintf (dump_file, "\n");
1280 switch (code)
1282 case NE_EXPR:
1283 gcc_assert (integer_zerop (iv1->step));
1284 ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
1285 never_infinite, &bnds);
1286 break;
1288 case LT_EXPR:
1289 ret = number_of_iterations_lt (type, iv0, iv1, niter, never_infinite,
1290 &bnds);
1291 break;
1293 case LE_EXPR:
1294 ret = number_of_iterations_le (type, iv0, iv1, niter, never_infinite,
1295 &bnds);
1296 break;
1298 default:
1299 gcc_unreachable ();
1302 mpz_clear (bnds.up);
1303 mpz_clear (bnds.below);
1305 if (dump_file && (dump_flags & TDF_DETAILS))
1307 if (ret)
1309 fprintf (dump_file, " result:\n");
1310 if (!integer_nonzerop (niter->assumptions))
1312 fprintf (dump_file, " under assumptions ");
1313 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1314 fprintf (dump_file, "\n");
1317 if (!integer_zerop (niter->may_be_zero))
1319 fprintf (dump_file, " zero if ");
1320 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1321 fprintf (dump_file, "\n");
1324 fprintf (dump_file, " # of iterations ");
1325 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1326 fprintf (dump_file, ", bounded by ");
1327 dump_double_int (dump_file, niter->max, true);
1328 fprintf (dump_file, "\n");
1330 else
1331 fprintf (dump_file, " failed\n\n");
1333 return ret;
1336 /* Substitute NEW for OLD in EXPR and fold the result. */
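/* E.g. (hypothetical names) simplify_replace_tree (a_1 + b_2, b_2, five),
   with FIVE an INTEGER_CST, returns the folded expression a_1 + 5; the
   original EXPR itself is not modified.  */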
1338 static tree
1339 simplify_replace_tree (tree expr, tree old, tree new_tree)
1341 unsigned i, n;
1342 tree ret = NULL_TREE, e, se;
1344 if (!expr)
1345 return NULL_TREE;
1347 if (expr == old
1348 || operand_equal_p (expr, old, 0))
1349 return unshare_expr (new_tree);
1351 if (!EXPR_P (expr) && !GIMPLE_STMT_P (expr))
1352 return expr;
1354 n = TREE_OPERAND_LENGTH (expr);
1355 for (i = 0; i < n; i++)
1357 e = TREE_OPERAND (expr, i);
1358 se = simplify_replace_tree (e, old, new_tree);
1359 if (e == se)
1360 continue;
1362 if (!ret)
1363 ret = copy_node (expr);
1365 TREE_OPERAND (ret, i) = se;
1368 return (ret ? fold (ret) : expr);
1371 /* Expand definitions of ssa names in EXPR as long as they are simple
1372 enough, and return the new expression. */
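/* For instance (hypothetical names), given the definitions a_2 = b_1 + 4 and
   c_3 = (int) a_2, expand_simple_operations (c_3) returns roughly
   (int) (b_1 + 4), so later simplifications can see through the copy chain.  */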
1374 tree
1375 expand_simple_operations (tree expr)
1377 unsigned i, n;
1378 tree ret = NULL_TREE, e, ee, stmt;
1379 enum tree_code code;
1381 if (expr == NULL_TREE)
1382 return expr;
1384 if (is_gimple_min_invariant (expr))
1385 return expr;
1387 code = TREE_CODE (expr);
1388 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1390 n = TREE_OPERAND_LENGTH (expr);
1391 for (i = 0; i < n; i++)
1393 e = TREE_OPERAND (expr, i);
1394 ee = expand_simple_operations (e);
1395 if (e == ee)
1396 continue;
1398 if (!ret)
1399 ret = copy_node (expr);
1401 TREE_OPERAND (ret, i) = ee;
1404 if (!ret)
1405 return expr;
1407 fold_defer_overflow_warnings ();
1408 ret = fold (ret);
1409 fold_undefer_and_ignore_overflow_warnings ();
1410 return ret;
1413 if (TREE_CODE (expr) != SSA_NAME)
1414 return expr;
1416 stmt = SSA_NAME_DEF_STMT (expr);
1417 if (TREE_CODE (stmt) == PHI_NODE)
1419 basic_block src, dest;
1421 if (PHI_NUM_ARGS (stmt) != 1)
1422 return expr;
1423 e = PHI_ARG_DEF (stmt, 0);
1425 /* Avoid propagating through loop exit phi nodes, which
1426 could break loop-closed SSA form restrictions. */
1427 dest = bb_for_stmt (stmt);
1428 src = single_pred (dest);
1429 if (TREE_CODE (e) == SSA_NAME
1430 && src->loop_father != dest->loop_father)
1431 return expr;
1433 return expand_simple_operations (e);
1435 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
1436 return expr;
1438 e = GIMPLE_STMT_OPERAND (stmt, 1);
1439 if (/* Casts are simple. */
1440 !CONVERT_EXPR_P (e)
1441 /* Copies are simple. */
1442 && TREE_CODE (e) != SSA_NAME
1443 /* Assignments of invariants are simple. */
1444 && !is_gimple_min_invariant (e)
1445 /* And increments and decrements by a constant are simple. */
1446 && !((TREE_CODE (e) == PLUS_EXPR
1447 || TREE_CODE (e) == MINUS_EXPR
1448 || TREE_CODE (e) == POINTER_PLUS_EXPR)
1449 && is_gimple_min_invariant (TREE_OPERAND (e, 1))))
1450 return expr;
1452 return expand_simple_operations (e);
1455 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1456 expression (or EXPR unchanged, if no simplification was possible). */
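/* For example, with COND = (n_1 == 10) and EXPR = (n_1 < 20) (hypothetical
   names), substituting 10 for n_1 folds EXPR to true, which is returned.  */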
1458 static tree
1459 tree_simplify_using_condition_1 (tree cond, tree expr)
1461 bool changed;
1462 tree e, te, e0, e1, e2, notcond;
1463 enum tree_code code = TREE_CODE (expr);
1465 if (code == INTEGER_CST)
1466 return expr;
1468 if (code == TRUTH_OR_EXPR
1469 || code == TRUTH_AND_EXPR
1470 || code == COND_EXPR)
1472 changed = false;
1474 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
1475 if (TREE_OPERAND (expr, 0) != e0)
1476 changed = true;
1478 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
1479 if (TREE_OPERAND (expr, 1) != e1)
1480 changed = true;
1482 if (code == COND_EXPR)
1484 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
1485 if (TREE_OPERAND (expr, 2) != e2)
1486 changed = true;
1488 else
1489 e2 = NULL_TREE;
1491 if (changed)
1493 if (code == COND_EXPR)
1494 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1495 else
1496 expr = fold_build2 (code, boolean_type_node, e0, e1);
1499 return expr;
1502 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
1503 propagation, and vice versa. Fold does not handle this, since it is
1504 considered too expensive. */
1505 if (TREE_CODE (cond) == EQ_EXPR)
1507 e0 = TREE_OPERAND (cond, 0);
1508 e1 = TREE_OPERAND (cond, 1);
1510 /* We know that e0 == e1. Check whether we cannot simplify expr
1511 using this fact. */
1512 e = simplify_replace_tree (expr, e0, e1);
1513 if (integer_zerop (e) || integer_nonzerop (e))
1514 return e;
1516 e = simplify_replace_tree (expr, e1, e0);
1517 if (integer_zerop (e) || integer_nonzerop (e))
1518 return e;
1520 if (TREE_CODE (expr) == EQ_EXPR)
1522 e0 = TREE_OPERAND (expr, 0);
1523 e1 = TREE_OPERAND (expr, 1);
1525 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
1526 e = simplify_replace_tree (cond, e0, e1);
1527 if (integer_zerop (e))
1528 return e;
1529 e = simplify_replace_tree (cond, e1, e0);
1530 if (integer_zerop (e))
1531 return e;
1533 if (TREE_CODE (expr) == NE_EXPR)
1535 e0 = TREE_OPERAND (expr, 0);
1536 e1 = TREE_OPERAND (expr, 1);
1538 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
1539 e = simplify_replace_tree (cond, e0, e1);
1540 if (integer_zerop (e))
1541 return boolean_true_node;
1542 e = simplify_replace_tree (cond, e1, e0);
1543 if (integer_zerop (e))
1544 return boolean_true_node;
1547 te = expand_simple_operations (expr);
1549 /* Check whether COND ==> EXPR. */
1550 notcond = invert_truthvalue (cond);
1551 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
1552 if (e && integer_nonzerop (e))
1553 return e;
1555 /* Check whether COND ==> not EXPR. */
1556 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
1557 if (e && integer_zerop (e))
1558 return e;
1560 return expr;
1563 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1564 expression (or EXPR unchanged, if no simplification was possible).
1565 Wrapper around tree_simplify_using_condition_1 that ensures that chains
1566 of simple operations in definitions of ssa names in COND are expanded,
1567 so that things like casts or incrementing the value of the bound before
1568 the loop do not cause us to fail. */
1570 static tree
1571 tree_simplify_using_condition (tree cond, tree expr)
1573 cond = expand_simple_operations (cond);
1575 return tree_simplify_using_condition_1 (cond, expr);
1578 /* Tries to simplify EXPR using the conditions on entry to LOOP.
1579 Returns the simplified expression (or EXPR unchanged, if no
1580 simplification was possible).*/
1582 static tree
1583 simplify_using_initial_conditions (struct loop *loop, tree expr)
1585 edge e;
1586 basic_block bb;
1587 tree cond;
1588 int cnt = 0;
1590 if (TREE_CODE (expr) == INTEGER_CST)
1591 return expr;
1593 /* Limit walking the dominators to avoid quadratic behavior in
1594 the number of BBs times the number of loops in degenerate
1595 cases. */
1596 for (bb = loop->header;
1597 bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
1598 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
1600 if (!single_pred_p (bb))
1601 continue;
1602 e = single_pred_edge (bb);
1604 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
1605 continue;
1607 cond = COND_EXPR_COND (last_stmt (e->src));
1608 if (e->flags & EDGE_FALSE_VALUE)
1609 cond = invert_truthvalue (cond);
1610 expr = tree_simplify_using_condition (cond, expr);
1611 ++cnt;
1614 return expr;
1617 /* Tries to simplify EXPR using the evolutions of the loop invariants
1618 in the superloops of LOOP. Returns the simplified expression
1619 (or EXPR unchanged, if no simplification was possible). */
1621 static tree
1622 simplify_using_outer_evolutions (struct loop *loop, tree expr)
1624 enum tree_code code = TREE_CODE (expr);
1625 bool changed;
1626 tree e, e0, e1, e2;
1628 if (is_gimple_min_invariant (expr))
1629 return expr;
1631 if (code == TRUTH_OR_EXPR
1632 || code == TRUTH_AND_EXPR
1633 || code == COND_EXPR)
1635 changed = false;
1637 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
1638 if (TREE_OPERAND (expr, 0) != e0)
1639 changed = true;
1641 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
1642 if (TREE_OPERAND (expr, 1) != e1)
1643 changed = true;
1645 if (code == COND_EXPR)
1647 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
1648 if (TREE_OPERAND (expr, 2) != e2)
1649 changed = true;
1651 else
1652 e2 = NULL_TREE;
1654 if (changed)
1656 if (code == COND_EXPR)
1657 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1658 else
1659 expr = fold_build2 (code, boolean_type_node, e0, e1);
1662 return expr;
1665 e = instantiate_parameters (loop, expr);
1666 if (is_gimple_min_invariant (e))
1667 return e;
1669 return expr;
1672 /* Returns true if EXIT is the only possible exit from LOOP. */
1674 static bool
1675 loop_only_exit_p (const struct loop *loop, const_edge exit)
1677 basic_block *body;
1678 block_stmt_iterator bsi;
1679 unsigned i;
1680 tree call;
1682 if (exit != single_exit (loop))
1683 return false;
1685 body = get_loop_body (loop);
1686 for (i = 0; i < loop->num_nodes; i++)
1688 for (bsi = bsi_start (body[i]); !bsi_end_p (bsi); bsi_next (&bsi))
1690 call = get_call_expr_in (bsi_stmt (bsi));
1691 if (call && TREE_SIDE_EFFECTS (call))
1693 free (body);
1694 return false;
1699 free (body);
1700 return true;
1703 /* Stores description of number of iterations of LOOP derived from
1704 EXIT (an exit edge of the LOOP) in NITER. Returns true if some
1705 useful information could be derived (and the fields of NITER have the
1706 meaning described in the comments at the struct tree_niter_desc
1707 declaration), false otherwise. If WARN is true and
1708 -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
1709 potentially unsafe assumptions. */
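/* Typical use (cf. find_loop_niter below):

     struct tree_niter_desc desc;
     if (number_of_iterations_exit (loop, exit, &desc, false)
         && integer_zerop (desc.may_be_zero))
       ... use desc.niter, valid under desc.assumptions ...  */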
1711 bool
1712 number_of_iterations_exit (struct loop *loop, edge exit,
1713 struct tree_niter_desc *niter,
1714 bool warn)
1716 tree stmt, cond, type;
1717 tree op0, op1;
1718 enum tree_code code;
1719 affine_iv iv0, iv1;
1721 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
1722 return false;
1724 niter->assumptions = boolean_false_node;
1725 stmt = last_stmt (exit->src);
1726 if (!stmt || TREE_CODE (stmt) != COND_EXPR)
1727 return false;
1729 /* We want the condition for staying inside loop. */
1730 cond = COND_EXPR_COND (stmt);
1731 if (exit->flags & EDGE_TRUE_VALUE)
1732 cond = invert_truthvalue (cond);
1734 code = TREE_CODE (cond);
1735 switch (code)
1737 case GT_EXPR:
1738 case GE_EXPR:
1739 case NE_EXPR:
1740 case LT_EXPR:
1741 case LE_EXPR:
1742 break;
1744 default:
1745 return false;
1748 op0 = TREE_OPERAND (cond, 0);
1749 op1 = TREE_OPERAND (cond, 1);
1750 type = TREE_TYPE (op0);
1752 if (TREE_CODE (type) != INTEGER_TYPE
1753 && !POINTER_TYPE_P (type))
1754 return false;
1756 if (!simple_iv (loop, stmt, op0, &iv0, false))
1757 return false;
1758 if (!simple_iv (loop, stmt, op1, &iv1, false))
1759 return false;
1761 /* We don't want to see undefined signed overflow warnings while
1762 computing the number of iterations. */
1763 fold_defer_overflow_warnings ();
1765 iv0.base = expand_simple_operations (iv0.base);
1766 iv1.base = expand_simple_operations (iv1.base);
1767 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
1768 loop_only_exit_p (loop, exit)))
1770 fold_undefer_and_ignore_overflow_warnings ();
1771 return false;
1774 if (optimize >= 3)
1776 niter->assumptions = simplify_using_outer_evolutions (loop,
1777 niter->assumptions);
1778 niter->may_be_zero = simplify_using_outer_evolutions (loop,
1779 niter->may_be_zero);
1780 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
1783 niter->assumptions
1784 = simplify_using_initial_conditions (loop,
1785 niter->assumptions);
1786 niter->may_be_zero
1787 = simplify_using_initial_conditions (loop,
1788 niter->may_be_zero);
1790 fold_undefer_and_ignore_overflow_warnings ();
1792 if (integer_onep (niter->assumptions))
1793 return true;
1795 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
1796 But if we can prove that there is overflow or some other source of weird
1797 behavior, ignore the loop even with -funsafe-loop-optimizations. */
1798 if (integer_zerop (niter->assumptions))
1799 return false;
1801 if (flag_unsafe_loop_optimizations)
1802 niter->assumptions = boolean_true_node;
1804 if (warn)
1806 const char *wording;
1807 location_t loc = EXPR_LOCATION (stmt);
1809 /* We can provide a more specific warning if one of the operands is
1810 constant and the other advances by +1 or -1. */
1811 if (!integer_zerop (iv1.step)
1812 ? (integer_zerop (iv0.step)
1813 && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
1814 : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
1815 wording =
1816 flag_unsafe_loop_optimizations
1817 ? N_("assuming that the loop is not infinite")
1818 : N_("cannot optimize possibly infinite loops");
1819 else
1820 wording =
1821 flag_unsafe_loop_optimizations
1822 ? N_("assuming that the loop counter does not overflow")
1823 : N_("cannot optimize loop, the loop counter may overflow");
1825 if (LOCATION_LINE (loc) > 0)
1826 warning (OPT_Wunsafe_loop_optimizations, "%H%s", &loc, gettext (wording));
1827 else
1828 warning (OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
1831 return flag_unsafe_loop_optimizations;
1834 /* Try to determine the number of iterations of LOOP. If we succeed,
1835 expression giving number of iterations is returned and *EXIT is
1836 set to the edge from that the information is obtained. Otherwise
1837 chrec_dont_know is returned. */
1839 tree
1840 find_loop_niter (struct loop *loop, edge *exit)
1842 unsigned i;
1843 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1844 edge ex;
1845 tree niter = NULL_TREE, aniter;
1846 struct tree_niter_desc desc;
1848 *exit = NULL;
1849 for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
1851 if (!just_once_each_iteration_p (loop, ex->src))
1852 continue;
1854 if (!number_of_iterations_exit (loop, ex, &desc, false))
1855 continue;
1857 if (integer_nonzerop (desc.may_be_zero))
1859 /* We exit in the first iteration through this exit.
1860 We won't find anything better. */
1861 niter = build_int_cst (unsigned_type_node, 0);
1862 *exit = ex;
1863 break;
1866 if (!integer_zerop (desc.may_be_zero))
1867 continue;
1869 aniter = desc.niter;
1871 if (!niter)
1873 /* Nothing recorded yet. */
1874 niter = aniter;
1875 *exit = ex;
1876 continue;
1879 /* Prefer constants, the lower the better. */
1880 if (TREE_CODE (aniter) != INTEGER_CST)
1881 continue;
1883 if (TREE_CODE (niter) != INTEGER_CST)
1885 niter = aniter;
1886 *exit = ex;
1887 continue;
1890 if (tree_int_cst_lt (aniter, niter))
1892 niter = aniter;
1893 *exit = ex;
1894 continue;
1897 VEC_free (edge, heap, exits);
1899 return niter ? niter : chrec_dont_know;
1904 /* Analysis of the number of iterations of a loop by a brute-force evaluation.  */
1908 /* Bound on the number of iterations we try to evaluate. */
1910 #define MAX_ITERATIONS_TO_TRACK \
1911 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
1913 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
1914 result by a chain of operations such that all but exactly one of their
1915 operands are constants. */
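/* E.g. for the hypothetical chain i_1 = PHI <0 (preheader), i_2 (latch)>,
   i_2 = i_1 + 3, calling chain_of_csts_start (loop, i_2) walks the single
   SSA use back to the header phi and returns it.  */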
1917 static tree
1918 chain_of_csts_start (struct loop *loop, tree x)
1920 tree stmt = SSA_NAME_DEF_STMT (x);
1921 tree use;
1922 basic_block bb = bb_for_stmt (stmt);
1924 if (!bb
1925 || !flow_bb_inside_loop_p (loop, bb))
1926 return NULL_TREE;
1928 if (TREE_CODE (stmt) == PHI_NODE)
1930 if (bb == loop->header)
1931 return stmt;
1933 return NULL_TREE;
1936 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
1937 return NULL_TREE;
1939 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
1940 return NULL_TREE;
1941 if (SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF) == NULL_DEF_OPERAND_P)
1942 return NULL_TREE;
1944 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1945 if (use == NULL_USE_OPERAND_P)
1946 return NULL_TREE;
1948 return chain_of_csts_start (loop, use);
1951 /* Determines whether the expression X is derived from a result of a phi node
1952 in header of LOOP such that
1954 * the derivation of X consists only of operations with constants
1955 * the initial value of the phi node is constant
1956 * the value of the phi node in the next iteration can be derived from the
1957 value in the current iteration by a chain of operations with constants.
1959 If such phi node exists, it is returned. If X is a constant, X is returned
1960 unchanged. Otherwise NULL_TREE is returned. */
1962 static tree
1963 get_base_for (struct loop *loop, tree x)
1965 tree phi, init, next;
1967 if (is_gimple_min_invariant (x))
1968 return x;
1970 phi = chain_of_csts_start (loop, x);
1971 if (!phi)
1972 return NULL_TREE;
1974 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
1975 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
1977 if (TREE_CODE (next) != SSA_NAME)
1978 return NULL_TREE;
1980 if (!is_gimple_min_invariant (init))
1981 return NULL_TREE;
1983 if (chain_of_csts_start (loop, next) != phi)
1984 return NULL_TREE;
1986 return phi;
1989 /* Given an expression X, then
1991 * if X is NULL_TREE, we return the constant BASE.
1992 * otherwise X is an SSA name, whose value in the considered loop is derived
1993 by a chain of operations with constants from the result of a phi node in
1994 the header of the loop. We then return the value of X for the case where
1995 the result of this phi node is given by the constant BASE. */
1997 static tree
1998 get_val_for (tree x, tree base)
2000 tree stmt, nx, val;
2001 use_operand_p op;
2002 ssa_op_iter iter;
2004 gcc_assert (is_gimple_min_invariant (base));
2006 if (!x)
2007 return base;
2009 stmt = SSA_NAME_DEF_STMT (x);
2010 if (TREE_CODE (stmt) == PHI_NODE)
2011 return base;
2013 FOR_EACH_SSA_USE_OPERAND (op, stmt, iter, SSA_OP_USE)
2015 nx = USE_FROM_PTR (op);
2016 val = get_val_for (nx, base);
2017 SET_USE (op, val);
2018 val = fold (GIMPLE_STMT_OPERAND (stmt, 1));
2019 SET_USE (op, nx);
2020 /* Only iterate the loop once -- the statement has a single use operand. */
2021 return val;
2024 /* Should never reach here. */
2025 gcc_unreachable ();
2028 /* Tries to count the number of iterations of LOOP until it exits through EXIT
2029 by brute force -- i.e. by determining the values of the operands of the
2030 condition at EXIT in the first few iterations of the loop (assuming that
2031 these values are constant) and finding the first iteration in which the
2032 condition is not satisfied. Returns the constant giving the number
2033 of iterations of LOOP if successful, chrec_dont_know otherwise. */
2035 tree
2036 loop_niter_by_eval (struct loop *loop, edge exit)
2038 tree cond, cnd, acnd;
2039 tree op[2], val[2], next[2], aval[2], phi[2];
2040 unsigned i, j;
2041 enum tree_code cmp;
2043 cond = last_stmt (exit->src);
2044 if (!cond || TREE_CODE (cond) != COND_EXPR)
2045 return chrec_dont_know;
2047 cnd = COND_EXPR_COND (cond);
2048 if (exit->flags & EDGE_TRUE_VALUE)
2049 cnd = invert_truthvalue (cnd);
2051 cmp = TREE_CODE (cnd);
2052 switch (cmp)
2054 case EQ_EXPR:
2055 case NE_EXPR:
2056 case GT_EXPR:
2057 case GE_EXPR:
2058 case LT_EXPR:
2059 case LE_EXPR:
2060 for (j = 0; j < 2; j++)
2061 op[j] = TREE_OPERAND (cnd, j);
2062 break;
2064 default:
2065 return chrec_dont_know;
2068 for (j = 0; j < 2; j++)
2070 phi[j] = get_base_for (loop, op[j]);
2071 if (!phi[j])
2072 return chrec_dont_know;
2075 for (j = 0; j < 2; j++)
2077 if (TREE_CODE (phi[j]) == PHI_NODE)
2079 val[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_preheader_edge (loop));
2080 next[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_latch_edge (loop));
2082 else
2084 val[j] = phi[j];
2085 next[j] = NULL_TREE;
2086 op[j] = NULL_TREE;
2090 /* Don't issue signed overflow warnings. */
2091 fold_defer_overflow_warnings ();
2093 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2095 for (j = 0; j < 2; j++)
2096 aval[j] = get_val_for (op[j], val[j]);
2098 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2099 if (acnd && integer_zerop (acnd))
2101 fold_undefer_and_ignore_overflow_warnings ();
2102 if (dump_file && (dump_flags & TDF_DETAILS))
2103 fprintf (dump_file,
2104 "Proved that loop %d iterates %d times using brute force.\n",
2105 loop->num, i);
2106 return build_int_cst (unsigned_type_node, i);
2109 for (j = 0; j < 2; j++)
2111 val[j] = get_val_for (next[j], val[j]);
2112 if (!is_gimple_min_invariant (val[j]))
2114 fold_undefer_and_ignore_overflow_warnings ();
2115 return chrec_dont_know;
2120 fold_undefer_and_ignore_overflow_warnings ();
2122 return chrec_dont_know;
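/* Illustrative sketch (not part of the original file): the brute-force idea
   implemented by loop_niter_by_eval above, restated with plain integers
   instead of GIMPLE trees.  The evolution of the exit condition's operand is
   modeled by a "next value" callback; the iteration count is found by
   evaluating the exit condition for at most MAX_TRIES iterations.  All
   identifiers below are hypothetical.  */
#if 0
#include <stdbool.h>

#define MAX_TRIES 1000  /* analogue of MAX_ITERATIONS_TO_TRACK */

/* Returns the number of iterations executed before COND (VAL) first becomes
   false, or -1 if no exit is found within MAX_TRIES iterations.  */
static int
count_iterations_by_eval (int val, int (*next) (int), bool (*cond) (int))
{
  int i;

  for (i = 0; i < MAX_TRIES; i++)
    {
      if (!cond (val))    /* Exit condition fails -> the loop exits here.  */
        return i;
      val = next (val);   /* Value of the operand in the next iteration.  */
    }
  return -1;              /* Could not determine the iteration count.  */
}
#endif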
2125 /* Finds the exit of LOOP through which the loop exits after a constant
2126 number of iterations and stores the exit edge in *EXIT. The constant
2127 giving the number of iterations of LOOP is returned. The number of
2128 iterations is determined using loop_niter_by_eval (i.e. by brute-force
2129 evaluation). If we are unable to find an exit for which loop_niter_by_eval
2130 determines the number of iterations, chrec_dont_know is returned. */
2132 tree
2133 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2135 unsigned i;
2136 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
2137 edge ex;
2138 tree niter = NULL_TREE, aniter;
2140 *exit = NULL;
2141 for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
2143 if (!just_once_each_iteration_p (loop, ex->src))
2144 continue;
2146 aniter = loop_niter_by_eval (loop, ex);
2147 if (chrec_contains_undetermined (aniter))
2148 continue;
2150 if (niter
2151 && !tree_int_cst_lt (aniter, niter))
2152 continue;
2154 niter = aniter;
2155 *exit = ex;
2157 VEC_free (edge, heap, exits);
2159 return niter ? niter : chrec_dont_know;
2164 Analysis of upper bounds on the number of iterations of a loop.
2168 /* Returns a constant upper bound on the value of expression VAL. VAL
2169 is considered to be unsigned. If its type is signed, its value must
2170 be nonnegative. */
2172 static double_int
2173 derive_constant_upper_bound (const_tree val)
2175 tree type = TREE_TYPE (val);
2176 tree op0, op1, subtype, maxt;
2177 double_int bnd, max, mmax, cst;
2178 tree stmt;
2180 if (INTEGRAL_TYPE_P (type))
2181 maxt = TYPE_MAX_VALUE (type);
2182 else
2183 maxt = upper_bound_in_type (type, type);
2185 max = tree_to_double_int (maxt);
2187 switch (TREE_CODE (val))
2189 case INTEGER_CST:
2190 return tree_to_double_int (val);
2192 CASE_CONVERT:
2193 op0 = TREE_OPERAND (val, 0);
2194 subtype = TREE_TYPE (op0);
2195 if (!TYPE_UNSIGNED (subtype)
2196 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2197 that OP0 is nonnegative. */
2198 && TYPE_UNSIGNED (type)
2199 && !tree_expr_nonnegative_p (op0))
2201 /* If we cannot prove that the casted expression is nonnegative,
2202 we cannot establish more useful upper bound than the precision
2203 of the type gives us. */
2204 return max;
2207 /* We now know that op0 is a nonnegative value. Try deriving an upper
2208 bound for it. */
2209 bnd = derive_constant_upper_bound (op0);
2211 /* If the bound does not fit in TYPE, the maximum value of TYPE could be
2212 attained. */
2213 if (double_int_ucmp (max, bnd) < 0)
2214 return max;
2216 return bnd;
2218 case PLUS_EXPR:
2219 case POINTER_PLUS_EXPR:
2220 case MINUS_EXPR:
2221 op0 = TREE_OPERAND (val, 0);
2222 op1 = TREE_OPERAND (val, 1);
2224 if (TREE_CODE (op1) != INTEGER_CST
2225 || !tree_expr_nonnegative_p (op0))
2226 return max;
2228 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
2229 choose the most logical way to treat this constant regardless
2230 of the signedness of the type. */
2231 cst = tree_to_double_int (op1);
2232 cst = double_int_sext (cst, TYPE_PRECISION (type));
2233 if (TREE_CODE (val) == PLUS_EXPR)
2234 cst = double_int_neg (cst);
2236 bnd = derive_constant_upper_bound (op0);
2238 if (double_int_negative_p (cst))
2240 cst = double_int_neg (cst);
2241 /* Avoid CST == 0x80000... */
2242 if (double_int_negative_p (cst))
2243 return max;
2245 /* OP0 + CST. We need to check that
2246 BND <= MAX (type) - CST. */
2248 mmax = double_int_add (max, double_int_neg (cst));
2249 if (double_int_ucmp (bnd, mmax) > 0)
2250 return max;
2252 return double_int_add (bnd, cst);
2254 else
2256 /* OP0 - CST, where CST >= 0.
2258 If TYPE is signed, we have already verified that OP0 >= 0, and we
2259 know that the result is nonnegative. This implies that
2260 VAL <= BND - CST.
2262 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2263 otherwise the operation underflows.
2266 /* This should only happen if the type is unsigned; however, for
2267 buggy programs that use overflowing signed arithmetic even with
2268 -fno-wrapv, this condition may also be true for signed values. */
2269 if (double_int_ucmp (bnd, cst) < 0)
2270 return max;
2272 if (TYPE_UNSIGNED (type))
2274 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2275 double_int_to_tree (type, cst));
2276 if (!tem || integer_nonzerop (tem))
2277 return max;
2280 bnd = double_int_add (bnd, double_int_neg (cst));
2283 return bnd;
2285 case FLOOR_DIV_EXPR:
2286 case EXACT_DIV_EXPR:
2287 op0 = TREE_OPERAND (val, 0);
2288 op1 = TREE_OPERAND (val, 1);
2289 if (TREE_CODE (op1) != INTEGER_CST
2290 || tree_int_cst_sign_bit (op1))
2291 return max;
2293 bnd = derive_constant_upper_bound (op0);
2294 return double_int_udiv (bnd, tree_to_double_int (op1), FLOOR_DIV_EXPR);
2296 case BIT_AND_EXPR:
2297 op1 = TREE_OPERAND (val, 1);
2298 if (TREE_CODE (op1) != INTEGER_CST
2299 || tree_int_cst_sign_bit (op1))
2300 return max;
2301 return tree_to_double_int (op1);
2303 case SSA_NAME:
2304 stmt = SSA_NAME_DEF_STMT (val);
2305 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
2306 || GIMPLE_STMT_OPERAND (stmt, 0) != val)
2307 return max;
2308 return derive_constant_upper_bound (GIMPLE_STMT_OPERAND (stmt, 1));
2310 default:
2311 return max;
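/* Worked example (not part of the original file) for the PLUS_EXPR /
   MINUS_EXPR case above, using plain unsigned arithmetic: with an 8-bit
   unsigned type (maximum 255) and VAL = OP0 + 250, the bound on OP0 must be
   at most 255 - 250 = 5, otherwise the addition may wrap and only the type
   maximum can be returned.  All names below are hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned int max = 255;  /* maximum value of the 8-bit type */
  unsigned int cst = 250;  /* constant added to OP0 */
  unsigned int bnd = 10;   /* derived upper bound on OP0 */

  if (bnd > max - cst)
    printf ("OP0 + %u may wrap; fall back to the bound %u\n", cst, max);
  else
    printf ("upper bound on OP0 + %u is %u\n", cst, bnd + cst);
  return 0;
}
#endif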
2315 /* Records that every statement in LOOP is executed I_BOUND times.
2316 REALISTIC is true if I_BOUND is expected to be close to the real number
2317 of iterations. UPPER is true if we are sure the loop iterates at most
2318 I_BOUND times. */
2320 static void
2321 record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
2322 bool upper)
2324 /* Update the bounds only when there is no previous estimation, or when the current
2325 estimation is smaller. */
2326 if (upper
2327 && (!loop->any_upper_bound
2328 || double_int_ucmp (i_bound, loop->nb_iterations_upper_bound) < 0))
2330 loop->any_upper_bound = true;
2331 loop->nb_iterations_upper_bound = i_bound;
2333 if (realistic
2334 && (!loop->any_estimate
2335 || double_int_ucmp (i_bound, loop->nb_iterations_estimate) < 0))
2337 loop->any_estimate = true;
2338 loop->nb_iterations_estimate = i_bound;
2342 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
2343 is true if the loop is exited immediately after STMT, and this exit
2344 is taken at the latest when STMT has been executed BOUND + 1 times.
2345 REALISTIC is true if BOUND is expected to be close to the real number
2346 of iterations. UPPER is true if we are sure the loop iterates at most
2347 BOUND times. I_BOUND is an unsigned double_int upper estimate of BOUND. */
2349 static void
2350 record_estimate (struct loop *loop, tree bound, double_int i_bound,
2351 tree at_stmt, bool is_exit, bool realistic, bool upper)
2353 double_int delta;
2354 edge exit;
2356 if (dump_file && (dump_flags & TDF_DETAILS))
2358 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2359 print_generic_expr (dump_file, at_stmt, TDF_SLIM);
2360 fprintf (dump_file, " is %sexecuted at most ",
2361 upper ? "" : "probably ");
2362 print_generic_expr (dump_file, bound, TDF_SLIM);
2363 fprintf (dump_file, " (bounded by ");
2364 dump_double_int (dump_file, i_bound, true);
2365 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2368 /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
2369 real number of iterations. */
2370 if (TREE_CODE (bound) != INTEGER_CST)
2371 realistic = false;
2372 if (!upper && !realistic)
2373 return;
2375 /* If we have a guaranteed upper bound, record it in the appropriate
2376 list. */
2377 if (upper)
2379 struct nb_iter_bound *elt = GGC_NEW (struct nb_iter_bound);
2381 elt->bound = i_bound;
2382 elt->stmt = at_stmt;
2383 elt->is_exit = is_exit;
2384 elt->next = loop->bounds;
2385 loop->bounds = elt;
2388 /* Update the number of iteration estimates according to the bound.
2389 If at_stmt is an exit, then every statement in the loop is
2390 executed at most BOUND + 1 times. If it is not an exit, then
2391 some of the statements before it could be executed BOUND + 2
2392 times, if an exit of LOOP is before stmt. */
2393 exit = single_exit (loop);
2394 if (is_exit
2395 || (exit != NULL
2396 && dominated_by_p (CDI_DOMINATORS,
2397 exit->src, bb_for_stmt (at_stmt))))
2398 delta = double_int_one;
2399 else
2400 delta = double_int_two;
2401 i_bound = double_int_add (i_bound, delta);
2403 /* If an overflow occurred, ignore the result. */
2404 if (double_int_ucmp (i_bound, delta) < 0)
2405 return;
2407 record_niter_bound (loop, i_bound, realistic, upper);
2410 /* Record the estimate on number of iterations of LOOP based on the fact that
2411 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
2412 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
2413 estimated number of iterations is expected to be close to the real one.
2414 UPPER is true if we are sure the induction variable does not wrap. */
2416 static void
2417 record_nonwrapping_iv (struct loop *loop, tree base, tree step, tree stmt,
2418 tree low, tree high, bool realistic, bool upper)
2420 tree niter_bound, extreme, delta;
2421 tree type = TREE_TYPE (base), unsigned_type;
2422 double_int max;
2424 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
2425 return;
2427 if (dump_file && (dump_flags & TDF_DETAILS))
2429 fprintf (dump_file, "Induction variable (");
2430 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
2431 fprintf (dump_file, ") ");
2432 print_generic_expr (dump_file, base, TDF_SLIM);
2433 fprintf (dump_file, " + ");
2434 print_generic_expr (dump_file, step, TDF_SLIM);
2435 fprintf (dump_file, " * iteration does not wrap in statement ");
2436 print_generic_expr (dump_file, stmt, TDF_SLIM);
2437 fprintf (dump_file, " in loop %d.\n", loop->num);
2440 unsigned_type = unsigned_type_for (type);
2441 base = fold_convert (unsigned_type, base);
2442 step = fold_convert (unsigned_type, step);
2444 if (tree_int_cst_sign_bit (step))
2446 extreme = fold_convert (unsigned_type, low);
2447 if (TREE_CODE (base) != INTEGER_CST)
2448 base = fold_convert (unsigned_type, high);
2449 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
2450 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
2452 else
2454 extreme = fold_convert (unsigned_type, high);
2455 if (TREE_CODE (base) != INTEGER_CST)
2456 base = fold_convert (unsigned_type, low);
2457 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
2460 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
2461 would get out of the range. */
2462 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
2463 max = derive_constant_upper_bound (niter_bound);
2464 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
2467 /* Returns true if REF is a reference to an array at the end of a dynamically
2468 allocated structure. If this is the case, the array may be allocated larger
2469 than its upper bound implies. */
2471 static bool
2472 array_at_struct_end_p (tree ref)
2474 tree base = get_base_address (ref);
2475 tree parent, field;
2477 /* Unless the reference is through a pointer, the size of the array matches
2478 its declaration. */
2479 if (!base || !INDIRECT_REF_P (base))
2480 return false;
2482 for (;handled_component_p (ref); ref = parent)
2484 parent = TREE_OPERAND (ref, 0);
2486 if (TREE_CODE (ref) == COMPONENT_REF)
2488 /* All fields of a union are at its end. */
2489 if (TREE_CODE (TREE_TYPE (parent)) == UNION_TYPE)
2490 continue;
2492 /* Unless the field is at the end of the struct, we are done. */
2493 field = TREE_OPERAND (ref, 1);
2494 if (TREE_CHAIN (field))
2495 return false;
2498 /* The other options are ARRAY_REF, ARRAY_RANGE_REF, VIEW_CONVERT_EXPR.
2499 In all these cases, we might be accessing the last element, and
2500 although in practice this will probably never happen, it is legal for
2501 the indices of this last element to exceed the bounds of the array.
2502 Therefore, continue checking. */
2505 gcc_assert (INDIRECT_REF_P (ref));
2506 return true;
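/* Illustrative example (not part of the original file) of the situation this
   predicate detects: a trailing array in a dynamically allocated structure
   (the classic "struct hack") may legitimately be accessed past its declared
   bound, so its declared size must not be used to bound loop iterations.  */
#if 0
#include <stdlib.h>

struct str
{
  int len;
  char contents[1];  /* really LEN characters long after make_str */
};

static struct str *
make_str (int len)
{
  struct str *s = malloc (sizeof (struct str) + len - 1);

  if (s)
    s->len = len;
  return s;
}
#endif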
2509 /* Determine information about the number of iterations of LOOP from the index
2510 IDX of a data reference accessed in STMT. RELIABLE is true if STMT is
2511 guaranteed to be executed in every iteration of LOOP. Callback for
2512 for_each_index. */
2514 struct ilb_data
2516 struct loop *loop;
2517 tree stmt;
2518 bool reliable;
2521 static bool
2522 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
2524 struct ilb_data *data = (struct ilb_data *) dta;
2525 tree ev, init, step;
2526 tree low, high, type, next;
2527 bool sign, upper = data->reliable, at_end = false;
2528 struct loop *loop = data->loop;
2530 if (TREE_CODE (base) != ARRAY_REF)
2531 return true;
2533 /* For arrays at the end of the structure, we are not guaranteed that they
2534 do not extend beyond their declared size. However, for arrays of
2535 size greater than one, this is unlikely to be intended. */
2536 if (array_at_struct_end_p (base))
2538 at_end = true;
2539 upper = false;
2542 ev = instantiate_parameters (loop, analyze_scalar_evolution (loop, *idx));
2543 init = initial_condition (ev);
2544 step = evolution_part_in_loop_num (ev, loop->num);
2546 if (!init
2547 || !step
2548 || TREE_CODE (step) != INTEGER_CST
2549 || integer_zerop (step)
2550 || tree_contains_chrecs (init, NULL)
2551 || chrec_contains_symbols_defined_in_loop (init, loop->num))
2552 return true;
2554 low = array_ref_low_bound (base);
2555 high = array_ref_up_bound (base);
2557 /* The case of nonconstant bounds could be handled, but it would be
2558 complicated. */
2559 if (TREE_CODE (low) != INTEGER_CST
2560 || !high
2561 || TREE_CODE (high) != INTEGER_CST)
2562 return true;
2563 sign = tree_int_cst_sign_bit (step);
2564 type = TREE_TYPE (step);
2566 /* An array of length 1 at the end of a structure most likely extends
2567 beyond its bounds. */
2568 if (at_end
2569 && operand_equal_p (low, high, 0))
2570 return true;
2572 /* In case the relevant bound of the array does not fit in type, or
2573 it does, but bound + step (in type) still belongs to the range of the
2574 array, the index may wrap and still stay within the range of the array
2575 (consider e.g. the case where the array is indexed by the full range of
2576 unsigned char).
2578 To make things simpler, we require both bounds to fit into type, although
2579 there are cases where this would not be strictly necessary. */
2580 if (!int_fits_type_p (high, type)
2581 || !int_fits_type_p (low, type))
2582 return true;
2583 low = fold_convert (type, low);
2584 high = fold_convert (type, high);
2586 if (sign)
2587 next = fold_binary (PLUS_EXPR, type, low, step);
2588 else
2589 next = fold_binary (PLUS_EXPR, type, high, step);
2591 if (tree_int_cst_compare (low, next) <= 0
2592 && tree_int_cst_compare (next, high) <= 0)
2593 return true;
2595 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, true, upper);
2596 return true;
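/* Illustrative example (not part of the original file) of the undefined
   behavior exploited above: in a valid program the access a[i] keeps the
   index i within [0, 99], so the loop below can be assumed to iterate at
   most 100 times even though its exit test says nothing about i.  The
   function names are hypothetical.  */
#if 0
extern int keep_going (void);

static void
example (int a[100])
{
  int i;

  for (i = 0; keep_going (); i++)
    a[i] = 0;  /* This access bounds i, and hence the iteration count.  */
}
#endif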
2599 /* Determine information about the number of iterations of LOOP from the bounds
2600 of arrays in the data reference REF accessed in STMT. RELIABLE is true if
2601 STMT is guaranteed to be executed in every iteration of LOOP. */
2603 static void
2604 infer_loop_bounds_from_ref (struct loop *loop, tree stmt, tree ref,
2605 bool reliable)
2607 struct ilb_data data;
2609 data.loop = loop;
2610 data.stmt = stmt;
2611 data.reliable = reliable;
2612 for_each_index (&ref, idx_infer_loop_bounds, &data);
2615 /* Determine information about the number of iterations of LOOP from the way
2616 arrays are used in STMT. RELIABLE is true if STMT is guaranteed to be
2617 executed in every iteration of LOOP. */
2619 static void
2620 infer_loop_bounds_from_array (struct loop *loop, tree stmt, bool reliable)
2622 tree call;
2624 if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
2626 tree op0 = GIMPLE_STMT_OPERAND (stmt, 0);
2627 tree op1 = GIMPLE_STMT_OPERAND (stmt, 1);
2629 /* For each memory access, analyze its access function
2630 and record a bound on the loop iteration domain. */
2631 if (REFERENCE_CLASS_P (op0))
2632 infer_loop_bounds_from_ref (loop, stmt, op0, reliable);
2634 if (REFERENCE_CLASS_P (op1))
2635 infer_loop_bounds_from_ref (loop, stmt, op1, reliable);
2639 call = get_call_expr_in (stmt);
2640 if (call)
2642 tree arg;
2643 call_expr_arg_iterator iter;
2645 FOR_EACH_CALL_EXPR_ARG (arg, iter, call)
2646 if (REFERENCE_CLASS_P (arg))
2647 infer_loop_bounds_from_ref (loop, stmt, arg, reliable);
2651 /* Determine information about the number of iterations of LOOP from the fact
2652 that signed arithmetic in STMT does not overflow. */
2654 static void
2655 infer_loop_bounds_from_signedness (struct loop *loop, tree stmt)
2657 tree def, base, step, scev, type, low, high;
2659 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
2660 return;
2662 def = GIMPLE_STMT_OPERAND (stmt, 0);
2664 if (TREE_CODE (def) != SSA_NAME)
2665 return;
2667 type = TREE_TYPE (def);
2668 if (!INTEGRAL_TYPE_P (type)
2669 || !TYPE_OVERFLOW_UNDEFINED (type))
2670 return;
2672 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2673 if (chrec_contains_undetermined (scev))
2674 return;
2676 base = initial_condition_in_loop_num (scev, loop->num);
2677 step = evolution_part_in_loop_num (scev, loop->num);
2679 if (!base || !step
2680 || TREE_CODE (step) != INTEGER_CST
2681 || tree_contains_chrecs (base, NULL)
2682 || chrec_contains_symbols_defined_in_loop (base, loop->num))
2683 return;
2685 low = lower_bound_in_type (type, type);
2686 high = upper_bound_in_type (type, type);
2688 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
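/* Illustrative example (not part of the original file): without -fwrapv,
   signed overflow is undefined, so the induction variable below may be
   assumed to stay within the range of int.  With step 4 this bounds the
   number of executions of the assignment by roughly
   (INT_MAX - INT_MIN) / 4 + 1, computed in the corresponding unsigned type.
   The function names are hypothetical.  */
#if 0
extern int keep_going (void);
extern void use (int);

static void
example (int start)
{
  int i;

  for (i = start; keep_going (); i += 4)
    use (i);  /* i must not overflow, which bounds the iteration count.  */
}
#endif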
2691 /* The following analyzers extract information on the bounds
2692 of LOOP from the following undefined behaviors:
2694 - data references should not access elements beyond the statically
2695 allocated size,
2697 - signed variables should not overflow when flag_wrapv is not set.
2700 static void
2701 infer_loop_bounds_from_undefined (struct loop *loop)
2703 unsigned i;
2704 basic_block *bbs;
2705 block_stmt_iterator bsi;
2706 basic_block bb;
2707 bool reliable;
2709 bbs = get_loop_body (loop);
2711 for (i = 0; i < loop->num_nodes; i++)
2713 bb = bbs[i];
2715 /* If BB is not executed in each iteration of the loop, we cannot
2716 use the operations in it to infer a reliable upper bound on the
2717 number of iterations of the loop. However, we can use it as a guess. */
2718 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
2720 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
2722 tree stmt = bsi_stmt (bsi);
2724 infer_loop_bounds_from_array (loop, stmt, reliable);
2726 if (reliable)
2727 infer_loop_bounds_from_signedness (loop, stmt);
2732 free (bbs);
2735 /* Converts VAL to double_int. */
2737 static double_int
2738 gcov_type_to_double_int (gcov_type val)
2740 double_int ret;
2742 ret.low = (unsigned HOST_WIDE_INT) val;
2743 /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
2744 the full width of the type. */
2745 val >>= HOST_BITS_PER_WIDE_INT - 1;
2746 val >>= 1;
2747 ret.high = (unsigned HOST_WIDE_INT) val;
2749 return ret;
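/* Illustrative sketch (not part of the original file) of the two-step shift
   used above: shifting a value by its full width is undefined in C, so the
   shift by HOST_BITS_PER_WIDE_INT is split into a shift by width - 1
   followed by a shift by 1.  The same trick, for a 64-bit value split into
   two 64-bit "words" (the degenerate case the comment refers to):  */
#if 0
#include <stdint.h>

static void
split_i64 (int64_t val, uint64_t *low, uint64_t *high)
{
  *low = (uint64_t) val;
  val >>= 64 - 1;          /* Shift by width - 1 ...  */
  val >>= 1;               /* ... then by 1; never by the full width 64.  */
  *high = (uint64_t) val;  /* Sign bits of VAL, as in the code above.  */
}
#endif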
2752 /* Records estimates on numbers of iterations of LOOP. */
2754 void
2755 estimate_numbers_of_iterations_loop (struct loop *loop)
2757 VEC (edge, heap) *exits;
2758 tree niter, type;
2759 unsigned i;
2760 struct tree_niter_desc niter_desc;
2761 edge ex;
2762 double_int bound;
2764 /* Give up if we already have tried to compute an estimation. */
2765 if (loop->estimate_state != EST_NOT_COMPUTED)
2766 return;
2767 loop->estimate_state = EST_AVAILABLE;
2768 loop->any_upper_bound = false;
2769 loop->any_estimate = false;
2771 exits = get_loop_exit_edges (loop);
2772 for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
2774 if (!number_of_iterations_exit (loop, ex, &niter_desc, false))
2775 continue;
2777 niter = niter_desc.niter;
2778 type = TREE_TYPE (niter);
2779 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
2780 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
2781 build_int_cst (type, 0),
2782 niter);
2783 record_estimate (loop, niter, niter_desc.max,
2784 last_stmt (ex->src),
2785 true, true, true);
2787 VEC_free (edge, heap, exits);
2789 infer_loop_bounds_from_undefined (loop);
2791 /* If we have a measured profile, use it to estimate the number of
2792 iterations. */
2793 if (loop->header->count != 0)
2795 gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
2796 bound = gcov_type_to_double_int (nit);
2797 record_niter_bound (loop, bound, true, false);
2800 /* If an upper bound is smaller than the realistic estimate of the
2801 number of iterations, use the upper bound instead. */
2802 if (loop->any_upper_bound
2803 && loop->any_estimate
2804 && double_int_ucmp (loop->nb_iterations_upper_bound,
2805 loop->nb_iterations_estimate) < 0)
2806 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
2809 /* Records estimates on numbers of iterations of loops. */
2811 void
2812 estimate_numbers_of_iterations (void)
2814 loop_iterator li;
2815 struct loop *loop;
2817 /* We don't want to issue signed overflow warnings while getting
2818 loop iteration estimates. */
2819 fold_defer_overflow_warnings ();
2821 FOR_EACH_LOOP (li, loop, 0)
2823 estimate_numbers_of_iterations_loop (loop);
2826 fold_undefer_and_ignore_overflow_warnings ();
2829 /* Returns true if statement S1 dominates statement S2. */
2831 bool
2832 stmt_dominates_stmt_p (tree s1, tree s2)
2834 basic_block bb1 = bb_for_stmt (s1), bb2 = bb_for_stmt (s2);
2836 if (!bb1
2837 || s1 == s2)
2838 return true;
2840 if (bb1 == bb2)
2842 block_stmt_iterator bsi;
2844 for (bsi = bsi_start (bb1); bsi_stmt (bsi) != s2; bsi_next (&bsi))
2845 if (bsi_stmt (bsi) == s1)
2846 return true;
2848 return false;
2851 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
2854 /* Returns true when we can prove that the number of executions of
2855 STMT in the loop is at most NITER, according to the bound on
2856 the number of executions of the statement NITER_BOUND->stmt recorded in
2857 NITER_BOUND. If STMT is NULL, we must prove this bound for all
2858 statements in the loop. */
2860 static bool
2861 n_of_executions_at_most (tree stmt,
2862 struct nb_iter_bound *niter_bound,
2863 tree niter)
2865 double_int bound = niter_bound->bound;
2866 tree nit_type = TREE_TYPE (niter), e;
2867 enum tree_code cmp;
2869 gcc_assert (TYPE_UNSIGNED (nit_type));
2871 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
2872 the number of iterations is small. */
2873 if (!double_int_fits_to_tree_p (nit_type, bound))
2874 return false;
2876 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
2877 times. This means that:
2879 -- if NITER_BOUND->is_exit is true, then everything before
2880 NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
2881 times, and everything after it at most NITER_BOUND->bound times.
2883 -- If NITER_BOUND->is_exit is false, then if we can prove that whenever STMT
2884 is executed, NITER_BOUND->stmt is executed as well in the same
2885 iteration (which we conclude if both statements belong to the same
2886 basic block, or if STMT is after NITER_BOUND->stmt), then STMT
2887 is executed at most NITER_BOUND->bound + 1 times. Otherwise STMT is
2888 executed at most NITER_BOUND->bound + 2 times. */
2890 if (niter_bound->is_exit)
2892 if (stmt
2893 && stmt != niter_bound->stmt
2894 && stmt_dominates_stmt_p (niter_bound->stmt, stmt))
2895 cmp = GE_EXPR;
2896 else
2897 cmp = GT_EXPR;
2899 else
2901 if (!stmt
2902 || (bb_for_stmt (stmt) != bb_for_stmt (niter_bound->stmt)
2903 && !stmt_dominates_stmt_p (niter_bound->stmt, stmt)))
2905 bound = double_int_add (bound, double_int_one);
2906 if (double_int_zero_p (bound)
2907 || !double_int_fits_to_tree_p (nit_type, bound))
2908 return false;
2910 cmp = GT_EXPR;
2913 e = fold_binary (cmp, boolean_type_node,
2914 niter, double_int_to_tree (nit_type, bound));
2915 return e && integer_nonzerop (e);
2918 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
2920 bool
2921 nowrap_type_p (tree type)
2923 if (INTEGRAL_TYPE_P (type)
2924 && TYPE_OVERFLOW_UNDEFINED (type))
2925 return true;
2927 if (POINTER_TYPE_P (type))
2928 return true;
2930 return false;
2933 /* Return false only when the induction variable BASE + STEP * I is
2934 known not to overflow: i.e. when the number of iterations is small
2935 enough with respect to the step and initial condition to
2936 keep the evolution confined within TYPE's bounds. Return true when the
2937 iv is known to overflow or when the property is not computable.
2939 USE_OVERFLOW_SEMANTICS is true if this function should assume that
2940 the rules for overflow of the given language apply (e.g., that signed
2941 arithmetic in C does not overflow). */
2943 bool
2944 scev_probably_wraps_p (tree base, tree step,
2945 tree at_stmt, struct loop *loop,
2946 bool use_overflow_semantics)
2948 struct nb_iter_bound *bound;
2949 tree delta, step_abs;
2950 tree unsigned_type, valid_niter;
2951 tree type = TREE_TYPE (step);
2953 /* FIXME: We really need something like
2954 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
2956 We used to test for the following situation that frequently appears
2957 during address arithmetic:
2959 D.1621_13 = (long unsigned intD.4) D.1620_12;
2960 D.1622_14 = D.1621_13 * 8;
2961 D.1623_15 = (doubleD.29 *) D.1622_14;
2963 And derived that the sequence corresponding to D_14
2964 can be proved to not wrap because it is used for computing a
2965 memory access; however, this is not really the case -- for example,
2966 if D_12 = (unsigned char) [254,+,1], then D_14 has values
2967 2032, 2040, 0, 8, ..., but the code is still legal. */
2969 if (chrec_contains_undetermined (base)
2970 || chrec_contains_undetermined (step))
2971 return true;
2973 if (integer_zerop (step))
2974 return false;
2976 /* If we can use the fact that signed and pointer arithmetic does not
2977 wrap, we are done. */
2978 if (use_overflow_semantics && nowrap_type_p (type))
2979 return false;
2981 /* To be able to use estimates on the number of iterations of the loop,
2982 we must have an upper bound on the absolute value of the step. */
2983 if (TREE_CODE (step) != INTEGER_CST)
2984 return true;
2986 /* Don't issue signed overflow warnings. */
2987 fold_defer_overflow_warnings ();
2989 /* Otherwise, compute the number of iterations before we reach the
2990 bound of the type, and verify that the loop is exited before this
2991 occurs. */
2992 unsigned_type = unsigned_type_for (type);
2993 base = fold_convert (unsigned_type, base);
2995 if (tree_int_cst_sign_bit (step))
2997 tree extreme = fold_convert (unsigned_type,
2998 lower_bound_in_type (type, type));
2999 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3000 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
3001 fold_convert (unsigned_type, step));
3003 else
3005 tree extreme = fold_convert (unsigned_type,
3006 upper_bound_in_type (type, type));
3007 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3008 step_abs = fold_convert (unsigned_type, step);
3011 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
3013 estimate_numbers_of_iterations_loop (loop);
3014 for (bound = loop->bounds; bound; bound = bound->next)
3016 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
3018 fold_undefer_and_ignore_overflow_warnings ();
3019 return false;
3023 fold_undefer_and_ignore_overflow_warnings ();
3025 /* At this point we still don't have a proof that the iv does not
3026 overflow: give up. */
3027 return true;
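/* Illustrative sketch (not part of the original file) of the VALID_NITER
   computation above: the number of steps an IV BASE + STEP * i can take
   before reaching the bound of its type, evaluated in the corresponding
   unsigned type.  If the loop provably exits within that many iterations,
   the IV cannot wrap.  STEP is assumed nonzero, as in the caller above;
   the helper below is hypothetical.  */
#if 0
static unsigned int
steps_before_type_bound (unsigned int base, int step,
                         unsigned int type_min, unsigned int type_max)
{
  unsigned int delta, step_abs;

  if (step < 0)
    {
      delta = base - type_min;              /* Distance down to the minimum.  */
      step_abs = 0u - (unsigned int) step;  /* |STEP| without overflow.  */
    }
  else
    {
      delta = type_max - base;              /* Distance up to the maximum.  */
      step_abs = (unsigned int) step;
    }
  return delta / step_abs;                  /* Analogue of VALID_NITER.  */
}
#endif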
3030 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
3032 void
3033 free_numbers_of_iterations_estimates_loop (struct loop *loop)
3035 struct nb_iter_bound *bound, *next;
3037 loop->nb_iterations = NULL;
3038 loop->estimate_state = EST_NOT_COMPUTED;
3039 for (bound = loop->bounds; bound; bound = next)
3041 next = bound->next;
3042 ggc_free (bound);
3045 loop->bounds = NULL;
3048 /* Frees the information on upper bounds on numbers of iterations of loops. */
3050 void
3051 free_numbers_of_iterations_estimates (void)
3053 loop_iterator li;
3054 struct loop *loop;
3056 FOR_EACH_LOOP (li, loop, 0)
3058 free_numbers_of_iterations_estimates_loop (loop);
3062 /* Substitute value VAL for ssa name NAME inside expressions held
3063 at LOOP. */
3065 void
3066 substitute_in_loop_info (struct loop *loop, tree name, tree val)
3068 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);