gcc/tree-ssa-loop-niter.c
1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "basic-block.h"
29 #include "output.h"
30 #include "diagnostic.h"
31 #include "intl.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "tree-pass.h"
36 #include "ggc.h"
37 #include "tree-chrec.h"
38 #include "tree-scalar-evolution.h"
39 #include "tree-data-ref.h"
40 #include "params.h"
41 #include "flags.h"
42 #include "toplev.h"
43 #include "tree-inline.h"
44 #include "gmp.h"
46 #define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
48 /* The maximum number of dominator BBs we search for conditions
49 of loop header copies we use for simplifying a conditional
50 expression. */
51 #define MAX_DOMINATORS_TO_WALK 8
55 /* Analysis of number of iterations of an affine exit test.  */
59 /* Bounds on some value, BELOW <= X <= UP. */
61 typedef struct
63 mpz_t below, up;
64 } bounds;
67 /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
69 static void
70 split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
72 tree type = TREE_TYPE (expr);
73 tree op0, op1;
74 double_int off;
75 bool negate = false;
77 *var = expr;
78 mpz_set_ui (offset, 0);
80 switch (TREE_CODE (expr))
82 case MINUS_EXPR:
83 negate = true;
84 /* Fallthru. */
86 case PLUS_EXPR:
87 case POINTER_PLUS_EXPR:
88 op0 = TREE_OPERAND (expr, 0);
89 op1 = TREE_OPERAND (expr, 1);
91 if (TREE_CODE (op1) != INTEGER_CST)
92 break;
94 *var = op0;
95 /* Always sign extend the offset. */
96 off = double_int_sext (tree_to_double_int (op1),
97 TYPE_PRECISION (type));
98 mpz_set_double_int (offset, off, false);
99 break;
101 case INTEGER_CST:
102 *var = build_int_cst_type (type, 0);
103 off = tree_to_double_int (expr);
104 mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
105 break;
107 default:
108 break;
112 /* Stores estimate on the minimum/maximum value of the expression VAR + OFF
113 in TYPE to MIN and MAX. */
115 static void
116 determine_value_range (tree type, tree var, mpz_t off,
117 mpz_t min, mpz_t max)
119 /* If the expression is a constant, we know its value exactly. */
120 if (integer_zerop (var))
122 mpz_set (min, off);
123 mpz_set (max, off);
124 return;
127 /* If the computation may wrap, we know nothing about the value, except for
128 the range of the type. */
129 get_type_static_bounds (type, min, max);
130 if (!nowrap_type_p (type))
131 return;
133 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
134 add it to MIN, otherwise to MAX. */
135 if (mpz_sgn (off) < 0)
136 mpz_add (max, max, off);
137 else
138 mpz_add (min, min, off);
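/* Illustrative example (not part of the original source): for an 8-bit
   signed TYPE in which overflow is undefined (so the computation cannot
   wrap), an unknown VAR and OFF = 5, the static bounds [-128, 127] are
   tightened to MIN = -123, MAX = 127; with OFF = -5 they would instead
   become MIN = -128, MAX = 122.  */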
141 /* Stores the bounds on the difference of the values of the expressions
142 (var + X) and (var + Y), computed in TYPE, to BNDS. */
144 static void
145 bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
146 bounds *bnds)
148 int rel = mpz_cmp (x, y);
149 bool may_wrap = !nowrap_type_p (type);
150 mpz_t m;
152 /* If X == Y, then the expressions are always equal.
153 If X > Y, there are the following possibilities:
154 a) neither of var + X and var + Y overflow or underflow, or both of
155 them do. Then their difference is X - Y.
156 b) var + X overflows, and var + Y does not. Then the values of the
157 expressions are var + X - M and var + Y, where M is the range of
158 the type, and their difference is X - Y - M.
159 c) var + Y underflows and var + X does not. Their difference again
160 is X - Y - M.
161 Therefore, if the arithmetic in the type does not overflow, then the
162 bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y)
163 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
164 (X - Y, X - Y + M). */
166 if (rel == 0)
168 mpz_set_ui (bnds->below, 0);
169 mpz_set_ui (bnds->up, 0);
170 return;
173 mpz_init (m);
174 mpz_set_double_int (m, double_int_mask (TYPE_PRECISION (type)), true);
175 mpz_add_ui (m, m, 1);
176 mpz_sub (bnds->up, x, y);
177 mpz_set (bnds->below, bnds->up);
179 if (may_wrap)
181 if (rel > 0)
182 mpz_sub (bnds->below, bnds->below, m);
183 else
184 mpz_add (bnds->up, bnds->up, m);
187 mpz_clear (m);
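/* Worked example (added for illustration): with X = 10 and Y = 3 in an
   8-bit type that may wrap, M = 256 and the resulting bounds are
   BNDS = [10 - 3 - 256, 10 - 3] = [-249, 7]; if the type cannot wrap,
   both bounds are 7.  */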
190 /* From condition C0 CMP C1 derives information regarding the
191 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
192 and stores it to BNDS. */
194 static void
195 refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
196 tree vary, mpz_t offy,
197 tree c0, enum tree_code cmp, tree c1,
198 bounds *bnds)
200 tree varc0, varc1, tmp, ctype;
201 mpz_t offc0, offc1, loffx, loffy, bnd;
202 bool lbound = false;
203 bool no_wrap = nowrap_type_p (type);
204 bool x_ok, y_ok;
206 switch (cmp)
208 case LT_EXPR:
209 case LE_EXPR:
210 case GT_EXPR:
211 case GE_EXPR:
212 STRIP_SIGN_NOPS (c0);
213 STRIP_SIGN_NOPS (c1);
214 ctype = TREE_TYPE (c0);
215 if (!useless_type_conversion_p (ctype, type))
216 return;
218 break;
220 case EQ_EXPR:
221 /* We could derive quite precise information from EQ_EXPR, however, such
222 a guard is unlikely to appear, so we do not bother with handling
223 it. */
224 return;
226 case NE_EXPR:
227 /* NE_EXPR comparisons do not contain much useful information, except for
228 the special case of comparing with the bounds of the type. */
229 if (TREE_CODE (c1) != INTEGER_CST
230 || !INTEGRAL_TYPE_P (type))
231 return;
233 /* Ensure that the condition speaks about an expression in the same type
234 as X and Y. */
235 ctype = TREE_TYPE (c0);
236 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
237 return;
238 c0 = fold_convert (type, c0);
239 c1 = fold_convert (type, c1);
241 if (TYPE_MIN_VALUE (type)
242 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
244 cmp = GT_EXPR;
245 break;
247 if (TYPE_MAX_VALUE (type)
248 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
250 cmp = LT_EXPR;
251 break;
254 return;
255 default:
256 return;
259 mpz_init (offc0);
260 mpz_init (offc1);
261 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
262 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
264 /* We are only interested in comparisons of expressions based on VARX and
265 VARY. TODO -- we might also be able to derive some bounds from
266 expressions containing just one of the variables. */
268 if (operand_equal_p (varx, varc1, 0))
270 tmp = varc0; varc0 = varc1; varc1 = tmp;
271 mpz_swap (offc0, offc1);
272 cmp = swap_tree_comparison (cmp);
275 if (!operand_equal_p (varx, varc0, 0)
276 || !operand_equal_p (vary, varc1, 0))
277 goto end;
279 mpz_init_set (loffx, offx);
280 mpz_init_set (loffy, offy);
282 if (cmp == GT_EXPR || cmp == GE_EXPR)
284 tmp = varx; varx = vary; vary = tmp;
285 mpz_swap (offc0, offc1);
286 mpz_swap (loffx, loffy);
287 cmp = swap_tree_comparison (cmp);
288 lbound = true;
291 /* If there is no overflow, the condition implies that
293 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
295 The overflows and underflows may complicate things a bit; each
296 overflow decreases the appropriate offset by M, and underflow
297 increases it by M. The above inequality would not necessarily be
298 true if
300 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
301 VARX + OFFC0 overflows, but VARX + OFFX does not.
302 This may only happen if OFFX < OFFC0.
303 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
304 VARY + OFFC1 underflows and VARY + OFFY does not.
305 This may only happen if OFFY > OFFC1. */
307 if (no_wrap)
309 x_ok = true;
310 y_ok = true;
312 else
314 x_ok = (integer_zerop (varx)
315 || mpz_cmp (loffx, offc0) >= 0);
316 y_ok = (integer_zerop (vary)
317 || mpz_cmp (loffy, offc1) <= 0);
320 if (x_ok && y_ok)
322 mpz_init (bnd);
323 mpz_sub (bnd, loffx, loffy);
324 mpz_add (bnd, bnd, offc1);
325 mpz_sub (bnd, bnd, offc0);
327 if (cmp == LT_EXPR)
328 mpz_sub_ui (bnd, bnd, 1);
330 if (lbound)
332 mpz_neg (bnd, bnd);
333 if (mpz_cmp (bnds->below, bnd) < 0)
334 mpz_set (bnds->below, bnd);
336 else
338 if (mpz_cmp (bnd, bnds->up) < 0)
339 mpz_set (bnds->up, bnd);
341 mpz_clear (bnd);
344 mpz_clear (loffx);
345 mpz_clear (loffy);
346 end:
347 mpz_clear (offc0);
348 mpz_clear (offc1);
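/* Illustrative example (not from the original source): for a loop header
   copy guard "i_1 < n_2" with VARX = i_1, OFFX = 0, VARY = n_2, OFFY = 0,
   we get OFFC0 = OFFC1 = 0 and CMP = LT_EXPR, so BND = 0 - 0 + 0 - 0 - 1 = -1
   and BNDS->up can be lowered to -1, i.e. i_1 - n_2 <= -1 holds on entry
   to the loop.  */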
351 /* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
352 The subtraction is considered to be performed in arbitrary precision,
353 without overflows.
355 We do not attempt to be too clever regarding the value ranges of X and
356 Y; most of the time, they are just integers or ssa names offsetted by
357 integer. However, we try to use the information contained in the
358 comparisons before the loop (usually created by loop header copying). */
360 static void
361 bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
363 tree type = TREE_TYPE (x);
364 tree varx, vary;
365 mpz_t offx, offy;
366 mpz_t minx, maxx, miny, maxy;
367 int cnt = 0;
368 edge e;
369 basic_block bb;
370 tree cond, c0, c1;
371 enum tree_code cmp;
373 /* Get rid of unnecessary casts, but preserve the value of
374 the expressions. */
375 STRIP_SIGN_NOPS (x);
376 STRIP_SIGN_NOPS (y);
378 mpz_init (bnds->below);
379 mpz_init (bnds->up);
380 mpz_init (offx);
381 mpz_init (offy);
382 split_to_var_and_offset (x, &varx, offx);
383 split_to_var_and_offset (y, &vary, offy);
385 if (!integer_zerop (varx)
386 && operand_equal_p (varx, vary, 0))
388 /* Special case VARX == VARY -- we just need to compare the
389 offsets. The matters are a bit more complicated in the
390 case addition of offsets may wrap. */
391 bound_difference_of_offsetted_base (type, offx, offy, bnds);
393 else
395 /* Otherwise, use the value ranges to determine the initial
396 estimates on below and up. */
397 mpz_init (minx);
398 mpz_init (maxx);
399 mpz_init (miny);
400 mpz_init (maxy);
401 determine_value_range (type, varx, offx, minx, maxx);
402 determine_value_range (type, vary, offy, miny, maxy);
404 mpz_sub (bnds->below, minx, maxy);
405 mpz_sub (bnds->up, maxx, miny);
406 mpz_clear (minx);
407 mpz_clear (maxx);
408 mpz_clear (miny);
409 mpz_clear (maxy);
412 /* If both X and Y are constants, we cannot get any more precise. */
413 if (integer_zerop (varx) && integer_zerop (vary))
414 goto end;
416 /* Now walk the dominators of the loop header and use the entry
417 guards to refine the estimates. */
418 for (bb = loop->header;
419 bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
420 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
422 if (!single_pred_p (bb))
423 continue;
424 e = single_pred_edge (bb);
426 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
427 continue;
429 cond = COND_EXPR_COND (last_stmt (e->src));
430 if (!COMPARISON_CLASS_P (cond))
431 continue;
432 c0 = TREE_OPERAND (cond, 0);
433 cmp = TREE_CODE (cond);
434 c1 = TREE_OPERAND (cond, 1);
436 if (e->flags & EDGE_FALSE_VALUE)
437 cmp = invert_tree_comparison (cmp, false);
439 refine_bounds_using_guard (type, varx, offx, vary, offy,
440 c0, cmp, c1, bnds);
441 ++cnt;
444 end:
445 mpz_clear (offx);
446 mpz_clear (offy);
449 /* Update the bounds in BNDS that restrict the value of X to the bounds
450 that restrict the value of X + DELTA. X can be obtained as a
451 difference of two values in TYPE. */
453 static void
454 bounds_add (bounds *bnds, double_int delta, tree type)
456 mpz_t mdelta, max;
458 mpz_init (mdelta);
459 mpz_set_double_int (mdelta, delta, false);
461 mpz_init (max);
462 mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
464 mpz_add (bnds->up, bnds->up, mdelta);
465 mpz_add (bnds->below, bnds->below, mdelta);
467 if (mpz_cmp (bnds->up, max) > 0)
468 mpz_set (bnds->up, max);
470 mpz_neg (max, max);
471 if (mpz_cmp (bnds->below, max) < 0)
472 mpz_set (bnds->below, max);
474 mpz_clear (mdelta);
475 mpz_clear (max);
478 /* Update the bounds in BNDS that restrict the value of X to the bounds
479 that restrict the value of -X. */
481 static void
482 bounds_negate (bounds *bnds)
484 mpz_t tmp;
486 mpz_init_set (tmp, bnds->up);
487 mpz_neg (bnds->up, bnds->below);
488 mpz_neg (bnds->below, tmp);
489 mpz_clear (tmp);
492 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
494 static tree
495 inverse (tree x, tree mask)
497 tree type = TREE_TYPE (x);
498 tree rslt;
499 unsigned ctr = tree_floor_log2 (mask);
501 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
503 unsigned HOST_WIDE_INT ix;
504 unsigned HOST_WIDE_INT imask;
505 unsigned HOST_WIDE_INT irslt = 1;
507 gcc_assert (cst_and_fits_in_hwi (x));
508 gcc_assert (cst_and_fits_in_hwi (mask));
510 ix = int_cst_value (x);
511 imask = int_cst_value (mask);
513 for (; ctr; ctr--)
515 irslt *= ix;
516 ix *= ix;
518 irslt &= imask;
520 rslt = build_int_cst_type (type, irslt);
522 else
524 rslt = build_int_cst (type, 1);
525 for (; ctr; ctr--)
527 rslt = int_const_binop (MULT_EXPR, rslt, x, 0);
528 x = int_const_binop (MULT_EXPR, x, x, 0);
530 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask, 0);
533 return rslt;
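/* Note added for illustration: the loop above multiplies together
   X^(2^0), X^(2^1), ..., X^(2^(ctr-1)), i.e. it computes X^(2^ctr - 1)
   = X^(2^(s-1) - 1), which for odd X is its inverse modulo 2^s.  For
   example, inverse (3, 0xff) is 171, since 3 * 171 = 513 = 2 * 256 + 1.  */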
536 /* Derives the upper bound BND on the number of executions of loop with exit
537 condition S * i <> C, assuming that the loop is not infinite. If
538 NO_OVERFLOW is true, then the control variable of the loop does not
539 overflow. If NO_OVERFLOW is true or BNDS.below >= 0, then BNDS.up
540 contains the upper bound on the value of C. */
542 static void
543 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
544 bounds *bnds)
546 double_int max;
547 mpz_t d;
549 /* If the control variable does not overflow, the number of iterations is
550 at most c / s. Otherwise it is at most the period of the control
551 variable. */
552 if (!no_overflow && !multiple_of_p (TREE_TYPE (c), c, s))
554 max = double_int_mask (TYPE_PRECISION (TREE_TYPE (c))
555 - tree_low_cst (num_ending_zeros (s), 1));
556 mpz_set_double_int (bnd, max, true);
557 return;
560 /* Determine the upper bound on C. */
561 if (no_overflow || mpz_sgn (bnds->below) >= 0)
562 mpz_set (bnd, bnds->up);
563 else if (TREE_CODE (c) == INTEGER_CST)
564 mpz_set_double_int (bnd, tree_to_double_int (c), true);
565 else
566 mpz_set_double_int (bnd, double_int_mask (TYPE_PRECISION (TREE_TYPE (c))),
567 true);
569 mpz_init (d);
570 mpz_set_double_int (d, tree_to_double_int (s), true);
571 mpz_fdiv_q (bnd, bnd, d);
572 mpz_clear (d);
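/* Example (illustration only): if the control variable cannot overflow and
   BNDS gives C <= 100 with S = 4, the bound is 100 / 4 = 25.  If the
   variable may wrap and C is not known to be divisible by S, the bound
   falls back to the period of the iv; for an 8-bit type and an S with two
   trailing zero bits that is 2^6 - 1 = 63.  */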
575 /* Determines number of iterations of loop whose ending condition
576 is IV <> FINAL. TYPE is the type of the iv. The number of
577 iterations is stored to NITER. NEVER_INFINITE is true if
578 we know that the exit must be taken eventually, i.e., that the IV
579 ever reaches the value FINAL (we derived this earlier, and possibly set
580 NITER->assumptions to make sure this is the case). BNDS contains the
581 bounds on the difference FINAL - IV->base. */
583 static bool
584 number_of_iterations_ne (tree type, affine_iv *iv, tree final,
585 struct tree_niter_desc *niter, bool never_infinite,
586 bounds *bnds)
588 tree niter_type = unsigned_type_for (type);
589 tree s, c, d, bits, assumption, tmp, bound;
590 mpz_t max;
592 niter->control = *iv;
593 niter->bound = final;
594 niter->cmp = NE_EXPR;
596 /* Rearrange the terms so that we get inequality S * i <> C, with S
597 positive. Also cast everything to the unsigned type. If IV does
598 not overflow, BNDS bounds the value of C. Also, this is the
599 case if the computation |FINAL - IV->base| does not overflow, i.e.,
600 if BNDS->below in the result is nonnegative. */
601 if (tree_int_cst_sign_bit (iv->step))
603 s = fold_convert (niter_type,
604 fold_build1 (NEGATE_EXPR, type, iv->step));
605 c = fold_build2 (MINUS_EXPR, niter_type,
606 fold_convert (niter_type, iv->base),
607 fold_convert (niter_type, final));
608 bounds_negate (bnds);
610 else
612 s = fold_convert (niter_type, iv->step);
613 c = fold_build2 (MINUS_EXPR, niter_type,
614 fold_convert (niter_type, final),
615 fold_convert (niter_type, iv->base));
618 mpz_init (max);
619 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds);
620 niter->max = mpz_get_double_int (niter_type, max, false);
621 mpz_clear (max);
623 /* First the trivial cases -- when the step is 1. */
624 if (integer_onep (s))
626 niter->niter = c;
627 return true;
630 /* Let nsd (step, size of mode) = d. If d does not divide c, the loop
631 is infinite. Otherwise, the number of iterations is
632 (inverse(s/d) * (c/d)) mod (size of mode/d). */
633 bits = num_ending_zeros (s);
634 bound = build_low_bits_mask (niter_type,
635 (TYPE_PRECISION (niter_type)
636 - tree_low_cst (bits, 1)));
638 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
639 build_int_cst (niter_type, 1), bits);
640 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
642 if (!never_infinite)
644 /* If we cannot assume that the loop is not infinite, record the
645 assumptions for divisibility of c. */
646 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
647 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
648 assumption, build_int_cst (niter_type, 0));
649 if (!integer_nonzerop (assumption))
650 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
651 niter->assumptions, assumption);
654 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
655 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
656 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
657 return true;
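/* Worked example (added for illustration): for an exit condition i != 9
   with i an 8-bit unsigned iv starting at 0 and stepping by 3, we get
   S = 3, C = 9, BITS = 0, D = 1 and BOUND = 255; the divisibility
   assumption is trivially true, inverse (3, 255) = 171, and
   NITER = (9 * 171) & 255 = 3, i.e. the body runs for i = 0, 3, 6.  */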
660 /* Checks whether we can determine the final value of the control variable
661 of the loop with ending condition IV0 < IV1 (computed in TYPE).
662 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
663 of the step. The assumptions necessary to ensure that the computation
664 of the final value does not overflow are recorded in NITER. If we
665 find the final value, we adjust DELTA and return TRUE. Otherwise
666 we return false. BNDS bounds the value of IV1->base - IV0->base,
667 and will be updated by the same amount as DELTA. */
669 static bool
670 number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
671 struct tree_niter_desc *niter,
672 tree *delta, tree step,
673 bounds *bnds)
675 tree niter_type = TREE_TYPE (step);
676 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
677 tree tmod;
678 mpz_t mmod;
679 tree assumption = boolean_true_node, bound, noloop;
680 bool ret = false;
681 tree type1 = type;
682 if (POINTER_TYPE_P (type))
683 type1 = sizetype;
685 if (TREE_CODE (mod) != INTEGER_CST)
686 return false;
687 if (integer_nonzerop (mod))
688 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
689 tmod = fold_convert (type1, mod);
691 mpz_init (mmod);
692 mpz_set_double_int (mmod, tree_to_double_int (mod), true);
693 mpz_neg (mmod, mmod);
695 if (integer_nonzerop (iv0->step))
697 /* The final value of the iv is iv1->base + MOD, assuming that this
698 computation does not overflow, and that
699 iv0->base <= iv1->base + MOD. */
700 if (!iv1->no_overflow && !integer_zerop (mod))
702 bound = fold_build2 (MINUS_EXPR, type,
703 TYPE_MAX_VALUE (type1), tmod);
704 assumption = fold_build2 (LE_EXPR, boolean_type_node,
705 iv1->base, bound);
706 if (integer_zerop (assumption))
707 goto end;
709 if (mpz_cmp (mmod, bnds->below) < 0)
710 noloop = boolean_false_node;
711 else
712 noloop = fold_build2 (GT_EXPR, boolean_type_node,
713 iv0->base,
714 fold_build2 (PLUS_EXPR, type1,
715 iv1->base, tmod));
717 else
719 /* The final value of the iv is iv0->base - MOD, assuming that this
720 computation does not overflow, and that
721 iv0->base - MOD <= iv1->base. */
722 if (!iv0->no_overflow && !integer_zerop (mod))
724 bound = fold_build2 (PLUS_EXPR, type1,
725 TYPE_MIN_VALUE (type1), tmod);
726 assumption = fold_build2 (GE_EXPR, boolean_type_node,
727 iv0->base, bound);
728 if (integer_zerop (assumption))
729 goto end;
731 if (mpz_cmp (mmod, bnds->below) < 0)
732 noloop = boolean_false_node;
733 else
734 noloop = fold_build2 (GT_EXPR, boolean_type_node,
735 fold_build2 (MINUS_EXPR, type1,
736 iv0->base, tmod),
737 iv1->base);
740 if (!integer_nonzerop (assumption))
741 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
742 niter->assumptions,
743 assumption);
744 if (!integer_zerop (noloop))
745 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
746 niter->may_be_zero,
747 noloop);
748 bounds_add (bnds, tree_to_double_int (mod), type);
749 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
751 ret = true;
752 end:
753 mpz_clear (mmod);
754 return ret;
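/* Illustrative example (not part of the original source): for
   iv0 = {0, +, 3} and a constant iv1->base = 10, MOD = 10 % 3 = 1 is
   replaced by STEP - MOD = 2, DELTA becomes 12 and BNDS is shifted by 2;
   the caller can then treat the exit as i != 12, which gives 4 iterations
   (i = 0, 3, 6, 9).  */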
757 /* Add assertions to NITER that ensure that the control variable of the loop
758 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
759 are TYPE. Returns false if we can prove that there is an overflow, true
760 otherwise. STEP is the absolute value of the step. */
762 static bool
763 assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
764 struct tree_niter_desc *niter, tree step)
766 tree bound, d, assumption, diff;
767 tree niter_type = TREE_TYPE (step);
769 if (integer_nonzerop (iv0->step))
771 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
772 if (iv0->no_overflow)
773 return true;
775 /* If iv0->base is a constant, we can determine the last value before
776 overflow precisely; otherwise we conservatively assume
777 MAX - STEP + 1. */
779 if (TREE_CODE (iv0->base) == INTEGER_CST)
781 d = fold_build2 (MINUS_EXPR, niter_type,
782 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
783 fold_convert (niter_type, iv0->base));
784 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
786 else
787 diff = fold_build2 (MINUS_EXPR, niter_type, step,
788 build_int_cst (niter_type, 1));
789 bound = fold_build2 (MINUS_EXPR, type,
790 TYPE_MAX_VALUE (type), fold_convert (type, diff));
791 assumption = fold_build2 (LE_EXPR, boolean_type_node,
792 iv1->base, bound);
794 else
796 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
797 if (iv1->no_overflow)
798 return true;
800 if (TREE_CODE (iv1->base) == INTEGER_CST)
802 d = fold_build2 (MINUS_EXPR, niter_type,
803 fold_convert (niter_type, iv1->base),
804 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
805 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
807 else
808 diff = fold_build2 (MINUS_EXPR, niter_type, step,
809 build_int_cst (niter_type, 1));
810 bound = fold_build2 (PLUS_EXPR, type,
811 TYPE_MIN_VALUE (type), fold_convert (type, diff));
812 assumption = fold_build2 (GE_EXPR, boolean_type_node,
813 iv0->base, bound);
816 if (integer_zerop (assumption))
817 return false;
818 if (!integer_nonzerop (assumption))
819 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
820 niter->assumptions, assumption);
822 iv0->no_overflow = true;
823 iv1->no_overflow = true;
824 return true;
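/* Example of the recorded assumption (illustration only): for an 8-bit
   unsigned type and iv0 = {250, +, 4}, D = 255 - 250 = 5, DIFF = 5 % 4 = 1
   and BOUND = 254, so we require iv1->base <= 254 -- the largest value the
   iv reaches without wrapping is 254, and the exit i < iv1->base must fire
   no later than that.  */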
827 /* Add an assumption to NITER that a loop whose ending condition
828 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
829 bounds the value of IV1->base - IV0->base. */
831 static void
832 assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
833 struct tree_niter_desc *niter, bounds *bnds)
835 tree assumption = boolean_true_node, bound, diff;
836 tree mbz, mbzl, mbzr, type1;
837 bool rolls_p, no_overflow_p;
838 double_int dstep;
839 mpz_t mstep, max;
841 /* We are going to compute the number of iterations as
842 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
843 variant of TYPE. This formula only works if
845 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
847 (where MAX is the maximum value of the unsigned variant of TYPE, and
848 the computations in this formula are performed in full precision
849 (without overflows).
851 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
852 we have a condition of form iv0->base - step < iv1->base before the loop,
853 and for loops iv0->base < iv1->base - step * i the condition
854 iv0->base < iv1->base + step, due to loop header copying, which enable us
855 to prove the lower bound.
857 The upper bound is more complicated. Unless the expressions for initial
858 and final value themselves contain enough information, we usually cannot
859 derive it from the context. */
861 /* First check whether the answer does not follow from the bounds we gathered
862 before. */
863 if (integer_nonzerop (iv0->step))
864 dstep = tree_to_double_int (iv0->step);
865 else
867 dstep = double_int_sext (tree_to_double_int (iv1->step),
868 TYPE_PRECISION (type));
869 dstep = double_int_neg (dstep);
872 mpz_init (mstep);
873 mpz_set_double_int (mstep, dstep, true);
874 mpz_neg (mstep, mstep);
875 mpz_add_ui (mstep, mstep, 1);
877 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
879 mpz_init (max);
880 mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
881 mpz_add (max, max, mstep);
882 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
883 /* For pointers, only values lying inside a single object
884 can be compared or manipulated by pointer arithmetics.
885 Gcc in general does not allow or handle objects larger
886 than half of the address space, hence the upper bound
887 is satisfied for pointers. */
888 || POINTER_TYPE_P (type));
889 mpz_clear (mstep);
890 mpz_clear (max);
892 if (rolls_p && no_overflow_p)
893 return;
895 type1 = type;
896 if (POINTER_TYPE_P (type))
897 type1 = sizetype;
899 /* Now the hard part; we must formulate the assumption(s) as expressions, and
900 we must be careful not to introduce overflow. */
902 if (integer_nonzerop (iv0->step))
904 diff = fold_build2 (MINUS_EXPR, type1,
905 iv0->step, build_int_cst (type1, 1));
907 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
908 0 address never belongs to any object, we can assume this for
909 pointers. */
910 if (!POINTER_TYPE_P (type))
912 bound = fold_build2 (PLUS_EXPR, type1,
913 TYPE_MIN_VALUE (type), diff);
914 assumption = fold_build2 (GE_EXPR, boolean_type_node,
915 iv0->base, bound);
918 /* And then we can compute iv0->base - diff, and compare it with
919 iv1->base. */
920 mbzl = fold_build2 (MINUS_EXPR, type1,
921 fold_convert (type1, iv0->base), diff);
922 mbzr = fold_convert (type1, iv1->base);
924 else
926 diff = fold_build2 (PLUS_EXPR, type1,
927 iv1->step, build_int_cst (type1, 1));
929 if (!POINTER_TYPE_P (type))
931 bound = fold_build2 (PLUS_EXPR, type1,
932 TYPE_MAX_VALUE (type), diff);
933 assumption = fold_build2 (LE_EXPR, boolean_type_node,
934 iv1->base, bound);
937 mbzl = fold_convert (type1, iv0->base);
938 mbzr = fold_build2 (MINUS_EXPR, type1,
939 fold_convert (type1, iv1->base), diff);
942 if (!integer_nonzerop (assumption))
943 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
944 niter->assumptions, assumption);
945 if (!rolls_p)
947 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
948 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
949 niter->may_be_zero, mbz);
953 /* Determines number of iterations of loop whose ending condition
954 is IV0 < IV1. TYPE is the type of the iv. The number of
955 iterations is stored to NITER. BNDS bounds the difference
956 IV1->base - IV0->base. */
958 static bool
959 number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
960 struct tree_niter_desc *niter,
961 bool never_infinite ATTRIBUTE_UNUSED,
962 bounds *bnds)
964 tree niter_type = unsigned_type_for (type);
965 tree delta, step, s;
966 mpz_t mstep, tmp;
968 if (integer_nonzerop (iv0->step))
970 niter->control = *iv0;
971 niter->cmp = LT_EXPR;
972 niter->bound = iv1->base;
974 else
976 niter->control = *iv1;
977 niter->cmp = GT_EXPR;
978 niter->bound = iv0->base;
981 delta = fold_build2 (MINUS_EXPR, niter_type,
982 fold_convert (niter_type, iv1->base),
983 fold_convert (niter_type, iv0->base));
985 /* First handle the special case that the step is +-1. */
986 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
987 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
989 /* for (i = iv0->base; i < iv1->base; i++)
993 for (i = iv1->base; i > iv0->base; i--).
995 In both cases # of iterations is iv1->base - iv0->base, assuming that
996 iv1->base >= iv0->base.
998 First try to derive a lower bound on the value of
999 iv1->base - iv0->base, computed in full precision. If the difference
1000 is nonnegative, we are done, otherwise we must record the
1001 condition. */
1003 if (mpz_sgn (bnds->below) < 0)
1004 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1005 iv1->base, iv0->base);
1006 niter->niter = delta;
1007 niter->max = mpz_get_double_int (niter_type, bnds->up, false);
1008 return true;
1011 if (integer_nonzerop (iv0->step))
1012 step = fold_convert (niter_type, iv0->step);
1013 else
1014 step = fold_convert (niter_type,
1015 fold_build1 (NEGATE_EXPR, type, iv1->step));
1017 /* If we can determine the final value of the control iv exactly, we can
1018 transform the condition to != comparison. In particular, this will be
1019 the case if DELTA is constant. */
1020 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1021 bnds))
1023 affine_iv zps;
1025 zps.base = build_int_cst (niter_type, 0);
1026 zps.step = step;
1027 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1028 zps does not overflow. */
1029 zps.no_overflow = true;
1031 return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
1034 /* Make sure that the control iv does not overflow. */
1035 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1036 return false;
1038 /* We determine the number of iterations as (delta + step - 1) / step. For
1039 this to work, we must know that iv1->base >= iv0->base - step + 1,
1040 otherwise the loop does not roll. */
1041 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1043 s = fold_build2 (MINUS_EXPR, niter_type,
1044 step, build_int_cst (niter_type, 1));
1045 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1046 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1048 mpz_init (mstep);
1049 mpz_init (tmp);
1050 mpz_set_double_int (mstep, tree_to_double_int (step), true);
1051 mpz_add (tmp, bnds->up, mstep);
1052 mpz_sub_ui (tmp, tmp, 1);
1053 mpz_fdiv_q (tmp, tmp, mstep);
1054 niter->max = mpz_get_double_int (niter_type, tmp, false);
1055 mpz_clear (mstep);
1056 mpz_clear (tmp);
1058 return true;
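/* Illustrative example (added): for iv0 = {0, +, 3} and a symbolic bound
   iv1->base = n_1 (so the transformation to != above does not apply), the
   result is NITER = (n_1 + 2) / 3 computed in the unsigned type, with
   assert_loop_rolls_lt possibly recording the condition under which the
   loop does not roll at all.  */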
1061 /* Determines number of iterations of loop whose ending condition
1062 is IV0 <= IV1. TYPE is the type of the iv. The number of
1063 iterations is stored to NITER. NEVER_INFINITE is true if
1064 we know that this condition must eventually become false (we derived this
1065 earlier, and possibly set NITER->assumptions to make sure this
1066 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1068 static bool
1069 number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
1070 struct tree_niter_desc *niter, bool never_infinite,
1071 bounds *bnds)
1073 tree assumption;
1074 tree type1 = type;
1075 if (POINTER_TYPE_P (type))
1076 type1 = sizetype;
1078 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1079 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1080 value of the type. This we must know anyway, since if it is
1081 equal to this value, the loop rolls forever. */
1083 if (!never_infinite)
1085 if (integer_nonzerop (iv0->step))
1086 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1087 iv1->base, TYPE_MAX_VALUE (type1));
1088 else
1089 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1090 iv0->base, TYPE_MIN_VALUE (type1));
1092 if (integer_zerop (assumption))
1093 return false;
1094 if (!integer_nonzerop (assumption))
1095 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1096 niter->assumptions, assumption);
1099 if (integer_nonzerop (iv0->step))
1100 iv1->base = fold_build2 (PLUS_EXPR, type1,
1101 iv1->base, build_int_cst (type1, 1));
1102 else
1103 iv0->base = fold_build2 (MINUS_EXPR, type1,
1104 iv0->base, build_int_cst (type1, 1));
1106 bounds_add (bnds, double_int_one, type1);
1108 return number_of_iterations_lt (type, iv0, iv1, niter, never_infinite, bnds);
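/* Example (illustration only): a loop "for (i = 0; i <= n; i++)" is handled
   as i < n + 1; unless the exit is already known to be taken eventually,
   the assumption n != TYPE_MAX_VALUE is recorded first, since for n equal
   to the maximum value the original loop never terminates.  */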
1111 /* Dumps description of affine induction variable IV to FILE. */
1113 static void
1114 dump_affine_iv (FILE *file, affine_iv *iv)
1116 if (!integer_zerop (iv->step))
1117 fprintf (file, "[");
1119 print_generic_expr (file, iv->base, TDF_SLIM);
1121 if (!integer_zerop (iv->step))
1123 fprintf (file, ", + , ");
1124 print_generic_expr (file, iv->step, TDF_SLIM);
1125 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1129 /* Determine the number of iterations according to condition (for staying
1130 inside loop) which compares two induction variables using comparison
1131 operator CODE. The induction variable on left side of the comparison
1132 is IV0, the right-hand side is IV1. Both induction variables must have
1133 type TYPE, which must be an integer or pointer type. The steps of the
1134 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1136 LOOP is the loop whose number of iterations we are determining.
1138 ONLY_EXIT is true if we are sure this is the only way the loop could be
1139 exited (including possibly non-returning function calls, exceptions, etc.)
1140 -- in this case we can use the information whether the control induction
1141 variables can overflow or not in a more efficient way.
1143 The results (number of iterations and assumptions as described in
1144 comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
1145 Returns false if it fails to determine number of iterations, true if it
1146 was determined (possibly with some assumptions). */
1148 static bool
1149 number_of_iterations_cond (struct loop *loop,
1150 tree type, affine_iv *iv0, enum tree_code code,
1151 affine_iv *iv1, struct tree_niter_desc *niter,
1152 bool only_exit)
1154 bool never_infinite, ret;
1155 bounds bnds;
1157 /* The meaning of these assumptions is this:
1158 if !assumptions
1159 then the rest of information does not have to be valid
1160 if may_be_zero then the loop does not roll, even if
1161 niter != 0. */
1162 niter->assumptions = boolean_true_node;
1163 niter->may_be_zero = boolean_false_node;
1164 niter->niter = NULL_TREE;
1165 niter->max = double_int_zero;
1167 niter->bound = NULL_TREE;
1168 niter->cmp = ERROR_MARK;
1170 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1171 the control variable is on lhs. */
1172 if (code == GE_EXPR || code == GT_EXPR
1173 || (code == NE_EXPR && integer_zerop (iv0->step)))
1175 SWAP (iv0, iv1);
1176 code = swap_tree_comparison (code);
1179 if (!only_exit)
1181 /* If this is not the only possible exit from the loop, the information
1182 that the induction variables cannot overflow as derived from
1183 signedness analysis cannot be relied upon. We use them e.g. in the
1184 following way: given loop for (i = 0; i <= n; i++), if i is
1185 signed, it cannot overflow, thus this loop is equivalent to
1186 for (i = 0; i < n + 1; i++); however, if n == MAX, but the loop
1187 is exited in some other way before i overflows, this transformation
1188 is incorrect (the new loop exits immediately). */
1189 iv0->no_overflow = false;
1190 iv1->no_overflow = false;
1193 if (POINTER_TYPE_P (type))
1195 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1196 to the same object. If they do, the control variable cannot wrap
1197 (as wrap around the bounds of memory will never return a pointer
1198 that would be guaranteed to point to the same object, even if we
1199 avoid undefined behavior by casting to size_t and back). The
1200 restrictions on pointer arithmetics and comparisons of pointers
1201 ensure that using the no-overflow assumptions is correct in this
1202 case even if ONLY_EXIT is false. */
1203 iv0->no_overflow = true;
1204 iv1->no_overflow = true;
1207 /* If the control induction variable does not overflow, the loop obviously
1208 cannot be infinite. */
1209 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1210 never_infinite = true;
1211 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1212 never_infinite = true;
1213 else
1214 never_infinite = false;
1216 /* We can handle the case when neither of the sides of the comparison is
1217 invariant, provided that the test is NE_EXPR. This rarely occurs in
1218 practice, but it is simple enough to manage. */
1219 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1221 if (code != NE_EXPR)
1222 return false;
1224 iv0->step = fold_binary_to_constant (MINUS_EXPR, type,
1225 iv0->step, iv1->step);
1226 iv0->no_overflow = false;
1227 iv1->step = build_int_cst (type, 0);
1228 iv1->no_overflow = true;
1231 /* If the result of the comparison is a constant, the loop is weird. More
1232 precise handling would be possible, but the situation is not common enough
1233 to waste time on it. */
1234 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1235 return false;
1237 /* Ignore loops of while (i-- < 10) type. */
1238 if (code != NE_EXPR)
1240 if (iv0->step && tree_int_cst_sign_bit (iv0->step))
1241 return false;
1243 if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
1244 return false;
1247 /* If the loop exits immediately, there is nothing to do. */
1248 if (integer_zerop (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
1250 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1251 niter->max = double_int_zero;
1252 return true;
1255 /* OK, now we know we have a sensible loop. Handle several cases, depending
1256 on what comparison operator is used. */
1257 bound_difference (loop, iv1->base, iv0->base, &bnds);
1259 if (dump_file && (dump_flags & TDF_DETAILS))
1261 fprintf (dump_file,
1262 "Analyzing # of iterations of loop %d\n", loop->num);
1264 fprintf (dump_file, " exit condition ");
1265 dump_affine_iv (dump_file, iv0);
1266 fprintf (dump_file, " %s ",
1267 code == NE_EXPR ? "!="
1268 : code == LT_EXPR ? "<"
1269 : "<=");
1270 dump_affine_iv (dump_file, iv1);
1271 fprintf (dump_file, "\n");
1273 fprintf (dump_file, " bounds on difference of bases: ");
1274 mpz_out_str (dump_file, 10, bnds.below);
1275 fprintf (dump_file, " ... ");
1276 mpz_out_str (dump_file, 10, bnds.up);
1277 fprintf (dump_file, "\n");
1280 switch (code)
1282 case NE_EXPR:
1283 gcc_assert (integer_zerop (iv1->step));
1284 ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
1285 never_infinite, &bnds);
1286 break;
1288 case LT_EXPR:
1289 ret = number_of_iterations_lt (type, iv0, iv1, niter, never_infinite,
1290 &bnds);
1291 break;
1293 case LE_EXPR:
1294 ret = number_of_iterations_le (type, iv0, iv1, niter, never_infinite,
1295 &bnds);
1296 break;
1298 default:
1299 gcc_unreachable ();
1302 mpz_clear (bnds.up);
1303 mpz_clear (bnds.below);
1305 if (dump_file && (dump_flags & TDF_DETAILS))
1307 if (ret)
1309 fprintf (dump_file, " result:\n");
1310 if (!integer_nonzerop (niter->assumptions))
1312 fprintf (dump_file, " under assumptions ");
1313 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1314 fprintf (dump_file, "\n");
1317 if (!integer_zerop (niter->may_be_zero))
1319 fprintf (dump_file, " zero if ");
1320 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1321 fprintf (dump_file, "\n");
1324 fprintf (dump_file, " # of iterations ");
1325 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1326 fprintf (dump_file, ", bounded by ");
1327 dump_double_int (dump_file, niter->max, true);
1328 fprintf (dump_file, "\n");
1330 else
1331 fprintf (dump_file, " failed\n\n");
1333 return ret;
1338 /* Substitute NEW_TREE for OLD in EXPR and fold the result. */
1338 static tree
1339 simplify_replace_tree (tree expr, tree old, tree new_tree)
1341 unsigned i, n;
1342 tree ret = NULL_TREE, e, se;
1344 if (!expr)
1345 return NULL_TREE;
1347 if (expr == old
1348 || operand_equal_p (expr, old, 0))
1349 return unshare_expr (new_tree);
1351 if (!EXPR_P (expr) && !GIMPLE_STMT_P (expr))
1352 return expr;
1354 n = TREE_OPERAND_LENGTH (expr);
1355 for (i = 0; i < n; i++)
1357 e = TREE_OPERAND (expr, i);
1358 se = simplify_replace_tree (e, old, new_tree);
1359 if (e == se)
1360 continue;
1362 if (!ret)
1363 ret = copy_node (expr);
1365 TREE_OPERAND (ret, i) = se;
1368 return (ret ? fold (ret) : expr);
1371 /* Expand definitions of ssa names in EXPR as long as they are simple
1372 enough, and return the new expression. */
1374 tree
1375 expand_simple_operations (tree expr)
1377 unsigned i, n;
1378 tree ret = NULL_TREE, e, ee, stmt;
1379 enum tree_code code;
1381 if (expr == NULL_TREE)
1382 return expr;
1384 if (is_gimple_min_invariant (expr))
1385 return expr;
1387 code = TREE_CODE (expr);
1388 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1390 n = TREE_OPERAND_LENGTH (expr);
1391 for (i = 0; i < n; i++)
1393 e = TREE_OPERAND (expr, i);
1394 ee = expand_simple_operations (e);
1395 if (e == ee)
1396 continue;
1398 if (!ret)
1399 ret = copy_node (expr);
1401 TREE_OPERAND (ret, i) = ee;
1404 if (!ret)
1405 return expr;
1407 fold_defer_overflow_warnings ();
1408 ret = fold (ret);
1409 fold_undefer_and_ignore_overflow_warnings ();
1410 return ret;
1413 if (TREE_CODE (expr) != SSA_NAME)
1414 return expr;
1416 stmt = SSA_NAME_DEF_STMT (expr);
1417 if (TREE_CODE (stmt) == PHI_NODE)
1419 basic_block src, dest;
1421 if (PHI_NUM_ARGS (stmt) != 1)
1422 return expr;
1423 e = PHI_ARG_DEF (stmt, 0);
1425 /* Avoid propagating through loop exit phi nodes, which
1426 could break loop-closed SSA form restrictions. */
1427 dest = bb_for_stmt (stmt);
1428 src = single_pred (dest);
1429 if (TREE_CODE (e) == SSA_NAME
1430 && src->loop_father != dest->loop_father)
1431 return expr;
1433 return expand_simple_operations (e);
1435 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
1436 return expr;
1438 e = GIMPLE_STMT_OPERAND (stmt, 1);
1439 if (/* Casts are simple. */
1440 TREE_CODE (e) != NOP_EXPR
1441 && TREE_CODE (e) != CONVERT_EXPR
1442 /* Copies are simple. */
1443 && TREE_CODE (e) != SSA_NAME
1444 /* Assignments of invariants are simple. */
1445 && !is_gimple_min_invariant (e)
1446 /* And increments and decrements by a constant are simple. */
1447 && !((TREE_CODE (e) == PLUS_EXPR
1448 || TREE_CODE (e) == MINUS_EXPR
1449 || TREE_CODE (e) == POINTER_PLUS_EXPR)
1450 && is_gimple_min_invariant (TREE_OPERAND (e, 1))))
1451 return expr;
1453 return expand_simple_operations (e);
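/* Illustrative example (not from the original source): given the chain
   "j_2 = i_1 + 1;  k_3 = j_2;", expand_simple_operations (k_3) follows the
   copy and the constant increment and returns i_1 + 1, so that a guard
   written in terms of k_3 can be matched against expressions using i_1.  */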
1456 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1457 expression (or EXPR unchanged, if no simplification was possible). */
1459 static tree
1460 tree_simplify_using_condition_1 (tree cond, tree expr)
1462 bool changed;
1463 tree e, te, e0, e1, e2, notcond;
1464 enum tree_code code = TREE_CODE (expr);
1466 if (code == INTEGER_CST)
1467 return expr;
1469 if (code == TRUTH_OR_EXPR
1470 || code == TRUTH_AND_EXPR
1471 || code == COND_EXPR)
1473 changed = false;
1475 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
1476 if (TREE_OPERAND (expr, 0) != e0)
1477 changed = true;
1479 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
1480 if (TREE_OPERAND (expr, 1) != e1)
1481 changed = true;
1483 if (code == COND_EXPR)
1485 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
1486 if (TREE_OPERAND (expr, 2) != e2)
1487 changed = true;
1489 else
1490 e2 = NULL_TREE;
1492 if (changed)
1494 if (code == COND_EXPR)
1495 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1496 else
1497 expr = fold_build2 (code, boolean_type_node, e0, e1);
1500 return expr;
1503 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
1504 propagation, and vice versa. Fold does not handle this, since it is
1505 considered too expensive. */
1506 if (TREE_CODE (cond) == EQ_EXPR)
1508 e0 = TREE_OPERAND (cond, 0);
1509 e1 = TREE_OPERAND (cond, 1);
1511 /* We know that e0 == e1. Check whether we cannot simplify expr
1512 using this fact. */
1513 e = simplify_replace_tree (expr, e0, e1);
1514 if (integer_zerop (e) || integer_nonzerop (e))
1515 return e;
1517 e = simplify_replace_tree (expr, e1, e0);
1518 if (integer_zerop (e) || integer_nonzerop (e))
1519 return e;
1521 if (TREE_CODE (expr) == EQ_EXPR)
1523 e0 = TREE_OPERAND (expr, 0);
1524 e1 = TREE_OPERAND (expr, 1);
1526 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
1527 e = simplify_replace_tree (cond, e0, e1);
1528 if (integer_zerop (e))
1529 return e;
1530 e = simplify_replace_tree (cond, e1, e0);
1531 if (integer_zerop (e))
1532 return e;
1534 if (TREE_CODE (expr) == NE_EXPR)
1536 e0 = TREE_OPERAND (expr, 0);
1537 e1 = TREE_OPERAND (expr, 1);
1539 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
1540 e = simplify_replace_tree (cond, e0, e1);
1541 if (integer_zerop (e))
1542 return boolean_true_node;
1543 e = simplify_replace_tree (cond, e1, e0);
1544 if (integer_zerop (e))
1545 return boolean_true_node;
1548 te = expand_simple_operations (expr);
1550 /* Check whether COND ==> EXPR. */
1551 notcond = invert_truthvalue (cond);
1552 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
1553 if (e && integer_nonzerop (e))
1554 return e;
1556 /* Check whether COND ==> not EXPR. */
1557 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
1558 if (e && integer_zerop (e))
1559 return e;
1561 return expr;
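/* Worked example (added for illustration): with COND = (n_1 == 5) and
   EXPR = (n_1 > 0), the EQ_EXPR branch substitutes 5 for n_1, the result
   folds to a nonzero constant, and EXPR is simplified to true.  */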
1564 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1565 expression (or EXPR unchanged, if no simplification was possible).
1566 Wrapper around tree_simplify_using_condition_1 that ensures that chains
1567 of simple operations in definitions of ssa names in COND are expanded,
1568 so that things like casts or incrementing the value of the bound before
1569 the loop do not cause us to fail. */
1571 static tree
1572 tree_simplify_using_condition (tree cond, tree expr)
1574 cond = expand_simple_operations (cond);
1576 return tree_simplify_using_condition_1 (cond, expr);
1579 /* Tries to simplify EXPR using the conditions on entry to LOOP.
1580 Returns the simplified expression (or EXPR unchanged, if no
1581 simplification was possible).*/
1583 static tree
1584 simplify_using_initial_conditions (struct loop *loop, tree expr)
1586 edge e;
1587 basic_block bb;
1588 tree cond;
1589 int cnt = 0;
1591 if (TREE_CODE (expr) == INTEGER_CST)
1592 return expr;
1594 /* Limit walking the dominators to avoid quadraticness in
1595 the number of BBs times the number of loops in degenerate
1596 cases. */
1597 for (bb = loop->header;
1598 bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
1599 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
1601 if (!single_pred_p (bb))
1602 continue;
1603 e = single_pred_edge (bb);
1605 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
1606 continue;
1608 cond = COND_EXPR_COND (last_stmt (e->src));
1609 if (e->flags & EDGE_FALSE_VALUE)
1610 cond = invert_truthvalue (cond);
1611 expr = tree_simplify_using_condition (cond, expr);
1612 ++cnt;
1615 return expr;
1618 /* Tries to simplify EXPR using the evolutions of the loop invariants
1619 in the superloops of LOOP. Returns the simplified expression
1620 (or EXPR unchanged, if no simplification was possible). */
1622 static tree
1623 simplify_using_outer_evolutions (struct loop *loop, tree expr)
1625 enum tree_code code = TREE_CODE (expr);
1626 bool changed;
1627 tree e, e0, e1, e2;
1629 if (is_gimple_min_invariant (expr))
1630 return expr;
1632 if (code == TRUTH_OR_EXPR
1633 || code == TRUTH_AND_EXPR
1634 || code == COND_EXPR)
1636 changed = false;
1638 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
1639 if (TREE_OPERAND (expr, 0) != e0)
1640 changed = true;
1642 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
1643 if (TREE_OPERAND (expr, 1) != e1)
1644 changed = true;
1646 if (code == COND_EXPR)
1648 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
1649 if (TREE_OPERAND (expr, 2) != e2)
1650 changed = true;
1652 else
1653 e2 = NULL_TREE;
1655 if (changed)
1657 if (code == COND_EXPR)
1658 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1659 else
1660 expr = fold_build2 (code, boolean_type_node, e0, e1);
1663 return expr;
1666 e = instantiate_parameters (loop, expr);
1667 if (is_gimple_min_invariant (e))
1668 return e;
1670 return expr;
1673 /* Returns true if EXIT is the only possible exit from LOOP. */
1675 static bool
1676 loop_only_exit_p (const struct loop *loop, const_edge exit)
1678 basic_block *body;
1679 block_stmt_iterator bsi;
1680 unsigned i;
1681 tree call;
1683 if (exit != single_exit (loop))
1684 return false;
1686 body = get_loop_body (loop);
1687 for (i = 0; i < loop->num_nodes; i++)
1689 for (bsi = bsi_start (body[i]); !bsi_end_p (bsi); bsi_next (&bsi))
1691 call = get_call_expr_in (bsi_stmt (bsi));
1692 if (call && TREE_SIDE_EFFECTS (call))
1694 free (body);
1695 return false;
1700 free (body);
1701 return true;
1704 /* Stores description of number of iterations of LOOP derived from
1705 EXIT (an exit edge of the LOOP) in NITER. Returns true if some
1706 useful information could be derived (and fields of NITER has
1707 meaning described in comments at struct tree_niter_desc
1708 declaration), false otherwise. If WARN is true and
1709 -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
1710 potentially unsafe assumptions. */
1712 bool
1713 number_of_iterations_exit (struct loop *loop, edge exit,
1714 struct tree_niter_desc *niter,
1715 bool warn)
1717 tree stmt, cond, type;
1718 tree op0, op1;
1719 enum tree_code code;
1720 affine_iv iv0, iv1;
1722 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
1723 return false;
1725 niter->assumptions = boolean_false_node;
1726 stmt = last_stmt (exit->src);
1727 if (!stmt || TREE_CODE (stmt) != COND_EXPR)
1728 return false;
1730 /* We want the condition for staying inside loop. */
1731 cond = COND_EXPR_COND (stmt);
1732 if (exit->flags & EDGE_TRUE_VALUE)
1733 cond = invert_truthvalue (cond);
1735 code = TREE_CODE (cond);
1736 switch (code)
1738 case GT_EXPR:
1739 case GE_EXPR:
1740 case NE_EXPR:
1741 case LT_EXPR:
1742 case LE_EXPR:
1743 break;
1745 default:
1746 return false;
1749 op0 = TREE_OPERAND (cond, 0);
1750 op1 = TREE_OPERAND (cond, 1);
1751 type = TREE_TYPE (op0);
1753 if (TREE_CODE (type) != INTEGER_TYPE
1754 && !POINTER_TYPE_P (type))
1755 return false;
1757 if (!simple_iv (loop, stmt, op0, &iv0, false))
1758 return false;
1759 if (!simple_iv (loop, stmt, op1, &iv1, false))
1760 return false;
1762 /* We don't want to see undefined signed overflow warnings while
1763 computing the number of iterations. */
1764 fold_defer_overflow_warnings ();
1766 iv0.base = expand_simple_operations (iv0.base);
1767 iv1.base = expand_simple_operations (iv1.base);
1768 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
1769 loop_only_exit_p (loop, exit)))
1771 fold_undefer_and_ignore_overflow_warnings ();
1772 return false;
1775 if (optimize >= 3)
1777 niter->assumptions = simplify_using_outer_evolutions (loop,
1778 niter->assumptions);
1779 niter->may_be_zero = simplify_using_outer_evolutions (loop,
1780 niter->may_be_zero);
1781 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
1784 niter->assumptions
1785 = simplify_using_initial_conditions (loop,
1786 niter->assumptions);
1787 niter->may_be_zero
1788 = simplify_using_initial_conditions (loop,
1789 niter->may_be_zero);
1791 fold_undefer_and_ignore_overflow_warnings ();
1793 if (integer_onep (niter->assumptions))
1794 return true;
1796 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
1797 But if we can prove that there is overflow or some other source of weird
1798 behavior, ignore the loop even with -funsafe-loop-optimizations. */
1799 if (integer_zerop (niter->assumptions))
1800 return false;
1802 if (flag_unsafe_loop_optimizations)
1803 niter->assumptions = boolean_true_node;
1805 if (warn)
1807 const char *wording;
1808 location_t loc = EXPR_LOCATION (stmt);
1810 /* We can provide a more specific warning if one of the operator is
1811 constant and the other advances by +1 or -1. */
1812 if (!integer_zerop (iv1.step)
1813 ? (integer_zerop (iv0.step)
1814 && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
1815 : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
1816 wording =
1817 flag_unsafe_loop_optimizations
1818 ? N_("assuming that the loop is not infinite")
1819 : N_("cannot optimize possibly infinite loops");
1820 else
1821 wording =
1822 flag_unsafe_loop_optimizations
1823 ? N_("assuming that the loop counter does not overflow")
1824 : N_("cannot optimize loop, the loop counter may overflow");
1826 if (LOCATION_LINE (loc) > 0)
1827 warning (OPT_Wunsafe_loop_optimizations, "%H%s", &loc, gettext (wording));
1828 else
1829 warning (OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
1832 return flag_unsafe_loop_optimizations;
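/* Sketch of a typical caller (illustration only, mirroring find_loop_niter
   below):

     struct tree_niter_desc desc;
     if (number_of_iterations_exit (loop, ex, &desc, false)
         && integer_zerop (desc.may_be_zero))
       ... use desc.niter, also bounded by desc.max ...  */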
1835 /* Try to determine the number of iterations of LOOP. If we succeed,
1836 expression giving number of iterations is returned and *EXIT is
1837 set to the edge from that the information is obtained. Otherwise
1838 chrec_dont_know is returned. */
1840 tree
1841 find_loop_niter (struct loop *loop, edge *exit)
1843 unsigned i;
1844 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1845 edge ex;
1846 tree niter = NULL_TREE, aniter;
1847 struct tree_niter_desc desc;
1849 *exit = NULL;
1850 for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
1852 if (!just_once_each_iteration_p (loop, ex->src))
1853 continue;
1855 if (!number_of_iterations_exit (loop, ex, &desc, false))
1856 continue;
1858 if (integer_nonzerop (desc.may_be_zero))
1860 /* We exit in the first iteration through this exit.
1861 We won't find anything better. */
1862 niter = build_int_cst (unsigned_type_node, 0);
1863 *exit = ex;
1864 break;
1867 if (!integer_zerop (desc.may_be_zero))
1868 continue;
1870 aniter = desc.niter;
1872 if (!niter)
1874 /* Nothing recorded yet. */
1875 niter = aniter;
1876 *exit = ex;
1877 continue;
1880 /* Prefer constants, the lower the better. */
1881 if (TREE_CODE (aniter) != INTEGER_CST)
1882 continue;
1884 if (TREE_CODE (niter) != INTEGER_CST)
1886 niter = aniter;
1887 *exit = ex;
1888 continue;
1891 if (tree_int_cst_lt (aniter, niter))
1893 niter = aniter;
1894 *exit = ex;
1895 continue;
1898 VEC_free (edge, heap, exits);
1900 return niter ? niter : chrec_dont_know;
1905 /* Analysis of a number of iterations of a loop by a brute-force evaluation.  */
1909 /* Bound on the number of iterations we try to evaluate. */
1911 #define MAX_ITERATIONS_TO_TRACK \
1912 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
1914 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
1915 result by a chain of operations such that all but exactly one of their
1916 operands are constants. */
1918 static tree
1919 chain_of_csts_start (struct loop *loop, tree x)
1921 tree stmt = SSA_NAME_DEF_STMT (x);
1922 tree use;
1923 basic_block bb = bb_for_stmt (stmt);
1925 if (!bb
1926 || !flow_bb_inside_loop_p (loop, bb))
1927 return NULL_TREE;
1929 if (TREE_CODE (stmt) == PHI_NODE)
1931 if (bb == loop->header)
1932 return stmt;
1934 return NULL_TREE;
1937 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
1938 return NULL_TREE;
1940 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
1941 return NULL_TREE;
1942 if (SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF) == NULL_DEF_OPERAND_P)
1943 return NULL_TREE;
1945 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1946 if (use == NULL_USE_OPERAND_P)
1947 return NULL_TREE;
1949 return chain_of_csts_start (loop, use);
1952 /* Determines whether the expression X is derived from a result of a phi node
1953 in header of LOOP such that
1955 * the derivation of X consists only from operations with constants
1956 * the initial value of the phi node is constant
1957 * the value of the phi node in the next iteration can be derived from the
1958 value in the current iteration by a chain of operations with constants.
1960 If such phi node exists, it is returned. If X is a constant, X is returned
1961 unchanged. Otherwise NULL_TREE is returned. */
1963 static tree
1964 get_base_for (struct loop *loop, tree x)
1966 tree phi, init, next;
1968 if (is_gimple_min_invariant (x))
1969 return x;
1971 phi = chain_of_csts_start (loop, x);
1972 if (!phi)
1973 return NULL_TREE;
1975 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
1976 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
1978 if (TREE_CODE (next) != SSA_NAME)
1979 return NULL_TREE;
1981 if (!is_gimple_min_invariant (init))
1982 return NULL_TREE;
1984 if (chain_of_csts_start (loop, next) != phi)
1985 return NULL_TREE;
1987 return phi;
1990 /* Given an expression X, then
1992 * if X is NULL_TREE, we return the constant BASE.
1993 * otherwise X is a SSA name, whose value in the considered loop is derived
1994 by a chain of operations with constant from a result of a phi node in
1995 the header of the loop. Then we return value of X when the value of the
1996 result of this phi node is given by the constant BASE. */
1998 static tree
1999 get_val_for (tree x, tree base)
2001 tree stmt, nx, val;
2002 use_operand_p op;
2003 ssa_op_iter iter;
2005 gcc_assert (is_gimple_min_invariant (base));
2007 if (!x)
2008 return base;
2010 stmt = SSA_NAME_DEF_STMT (x);
2011 if (TREE_CODE (stmt) == PHI_NODE)
2012 return base;
2014 FOR_EACH_SSA_USE_OPERAND (op, stmt, iter, SSA_OP_USE)
2016 nx = USE_FROM_PTR (op);
2017 val = get_val_for (nx, base);
2018 SET_USE (op, val);
2019 val = fold (GIMPLE_STMT_OPERAND (stmt, 1));
2020 SET_USE (op, nx);
2021 /* only iterate loop once. */
2022 return val;
2025 /* Should never reach here. */
2026 gcc_unreachable ();
2029 /* Tries to count the number of iterations of LOOP till it exits by EXIT
2030 by brute force -- i.e. by determining the value of the operands of the
2031 condition at EXIT in first few iterations of the loop (assuming that
2032 these values are constant) and determining the first one in that the
2033 condition is not satisfied. Returns the constant giving the number
2034 of the iterations of LOOP if successful, chrec_dont_know otherwise. */
2036 tree
2037 loop_niter_by_eval (struct loop *loop, edge exit)
2039 tree cond, cnd, acnd;
2040 tree op[2], val[2], next[2], aval[2], phi[2];
2041 unsigned i, j;
2042 enum tree_code cmp;
2044 cond = last_stmt (exit->src);
2045 if (!cond || TREE_CODE (cond) != COND_EXPR)
2046 return chrec_dont_know;
2048 cnd = COND_EXPR_COND (cond);
2049 if (exit->flags & EDGE_TRUE_VALUE)
2050 cnd = invert_truthvalue (cnd);
2052 cmp = TREE_CODE (cnd);
2053 switch (cmp)
2055 case EQ_EXPR:
2056 case NE_EXPR:
2057 case GT_EXPR:
2058 case GE_EXPR:
2059 case LT_EXPR:
2060 case LE_EXPR:
2061 for (j = 0; j < 2; j++)
2062 op[j] = TREE_OPERAND (cnd, j);
2063 break;
2065 default:
2066 return chrec_dont_know;
2069 for (j = 0; j < 2; j++)
2071 phi[j] = get_base_for (loop, op[j]);
2072 if (!phi[j])
2073 return chrec_dont_know;
2076 for (j = 0; j < 2; j++)
2078 if (TREE_CODE (phi[j]) == PHI_NODE)
2080 val[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_preheader_edge (loop));
2081 next[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_latch_edge (loop));
2083 else
2085 val[j] = phi[j];
2086 next[j] = NULL_TREE;
2087 op[j] = NULL_TREE;
2091 /* Don't issue signed overflow warnings. */
2092 fold_defer_overflow_warnings ();
2094 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2096 for (j = 0; j < 2; j++)
2097 aval[j] = get_val_for (op[j], val[j]);
2099 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2100 if (acnd && integer_zerop (acnd))
2102 fold_undefer_and_ignore_overflow_warnings ();
2103 if (dump_file && (dump_flags & TDF_DETAILS))
2104 fprintf (dump_file,
2105 "Proved that loop %d iterates %d times using brute force.\n",
2106 loop->num, i);
2107 return build_int_cst (unsigned_type_node, i);
2110 for (j = 0; j < 2; j++)
2112 val[j] = get_val_for (next[j], val[j]);
2113 if (!is_gimple_min_invariant (val[j]))
2115 fold_undefer_and_ignore_overflow_warnings ();
2116 return chrec_dont_know;
2121 fold_undefer_and_ignore_overflow_warnings ();
2123 return chrec_dont_know;
2126 /* Finds the exit of the LOOP by that the loop exits after a constant
2127 number of iterations and stores the exit edge to *EXIT. The constant
2128 giving the number of iterations of LOOP is returned. The number of
2129 iterations is determined using loop_niter_by_eval (i.e. by brute force
2130 evaluation). If we are unable to find the exit for that loop_niter_by_eval
2131 determines the number of iterations, chrec_dont_know is returned. */
2133 tree
2134 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2136 unsigned i;
2137 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
2138 edge ex;
2139 tree niter = NULL_TREE, aniter;
2141 *exit = NULL;
2142 for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
2144 if (!just_once_each_iteration_p (loop, ex->src))
2145 continue;
2147 aniter = loop_niter_by_eval (loop, ex);
2148 if (chrec_contains_undetermined (aniter))
2149 continue;
2151 if (niter
2152 && !tree_int_cst_lt (aniter, niter))
2153 continue;
2155 niter = aniter;
2156 *exit = ex;
2158 VEC_free (edge, heap, exits);
2160 return niter ? niter : chrec_dont_know;
2165 Analysis of upper bounds on number of iterations of a loop.
2169 /* Returns a constant upper bound on the value of expression VAL. VAL
2170 is considered to be unsigned. If its type is signed, its value must
2171 be nonnegative. */
2173 static double_int
2174 derive_constant_upper_bound (const_tree val)
2176 tree type = TREE_TYPE (val);
2177 tree op0, op1, subtype, maxt;
2178 double_int bnd, max, mmax, cst;
2179 tree stmt;
2181 if (INTEGRAL_TYPE_P (type))
2182 maxt = TYPE_MAX_VALUE (type);
2183 else
2184 maxt = upper_bound_in_type (type, type);
2186 max = tree_to_double_int (maxt);
2188 switch (TREE_CODE (val))
2190 case INTEGER_CST:
2191 return tree_to_double_int (val);
2193 case NOP_EXPR:
2194 case CONVERT_EXPR:
2195 op0 = TREE_OPERAND (val, 0);
2196 subtype = TREE_TYPE (op0);
2197 if (!TYPE_UNSIGNED (subtype)
2198 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2199 that OP0 is nonnegative. */
2200 && TYPE_UNSIGNED (type)
2201 && !tree_expr_nonnegative_p (op0))
2203 /* If we cannot prove that the casted expression is nonnegative,
2204 we cannot establish more useful upper bound than the precision
2205 of the type gives us. */
2206 return max;
2209 /* We now know that op0 is an nonnegative value. Try deriving an upper
2210 bound for it. */
2211 bnd = derive_constant_upper_bound (op0);
2213 /* If the bound does not fit in TYPE, max. value of TYPE could be
2214 attained. */
2215 if (double_int_ucmp (max, bnd) < 0)
2216 return max;
2218 return bnd;
2220 case PLUS_EXPR:
2221 case POINTER_PLUS_EXPR:
2222 case MINUS_EXPR:
2223 op0 = TREE_OPERAND (val, 0);
2224 op1 = TREE_OPERAND (val, 1);
2226 if (TREE_CODE (op1) != INTEGER_CST
2227 || !tree_expr_nonnegative_p (op0))
2228 return max;
2230 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
2231 choose the most logical way how to treat this constant regardless
2232 of the signedness of the type. */
2233 cst = tree_to_double_int (op1);
2234 cst = double_int_sext (cst, TYPE_PRECISION (type));
2235 if (TREE_CODE (val) == PLUS_EXPR)
2236 cst = double_int_neg (cst);
2238 bnd = derive_constant_upper_bound (op0);
2240 if (double_int_negative_p (cst))
2242 cst = double_int_neg (cst);
2243 /* Avoid CST == 0x80000... */
2244 if (double_int_negative_p (cst))
2245 return max;;
2247 /* OP0 + CST. We need to check that
2248 BND <= MAX (type) - CST. */
2250 mmax = double_int_add (max, double_int_neg (cst));
2251 if (double_int_ucmp (bnd, mmax) > 0)
2252 return max;
2254 return double_int_add (bnd, cst);
2256 else
2258 /* OP0 - CST, where CST >= 0.
2260 If TYPE is signed, we have already verified that OP0 >= 0, and we
2261 know that the result is nonnegative. This implies that
2262 VAL <= BND - CST.
2264 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2265 otherwise the operation underflows.
2268 /* This should only happen if the type is unsigned; however, for
2269 buggy programs that use overflowing signed arithmetics even with
2270 -fno-wrapv, this condition may also be true for signed values. */
2271 if (double_int_ucmp (bnd, cst) < 0)
2272 return max;
2274 if (TYPE_UNSIGNED (type))
2276 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2277 double_int_to_tree (type, cst));
2278 if (!tem || integer_nonzerop (tem))
2279 return max;
2282 bnd = double_int_add (bnd, double_int_neg (cst));
2285 return bnd;
2287 case FLOOR_DIV_EXPR:
2288 case EXACT_DIV_EXPR:
2289 op0 = TREE_OPERAND (val, 0);
2290 op1 = TREE_OPERAND (val, 1);
2291 if (TREE_CODE (op1) != INTEGER_CST
2292 || tree_int_cst_sign_bit (op1))
2293 return max;
2295 bnd = derive_constant_upper_bound (op0);
2296 return double_int_udiv (bnd, tree_to_double_int (op1), FLOOR_DIV_EXPR);
2298 case BIT_AND_EXPR:
2299 op1 = TREE_OPERAND (val, 1);
2300 if (TREE_CODE (op1) != INTEGER_CST
2301 || tree_int_cst_sign_bit (op1))
2302 return max;
2303 return tree_to_double_int (op1);
2305 case SSA_NAME:
2306 stmt = SSA_NAME_DEF_STMT (val);
2307 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
2308 || GIMPLE_STMT_OPERAND (stmt, 0) != val)
2309 return max;
2310 return derive_constant_upper_bound (GIMPLE_STMT_OPERAND (stmt, 1));
2312 default:
2313 return max;
2317 /* Records that every statement in LOOP is executed I_BOUND times.
2318 REALISTIC is true if I_BOUND is expected to be close the the real number
2319 of iterations. UPPER is true if we are sure the loop iterates at most
2320 I_BOUND times. */
2322 static void
2323 record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
2324 bool upper)
2326 /* Update the bounds only when there is no previous estimation, or when the current
2327 estimation is smaller. */
2328 if (upper
2329 && (!loop->any_upper_bound
2330 || double_int_ucmp (i_bound, loop->nb_iterations_upper_bound) < 0))
2332 loop->any_upper_bound = true;
2333 loop->nb_iterations_upper_bound = i_bound;
2335 if (realistic
2336 && (!loop->any_estimate
2337 || double_int_ucmp (i_bound, loop->nb_iterations_estimate) < 0))
2339 loop->any_estimate = true;
2340 loop->nb_iterations_estimate = i_bound;
2344 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
2345 is true if the loop is exited immediately after STMT, and this exit
2346 is taken at last when the STMT is executed BOUND + 1 times.
2347 REALISTIC is true if BOUND is expected to be close the the real number
2348 of iterations. UPPER is true if we are sure the loop iterates at most
2349 BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */
2351 static void
2352 record_estimate (struct loop *loop, tree bound, double_int i_bound,
2353 tree at_stmt, bool is_exit, bool realistic, bool upper)
2355 double_int delta;
2356 edge exit;
2358 if (dump_file && (dump_flags & TDF_DETAILS))
2360 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2361 print_generic_expr (dump_file, at_stmt, TDF_SLIM);
2362 fprintf (dump_file, " is %sexecuted at most ",
2363 upper ? "" : "probably ");
2364 print_generic_expr (dump_file, bound, TDF_SLIM);
2365 fprintf (dump_file, " (bounded by ");
2366 dump_double_int (dump_file, i_bound, true);
2367 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2370 /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
2371 real number of iterations. */
2372 if (TREE_CODE (bound) != INTEGER_CST)
2373 realistic = false;
2374 if (!upper && !realistic)
2375 return;
2377 /* If we have a guaranteed upper bound, record it in the appropriate
2378 list. */
2379 if (upper)
2381 struct nb_iter_bound *elt = GGC_NEW (struct nb_iter_bound);
2383 elt->bound = i_bound;
2384 elt->stmt = at_stmt;
2385 elt->is_exit = is_exit;
2386 elt->next = loop->bounds;
2387 loop->bounds = elt;
2390 /* Update the number of iteration estimates according to the bound.
2391 If at_stmt is an exit, then every statement in the loop is
2392 executed at most BOUND + 1 times. If it is not an exit, then
2393 some of the statements before it could be executed BOUND + 2
2394 times, if an exit of LOOP is before stmt. */
2395 exit = single_exit (loop);
2396 if (is_exit
2397 || (exit != NULL
2398 && dominated_by_p (CDI_DOMINATORS,
2399 exit->src, bb_for_stmt (at_stmt))))
2400 delta = double_int_one;
2401 else
2402 delta = double_int_two;
2403 i_bound = double_int_add (i_bound, delta);
2405 /* If an overflow occurred, ignore the result. */
2406 if (double_int_ucmp (i_bound, delta) < 0)
2407 return;
2409 record_niter_bound (loop, i_bound, realistic, upper);
2412 /* Record the estimate on number of iterations of LOOP based on the fact that
2413 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
2414 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
2415 estimated number of iterations is expected to be close to the real one.
2416 UPPER is true if we are sure the induction variable does not wrap. */
2418 static void
2419 record_nonwrapping_iv (struct loop *loop, tree base, tree step, tree stmt,
2420 tree low, tree high, bool realistic, bool upper)
2422 tree niter_bound, extreme, delta;
2423 tree type = TREE_TYPE (base), unsigned_type;
2424 double_int max;
2426 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
2427 return;
2429 if (dump_file && (dump_flags & TDF_DETAILS))
2431 fprintf (dump_file, "Induction variable (");
2432 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
2433 fprintf (dump_file, ") ");
2434 print_generic_expr (dump_file, base, TDF_SLIM);
2435 fprintf (dump_file, " + ");
2436 print_generic_expr (dump_file, step, TDF_SLIM);
2437 fprintf (dump_file, " * iteration does not wrap in statement ");
2438 print_generic_expr (dump_file, stmt, TDF_SLIM);
2439 fprintf (dump_file, " in loop %d.\n", loop->num);
2442 unsigned_type = unsigned_type_for (type);
2443 base = fold_convert (unsigned_type, base);
2444 step = fold_convert (unsigned_type, step);
2446 if (tree_int_cst_sign_bit (step))
2448 extreme = fold_convert (unsigned_type, low);
2449 if (TREE_CODE (base) != INTEGER_CST)
2450 base = fold_convert (unsigned_type, high);
2451 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
2452 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
2454 else
2456 extreme = fold_convert (unsigned_type, high);
2457 if (TREE_CODE (base) != INTEGER_CST)
2458 base = fold_convert (unsigned_type, low);
2459 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
2462 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
2463 would get out of the range. */
2464 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
2465 max = derive_constant_upper_bound (niter_bound);
2466 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
2469 /* Returns true if REF is a reference to an array at the end of a dynamically
2470 allocated structure. If this is the case, the array may be allocated larger
2471 than its upper bound implies. */
2473 static bool
2474 array_at_struct_end_p (tree ref)
2476 tree base = get_base_address (ref);
2477 tree parent, field;
2479 /* Unless the reference is through a pointer, the size of the array matches
2480 its declaration. */
2481 if (!base || !INDIRECT_REF_P (base))
2482 return false;
2484 for (;handled_component_p (ref); ref = parent)
2486 parent = TREE_OPERAND (ref, 0);
2488 if (TREE_CODE (ref) == COMPONENT_REF)
2490 /* All fields of a union are at its end. */
2491 if (TREE_CODE (TREE_TYPE (parent)) == UNION_TYPE)
2492 continue;
2494 /* Unless the field is at the end of the struct, we are done. */
2495 field = TREE_OPERAND (ref, 1);
2496 if (TREE_CHAIN (field))
2497 return false;
2500 /* The other options are ARRAY_REF, ARRAY_RANGE_REF, VIEW_CONVERT_EXPR.
2501 In all these cases, we might be accessing the last element, and
2502 although in practice this will probably never happen, it is legal for
2503 the indices of this last element to exceed the bounds of the array.
2504 Therefore, continue checking. */
2507 gcc_assert (INDIRECT_REF_P (ref));
2508 return true;
2511 /* Determine information about number of iterations a LOOP from the index
2512 IDX of a data reference accessed in STMT. RELIABLE is true if STMT is
2513 guaranteed to be executed in every iteration of LOOP. Callback for
2514 for_each_index. */
2516 struct ilb_data
2518 struct loop *loop;
2519 tree stmt;
2520 bool reliable;
2523 static bool
2524 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
2526 struct ilb_data *data = (struct ilb_data *) dta;
2527 tree ev, init, step;
2528 tree low, high, type, next;
2529 bool sign, upper = data->reliable, at_end = false;
2530 struct loop *loop = data->loop;
2532 if (TREE_CODE (base) != ARRAY_REF)
2533 return true;
2535 /* For arrays at the end of the structure, we are not guaranteed that they
2536 do not really extend over their declared size. However, for arrays of
2537 size greater than one, this is unlikely to be intended. */
2538 if (array_at_struct_end_p (base))
2540 at_end = true;
2541 upper = false;
2544 ev = instantiate_parameters (loop, analyze_scalar_evolution (loop, *idx));
2545 init = initial_condition (ev);
2546 step = evolution_part_in_loop_num (ev, loop->num);
2548 if (!init
2549 || !step
2550 || TREE_CODE (step) != INTEGER_CST
2551 || integer_zerop (step)
2552 || tree_contains_chrecs (init, NULL)
2553 || chrec_contains_symbols_defined_in_loop (init, loop->num))
2554 return true;
2556 low = array_ref_low_bound (base);
2557 high = array_ref_up_bound (base);
2559 /* The case of nonconstant bounds could be handled, but it would be
2560 complicated. */
2561 if (TREE_CODE (low) != INTEGER_CST
2562 || !high
2563 || TREE_CODE (high) != INTEGER_CST)
2564 return true;
2565 sign = tree_int_cst_sign_bit (step);
2566 type = TREE_TYPE (step);
2568 /* The array of length 1 at the end of a structure most likely extends
2569 beyond its bounds. */
2570 if (at_end
2571 && operand_equal_p (low, high, 0))
2572 return true;
2574 /* In case the relevant bound of the array does not fit in type, or
2575 it does, but bound + step (in type) still belongs into the range of the
2576 array, the index may wrap and still stay within the range of the array
2577 (consider e.g. if the array is indexed by the full range of
2578 unsigned char).
2580 To make things simpler, we require both bounds to fit into type, although
2581 there are cases where this would not be strictly necessary. */
2582 if (!int_fits_type_p (high, type)
2583 || !int_fits_type_p (low, type))
2584 return true;
2585 low = fold_convert (type, low);
2586 high = fold_convert (type, high);
2588 if (sign)
2589 next = fold_binary (PLUS_EXPR, type, low, step);
2590 else
2591 next = fold_binary (PLUS_EXPR, type, high, step);
2593 if (tree_int_cst_compare (low, next) <= 0
2594 && tree_int_cst_compare (next, high) <= 0)
2595 return true;
2597 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, true, upper);
2598 return true;
2601 /* Determine information about number of iterations a LOOP from the bounds
2602 of arrays in the data reference REF accessed in STMT. RELIABLE is true if
2603 STMT is guaranteed to be executed in every iteration of LOOP.*/
2605 static void
2606 infer_loop_bounds_from_ref (struct loop *loop, tree stmt, tree ref,
2607 bool reliable)
2609 struct ilb_data data;
2611 data.loop = loop;
2612 data.stmt = stmt;
2613 data.reliable = reliable;
2614 for_each_index (&ref, idx_infer_loop_bounds, &data);
2617 /* Determine information about number of iterations of a LOOP from the way
2618 arrays are used in STMT. RELIABLE is true if STMT is guaranteed to be
2619 executed in every iteration of LOOP. */
2621 static void
2622 infer_loop_bounds_from_array (struct loop *loop, tree stmt, bool reliable)
2624 tree call;
2626 if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
2628 tree op0 = GIMPLE_STMT_OPERAND (stmt, 0);
2629 tree op1 = GIMPLE_STMT_OPERAND (stmt, 1);
2631 /* For each memory access, analyze its access function
2632 and record a bound on the loop iteration domain. */
2633 if (REFERENCE_CLASS_P (op0))
2634 infer_loop_bounds_from_ref (loop, stmt, op0, reliable);
2636 if (REFERENCE_CLASS_P (op1))
2637 infer_loop_bounds_from_ref (loop, stmt, op1, reliable);
2641 call = get_call_expr_in (stmt);
2642 if (call)
2644 tree arg;
2645 call_expr_arg_iterator iter;
2647 FOR_EACH_CALL_EXPR_ARG (arg, iter, call)
2648 if (REFERENCE_CLASS_P (arg))
2649 infer_loop_bounds_from_ref (loop, stmt, arg, reliable);
2653 /* Determine information about number of iterations of a LOOP from the fact
2654 that signed arithmetics in STMT does not overflow. */
2656 static void
2657 infer_loop_bounds_from_signedness (struct loop *loop, tree stmt)
2659 tree def, base, step, scev, type, low, high;
2661 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
2662 return;
2664 def = GIMPLE_STMT_OPERAND (stmt, 0);
2666 if (TREE_CODE (def) != SSA_NAME)
2667 return;
2669 type = TREE_TYPE (def);
2670 if (!INTEGRAL_TYPE_P (type)
2671 || !TYPE_OVERFLOW_UNDEFINED (type))
2672 return;
2674 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2675 if (chrec_contains_undetermined (scev))
2676 return;
2678 base = initial_condition_in_loop_num (scev, loop->num);
2679 step = evolution_part_in_loop_num (scev, loop->num);
2681 if (!base || !step
2682 || TREE_CODE (step) != INTEGER_CST
2683 || tree_contains_chrecs (base, NULL)
2684 || chrec_contains_symbols_defined_in_loop (base, loop->num))
2685 return;
2687 low = lower_bound_in_type (type, type);
2688 high = upper_bound_in_type (type, type);
2690 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
2693 /* The following analyzers are extracting informations on the bounds
2694 of LOOP from the following undefined behaviors:
2696 - data references should not access elements over the statically
2697 allocated size,
2699 - signed variables should not overflow when flag_wrapv is not set.
2702 static void
2703 infer_loop_bounds_from_undefined (struct loop *loop)
2705 unsigned i;
2706 basic_block *bbs;
2707 block_stmt_iterator bsi;
2708 basic_block bb;
2709 bool reliable;
2711 bbs = get_loop_body (loop);
2713 for (i = 0; i < loop->num_nodes; i++)
2715 bb = bbs[i];
2717 /* If BB is not executed in each iteration of the loop, we cannot
2718 use the operations in it to infer reliable upper bound on the
2719 # of iterations of the loop. However, we can use it as a guess. */
2720 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
2722 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
2724 tree stmt = bsi_stmt (bsi);
2726 infer_loop_bounds_from_array (loop, stmt, reliable);
2728 if (reliable)
2729 infer_loop_bounds_from_signedness (loop, stmt);
2734 free (bbs);
2737 /* Converts VAL to double_int. */
2739 static double_int
2740 gcov_type_to_double_int (gcov_type val)
2742 double_int ret;
2744 ret.low = (unsigned HOST_WIDE_INT) val;
2745 /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
2746 the size of type. */
2747 val >>= HOST_BITS_PER_WIDE_INT - 1;
2748 val >>= 1;
2749 ret.high = (unsigned HOST_WIDE_INT) val;
2751 return ret;
2754 /* Records estimates on numbers of iterations of LOOP. */
2756 void
2757 estimate_numbers_of_iterations_loop (struct loop *loop)
2759 VEC (edge, heap) *exits;
2760 tree niter, type;
2761 unsigned i;
2762 struct tree_niter_desc niter_desc;
2763 edge ex;
2764 double_int bound;
2766 /* Give up if we already have tried to compute an estimation. */
2767 if (loop->estimate_state != EST_NOT_COMPUTED)
2768 return;
2769 loop->estimate_state = EST_AVAILABLE;
2770 loop->any_upper_bound = false;
2771 loop->any_estimate = false;
2773 exits = get_loop_exit_edges (loop);
2774 for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
2776 if (!number_of_iterations_exit (loop, ex, &niter_desc, false))
2777 continue;
2779 niter = niter_desc.niter;
2780 type = TREE_TYPE (niter);
2781 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
2782 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
2783 build_int_cst (type, 0),
2784 niter);
2785 record_estimate (loop, niter, niter_desc.max,
2786 last_stmt (ex->src),
2787 true, true, true);
2789 VEC_free (edge, heap, exits);
2791 infer_loop_bounds_from_undefined (loop);
2793 /* If we have a measured profile, use it to estimate the number of
2794 iterations. */
2795 if (loop->header->count != 0)
2797 gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
2798 bound = gcov_type_to_double_int (nit);
2799 record_niter_bound (loop, bound, true, false);
2802 /* If an upper bound is smaller than the realistic estimate of the
2803 number of iterations, use the upper bound instead. */
2804 if (loop->any_upper_bound
2805 && loop->any_estimate
2806 && double_int_ucmp (loop->nb_iterations_upper_bound,
2807 loop->nb_iterations_estimate) < 0)
2808 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
2811 /* Records estimates on numbers of iterations of loops. */
2813 void
2814 estimate_numbers_of_iterations (void)
2816 loop_iterator li;
2817 struct loop *loop;
2819 /* We don't want to issue signed overflow warnings while getting
2820 loop iteration estimates. */
2821 fold_defer_overflow_warnings ();
2823 FOR_EACH_LOOP (li, loop, 0)
2825 estimate_numbers_of_iterations_loop (loop);
2828 fold_undefer_and_ignore_overflow_warnings ();
2831 /* Returns true if statement S1 dominates statement S2. */
2833 bool
2834 stmt_dominates_stmt_p (tree s1, tree s2)
2836 basic_block bb1 = bb_for_stmt (s1), bb2 = bb_for_stmt (s2);
2838 if (!bb1
2839 || s1 == s2)
2840 return true;
2842 if (bb1 == bb2)
2844 block_stmt_iterator bsi;
2846 for (bsi = bsi_start (bb1); bsi_stmt (bsi) != s2; bsi_next (&bsi))
2847 if (bsi_stmt (bsi) == s1)
2848 return true;
2850 return false;
2853 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
2856 /* Returns true when we can prove that the number of executions of
2857 STMT in the loop is at most NITER, according to the bound on
2858 the number of executions of the statement NITER_BOUND->stmt recorded in
2859 NITER_BOUND. If STMT is NULL, we must prove this bound for all
2860 statements in the loop. */
2862 static bool
2863 n_of_executions_at_most (tree stmt,
2864 struct nb_iter_bound *niter_bound,
2865 tree niter)
2867 double_int bound = niter_bound->bound;
2868 tree nit_type = TREE_TYPE (niter), e;
2869 enum tree_code cmp;
2871 gcc_assert (TYPE_UNSIGNED (nit_type));
2873 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
2874 the number of iterations is small. */
2875 if (!double_int_fits_to_tree_p (nit_type, bound))
2876 return false;
2878 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
2879 times. This means that:
2881 -- if NITER_BOUND->is_exit is true, then everything before
2882 NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
2883 times, and everything after it at most NITER_BOUND->bound times.
2885 -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
2886 is executed, then NITER_BOUND->stmt is executed as well in the same
2887 iteration (we conclude that if both statements belong to the same
2888 basic block, or if STMT is after NITER_BOUND->stmt), then STMT
2889 is executed at most NITER_BOUND->bound + 1 times. Otherwise STMT is
2890 executed at most NITER_BOUND->bound + 2 times. */
2892 if (niter_bound->is_exit)
2894 if (stmt
2895 && stmt != niter_bound->stmt
2896 && stmt_dominates_stmt_p (niter_bound->stmt, stmt))
2897 cmp = GE_EXPR;
2898 else
2899 cmp = GT_EXPR;
2901 else
2903 if (!stmt
2904 || (bb_for_stmt (stmt) != bb_for_stmt (niter_bound->stmt)
2905 && !stmt_dominates_stmt_p (niter_bound->stmt, stmt)))
2907 bound = double_int_add (bound, double_int_one);
2908 if (double_int_zero_p (bound)
2909 || !double_int_fits_to_tree_p (nit_type, bound))
2910 return false;
2912 cmp = GT_EXPR;
2915 e = fold_binary (cmp, boolean_type_node,
2916 niter, double_int_to_tree (nit_type, bound));
2917 return e && integer_nonzerop (e);
2920 /* Returns true if the arithmetics in TYPE can be assumed not to wrap. */
2922 bool
2923 nowrap_type_p (tree type)
2925 if (INTEGRAL_TYPE_P (type)
2926 && TYPE_OVERFLOW_UNDEFINED (type))
2927 return true;
2929 if (POINTER_TYPE_P (type))
2930 return true;
2932 return false;
2935 /* Return false only when the induction variable BASE + STEP * I is
2936 known to not overflow: i.e. when the number of iterations is small
2937 enough with respect to the step and initial condition in order to
2938 keep the evolution confined in TYPEs bounds. Return true when the
2939 iv is known to overflow or when the property is not computable.
2941 USE_OVERFLOW_SEMANTICS is true if this function should assume that
2942 the rules for overflow of the given language apply (e.g., that signed
2943 arithmetics in C does not overflow). */
2945 bool
2946 scev_probably_wraps_p (tree base, tree step,
2947 tree at_stmt, struct loop *loop,
2948 bool use_overflow_semantics)
2950 struct nb_iter_bound *bound;
2951 tree delta, step_abs;
2952 tree unsigned_type, valid_niter;
2953 tree type = TREE_TYPE (step);
2955 /* FIXME: We really need something like
2956 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
2958 We used to test for the following situation that frequently appears
2959 during address arithmetics:
2961 D.1621_13 = (long unsigned intD.4) D.1620_12;
2962 D.1622_14 = D.1621_13 * 8;
2963 D.1623_15 = (doubleD.29 *) D.1622_14;
2965 And derived that the sequence corresponding to D_14
2966 can be proved to not wrap because it is used for computing a
2967 memory access; however, this is not really the case -- for example,
2968 if D_12 = (unsigned char) [254,+,1], then D_14 has values
2969 2032, 2040, 0, 8, ..., but the code is still legal. */
2971 if (chrec_contains_undetermined (base)
2972 || chrec_contains_undetermined (step))
2973 return true;
2975 if (integer_zerop (step))
2976 return false;
2978 /* If we can use the fact that signed and pointer arithmetics does not
2979 wrap, we are done. */
2980 if (use_overflow_semantics && nowrap_type_p (type))
2981 return false;
2983 /* To be able to use estimates on number of iterations of the loop,
2984 we must have an upper bound on the absolute value of the step. */
2985 if (TREE_CODE (step) != INTEGER_CST)
2986 return true;
2988 /* Don't issue signed overflow warnings. */
2989 fold_defer_overflow_warnings ();
2991 /* Otherwise, compute the number of iterations before we reach the
2992 bound of the type, and verify that the loop is exited before this
2993 occurs. */
2994 unsigned_type = unsigned_type_for (type);
2995 base = fold_convert (unsigned_type, base);
2997 if (tree_int_cst_sign_bit (step))
2999 tree extreme = fold_convert (unsigned_type,
3000 lower_bound_in_type (type, type));
3001 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3002 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
3003 fold_convert (unsigned_type, step));
3005 else
3007 tree extreme = fold_convert (unsigned_type,
3008 upper_bound_in_type (type, type));
3009 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3010 step_abs = fold_convert (unsigned_type, step);
3013 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
3015 estimate_numbers_of_iterations_loop (loop);
3016 for (bound = loop->bounds; bound; bound = bound->next)
3018 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
3020 fold_undefer_and_ignore_overflow_warnings ();
3021 return false;
3025 fold_undefer_and_ignore_overflow_warnings ();
3027 /* At this point we still don't have a proof that the iv does not
3028 overflow: give up. */
3029 return true;
3032 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
3034 void
3035 free_numbers_of_iterations_estimates_loop (struct loop *loop)
3037 struct nb_iter_bound *bound, *next;
3039 loop->nb_iterations = NULL;
3040 loop->estimate_state = EST_NOT_COMPUTED;
3041 for (bound = loop->bounds; bound; bound = next)
3043 next = bound->next;
3044 ggc_free (bound);
3047 loop->bounds = NULL;
3050 /* Frees the information on upper bounds on numbers of iterations of loops. */
3052 void
3053 free_numbers_of_iterations_estimates (void)
3055 loop_iterator li;
3056 struct loop *loop;
3058 FOR_EACH_LOOP (li, loop, 0)
3060 free_numbers_of_iterations_estimates_loop (loop);
3064 /* Substitute value VAL for ssa name NAME inside expressions held
3065 at LOOP. */
3067 void
3068 substitute_in_loop_info (struct loop *loop, tree name, tree val)
3070 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);