[official-gcc.git] / gcc / tree-ssa-loop-niter.c
1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
3 Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "hard-reg-set.h"
29 #include "basic-block.h"
30 #include "output.h"
31 #include "diagnostic.h"
32 #include "intl.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "tree-pass.h"
37 #include "ggc.h"
38 #include "tree-chrec.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-data-ref.h"
41 #include "params.h"
42 #include "flags.h"
43 #include "toplev.h"
44 #include "tree-inline.h"
45 #include "gmp.h"
47 #define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
49 /* The maximum number of dominator BBs we search for conditions
50 of loop header copies we use for simplifying a conditional
51 expression. */
52 #define MAX_DOMINATORS_TO_WALK 8
56 Analysis of the number of iterations of an affine exit test.
60 /* Bounds on some value, BELOW <= X <= UP. */
62 typedef struct
64 mpz_t below, up;
65 } bounds;
68 /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
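/* For instance, EXPR = a_5 + 12 is split into *VAR = a_5 and OFFSET = 12,
   while a constant EXPR = 7 yields *VAR = 0 and OFFSET = 7. */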
70 static void
71 split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
73 tree type = TREE_TYPE (expr);
74 tree op0, op1;
75 double_int off;
76 bool negate = false;
78 *var = expr;
79 mpz_set_ui (offset, 0);
81 switch (TREE_CODE (expr))
83 case MINUS_EXPR:
84 negate = true;
85 /* Fallthru. */
87 case PLUS_EXPR:
88 case POINTER_PLUS_EXPR:
89 op0 = TREE_OPERAND (expr, 0);
90 op1 = TREE_OPERAND (expr, 1);
92 if (TREE_CODE (op1) != INTEGER_CST)
93 break;
95 *var = op0;
96 /* Always sign extend the offset. */
97 off = double_int_sext (tree_to_double_int (op1),
98 TYPE_PRECISION (type));
99 mpz_set_double_int (offset, off, false);
100 break;
102 case INTEGER_CST:
103 *var = build_int_cst_type (type, 0);
104 off = tree_to_double_int (expr);
105 mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
106 break;
108 default:
109 break;
113 /* Stores estimate on the minimum/maximum value of the expression VAR + OFF
114 in TYPE to MIN and MAX. */
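/* For instance, for a non-wrapping signed char VAR with OFF = 10, the type
   range [-128, 127] is narrowed to [-118, 127]. */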
116 static void
117 determine_value_range (tree type, tree var, mpz_t off,
118 mpz_t min, mpz_t max)
120 /* If the expression is a constant, we know its value exactly. */
121 if (integer_zerop (var))
123 mpz_set (min, off);
124 mpz_set (max, off);
125 return;
128 /* If the computation may wrap, we know nothing about the value, except for
129 the range of the type. */
130 get_type_static_bounds (type, min, max);
131 if (!nowrap_type_p (type))
132 return;
134 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
135 add it to MIN, otherwise to MAX. */
136 if (mpz_sgn (off) < 0)
137 mpz_add (max, max, off);
138 else
139 mpz_add (min, min, off);
142 /* Stores the bounds on the difference of the values of the expressions
143 (var + X) and (var + Y), computed in TYPE, to BNDS. */
145 static void
146 bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
147 bounds *bnds)
149 int rel = mpz_cmp (x, y);
150 bool may_wrap = !nowrap_type_p (type);
151 mpz_t m;
153 /* If X == Y, then the expressions are always equal.
154 If X > Y, there are the following possibilities:
155 a) neither of var + X and var + Y overflow or underflow, or both of
156 them do. Then their difference is X - Y.
157 b) var + X overflows, and var + Y does not. Then the values of the
158 expressions are var + X - M and var + Y, where M is the range of
159 the type, and their difference is X - Y - M.
160 c) var + Y underflows and var + X does not. Their difference again
161 is X - Y - M.
162 Therefore, if the arithmetic in the type does not overflow, then the
163 bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
164 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
165 (X - Y, X - Y + M). */
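/* For instance, in an 8-bit type (M = 256), X = 100 and Y = 10 give the
   bounds [90, 90] if the arithmetic cannot wrap, and [-166, 90] if it may. */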
167 if (rel == 0)
169 mpz_set_ui (bnds->below, 0);
170 mpz_set_ui (bnds->up, 0);
171 return;
174 mpz_init (m);
175 mpz_set_double_int (m, double_int_mask (TYPE_PRECISION (type)), true);
176 mpz_add_ui (m, m, 1);
177 mpz_sub (bnds->up, x, y);
178 mpz_set (bnds->below, bnds->up);
180 if (may_wrap)
182 if (rel > 0)
183 mpz_sub (bnds->below, bnds->below, m);
184 else
185 mpz_add (bnds->up, bnds->up, m);
188 mpz_clear (m);
191 /* From condition C0 CMP C1 derives information regarding the
192 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
193 and stores it to BNDS. */
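/* For instance, a dominating guard i_1 < n_2 with all offsets zero allows
   us to lower the upper bound on (i_1 + 0) - (n_2 + 0) to -1. */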
195 static void
196 refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
197 tree vary, mpz_t offy,
198 tree c0, enum tree_code cmp, tree c1,
199 bounds *bnds)
201 tree varc0, varc1, tmp, ctype;
202 mpz_t offc0, offc1, loffx, loffy, bnd;
203 bool lbound = false;
204 bool no_wrap = nowrap_type_p (type);
205 bool x_ok, y_ok;
207 switch (cmp)
209 case LT_EXPR:
210 case LE_EXPR:
211 case GT_EXPR:
212 case GE_EXPR:
213 STRIP_SIGN_NOPS (c0);
214 STRIP_SIGN_NOPS (c1);
215 ctype = TREE_TYPE (c0);
216 if (!useless_type_conversion_p (ctype, type))
217 return;
219 break;
221 case EQ_EXPR:
222 /* We could derive quite precise information from EQ_EXPR; however, such
223 a guard is unlikely to appear, so we do not bother with handling
224 it. */
225 return;
227 case NE_EXPR:
228 /* NE_EXPR comparisons do not contain much useful information, except for
229 the special case of comparing with the bounds of the type. */
230 if (TREE_CODE (c1) != INTEGER_CST
231 || !INTEGRAL_TYPE_P (type))
232 return;
234 /* Ensure that the condition speaks about an expression in the same type
235 as X and Y. */
236 ctype = TREE_TYPE (c0);
237 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
238 return;
239 c0 = fold_convert (type, c0);
240 c1 = fold_convert (type, c1);
242 if (TYPE_MIN_VALUE (type)
243 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
245 cmp = GT_EXPR;
246 break;
248 if (TYPE_MAX_VALUE (type)
249 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
251 cmp = LT_EXPR;
252 break;
255 return;
256 default:
257 return;
260 mpz_init (offc0);
261 mpz_init (offc1);
262 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
263 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
265 /* We are only interested in comparisons of expressions based on VARX and
266 VARY. TODO -- we might also be able to derive some bounds from
267 expressions containing just one of the variables. */
269 if (operand_equal_p (varx, varc1, 0))
271 tmp = varc0; varc0 = varc1; varc1 = tmp;
272 mpz_swap (offc0, offc1);
273 cmp = swap_tree_comparison (cmp);
276 if (!operand_equal_p (varx, varc0, 0)
277 || !operand_equal_p (vary, varc1, 0))
278 goto end;
280 mpz_init_set (loffx, offx);
281 mpz_init_set (loffy, offy);
283 if (cmp == GT_EXPR || cmp == GE_EXPR)
285 tmp = varx; varx = vary; vary = tmp;
286 mpz_swap (offc0, offc1);
287 mpz_swap (loffx, loffy);
288 cmp = swap_tree_comparison (cmp);
289 lbound = true;
292 /* If there is no overflow, the condition implies that
294 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
296 The overflows and underflows may complicate things a bit; each
297 overflow decreases the appropriate offset by M, and underflow
298 increases it by M. The above inequality would not necessarily be
299 true if
301 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
302 VARX + OFFC0 overflows, but VARX + OFFX does not.
303 This may only happen if OFFX < OFFC0.
304 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
305 VARY + OFFC1 underflows and VARY + OFFY does not.
306 This may only happen if OFFY > OFFC1. */
308 if (no_wrap)
310 x_ok = true;
311 y_ok = true;
313 else
315 x_ok = (integer_zerop (varx)
316 || mpz_cmp (loffx, offc0) >= 0);
317 y_ok = (integer_zerop (vary)
318 || mpz_cmp (loffy, offc1) <= 0);
321 if (x_ok && y_ok)
323 mpz_init (bnd);
324 mpz_sub (bnd, loffx, loffy);
325 mpz_add (bnd, bnd, offc1);
326 mpz_sub (bnd, bnd, offc0);
328 if (cmp == LT_EXPR)
329 mpz_sub_ui (bnd, bnd, 1);
331 if (lbound)
333 mpz_neg (bnd, bnd);
334 if (mpz_cmp (bnds->below, bnd) < 0)
335 mpz_set (bnds->below, bnd);
337 else
339 if (mpz_cmp (bnd, bnds->up) < 0)
340 mpz_set (bnds->up, bnd);
342 mpz_clear (bnd);
345 mpz_clear (loffx);
346 mpz_clear (loffy);
347 end:
348 mpz_clear (offc0);
349 mpz_clear (offc1);
352 /* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
353 The subtraction is considered to be performed in arbitrary precision,
354 without overflows.
356 We do not attempt to be too clever regarding the value ranges of X and
357 Y; most of the time, they are just integers or ssa names offsetted by
358 integer. However, we try to use the information contained in the
359 comparisons before the loop (usually created by loop header copying). */
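/* For instance, for X = i_1 + 7 and Y = i_1 in a non-wrapping type the
   bounds are [7, 7]; for unrelated X and Y they start from the ranges
   implied by the type and are then refined using the entry guards. */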
361 static void
362 bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
364 tree type = TREE_TYPE (x);
365 tree varx, vary;
366 mpz_t offx, offy;
367 mpz_t minx, maxx, miny, maxy;
368 int cnt = 0;
369 edge e;
370 basic_block bb;
371 tree c0, c1;
372 gimple cond;
373 enum tree_code cmp;
375 /* Get rid of unnecessary casts, but preserve the value of
376 the expressions. */
377 STRIP_SIGN_NOPS (x);
378 STRIP_SIGN_NOPS (y);
380 mpz_init (bnds->below);
381 mpz_init (bnds->up);
382 mpz_init (offx);
383 mpz_init (offy);
384 split_to_var_and_offset (x, &varx, offx);
385 split_to_var_and_offset (y, &vary, offy);
387 if (!integer_zerop (varx)
388 && operand_equal_p (varx, vary, 0))
390 /* Special case VARX == VARY -- we just need to compare the
391 offsets. Matters are a bit more complicated when the
392 addition of the offsets may wrap. */
393 bound_difference_of_offsetted_base (type, offx, offy, bnds);
395 else
397 /* Otherwise, use the value ranges to determine the initial
398 estimates on below and up. */
399 mpz_init (minx);
400 mpz_init (maxx);
401 mpz_init (miny);
402 mpz_init (maxy);
403 determine_value_range (type, varx, offx, minx, maxx);
404 determine_value_range (type, vary, offy, miny, maxy);
406 mpz_sub (bnds->below, minx, maxy);
407 mpz_sub (bnds->up, maxx, miny);
408 mpz_clear (minx);
409 mpz_clear (maxx);
410 mpz_clear (miny);
411 mpz_clear (maxy);
414 /* If both X and Y are constants, we cannot get any more precise. */
415 if (integer_zerop (varx) && integer_zerop (vary))
416 goto end;
418 /* Now walk the dominators of the loop header and use the entry
419 guards to refine the estimates. */
420 for (bb = loop->header;
421 bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
422 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
424 if (!single_pred_p (bb))
425 continue;
426 e = single_pred_edge (bb);
428 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
429 continue;
431 cond = last_stmt (e->src);
432 c0 = gimple_cond_lhs (cond);
433 cmp = gimple_cond_code (cond);
434 c1 = gimple_cond_rhs (cond);
436 if (e->flags & EDGE_FALSE_VALUE)
437 cmp = invert_tree_comparison (cmp, false);
439 refine_bounds_using_guard (type, varx, offx, vary, offy,
440 c0, cmp, c1, bnds);
441 ++cnt;
444 end:
445 mpz_clear (offx);
446 mpz_clear (offy);
449 /* Update the bounds in BNDS that restrict the value of X to the bounds
450 that restrict the value of X + DELTA. X can be obtained as a
451 difference of two values in TYPE. */
453 static void
454 bounds_add (bounds *bnds, double_int delta, tree type)
456 mpz_t mdelta, max;
458 mpz_init (mdelta);
459 mpz_set_double_int (mdelta, delta, false);
461 mpz_init (max);
462 mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
464 mpz_add (bnds->up, bnds->up, mdelta);
465 mpz_add (bnds->below, bnds->below, mdelta);
467 if (mpz_cmp (bnds->up, max) > 0)
468 mpz_set (bnds->up, max);
470 mpz_neg (max, max);
471 if (mpz_cmp (bnds->below, max) < 0)
472 mpz_set (bnds->below, max);
474 mpz_clear (mdelta);
475 mpz_clear (max);
478 /* Update the bounds in BNDS that restrict the value of X to the bounds
479 that restrict the value of -X. */
481 static void
482 bounds_negate (bounds *bnds)
484 mpz_t tmp;
486 mpz_init_set (tmp, bnds->up);
487 mpz_neg (bnds->up, bnds->below);
488 mpz_neg (bnds->below, tmp);
489 mpz_clear (tmp);
492 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
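/* For instance, inverse (3, 15) is 11, since 3 * 11 = 33 == 1 (mod 16). */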
494 static tree
495 inverse (tree x, tree mask)
497 tree type = TREE_TYPE (x);
498 tree rslt;
499 unsigned ctr = tree_floor_log2 (mask);
501 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
503 unsigned HOST_WIDE_INT ix;
504 unsigned HOST_WIDE_INT imask;
505 unsigned HOST_WIDE_INT irslt = 1;
507 gcc_assert (cst_and_fits_in_hwi (x));
508 gcc_assert (cst_and_fits_in_hwi (mask));
510 ix = int_cst_value (x);
511 imask = int_cst_value (mask);
513 for (; ctr; ctr--)
515 irslt *= ix;
516 ix *= ix;
518 irslt &= imask;
520 rslt = build_int_cst_type (type, irslt);
522 else
524 rslt = build_int_cst (type, 1);
525 for (; ctr; ctr--)
527 rslt = int_const_binop (MULT_EXPR, rslt, x, 0);
528 x = int_const_binop (MULT_EXPR, x, x, 0);
530 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask, 0);
533 return rslt;
536 /* Derives the upper bound BND on the number of executions of loop with exit
537 condition S * i <> C, assuming that the loop is not infinite. If
538 NO_OVERFLOW is true, then the control variable of the loop does not
539 overflow. If NO_OVERFLOW is true or BNDS.below >= 0, then BNDS.up
540 contains the upper bound on the value of C. */
542 static void
543 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
544 bounds *bnds)
546 double_int max;
547 mpz_t d;
549 /* If the control variable does not overflow, the number of iterations is
550 at most c / s. Otherwise it is at most the period of the control
551 variable. */
552 if (!no_overflow && !multiple_of_p (TREE_TYPE (c), c, s))
554 max = double_int_mask (TYPE_PRECISION (TREE_TYPE (c))
555 - tree_low_cst (num_ending_zeros (s), 1));
556 mpz_set_double_int (bnd, max, true);
557 return;
560 /* Determine the upper bound on C. */
561 if (no_overflow || mpz_sgn (bnds->below) >= 0)
562 mpz_set (bnd, bnds->up);
563 else if (TREE_CODE (c) == INTEGER_CST)
564 mpz_set_double_int (bnd, tree_to_double_int (c), true);
565 else
566 mpz_set_double_int (bnd, double_int_mask (TYPE_PRECISION (TREE_TYPE (c))),
567 true);
569 mpz_init (d);
570 mpz_set_double_int (d, tree_to_double_int (s), true);
571 mpz_fdiv_q (bnd, bnd, d);
572 mpz_clear (d);
575 /* Determines number of iterations of loop whose ending condition
576 is IV <> FINAL. TYPE is the type of the iv. The number of
577 iterations is stored to NITER. NEVER_INFINITE is true if
578 we know that the exit must be taken eventually, i.e., that the IV
579 actually reaches the value FINAL (we derived this earlier, and possibly set
580 NITER->assumptions to make sure this is the case). BNDS contains the
581 bounds on the difference FINAL - IV->base. */
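/* For instance, for the exit condition i != 100 with IV {0, +, 4} we get
   s = 4 and c = 100, and the result is 100 / 4 = 25 iterations. */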
583 static bool
584 number_of_iterations_ne (tree type, affine_iv *iv, tree final,
585 struct tree_niter_desc *niter, bool never_infinite,
586 bounds *bnds)
588 tree niter_type = unsigned_type_for (type);
589 tree s, c, d, bits, assumption, tmp, bound;
590 mpz_t max;
592 niter->control = *iv;
593 niter->bound = final;
594 niter->cmp = NE_EXPR;
596 /* Rearrange the terms so that we get inequality S * i <> C, with S
597 positive. Also cast everything to the unsigned type. If IV does
598 not overflow, BNDS bounds the value of C. Also, this is the
599 case if the computation |FINAL - IV->base| does not overflow, i.e.,
600 if BNDS->below in the result is nonnegative. */
601 if (tree_int_cst_sign_bit (iv->step))
603 s = fold_convert (niter_type,
604 fold_build1 (NEGATE_EXPR, type, iv->step));
605 c = fold_build2 (MINUS_EXPR, niter_type,
606 fold_convert (niter_type, iv->base),
607 fold_convert (niter_type, final));
608 bounds_negate (bnds);
610 else
612 s = fold_convert (niter_type, iv->step);
613 c = fold_build2 (MINUS_EXPR, niter_type,
614 fold_convert (niter_type, final),
615 fold_convert (niter_type, iv->base));
618 mpz_init (max);
619 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds);
620 niter->max = mpz_get_double_int (niter_type, max, false);
621 mpz_clear (max);
623 /* First the trivial cases -- when the step is 1. */
624 if (integer_onep (s))
626 niter->niter = c;
627 return true;
630 /* Let d = gcd (step, size of mode). If d does not divide c, the loop
631 is infinite. Otherwise, the number of iterations is
632 (inverse(s/d) * (c/d)) mod (size of mode/d). */
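/* For instance, in an 8-bit type with s = 4 and c = 8 we get d = 4,
   s/d = 1, c/d = 2 and size of mode / d = 64, so the number of
   iterations is (inverse(1) * 2) mod 64 = 2. */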
633 bits = num_ending_zeros (s);
634 bound = build_low_bits_mask (niter_type,
635 (TYPE_PRECISION (niter_type)
636 - tree_low_cst (bits, 1)));
638 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
639 build_int_cst (niter_type, 1), bits);
640 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
642 if (!never_infinite)
644 /* If we cannot assume that the loop is not infinite, record the
645 assumptions for divisibility of c. */
646 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
647 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
648 assumption, build_int_cst (niter_type, 0));
649 if (!integer_nonzerop (assumption))
650 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
651 niter->assumptions, assumption);
654 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
655 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
656 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
657 return true;
660 /* Checks whether we can determine the final value of the control variable
661 of the loop with ending condition IV0 < IV1 (computed in TYPE).
662 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
663 of the step. The assumptions necessary to ensure that the computation
664 of the final value does not overflow are recorded in NITER. If we
665 find the final value, we adjust DELTA and return TRUE. Otherwise
666 we return false. BNDS bounds the value of IV1->base - IV0->base,
667 and will be updated by the same amount as DELTA. */
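/* For instance, for the loop for (i = 0; i < 10; i += 3) we have DELTA = 10
   and MOD = 3 - 10 % 3 = 2, so the final value of the IV is 10 + 2 = 12,
   DELTA becomes 12, and the equivalent test i != 12 gives 12 / 3 = 4
   iterations. */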
669 static bool
670 number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
671 struct tree_niter_desc *niter,
672 tree *delta, tree step,
673 bounds *bnds)
675 tree niter_type = TREE_TYPE (step);
676 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
677 tree tmod;
678 mpz_t mmod;
679 tree assumption = boolean_true_node, bound, noloop;
680 bool ret = false;
681 tree type1 = type;
682 if (POINTER_TYPE_P (type))
683 type1 = sizetype;
685 if (TREE_CODE (mod) != INTEGER_CST)
686 return false;
687 if (integer_nonzerop (mod))
688 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
689 tmod = fold_convert (type1, mod);
691 mpz_init (mmod);
692 mpz_set_double_int (mmod, tree_to_double_int (mod), true);
693 mpz_neg (mmod, mmod);
695 if (integer_nonzerop (iv0->step))
697 /* The final value of the iv is iv1->base + MOD, assuming that this
698 computation does not overflow, and that
699 iv0->base <= iv1->base + MOD. */
700 if (!iv0->no_overflow && !integer_zerop (mod))
702 bound = fold_build2 (MINUS_EXPR, type1,
703 TYPE_MAX_VALUE (type1), tmod);
704 if (POINTER_TYPE_P (type))
705 bound = fold_convert (type, bound);
706 assumption = fold_build2 (LE_EXPR, boolean_type_node,
707 iv1->base, bound);
708 if (integer_zerop (assumption))
709 goto end;
711 if (mpz_cmp (mmod, bnds->below) < 0)
712 noloop = boolean_false_node;
713 else if (POINTER_TYPE_P (type))
714 noloop = fold_build2 (GT_EXPR, boolean_type_node,
715 iv0->base,
716 fold_build2 (POINTER_PLUS_EXPR, type,
717 iv1->base, tmod));
718 else
719 noloop = fold_build2 (GT_EXPR, boolean_type_node,
720 iv0->base,
721 fold_build2 (PLUS_EXPR, type1,
722 iv1->base, tmod));
724 else
726 /* The final value of the iv is iv0->base - MOD, assuming that this
727 computation does not overflow, and that
728 iv0->base - MOD <= iv1->base. */
729 if (!iv1->no_overflow && !integer_zerop (mod))
731 bound = fold_build2 (PLUS_EXPR, type1,
732 TYPE_MIN_VALUE (type1), tmod);
733 if (POINTER_TYPE_P (type))
734 bound = fold_convert (type, bound);
735 assumption = fold_build2 (GE_EXPR, boolean_type_node,
736 iv0->base, bound);
737 if (integer_zerop (assumption))
738 goto end;
740 if (mpz_cmp (mmod, bnds->below) < 0)
741 noloop = boolean_false_node;
742 else if (POINTER_TYPE_P (type))
743 noloop = fold_build2 (GT_EXPR, boolean_type_node,
744 fold_build2 (POINTER_PLUS_EXPR, type,
745 iv0->base,
746 fold_build1 (NEGATE_EXPR,
747 type1, tmod)),
748 iv1->base);
749 else
750 noloop = fold_build2 (GT_EXPR, boolean_type_node,
751 fold_build2 (MINUS_EXPR, type1,
752 iv0->base, tmod),
753 iv1->base);
756 if (!integer_nonzerop (assumption))
757 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
758 niter->assumptions,
759 assumption);
760 if (!integer_zerop (noloop))
761 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
762 niter->may_be_zero,
763 noloop);
764 bounds_add (bnds, tree_to_double_int (mod), type);
765 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
767 ret = true;
768 end:
769 mpz_clear (mmod);
770 return ret;
773 /* Add assertions to NITER that ensure that the control variable of the loop
774 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
775 are TYPE. Returns false if we can prove that there is an overflow, true
776 otherwise. STEP is the absolute value of the step. */
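/* For instance, for an unsigned char IV0 = {0, +, 10} the largest value the
   IV reaches without wrapping is 250, so we record the assumption
   IV1->base <= 250. */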
778 static bool
779 assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
780 struct tree_niter_desc *niter, tree step)
782 tree bound, d, assumption, diff;
783 tree niter_type = TREE_TYPE (step);
785 if (integer_nonzerop (iv0->step))
787 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
788 if (iv0->no_overflow)
789 return true;
791 /* If iv0->base is a constant, we can determine the last value before
792 overflow precisely; otherwise we conservatively assume
793 MAX - STEP + 1. */
795 if (TREE_CODE (iv0->base) == INTEGER_CST)
797 d = fold_build2 (MINUS_EXPR, niter_type,
798 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
799 fold_convert (niter_type, iv0->base));
800 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
802 else
803 diff = fold_build2 (MINUS_EXPR, niter_type, step,
804 build_int_cst (niter_type, 1));
805 bound = fold_build2 (MINUS_EXPR, type,
806 TYPE_MAX_VALUE (type), fold_convert (type, diff));
807 assumption = fold_build2 (LE_EXPR, boolean_type_node,
808 iv1->base, bound);
810 else
812 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
813 if (iv1->no_overflow)
814 return true;
816 if (TREE_CODE (iv1->base) == INTEGER_CST)
818 d = fold_build2 (MINUS_EXPR, niter_type,
819 fold_convert (niter_type, iv1->base),
820 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
821 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
823 else
824 diff = fold_build2 (MINUS_EXPR, niter_type, step,
825 build_int_cst (niter_type, 1));
826 bound = fold_build2 (PLUS_EXPR, type,
827 TYPE_MIN_VALUE (type), fold_convert (type, diff));
828 assumption = fold_build2 (GE_EXPR, boolean_type_node,
829 iv0->base, bound);
832 if (integer_zerop (assumption))
833 return false;
834 if (!integer_nonzerop (assumption))
835 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
836 niter->assumptions, assumption);
838 iv0->no_overflow = true;
839 iv1->no_overflow = true;
840 return true;
843 /* Add an assumption to NITER that a loop whose ending condition
844 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
845 bounds the value of IV1->base - IV0->base. */
847 static void
848 assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
849 struct tree_niter_desc *niter, bounds *bnds)
851 tree assumption = boolean_true_node, bound, diff;
852 tree mbz, mbzl, mbzr, type1;
853 bool rolls_p, no_overflow_p;
854 double_int dstep;
855 mpz_t mstep, max;
857 /* We are going to compute the number of iterations as
858 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
859 variant of TYPE. This formula only works if
861 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
863 (where MAX is the maximum value of the unsigned variant of TYPE, and
864 the computations in this formula are performed in full precision,
865 without overflows).
867 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
868 we have a condition of form iv0->base - step < iv1->base before the loop,
869 and for loops iv0->base < iv1->base - step * i the condition
870 iv0->base < iv1->base + step, due to loop header copying, which enables us
871 to prove the lower bound.
873 The upper bound is more complicated. Unless the expressions for initial
874 and final value themselves contain enough information, we usually cannot
875 derive it from the context. */
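/* For instance, for the loop for (i = 0; i < n; i += 4) the number of
   iterations is computed as (n + 3) / 4, which is only valid if
   -3 <= n <= MAX - 3 in exact arithmetic. */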
877 /* First check whether the answer does not follow from the bounds we gathered
878 before. */
879 if (integer_nonzerop (iv0->step))
880 dstep = tree_to_double_int (iv0->step);
881 else
883 dstep = double_int_sext (tree_to_double_int (iv1->step),
884 TYPE_PRECISION (type));
885 dstep = double_int_neg (dstep);
888 mpz_init (mstep);
889 mpz_set_double_int (mstep, dstep, true);
890 mpz_neg (mstep, mstep);
891 mpz_add_ui (mstep, mstep, 1);
893 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
895 mpz_init (max);
896 mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
897 mpz_add (max, max, mstep);
898 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
899 /* For pointers, only values lying inside a single object
900 can be compared or manipulated by pointer arithmetics.
901 Gcc in general does not allow or handle objects larger
902 than half of the address space, hence the upper bound
903 is satisfied for pointers. */
904 || POINTER_TYPE_P (type));
905 mpz_clear (mstep);
906 mpz_clear (max);
908 if (rolls_p && no_overflow_p)
909 return;
911 type1 = type;
912 if (POINTER_TYPE_P (type))
913 type1 = sizetype;
915 /* Now the hard part; we must formulate the assumption(s) as expressions, and
916 we must be careful not to introduce overflow. */
918 if (integer_nonzerop (iv0->step))
920 diff = fold_build2 (MINUS_EXPR, type1,
921 iv0->step, build_int_cst (type1, 1));
923 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
924 0 address never belongs to any object, we can assume this for
925 pointers. */
926 if (!POINTER_TYPE_P (type))
928 bound = fold_build2 (PLUS_EXPR, type1,
929 TYPE_MIN_VALUE (type), diff);
930 assumption = fold_build2 (GE_EXPR, boolean_type_node,
931 iv0->base, bound);
934 /* And then we can compute iv0->base - diff, and compare it with
935 iv1->base. */
936 mbzl = fold_build2 (MINUS_EXPR, type1,
937 fold_convert (type1, iv0->base), diff);
938 mbzr = fold_convert (type1, iv1->base);
940 else
942 diff = fold_build2 (PLUS_EXPR, type1,
943 iv1->step, build_int_cst (type1, 1));
945 if (!POINTER_TYPE_P (type))
947 bound = fold_build2 (PLUS_EXPR, type1,
948 TYPE_MAX_VALUE (type), diff);
949 assumption = fold_build2 (LE_EXPR, boolean_type_node,
950 iv1->base, bound);
953 mbzl = fold_convert (type1, iv0->base);
954 mbzr = fold_build2 (MINUS_EXPR, type1,
955 fold_convert (type1, iv1->base), diff);
958 if (!integer_nonzerop (assumption))
959 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
960 niter->assumptions, assumption);
961 if (!rolls_p)
963 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
964 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
965 niter->may_be_zero, mbz);
969 /* Determines number of iterations of loop whose ending condition
970 is IV0 < IV1. TYPE is the type of the iv. The number of
971 iterations is stored to NITER. BNDS bounds the difference
972 IV1->base - IV0->base. */
974 static bool
975 number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
976 struct tree_niter_desc *niter,
977 bool never_infinite ATTRIBUTE_UNUSED,
978 bounds *bnds)
980 tree niter_type = unsigned_type_for (type);
981 tree delta, step, s;
982 mpz_t mstep, tmp;
984 if (integer_nonzerop (iv0->step))
986 niter->control = *iv0;
987 niter->cmp = LT_EXPR;
988 niter->bound = iv1->base;
990 else
992 niter->control = *iv1;
993 niter->cmp = GT_EXPR;
994 niter->bound = iv0->base;
997 delta = fold_build2 (MINUS_EXPR, niter_type,
998 fold_convert (niter_type, iv1->base),
999 fold_convert (niter_type, iv0->base));
1001 /* First handle the special case that the step is +-1. */
1002 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1003 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
1005 /* for (i = iv0->base; i < iv1->base; i++)
1009 for (i = iv1->base; i > iv0->base; i--).
1011 In both cases # of iterations is iv1->base - iv0->base, assuming that
1012 iv1->base >= iv0->base.
1014 First try to derive a lower bound on the value of
1015 iv1->base - iv0->base, computed in full precision. If the difference
1016 is nonnegative, we are done, otherwise we must record the
1017 condition. */
1019 if (mpz_sgn (bnds->below) < 0)
1020 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1021 iv1->base, iv0->base);
1022 niter->niter = delta;
1023 niter->max = mpz_get_double_int (niter_type, bnds->up, false);
1024 return true;
1027 if (integer_nonzerop (iv0->step))
1028 step = fold_convert (niter_type, iv0->step);
1029 else
1030 step = fold_convert (niter_type,
1031 fold_build1 (NEGATE_EXPR, type, iv1->step));
1033 /* If we can determine the final value of the control iv exactly, we can
1034 transform the condition to != comparison. In particular, this will be
1035 the case if DELTA is constant. */
1036 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1037 bnds))
1039 affine_iv zps;
1041 zps.base = build_int_cst (niter_type, 0);
1042 zps.step = step;
1043 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1044 zps does not overflow. */
1045 zps.no_overflow = true;
1047 return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
1050 /* Make sure that the control iv does not overflow. */
1051 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1052 return false;
1054 /* We determine the number of iterations as (delta + step - 1) / step. For
1055 this to work, we must know that iv1->base >= iv0->base - step + 1,
1056 otherwise the loop does not roll. */
1057 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1059 s = fold_build2 (MINUS_EXPR, niter_type,
1060 step, build_int_cst (niter_type, 1));
1061 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1062 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1064 mpz_init (mstep);
1065 mpz_init (tmp);
1066 mpz_set_double_int (mstep, tree_to_double_int (step), true);
1067 mpz_add (tmp, bnds->up, mstep);
1068 mpz_sub_ui (tmp, tmp, 1);
1069 mpz_fdiv_q (tmp, tmp, mstep);
1070 niter->max = mpz_get_double_int (niter_type, tmp, false);
1071 mpz_clear (mstep);
1072 mpz_clear (tmp);
1074 return true;
1077 /* Determines number of iterations of loop whose ending condition
1078 is IV0 <= IV1. TYPE is the type of the iv. The number of
1079 iterations is stored to NITER. NEVER_INFINITE is true if
1080 we know that this condition must eventually become false (we derived this
1081 earlier, and possibly set NITER->assumptions to make sure this
1082 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1084 static bool
1085 number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
1086 struct tree_niter_desc *niter, bool never_infinite,
1087 bounds *bnds)
1089 tree assumption;
1090 tree type1 = type;
1091 if (POINTER_TYPE_P (type))
1092 type1 = sizetype;
1094 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1095 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1096 value of the type. This we must know anyway, since if it is
1097 equal to this value, the loop rolls forever. */
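/* For instance, for (i = 0; i <= n; i++) is handled as
   for (i = 0; i < n + 1; i++), which is safe because n must not be equal
   to the maximal value of the type anyway. */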
1099 if (!never_infinite)
1101 if (integer_nonzerop (iv0->step))
1102 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1103 iv1->base, TYPE_MAX_VALUE (type));
1104 else
1105 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1106 iv0->base, TYPE_MIN_VALUE (type));
1108 if (integer_zerop (assumption))
1109 return false;
1110 if (!integer_nonzerop (assumption))
1111 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1112 niter->assumptions, assumption);
1115 if (integer_nonzerop (iv0->step))
1117 if (POINTER_TYPE_P (type))
1118 iv1->base = fold_build2 (POINTER_PLUS_EXPR, type, iv1->base,
1119 build_int_cst (type1, 1));
1120 else
1121 iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1122 build_int_cst (type1, 1));
1124 else if (POINTER_TYPE_P (type))
1125 iv0->base = fold_build2 (POINTER_PLUS_EXPR, type, iv0->base,
1126 fold_build1 (NEGATE_EXPR, type1,
1127 build_int_cst (type1, 1)));
1128 else
1129 iv0->base = fold_build2 (MINUS_EXPR, type1,
1130 iv0->base, build_int_cst (type1, 1));
1132 bounds_add (bnds, double_int_one, type1);
1134 return number_of_iterations_lt (type, iv0, iv1, niter, never_infinite, bnds);
1137 /* Dumps description of affine induction variable IV to FILE. */
1139 static void
1140 dump_affine_iv (FILE *file, affine_iv *iv)
1142 if (!integer_zerop (iv->step))
1143 fprintf (file, "[");
1145 print_generic_expr (dump_file, iv->base, TDF_SLIM);
1147 if (!integer_zerop (iv->step))
1149 fprintf (file, ", + , ");
1150 print_generic_expr (dump_file, iv->step, TDF_SLIM);
1151 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1155 /* Determine the number of iterations according to condition (for staying
1156 inside loop) which compares two induction variables using comparison
1157 operator CODE. The induction variable on left side of the comparison
1158 is IV0, the right-hand side is IV1. Both induction variables must have
1159 type TYPE, which must be an integer or pointer type. The steps of the
1160 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1162 LOOP is the loop whose number of iterations we are determining.
1164 ONLY_EXIT is true if we are sure this is the only way the loop could be
1165 exited (including possibly non-returning function calls, exceptions, etc.)
1166 -- in this case we can use the information about whether the control induction
1167 variables can overflow or not in a more efficient way.
1169 The results (number of iterations and assumptions as described in
1170 comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
1171 Returns false if it fails to determine number of iterations, true if it
1172 was determined (possibly with some assumptions). */
1174 static bool
1175 number_of_iterations_cond (struct loop *loop,
1176 tree type, affine_iv *iv0, enum tree_code code,
1177 affine_iv *iv1, struct tree_niter_desc *niter,
1178 bool only_exit)
1180 bool never_infinite, ret;
1181 bounds bnds;
1183 /* The meaning of these assumptions is this:
1184 if !assumptions
1185 then the rest of information does not have to be valid
1186 if may_be_zero then the loop does not roll, even if
1187 niter != 0. */
1188 niter->assumptions = boolean_true_node;
1189 niter->may_be_zero = boolean_false_node;
1190 niter->niter = NULL_TREE;
1191 niter->max = double_int_zero;
1193 niter->bound = NULL_TREE;
1194 niter->cmp = ERROR_MARK;
1196 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1197 the control variable is on lhs. */
1198 if (code == GE_EXPR || code == GT_EXPR
1199 || (code == NE_EXPR && integer_zerop (iv0->step)))
1201 SWAP (iv0, iv1);
1202 code = swap_tree_comparison (code);
1205 if (!only_exit)
1207 /* If this is not the only possible exit from the loop, the information
1208 that the induction variables cannot overflow as derived from
1209 signedness analysis cannot be relied upon. We use them e.g. in the
1210 following way: given loop for (i = 0; i <= n; i++), if i is
1211 signed, it cannot overflow, thus this loop is equivalent to
1212 for (i = 0; i < n + 1; i++); however, if n == MAX, but the loop
1213 is exited in some other way before i overflows, this transformation
1214 is incorrect (the new loop exits immediately). */
1215 iv0->no_overflow = false;
1216 iv1->no_overflow = false;
1219 if (POINTER_TYPE_P (type))
1221 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1222 to the same object. If they do, the control variable cannot wrap
1223 (as wrap around the bounds of memory will never return a pointer
1224 that would be guaranteed to point to the same object, even if we
1225 avoid undefined behavior by casting to size_t and back). The
1226 restrictions on pointer arithmetics and comparisons of pointers
1227 ensure that using the no-overflow assumptions is correct in this
1228 case even if ONLY_EXIT is false. */
1229 iv0->no_overflow = true;
1230 iv1->no_overflow = true;
1233 /* If the control induction variable does not overflow, the loop obviously
1234 cannot be infinite. */
1235 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1236 never_infinite = true;
1237 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1238 never_infinite = true;
1239 else
1240 never_infinite = false;
1242 /* We can handle the case when neither of the sides of the comparison is
1243 invariant, provided that the test is NE_EXPR. This rarely occurs in
1244 practice, but it is simple enough to manage. */
1245 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1247 if (code != NE_EXPR)
1248 return false;
1250 iv0->step = fold_binary_to_constant (MINUS_EXPR, type,
1251 iv0->step, iv1->step);
1252 iv0->no_overflow = false;
1253 iv1->step = build_int_cst (type, 0);
1254 iv1->no_overflow = true;
1257 /* If the result of the comparison is a constant, the loop is weird. More
1258 precise handling would be possible, but the situation is not common enough
1259 to waste time on it. */
1260 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1261 return false;
1263 /* Ignore loops of while (i-- < 10) type. */
1264 if (code != NE_EXPR)
1266 if (iv0->step && tree_int_cst_sign_bit (iv0->step))
1267 return false;
1269 if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
1270 return false;
1273 /* If the loop exits immediately, there is nothing to do. */
1274 if (integer_zerop (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
1276 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1277 niter->max = double_int_zero;
1278 return true;
1281 /* OK, now we know we have a senseful loop. Handle several cases, depending
1282 on what comparison operator is used. */
1283 bound_difference (loop, iv1->base, iv0->base, &bnds);
1285 if (dump_file && (dump_flags & TDF_DETAILS))
1287 fprintf (dump_file,
1288 "Analyzing # of iterations of loop %d\n", loop->num);
1290 fprintf (dump_file, " exit condition ");
1291 dump_affine_iv (dump_file, iv0);
1292 fprintf (dump_file, " %s ",
1293 code == NE_EXPR ? "!="
1294 : code == LT_EXPR ? "<"
1295 : "<=");
1296 dump_affine_iv (dump_file, iv1);
1297 fprintf (dump_file, "\n");
1299 fprintf (dump_file, " bounds on difference of bases: ");
1300 mpz_out_str (dump_file, 10, bnds.below);
1301 fprintf (dump_file, " ... ");
1302 mpz_out_str (dump_file, 10, bnds.up);
1303 fprintf (dump_file, "\n");
1306 switch (code)
1308 case NE_EXPR:
1309 gcc_assert (integer_zerop (iv1->step));
1310 ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
1311 never_infinite, &bnds);
1312 break;
1314 case LT_EXPR:
1315 ret = number_of_iterations_lt (type, iv0, iv1, niter, never_infinite,
1316 &bnds);
1317 break;
1319 case LE_EXPR:
1320 ret = number_of_iterations_le (type, iv0, iv1, niter, never_infinite,
1321 &bnds);
1322 break;
1324 default:
1325 gcc_unreachable ();
1328 mpz_clear (bnds.up);
1329 mpz_clear (bnds.below);
1331 if (dump_file && (dump_flags & TDF_DETAILS))
1333 if (ret)
1335 fprintf (dump_file, " result:\n");
1336 if (!integer_nonzerop (niter->assumptions))
1338 fprintf (dump_file, " under assumptions ");
1339 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1340 fprintf (dump_file, "\n");
1343 if (!integer_zerop (niter->may_be_zero))
1345 fprintf (dump_file, " zero if ");
1346 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1347 fprintf (dump_file, "\n");
1350 fprintf (dump_file, " # of iterations ");
1351 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1352 fprintf (dump_file, ", bounded by ");
1353 dump_double_int (dump_file, niter->max, true);
1354 fprintf (dump_file, "\n");
1356 else
1357 fprintf (dump_file, " failed\n\n");
1359 return ret;
1362 /* Substitute NEW_TREE for OLD in EXPR and fold the result. */
1364 static tree
1365 simplify_replace_tree (tree expr, tree old, tree new_tree)
1367 unsigned i, n;
1368 tree ret = NULL_TREE, e, se;
1370 if (!expr)
1371 return NULL_TREE;
1373 if (expr == old
1374 || operand_equal_p (expr, old, 0))
1375 return unshare_expr (new_tree);
1377 if (!EXPR_P (expr))
1378 return expr;
1380 n = TREE_OPERAND_LENGTH (expr);
1381 for (i = 0; i < n; i++)
1383 e = TREE_OPERAND (expr, i);
1384 se = simplify_replace_tree (e, old, new_tree);
1385 if (e == se)
1386 continue;
1388 if (!ret)
1389 ret = copy_node (expr);
1391 TREE_OPERAND (ret, i) = se;
1394 return (ret ? fold (ret) : expr);
1397 /* Expand definitions of ssa names in EXPR as long as they are simple
1398 enough, and return the new expression. */
1400 tree
1401 expand_simple_operations (tree expr)
1403 unsigned i, n;
1404 tree ret = NULL_TREE, e, ee, e1;
1405 enum tree_code code;
1406 gimple stmt;
1408 if (expr == NULL_TREE)
1409 return expr;
1411 if (is_gimple_min_invariant (expr))
1412 return expr;
1414 code = TREE_CODE (expr);
1415 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1417 n = TREE_OPERAND_LENGTH (expr);
1418 for (i = 0; i < n; i++)
1420 e = TREE_OPERAND (expr, i);
1421 ee = expand_simple_operations (e);
1422 if (e == ee)
1423 continue;
1425 if (!ret)
1426 ret = copy_node (expr);
1428 TREE_OPERAND (ret, i) = ee;
1431 if (!ret)
1432 return expr;
1434 fold_defer_overflow_warnings ();
1435 ret = fold (ret);
1436 fold_undefer_and_ignore_overflow_warnings ();
1437 return ret;
1440 if (TREE_CODE (expr) != SSA_NAME)
1441 return expr;
1443 stmt = SSA_NAME_DEF_STMT (expr);
1444 if (gimple_code (stmt) == GIMPLE_PHI)
1446 basic_block src, dest;
1448 if (gimple_phi_num_args (stmt) != 1)
1449 return expr;
1450 e = PHI_ARG_DEF (stmt, 0);
1452 /* Avoid propagating through loop exit phi nodes, which
1453 could break loop-closed SSA form restrictions. */
1454 dest = gimple_bb (stmt);
1455 src = single_pred (dest);
1456 if (TREE_CODE (e) == SSA_NAME
1457 && src->loop_father != dest->loop_father)
1458 return expr;
1460 return expand_simple_operations (e);
1462 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1463 return expr;
1465 e = gimple_assign_rhs1 (stmt);
1466 code = gimple_assign_rhs_code (stmt);
1467 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1469 if (is_gimple_min_invariant (e))
1470 return e;
1472 if (code == SSA_NAME)
1473 return expand_simple_operations (e);
1475 return expr;
1478 switch (code)
1480 CASE_CONVERT:
1481 /* Casts are simple. */
1482 ee = expand_simple_operations (e);
1483 return fold_build1 (code, TREE_TYPE (expr), ee);
1485 case PLUS_EXPR:
1486 case MINUS_EXPR:
1487 case POINTER_PLUS_EXPR:
1488 /* And increments and decrements by a constant are simple. */
1489 e1 = gimple_assign_rhs2 (stmt);
1490 if (!is_gimple_min_invariant (e1))
1491 return expr;
1493 ee = expand_simple_operations (e);
1494 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
1496 default:
1497 return expr;
1501 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1502 expression (or EXPR unchanged, if no simplification was possible). */
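/* For instance, if COND is n_1 == 0, the expression n_1 > 5 simplifies to
   false once 0 is substituted for n_1. */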
1504 static tree
1505 tree_simplify_using_condition_1 (tree cond, tree expr)
1507 bool changed;
1508 tree e, te, e0, e1, e2, notcond;
1509 enum tree_code code = TREE_CODE (expr);
1511 if (code == INTEGER_CST)
1512 return expr;
1514 if (code == TRUTH_OR_EXPR
1515 || code == TRUTH_AND_EXPR
1516 || code == COND_EXPR)
1518 changed = false;
1520 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
1521 if (TREE_OPERAND (expr, 0) != e0)
1522 changed = true;
1524 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
1525 if (TREE_OPERAND (expr, 1) != e1)
1526 changed = true;
1528 if (code == COND_EXPR)
1530 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
1531 if (TREE_OPERAND (expr, 2) != e2)
1532 changed = true;
1534 else
1535 e2 = NULL_TREE;
1537 if (changed)
1539 if (code == COND_EXPR)
1540 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1541 else
1542 expr = fold_build2 (code, boolean_type_node, e0, e1);
1545 return expr;
1548 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
1549 propagation, and vice versa. Fold does not handle this, since it is
1550 considered too expensive. */
1551 if (TREE_CODE (cond) == EQ_EXPR)
1553 e0 = TREE_OPERAND (cond, 0);
1554 e1 = TREE_OPERAND (cond, 1);
1556 /* We know that e0 == e1. Check whether we cannot simplify expr
1557 using this fact. */
1558 e = simplify_replace_tree (expr, e0, e1);
1559 if (integer_zerop (e) || integer_nonzerop (e))
1560 return e;
1562 e = simplify_replace_tree (expr, e1, e0);
1563 if (integer_zerop (e) || integer_nonzerop (e))
1564 return e;
1566 if (TREE_CODE (expr) == EQ_EXPR)
1568 e0 = TREE_OPERAND (expr, 0);
1569 e1 = TREE_OPERAND (expr, 1);
1571 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
1572 e = simplify_replace_tree (cond, e0, e1);
1573 if (integer_zerop (e))
1574 return e;
1575 e = simplify_replace_tree (cond, e1, e0);
1576 if (integer_zerop (e))
1577 return e;
1579 if (TREE_CODE (expr) == NE_EXPR)
1581 e0 = TREE_OPERAND (expr, 0);
1582 e1 = TREE_OPERAND (expr, 1);
1584 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
1585 e = simplify_replace_tree (cond, e0, e1);
1586 if (integer_zerop (e))
1587 return boolean_true_node;
1588 e = simplify_replace_tree (cond, e1, e0);
1589 if (integer_zerop (e))
1590 return boolean_true_node;
1593 te = expand_simple_operations (expr);
1595 /* Check whether COND ==> EXPR. */
1596 notcond = invert_truthvalue (cond);
1597 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
1598 if (e && integer_nonzerop (e))
1599 return e;
1601 /* Check whether COND ==> not EXPR. */
1602 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
1603 if (e && integer_zerop (e))
1604 return e;
1606 return expr;
1609 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1610 expression (or EXPR unchanged, if no simplification was possible).
1611 Wrapper around tree_simplify_using_condition_1 that ensures that chains
1612 of simple operations in definitions of ssa names in COND are expanded,
1613 so that things like casts or incrementing the value of the bound before
1614 the loop do not cause us to fail. */
1616 static tree
1617 tree_simplify_using_condition (tree cond, tree expr)
1619 cond = expand_simple_operations (cond);
1621 return tree_simplify_using_condition_1 (cond, expr);
1624 /* Tries to simplify EXPR using the conditions on entry to LOOP.
1625 Returns the simplified expression (or EXPR unchanged, if no
1626 simplification was possible). */
1628 static tree
1629 simplify_using_initial_conditions (struct loop *loop, tree expr)
1631 edge e;
1632 basic_block bb;
1633 gimple stmt;
1634 tree cond;
1635 int cnt = 0;
1637 if (TREE_CODE (expr) == INTEGER_CST)
1638 return expr;
1640 /* Limit walking the dominators to avoid quadraticness in
1641 the number of BBs times the number of loops in degenerate
1642 cases. */
1643 for (bb = loop->header;
1644 bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
1645 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
1647 if (!single_pred_p (bb))
1648 continue;
1649 e = single_pred_edge (bb);
1651 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
1652 continue;
1654 stmt = last_stmt (e->src);
1655 cond = fold_build2 (gimple_cond_code (stmt),
1656 boolean_type_node,
1657 gimple_cond_lhs (stmt),
1658 gimple_cond_rhs (stmt));
1659 if (e->flags & EDGE_FALSE_VALUE)
1660 cond = invert_truthvalue (cond);
1661 expr = tree_simplify_using_condition (cond, expr);
1662 ++cnt;
1665 return expr;
1668 /* Tries to simplify EXPR using the evolutions of the loop invariants
1669 in the superloops of LOOP. Returns the simplified expression
1670 (or EXPR unchanged, if no simplification was possible). */
1672 static tree
1673 simplify_using_outer_evolutions (struct loop *loop, tree expr)
1675 enum tree_code code = TREE_CODE (expr);
1676 bool changed;
1677 tree e, e0, e1, e2;
1679 if (is_gimple_min_invariant (expr))
1680 return expr;
1682 if (code == TRUTH_OR_EXPR
1683 || code == TRUTH_AND_EXPR
1684 || code == COND_EXPR)
1686 changed = false;
1688 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
1689 if (TREE_OPERAND (expr, 0) != e0)
1690 changed = true;
1692 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
1693 if (TREE_OPERAND (expr, 1) != e1)
1694 changed = true;
1696 if (code == COND_EXPR)
1698 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
1699 if (TREE_OPERAND (expr, 2) != e2)
1700 changed = true;
1702 else
1703 e2 = NULL_TREE;
1705 if (changed)
1707 if (code == COND_EXPR)
1708 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1709 else
1710 expr = fold_build2 (code, boolean_type_node, e0, e1);
1713 return expr;
1716 e = instantiate_parameters (loop, expr);
1717 if (is_gimple_min_invariant (e))
1718 return e;
1720 return expr;
1723 /* Returns true if EXIT is the only possible exit from LOOP. */
1725 bool
1726 loop_only_exit_p (const struct loop *loop, const_edge exit)
1728 basic_block *body;
1729 gimple_stmt_iterator bsi;
1730 unsigned i;
1731 gimple call;
1733 if (exit != single_exit (loop))
1734 return false;
1736 body = get_loop_body (loop);
1737 for (i = 0; i < loop->num_nodes; i++)
1739 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
1741 call = gsi_stmt (bsi);
1742 if (gimple_code (call) != GIMPLE_CALL)
1743 continue;
1745 if (gimple_has_side_effects (call))
1747 free (body);
1748 return false;
1753 free (body);
1754 return true;
1757 /* Stores description of number of iterations of LOOP derived from
1758 EXIT (an exit edge of the LOOP) in NITER. Returns true if some
1759 useful information could be derived (and fields of NITER have the
1760 meaning described in comments at struct tree_niter_desc
1761 declaration), false otherwise. If WARN is true and
1762 -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
1763 potentially unsafe assumptions. */
1765 bool
1766 number_of_iterations_exit (struct loop *loop, edge exit,
1767 struct tree_niter_desc *niter,
1768 bool warn)
1770 gimple stmt;
1771 tree type;
1772 tree op0, op1;
1773 enum tree_code code;
1774 affine_iv iv0, iv1;
1776 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
1777 return false;
1779 niter->assumptions = boolean_false_node;
1780 stmt = last_stmt (exit->src);
1781 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1782 return false;
1784 /* We want the condition for staying inside the loop. */
1785 code = gimple_cond_code (stmt);
1786 if (exit->flags & EDGE_TRUE_VALUE)
1787 code = invert_tree_comparison (code, false);
1789 switch (code)
1791 case GT_EXPR:
1792 case GE_EXPR:
1793 case NE_EXPR:
1794 case LT_EXPR:
1795 case LE_EXPR:
1796 break;
1798 default:
1799 return false;
1802 op0 = gimple_cond_lhs (stmt);
1803 op1 = gimple_cond_rhs (stmt);
1804 type = TREE_TYPE (op0);
1806 if (TREE_CODE (type) != INTEGER_TYPE
1807 && !POINTER_TYPE_P (type))
1808 return false;
1810 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
1811 return false;
1812 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
1813 return false;
1815 /* We don't want to see undefined signed overflow warnings while
1816 computing the number of iterations. */
1817 fold_defer_overflow_warnings ();
1819 iv0.base = expand_simple_operations (iv0.base);
1820 iv1.base = expand_simple_operations (iv1.base);
1821 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
1822 loop_only_exit_p (loop, exit)))
1824 fold_undefer_and_ignore_overflow_warnings ();
1825 return false;
1828 if (optimize >= 3)
1830 niter->assumptions = simplify_using_outer_evolutions (loop,
1831 niter->assumptions);
1832 niter->may_be_zero = simplify_using_outer_evolutions (loop,
1833 niter->may_be_zero);
1834 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
1837 niter->assumptions
1838 = simplify_using_initial_conditions (loop,
1839 niter->assumptions);
1840 niter->may_be_zero
1841 = simplify_using_initial_conditions (loop,
1842 niter->may_be_zero);
1844 fold_undefer_and_ignore_overflow_warnings ();
1846 if (integer_onep (niter->assumptions))
1847 return true;
1849 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
1850 But if we can prove that there is overflow or some other source of weird
1851 behavior, ignore the loop even with -funsafe-loop-optimizations. */
1852 if (integer_zerop (niter->assumptions))
1853 return false;
1855 if (flag_unsafe_loop_optimizations)
1856 niter->assumptions = boolean_true_node;
1858 if (warn)
1860 const char *wording;
1861 location_t loc = gimple_location (stmt);
1863 /* We can provide a more specific warning if one of the operands is
1864 constant and the other advances by +1 or -1. */
1865 if (!integer_zerop (iv1.step)
1866 ? (integer_zerop (iv0.step)
1867 && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
1868 : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
1869 wording =
1870 flag_unsafe_loop_optimizations
1871 ? N_("assuming that the loop is not infinite")
1872 : N_("cannot optimize possibly infinite loops");
1873 else
1874 wording =
1875 flag_unsafe_loop_optimizations
1876 ? N_("assuming that the loop counter does not overflow")
1877 : N_("cannot optimize loop, the loop counter may overflow");
1879 if (LOCATION_LINE (loc) > 0)
1880 warning (OPT_Wunsafe_loop_optimizations, "%H%s", &loc, gettext (wording));
1881 else
1882 warning (OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
1885 return flag_unsafe_loop_optimizations;
1888 /* Try to determine the number of iterations of LOOP. If we succeed,
1889 expression giving number of iterations is returned and *EXIT is
1890 set to the edge from that the information is obtained. Otherwise
1891 chrec_dont_know is returned. */
1893 tree
1894 find_loop_niter (struct loop *loop, edge *exit)
1896 unsigned i;
1897 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1898 edge ex;
1899 tree niter = NULL_TREE, aniter;
1900 struct tree_niter_desc desc;
1902 *exit = NULL;
1903 for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
1905 if (!just_once_each_iteration_p (loop, ex->src))
1906 continue;
1908 if (!number_of_iterations_exit (loop, ex, &desc, false))
1909 continue;
1911 if (integer_nonzerop (desc.may_be_zero))
1913 /* We exit in the first iteration through this exit.
1914 We won't find anything better. */
1915 niter = build_int_cst (unsigned_type_node, 0);
1916 *exit = ex;
1917 break;
1920 if (!integer_zerop (desc.may_be_zero))
1921 continue;
1923 aniter = desc.niter;
1925 if (!niter)
1927 /* Nothing recorded yet. */
1928 niter = aniter;
1929 *exit = ex;
1930 continue;
1933 /* Prefer constants, the lower the better. */
1934 if (TREE_CODE (aniter) != INTEGER_CST)
1935 continue;
1937 if (TREE_CODE (niter) != INTEGER_CST)
1939 niter = aniter;
1940 *exit = ex;
1941 continue;
1944 if (tree_int_cst_lt (aniter, niter))
1946 niter = aniter;
1947 *exit = ex;
1948 continue;
1951 VEC_free (edge, heap, exits);
1953 return niter ? niter : chrec_dont_know;
1958 Analysis of the number of iterations of a loop by brute-force evaluation.
1962 /* Bound on the number of iterations we try to evaluate. */
1964 #define MAX_ITERATIONS_TO_TRACK \
1965 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
1967 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
1968 result by a chain of operations such that all but exactly one of their
1969 operands are constants. */
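/* For instance, if x_3 is defined as x_3 = x_2 + 4 and x_2 as x_2 = x_1 * 2,
   where x_1 is the result of a phi node in the header of LOOP, that phi
   node is returned. */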
1971 static gimple
1972 chain_of_csts_start (struct loop *loop, tree x)
1974 gimple stmt = SSA_NAME_DEF_STMT (x);
1975 tree use;
1976 basic_block bb = gimple_bb (stmt);
1977 enum tree_code code;
1979 if (!bb
1980 || !flow_bb_inside_loop_p (loop, bb))
1981 return NULL;
1983 if (gimple_code (stmt) == GIMPLE_PHI)
1985 if (bb == loop->header)
1986 return stmt;
1988 return NULL;
1991 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1992 return NULL;
1994 code = gimple_assign_rhs_code (stmt);
1995 if (gimple_references_memory_p (stmt)
1996 || TREE_CODE_CLASS (code) == tcc_reference
1997 || (code == ADDR_EXPR
1998 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
1999 return NULL;
2001 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2002 if (use == NULL_TREE)
2003 return NULL;
2005 return chain_of_csts_start (loop, use);
2008 /* Determines whether the expression X is derived from a result of a phi node
2009 in header of LOOP such that
2011 * the derivation of X consists only of operations with constants
2012 * the initial value of the phi node is constant
2013 * the value of the phi node in the next iteration can be derived from the
2014 value in the current iteration by a chain of operations with constants.
2016 If such phi node exists, it is returned, otherwise NULL is returned. */
2018 static gimple
2019 get_base_for (struct loop *loop, tree x)
2021 gimple phi;
2022 tree init, next;
2024 if (is_gimple_min_invariant (x))
2025 return NULL;
2027 phi = chain_of_csts_start (loop, x);
2028 if (!phi)
2029 return NULL;
2031 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2032 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2034 if (TREE_CODE (next) != SSA_NAME)
2035 return NULL;
2037 if (!is_gimple_min_invariant (init))
2038 return NULL;
2040 if (chain_of_csts_start (loop, next) != phi)
2041 return NULL;
2043 return phi;
2046 /* Given an expression X, then
2048 * if X is NULL_TREE, we return the constant BASE.
2049 * otherwise X is an SSA name whose value in the considered loop is derived
2050 by a chain of operations with constants from the result of a phi node in
2051 the header of the loop. Then we return the value of X when the value of
2052 the result of this phi node is given by the constant BASE. */
2054 static tree
2055 get_val_for (tree x, tree base)
2057 gimple stmt;
2059 gcc_assert (is_gimple_min_invariant (base));
2061 if (!x)
2062 return base;
2064 stmt = SSA_NAME_DEF_STMT (x);
2065 if (gimple_code (stmt) == GIMPLE_PHI)
2066 return base;
2068 gcc_assert (is_gimple_assign (stmt));
2070 /* STMT must be either an assignment of a single SSA name or an
2071 expression involving an SSA name and a constant. Try to fold that
2072 expression using the value for the SSA name. */
2073 if (gimple_assign_ssa_name_copy_p (stmt))
2074 return get_val_for (gimple_assign_rhs1 (stmt), base);
2075 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2076 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2078 return fold_build1 (gimple_assign_rhs_code (stmt),
2079 gimple_expr_type (stmt),
2080 get_val_for (gimple_assign_rhs1 (stmt), base));
2082 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2084 tree rhs1 = gimple_assign_rhs1 (stmt);
2085 tree rhs2 = gimple_assign_rhs2 (stmt);
2086 if (TREE_CODE (rhs1) == SSA_NAME)
2087 rhs1 = get_val_for (rhs1, base);
2088 else if (TREE_CODE (rhs2) == SSA_NAME)
2089 rhs2 = get_val_for (rhs2, base);
2090 else
2091 gcc_unreachable ();
2092 return fold_build2 (gimple_assign_rhs_code (stmt),
2093 gimple_expr_type (stmt), rhs1, rhs2);
2095 else
2096 gcc_unreachable ();
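/* Illustrative example (an editorial sketch): for the chain

     i_1 = PHI <7(preheader), i_3(latch)>
     i_2 = i_1 * 3;
     i_3 = i_2 - 1;

   calling get_val_for (i_3, build_int_cst (integer_type_node, 7)) recurses
   through the defining statements, substitutes 7 for the result of the phi
   node, and folds the chain to the constant 20.  */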
2100 /* Tries to count the number of iterations of LOOP until it exits through EXIT
2101 by brute force -- i.e. by determining the values of the operands of the
2102 condition at EXIT in the first few iterations of the loop (assuming that
2103 these values are constant) and finding the first iteration in which the
2104 condition is not satisfied. Returns the constant giving the number
2105 of iterations of LOOP if successful, chrec_dont_know otherwise. */
2107 tree
2108 loop_niter_by_eval (struct loop *loop, edge exit)
2110 tree acnd;
2111 tree op[2], val[2], next[2], aval[2];
2112 gimple phi, cond;
2113 unsigned i, j;
2114 enum tree_code cmp;
2116 cond = last_stmt (exit->src);
2117 if (!cond || gimple_code (cond) != GIMPLE_COND)
2118 return chrec_dont_know;
2120 cmp = gimple_cond_code (cond);
2121 if (exit->flags & EDGE_TRUE_VALUE)
2122 cmp = invert_tree_comparison (cmp, false);
2124 switch (cmp)
2126 case EQ_EXPR:
2127 case NE_EXPR:
2128 case GT_EXPR:
2129 case GE_EXPR:
2130 case LT_EXPR:
2131 case LE_EXPR:
2132 op[0] = gimple_cond_lhs (cond);
2133 op[1] = gimple_cond_rhs (cond);
2134 break;
2136 default:
2137 return chrec_dont_know;
2140 for (j = 0; j < 2; j++)
2142 if (is_gimple_min_invariant (op[j]))
2144 val[j] = op[j];
2145 next[j] = NULL_TREE;
2146 op[j] = NULL_TREE;
2148 else
2150 phi = get_base_for (loop, op[j]);
2151 if (!phi)
2152 return chrec_dont_know;
2153 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2154 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2158 /* Don't issue signed overflow warnings. */
2159 fold_defer_overflow_warnings ();
2161 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2163 for (j = 0; j < 2; j++)
2164 aval[j] = get_val_for (op[j], val[j]);
2166 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2167 if (acnd && integer_zerop (acnd))
2169 fold_undefer_and_ignore_overflow_warnings ();
2170 if (dump_file && (dump_flags & TDF_DETAILS))
2171 fprintf (dump_file,
2172 "Proved that loop %d iterates %d times using brute force.\n",
2173 loop->num, i);
2174 return build_int_cst (unsigned_type_node, i);
2177 for (j = 0; j < 2; j++)
2179 val[j] = get_val_for (next[j], val[j]);
2180 if (!is_gimple_min_invariant (val[j]))
2182 fold_undefer_and_ignore_overflow_warnings ();
2183 return chrec_dont_know;
2188 fold_undefer_and_ignore_overflow_warnings ();
2190 return chrec_dont_know;
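/* Illustrative example (an editorial sketch): the exit condition of

     i = 1;
     while (i < 100)
       i *= 2;

   cannot be analyzed as an affine induction variable, but I is derived
   from the header phi node by operations with constants, so the first few
   iterations can be simulated with get_val_for: I takes the values
   1, 2, 4, ..., 64, 128, the condition first fails at the eighth test, and
   the constant 7 is returned.  */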
2193 /* Finds the exit of LOOP through which the loop exits after a constant
2194 number of iterations and stores the exit edge to *EXIT. The constant
2195 giving the number of iterations of LOOP is returned. The number of
2196 iterations is determined using loop_niter_by_eval (i.e. by brute-force
2197 evaluation). If we are unable to find an exit for which loop_niter_by_eval
2198 determines the number of iterations, chrec_dont_know is returned. */
2200 tree
2201 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2203 unsigned i;
2204 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
2205 edge ex;
2206 tree niter = NULL_TREE, aniter;
2208 *exit = NULL;
2209 for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
2211 if (!just_once_each_iteration_p (loop, ex->src))
2212 continue;
2214 aniter = loop_niter_by_eval (loop, ex);
2215 if (chrec_contains_undetermined (aniter))
2216 continue;
2218 if (niter
2219 && !tree_int_cst_lt (aniter, niter))
2220 continue;
2222 niter = aniter;
2223 *exit = ex;
2225 VEC_free (edge, heap, exits);
2227 return niter ? niter : chrec_dont_know;
2232 Analysis of upper bounds on number of iterations of a loop.
2236 static double_int derive_constant_upper_bound_ops (tree, tree,
2237 enum tree_code, tree);
2239 /* Returns a constant upper bound on the value of the right-hand side of
2240 an assignment statement STMT. */
2242 static double_int
2243 derive_constant_upper_bound_assign (gimple stmt)
2245 enum tree_code code = gimple_assign_rhs_code (stmt);
2246 tree op0 = gimple_assign_rhs1 (stmt);
2247 tree op1 = gimple_assign_rhs2 (stmt);
2249 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2250 op0, code, op1);
2253 /* Returns a constant upper bound on the value of expression VAL. VAL
2254 is considered to be unsigned. If its type is signed, its value must
2255 be nonnegative. */
2257 static double_int
2258 derive_constant_upper_bound (tree val)
2260 enum tree_code code;
2261 tree op0, op1;
2263 extract_ops_from_tree (val, &code, &op0, &op1);
2264 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2267 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2268 whose type is TYPE. The expression is considered to be unsigned. If
2269 its type is signed, its value must be nonnegative. */
2271 static double_int
2272 derive_constant_upper_bound_ops (tree type, tree op0,
2273 enum tree_code code, tree op1)
2275 tree subtype, maxt;
2276 double_int bnd, max, mmax, cst;
2277 gimple stmt;
2279 if (INTEGRAL_TYPE_P (type))
2280 maxt = TYPE_MAX_VALUE (type);
2281 else
2282 maxt = upper_bound_in_type (type, type);
2284 max = tree_to_double_int (maxt);
2286 switch (code)
2288 case INTEGER_CST:
2289 return tree_to_double_int (op0);
2291 CASE_CONVERT:
2292 subtype = TREE_TYPE (op0);
2293 if (!TYPE_UNSIGNED (subtype)
2294 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2295 that OP0 is nonnegative. */
2296 && TYPE_UNSIGNED (type)
2297 && !tree_expr_nonnegative_p (op0))
2299 /* If we cannot prove that the converted expression is nonnegative,
2300 we cannot establish a more useful upper bound than the precision
2301 of the type gives us. */
2302 return max;
2305 /* We now know that op0 is a nonnegative value. Try deriving an upper
2306 bound for it. */
2307 bnd = derive_constant_upper_bound (op0);
2309 /* If the bound does not fit in TYPE, the maximum value of TYPE could be
2310 attained. */
2311 if (double_int_ucmp (max, bnd) < 0)
2312 return max;
2314 return bnd;
2316 case PLUS_EXPR:
2317 case POINTER_PLUS_EXPR:
2318 case MINUS_EXPR:
2319 if (TREE_CODE (op1) != INTEGER_CST
2320 || !tree_expr_nonnegative_p (op0))
2321 return max;
2323 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
2324 choose the most logical way to treat this constant regardless
2325 of the signedness of the type. */
2326 cst = tree_to_double_int (op1);
2327 cst = double_int_sext (cst, TYPE_PRECISION (type));
2328 if (code != MINUS_EXPR)
2329 cst = double_int_neg (cst);
2331 bnd = derive_constant_upper_bound (op0);
2333 if (double_int_negative_p (cst))
2335 cst = double_int_neg (cst);
2336 /* Avoid CST == 0x80000... */
2337 if (double_int_negative_p (cst))
2338 return max;
2340 /* OP0 + CST. We need to check that
2341 BND <= MAX (type) - CST. */
2343 mmax = double_int_add (max, double_int_neg (cst));
2344 if (double_int_ucmp (bnd, mmax) > 0)
2345 return max;
2347 return double_int_add (bnd, cst);
2349 else
2351 /* OP0 - CST, where CST >= 0.
2353 If TYPE is signed, we have already verified that OP0 >= 0, and we
2354 know that the result is nonnegative. This implies that
2355 VAL <= BND - CST.
2357 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2358 otherwise the operation underflows.
2361 /* This should only happen if the type is unsigned; however, for
2362 buggy programs that use overflowing signed arithmetic even with
2363 -fno-wrapv, this condition may also be true for signed values. */
2364 if (double_int_ucmp (bnd, cst) < 0)
2365 return max;
2367 if (TYPE_UNSIGNED (type))
2369 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2370 double_int_to_tree (type, cst));
2371 if (!tem || integer_nonzerop (tem))
2372 return max;
2375 bnd = double_int_add (bnd, double_int_neg (cst));
2378 return bnd;
2380 case FLOOR_DIV_EXPR:
2381 case EXACT_DIV_EXPR:
2382 if (TREE_CODE (op1) != INTEGER_CST
2383 || tree_int_cst_sign_bit (op1))
2384 return max;
2386 bnd = derive_constant_upper_bound (op0);
2387 return double_int_udiv (bnd, tree_to_double_int (op1), FLOOR_DIV_EXPR);
2389 case BIT_AND_EXPR:
2390 if (TREE_CODE (op1) != INTEGER_CST
2391 || tree_int_cst_sign_bit (op1))
2392 return max;
2393 return tree_to_double_int (op1);
2395 case SSA_NAME:
2396 stmt = SSA_NAME_DEF_STMT (op0);
2397 if (gimple_code (stmt) != GIMPLE_ASSIGN
2398 || gimple_assign_lhs (stmt) != op0)
2399 return max;
2400 return derive_constant_upper_bound_assign (stmt);
2402 default:
2403 return max;
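/* Worked example (editorial, illustrative only): for an unsigned int value
   defined by

     tmp_1 = x_2 & 255;
     val_3 = tmp_1 + 3;

   a query for VAL_3 hits the PLUS_EXPR case with CST = 3, recurses through
   the SSA_NAME case into the definition of TMP_1, where the BIT_AND_EXPR
   case yields 255, so the derived upper bound is 255 + 3 = 258.  */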
2407 /* Records that every statement in LOOP is executed I_BOUND times.
2408 REALISTIC is true if I_BOUND is expected to be close to the real number
2409 of iterations. UPPER is true if we are sure the loop iterates at most
2410 I_BOUND times. */
2412 static void
2413 record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
2414 bool upper)
2416 /* Update the bounds only when there is no previous estimate, or when the
2417 current estimate is smaller. */
2418 if (upper
2419 && (!loop->any_upper_bound
2420 || double_int_ucmp (i_bound, loop->nb_iterations_upper_bound) < 0))
2422 loop->any_upper_bound = true;
2423 loop->nb_iterations_upper_bound = i_bound;
2425 if (realistic
2426 && (!loop->any_estimate
2427 || double_int_ucmp (i_bound, loop->nb_iterations_estimate) < 0))
2429 loop->any_estimate = true;
2430 loop->nb_iterations_estimate = i_bound;
2434 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
2435 is true if the loop is exited immediately after STMT, and this exit
2436 is taken at the latest when STMT has been executed BOUND + 1 times.
2437 REALISTIC is true if BOUND is expected to be close to the real number
2438 of iterations. UPPER is true if we are sure the loop iterates at most
2439 BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */
2441 static void
2442 record_estimate (struct loop *loop, tree bound, double_int i_bound,
2443 gimple at_stmt, bool is_exit, bool realistic, bool upper)
2445 double_int delta;
2446 edge exit;
2448 if (dump_file && (dump_flags & TDF_DETAILS))
2450 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2451 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
2452 fprintf (dump_file, " is %sexecuted at most ",
2453 upper ? "" : "probably ");
2454 print_generic_expr (dump_file, bound, TDF_SLIM);
2455 fprintf (dump_file, " (bounded by ");
2456 dump_double_int (dump_file, i_bound, true);
2457 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2460 /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
2461 real number of iterations. */
2462 if (TREE_CODE (bound) != INTEGER_CST)
2463 realistic = false;
2464 if (!upper && !realistic)
2465 return;
2467 /* If we have a guaranteed upper bound, record it in the appropriate
2468 list. */
2469 if (upper)
2471 struct nb_iter_bound *elt = GGC_NEW (struct nb_iter_bound);
2473 elt->bound = i_bound;
2474 elt->stmt = at_stmt;
2475 elt->is_exit = is_exit;
2476 elt->next = loop->bounds;
2477 loop->bounds = elt;
2480 /* Update the number of iteration estimates according to the bound.
2481 If at_stmt is an exit, then every statement in the loop is
2482 executed at most BOUND + 1 times. If it is not an exit, then
2483 some of the statements before it could be executed BOUND + 2
2484 times, if an exit of LOOP is before stmt. */
2485 exit = single_exit (loop);
2486 if (is_exit
2487 || (exit != NULL
2488 && dominated_by_p (CDI_DOMINATORS,
2489 exit->src, gimple_bb (at_stmt))))
2490 delta = double_int_one;
2491 else
2492 delta = double_int_two;
2493 i_bound = double_int_add (i_bound, delta);
2495 /* If an overflow occurred, ignore the result. */
2496 if (double_int_ucmp (i_bound, delta) < 0)
2497 return;
2499 record_niter_bound (loop, i_bound, realistic, upper);
2502 /* Record the estimate on number of iterations of LOOP based on the fact that
2503 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
2504 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
2505 estimated number of iterations is expected to be close to the real one.
2506 UPPER is true if we are sure the induction variable does not wrap. */
2508 static void
2509 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
2510 tree low, tree high, bool realistic, bool upper)
2512 tree niter_bound, extreme, delta;
2513 tree type = TREE_TYPE (base), unsigned_type;
2514 double_int max;
2516 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
2517 return;
2519 if (dump_file && (dump_flags & TDF_DETAILS))
2521 fprintf (dump_file, "Induction variable (");
2522 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
2523 fprintf (dump_file, ") ");
2524 print_generic_expr (dump_file, base, TDF_SLIM);
2525 fprintf (dump_file, " + ");
2526 print_generic_expr (dump_file, step, TDF_SLIM);
2527 fprintf (dump_file, " * iteration does not wrap in statement ");
2528 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2529 fprintf (dump_file, " in loop %d.\n", loop->num);
2532 unsigned_type = unsigned_type_for (type);
2533 base = fold_convert (unsigned_type, base);
2534 step = fold_convert (unsigned_type, step);
2536 if (tree_int_cst_sign_bit (step))
2538 extreme = fold_convert (unsigned_type, low);
2539 if (TREE_CODE (base) != INTEGER_CST)
2540 base = fold_convert (unsigned_type, high);
2541 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
2542 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
2544 else
2546 extreme = fold_convert (unsigned_type, high);
2547 if (TREE_CODE (base) != INTEGER_CST)
2548 base = fold_convert (unsigned_type, low);
2549 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
2552 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
2553 would get out of the range. */
2554 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
2555 max = derive_constant_upper_bound (niter_bound);
2556 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
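/* Worked example (editorial, illustrative only): for an IV {0, +, 4} known
   to stay within <0, 252>, DELTA is 252 and NITER_BOUND is 252 / 4 = 63,
   so record_estimate is told that STMT is executed at most 63 + 1 = 64
   times.  */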
2559 /* Returns true if REF is a reference to an array at the end of a dynamically
2560 allocated structure. If this is the case, the array may be allocated larger
2561 than its upper bound implies. */
2563 static bool
2564 array_at_struct_end_p (tree ref)
2566 tree base = get_base_address (ref);
2567 tree parent, field;
2569 /* Unless the reference is through a pointer, the size of the array matches
2570 its declaration. */
2571 if (!base || !INDIRECT_REF_P (base))
2572 return false;
2574 for (; handled_component_p (ref); ref = parent)
2576 parent = TREE_OPERAND (ref, 0);
2578 if (TREE_CODE (ref) == COMPONENT_REF)
2580 /* All fields of a union are at its end. */
2581 if (TREE_CODE (TREE_TYPE (parent)) == UNION_TYPE)
2582 continue;
2584 /* Unless the field is at the end of the struct, we are done. */
2585 field = TREE_OPERAND (ref, 1);
2586 if (TREE_CHAIN (field))
2587 return false;
2590 /* The other options are ARRAY_REF, ARRAY_RANGE_REF, VIEW_CONVERT_EXPR.
2591 In all these cases, we might be accessing the last element, and
2592 although in practice this will probably never happen, it is legal for
2593 the indices of this last element to exceed the bounds of the array.
2594 Therefore, continue checking. */
2597 gcc_assert (INDIRECT_REF_P (ref));
2598 return true;
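/* Illustrative example (an editorial sketch with hypothetical types): given

     struct msg { int len; char data[1]; };
     struct msg *m = malloc (sizeof (struct msg) + n);

   a reference such as m->data[i] is based on a pointer and DATA is the
   last field of the structure, so the function returns true: the array may
   really extend beyond its declared bound of one element.  */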
2601 /* Determine information about the number of iterations of LOOP from the index
2602 IDX of a data reference accessed in STMT. RELIABLE is true if STMT is
2603 guaranteed to be executed in every iteration of LOOP. Callback for
2604 for_each_index. */
2606 struct ilb_data
2608 struct loop *loop;
2609 gimple stmt;
2610 bool reliable;
2613 static bool
2614 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
2616 struct ilb_data *data = (struct ilb_data *) dta;
2617 tree ev, init, step;
2618 tree low, high, type, next;
2619 bool sign, upper = data->reliable, at_end = false;
2620 struct loop *loop = data->loop;
2622 if (TREE_CODE (base) != ARRAY_REF)
2623 return true;
2625 /* For arrays at the end of a structure, we are not guaranteed that they
2626 do not really extend beyond their declared size. However, for arrays of
2627 size greater than one, this is unlikely to be intended. */
2628 if (array_at_struct_end_p (base))
2630 at_end = true;
2631 upper = false;
2634 ev = instantiate_parameters (loop, analyze_scalar_evolution (loop, *idx));
2635 init = initial_condition (ev);
2636 step = evolution_part_in_loop_num (ev, loop->num);
2638 if (!init
2639 || !step
2640 || TREE_CODE (step) != INTEGER_CST
2641 || integer_zerop (step)
2642 || tree_contains_chrecs (init, NULL)
2643 || chrec_contains_symbols_defined_in_loop (init, loop->num))
2644 return true;
2646 low = array_ref_low_bound (base);
2647 high = array_ref_up_bound (base);
2649 /* The case of nonconstant bounds could be handled, but it would be
2650 complicated. */
2651 if (TREE_CODE (low) != INTEGER_CST
2652 || !high
2653 || TREE_CODE (high) != INTEGER_CST)
2654 return true;
2655 sign = tree_int_cst_sign_bit (step);
2656 type = TREE_TYPE (step);
2658 /* An array of length 1 at the end of a structure most likely extends
2659 beyond its bounds. */
2660 if (at_end
2661 && operand_equal_p (low, high, 0))
2662 return true;
2664 /* In case the relevant bound of the array does not fit in type, or
2665 it does, but bound + step (in type) still falls within the range of the
2666 array, the index may wrap and still stay within the range of the array
2667 (consider e.g. if the array is indexed by the full range of
2668 unsigned char).
2670 To make things simpler, we require both bounds to fit into type, although
2671 there are cases where this would not be strictly necessary. */
2672 if (!int_fits_type_p (high, type)
2673 || !int_fits_type_p (low, type))
2674 return true;
2675 low = fold_convert (type, low);
2676 high = fold_convert (type, high);
2678 if (sign)
2679 next = fold_binary (PLUS_EXPR, type, low, step);
2680 else
2681 next = fold_binary (PLUS_EXPR, type, high, step);
2683 if (tree_int_cst_compare (low, next) <= 0
2684 && tree_int_cst_compare (next, high) <= 0)
2685 return true;
2687 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, true, upper);
2688 return true;
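/* Illustrative example (an editorial sketch): in

     int a[100];
     for (i = 0; i != n; i++)
       a[i] = 0;

   the index evolves as {0, +, 1} and the array bounds are <0, 99>.  Since
   A is not an array at the end of a structure and the access may not leave
   its bounds, record_nonwrapping_iv concludes that the store is executed
   at most 100 times, which also bounds the number of iterations of the
   loop.  */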
2691 /* Determine information about the number of iterations of LOOP from the bounds
2692 of arrays in the data reference REF accessed in STMT. RELIABLE is true if
2693 STMT is guaranteed to be executed in every iteration of LOOP. */
2695 static void
2696 infer_loop_bounds_from_ref (struct loop *loop, gimple stmt, tree ref,
2697 bool reliable)
2699 struct ilb_data data;
2701 data.loop = loop;
2702 data.stmt = stmt;
2703 data.reliable = reliable;
2704 for_each_index (&ref, idx_infer_loop_bounds, &data);
2707 /* Determine information about the number of iterations of LOOP from the way
2708 arrays are used in STMT. RELIABLE is true if STMT is guaranteed to be
2709 executed in every iteration of LOOP. */
2711 static void
2712 infer_loop_bounds_from_array (struct loop *loop, gimple stmt, bool reliable)
2714 if (is_gimple_assign (stmt))
2716 tree op0 = gimple_assign_lhs (stmt);
2717 tree op1 = gimple_assign_rhs1 (stmt);
2719 /* For each memory access, analyze its access function
2720 and record a bound on the loop iteration domain. */
2721 if (REFERENCE_CLASS_P (op0))
2722 infer_loop_bounds_from_ref (loop, stmt, op0, reliable);
2724 if (REFERENCE_CLASS_P (op1))
2725 infer_loop_bounds_from_ref (loop, stmt, op1, reliable);
2727 else if (is_gimple_call (stmt))
2729 tree arg, lhs;
2730 unsigned i, n = gimple_call_num_args (stmt);
2732 lhs = gimple_call_lhs (stmt);
2733 if (lhs && REFERENCE_CLASS_P (lhs))
2734 infer_loop_bounds_from_ref (loop, stmt, lhs, reliable);
2736 for (i = 0; i < n; i++)
2738 arg = gimple_call_arg (stmt, i);
2739 if (REFERENCE_CLASS_P (arg))
2740 infer_loop_bounds_from_ref (loop, stmt, arg, reliable);
2745 /* Determine information about the number of iterations of LOOP from the fact
2746 that the signed arithmetic in STMT does not overflow. */
2748 static void
2749 infer_loop_bounds_from_signedness (struct loop *loop, gimple stmt)
2751 tree def, base, step, scev, type, low, high;
2753 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2754 return;
2756 def = gimple_assign_lhs (stmt);
2758 if (TREE_CODE (def) != SSA_NAME)
2759 return;
2761 type = TREE_TYPE (def);
2762 if (!INTEGRAL_TYPE_P (type)
2763 || !TYPE_OVERFLOW_UNDEFINED (type))
2764 return;
2766 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2767 if (chrec_contains_undetermined (scev))
2768 return;
2770 base = initial_condition_in_loop_num (scev, loop->num);
2771 step = evolution_part_in_loop_num (scev, loop->num);
2773 if (!base || !step
2774 || TREE_CODE (step) != INTEGER_CST
2775 || tree_contains_chrecs (base, NULL)
2776 || chrec_contains_symbols_defined_in_loop (base, loop->num))
2777 return;
2779 low = lower_bound_in_type (type, type);
2780 high = upper_bound_in_type (type, type);
2782 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
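/* Illustrative example (an editorial sketch): with signed overflow
   undefined (no -fwrapv), the increment in

     for (int i = 0; i != n; i += 4)
       body ();

   has the evolution {0, +, 4}, which must stay within [INT_MIN, INT_MAX];
   the statement is therefore recorded as executed at most roughly
   INT_MAX / 4 times.  This is only an upper bound, not an estimate, since
   REALISTIC is false here.  */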
2785 /* The following analyzers extract information on the bounds
2786 of LOOP from the following sources of undefined behavior:
2788 - data references must not access elements beyond the statically
2789 allocated size,
2791 - signed variables must not overflow when flag_wrapv is not set.
2794 static void
2795 infer_loop_bounds_from_undefined (struct loop *loop)
2797 unsigned i;
2798 basic_block *bbs;
2799 gimple_stmt_iterator bsi;
2800 basic_block bb;
2801 bool reliable;
2803 bbs = get_loop_body (loop);
2805 for (i = 0; i < loop->num_nodes; i++)
2807 bb = bbs[i];
2809 /* If BB is not executed in each iteration of the loop, we cannot
2810 use the operations in it to infer a reliable upper bound on the
2811 number of iterations of the loop. However, we can still use them as a guess. */
2812 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
2814 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2816 gimple stmt = gsi_stmt (bsi);
2818 infer_loop_bounds_from_array (loop, stmt, reliable);
2820 if (reliable)
2821 infer_loop_bounds_from_signedness (loop, stmt);
2826 free (bbs);
2829 /* Converts VAL to double_int. */
2831 static double_int
2832 gcov_type_to_double_int (gcov_type val)
2834 double_int ret;
2836 ret.low = (unsigned HOST_WIDE_INT) val;
2837 /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
2838 the full width of the type, which would be undefined. */
2839 val >>= HOST_BITS_PER_WIDE_INT - 1;
2840 val >>= 1;
2841 ret.high = (unsigned HOST_WIDE_INT) val;
2843 return ret;
2846 /* Records estimates on numbers of iterations of LOOP. */
2848 void
2849 estimate_numbers_of_iterations_loop (struct loop *loop)
2851 VEC (edge, heap) *exits;
2852 tree niter, type;
2853 unsigned i;
2854 struct tree_niter_desc niter_desc;
2855 edge ex;
2856 double_int bound;
2858 /* Give up if we have already tried to compute an estimate. */
2859 if (loop->estimate_state != EST_NOT_COMPUTED)
2860 return;
2861 loop->estimate_state = EST_AVAILABLE;
2862 loop->any_upper_bound = false;
2863 loop->any_estimate = false;
2865 exits = get_loop_exit_edges (loop);
2866 for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
2868 if (!number_of_iterations_exit (loop, ex, &niter_desc, false))
2869 continue;
2871 niter = niter_desc.niter;
2872 type = TREE_TYPE (niter);
2873 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
2874 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
2875 build_int_cst (type, 0),
2876 niter);
2877 record_estimate (loop, niter, niter_desc.max,
2878 last_stmt (ex->src),
2879 true, true, true);
2881 VEC_free (edge, heap, exits);
2883 infer_loop_bounds_from_undefined (loop);
2885 /* If we have a measured profile, use it to estimate the number of
2886 iterations. */
2887 if (loop->header->count != 0)
2889 gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
2890 bound = gcov_type_to_double_int (nit);
2891 record_niter_bound (loop, bound, true, false);
2894 /* If an upper bound is smaller than the realistic estimate of the
2895 number of iterations, use the upper bound instead. */
2896 if (loop->any_upper_bound
2897 && loop->any_estimate
2898 && double_int_ucmp (loop->nb_iterations_upper_bound,
2899 loop->nb_iterations_estimate) < 0)
2900 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
2903 /* Records estimates on numbers of iterations of loops. */
2905 void
2906 estimate_numbers_of_iterations (void)
2908 loop_iterator li;
2909 struct loop *loop;
2911 /* We don't want to issue signed overflow warnings while getting
2912 loop iteration estimates. */
2913 fold_defer_overflow_warnings ();
2915 FOR_EACH_LOOP (li, loop, 0)
2917 estimate_numbers_of_iterations_loop (loop);
2920 fold_undefer_and_ignore_overflow_warnings ();
2923 /* Returns true if statement S1 dominates statement S2. */
2925 bool
2926 stmt_dominates_stmt_p (gimple s1, gimple s2)
2928 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
2930 if (!bb1
2931 || s1 == s2)
2932 return true;
2934 if (bb1 == bb2)
2936 gimple_stmt_iterator bsi;
2938 if (gimple_code (s2) == GIMPLE_PHI)
2939 return false;
2941 if (gimple_code (s1) == GIMPLE_PHI)
2942 return true;
2944 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
2945 if (gsi_stmt (bsi) == s1)
2946 return true;
2948 return false;
2951 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
2954 /* Returns true when we can prove that the number of executions of
2955 STMT in the loop is at most NITER, according to the bound on
2956 the number of executions of the statement NITER_BOUND->stmt recorded in
2957 NITER_BOUND. If STMT is NULL, we must prove this bound for all
2958 statements in the loop. */
2960 static bool
2961 n_of_executions_at_most (gimple stmt,
2962 struct nb_iter_bound *niter_bound,
2963 tree niter)
2965 double_int bound = niter_bound->bound;
2966 tree nit_type = TREE_TYPE (niter), e;
2967 enum tree_code cmp;
2969 gcc_assert (TYPE_UNSIGNED (nit_type));
2971 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
2972 the number of iterations is small. */
2973 if (!double_int_fits_to_tree_p (nit_type, bound))
2974 return false;
2976 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
2977 times. This means that:
2979 -- if NITER_BOUND->is_exit is true, then everything before
2980 NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
2981 times, and everything after it at most NITER_BOUND->bound times.
2983 -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
2984 is executed, then NITER_BOUND->stmt is executed as well in the same
2985 iteration (we conclude this if both statements belong to the same
2986 basic block, or if STMT comes after NITER_BOUND->stmt), then STMT
2987 is executed at most NITER_BOUND->bound + 1 times. Otherwise STMT is
2988 executed at most NITER_BOUND->bound + 2 times. */
2990 if (niter_bound->is_exit)
2992 if (stmt
2993 && stmt != niter_bound->stmt
2994 && stmt_dominates_stmt_p (niter_bound->stmt, stmt))
2995 cmp = GE_EXPR;
2996 else
2997 cmp = GT_EXPR;
2999 else
3001 if (!stmt
3002 || (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
3003 && !stmt_dominates_stmt_p (niter_bound->stmt, stmt)))
3005 bound = double_int_add (bound, double_int_one);
3006 if (double_int_zero_p (bound)
3007 || !double_int_fits_to_tree_p (nit_type, bound))
3008 return false;
3010 cmp = GT_EXPR;
3013 e = fold_binary (cmp, boolean_type_node,
3014 niter, double_int_to_tree (nit_type, bound));
3015 return e && integer_nonzerop (e);
3018 /* Returns true if the arithmetics in TYPE can be assumed not to wrap. */
3020 bool
3021 nowrap_type_p (tree type)
3023 if (INTEGRAL_TYPE_P (type)
3024 && TYPE_OVERFLOW_UNDEFINED (type))
3025 return true;
3027 if (POINTER_TYPE_P (type))
3028 return true;
3030 return false;
3033 /* Return false only when the induction variable BASE + STEP * I is
3034 known to not overflow: i.e. when the number of iterations is small
3035 enough with respect to the step and initial condition in order to
3036 keep the evolution confined in TYPE's bounds. Return true when the
3037 iv is known to overflow or when the property is not computable.
3039 USE_OVERFLOW_SEMANTICS is true if this function should assume that
3040 the rules for overflow of the given language apply (e.g., that signed
3041 arithmetic in C does not overflow). */
3043 bool
3044 scev_probably_wraps_p (tree base, tree step,
3045 gimple at_stmt, struct loop *loop,
3046 bool use_overflow_semantics)
3048 struct nb_iter_bound *bound;
3049 tree delta, step_abs;
3050 tree unsigned_type, valid_niter;
3051 tree type = TREE_TYPE (step);
3053 /* FIXME: We really need something like
3054 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
3056 We used to test for the following situation that frequently appears
3057 during address arithmetic:
3059 D.1621_13 = (long unsigned intD.4) D.1620_12;
3060 D.1622_14 = D.1621_13 * 8;
3061 D.1623_15 = (doubleD.29 *) D.1622_14;
3063 And derived that the sequence corresponding to D_14
3064 can be proved to not wrap because it is used for computing a
3065 memory access; however, this is not really the case -- for example,
3066 if D_12 = (unsigned char) [254,+,1], then D_14 has values
3067 2032, 2040, 0, 8, ..., but the code is still legal. */
3069 if (chrec_contains_undetermined (base)
3070 || chrec_contains_undetermined (step))
3071 return true;
3073 if (integer_zerop (step))
3074 return false;
3076 /* If we can use the fact that signed and pointer arithmetic does not
3077 wrap, we are done. */
3078 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
3079 return false;
3081 /* To be able to use estimates on number of iterations of the loop,
3082 we must have an upper bound on the absolute value of the step. */
3083 if (TREE_CODE (step) != INTEGER_CST)
3084 return true;
3086 /* Don't issue signed overflow warnings. */
3087 fold_defer_overflow_warnings ();
3089 /* Otherwise, compute the number of iterations before we reach the
3090 bound of the type, and verify that the loop is exited before this
3091 occurs. */
3092 unsigned_type = unsigned_type_for (type);
3093 base = fold_convert (unsigned_type, base);
3095 if (tree_int_cst_sign_bit (step))
3097 tree extreme = fold_convert (unsigned_type,
3098 lower_bound_in_type (type, type));
3099 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3100 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
3101 fold_convert (unsigned_type, step));
3103 else
3105 tree extreme = fold_convert (unsigned_type,
3106 upper_bound_in_type (type, type));
3107 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3108 step_abs = fold_convert (unsigned_type, step);
3111 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
3113 estimate_numbers_of_iterations_loop (loop);
3114 for (bound = loop->bounds; bound; bound = bound->next)
3116 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
3118 fold_undefer_and_ignore_overflow_warnings ();
3119 return false;
3123 fold_undefer_and_ignore_overflow_warnings ();
3125 /* At this point we still don't have a proof that the iv does not
3126 overflow: give up. */
3127 return true;
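/* Illustrative example (an editorial sketch): for

     unsigned char i;
     int a[10];
     for (i = 0; i != n; i++)
       a[i] = 0;

   the IV {(unsigned char) 0, +, 1} could in principle wrap after 255
   steps, but the bound recorded from the access a[i] shows that the store
   is executed far fewer times, so n_of_executions_at_most succeeds for
   that bound and this function can return false.  */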
3130 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
3132 void
3133 free_numbers_of_iterations_estimates_loop (struct loop *loop)
3135 struct nb_iter_bound *bound, *next;
3137 loop->nb_iterations = NULL;
3138 loop->estimate_state = EST_NOT_COMPUTED;
3139 for (bound = loop->bounds; bound; bound = next)
3141 next = bound->next;
3142 ggc_free (bound);
3145 loop->bounds = NULL;
3148 /* Frees the information on upper bounds on numbers of iterations of loops. */
3150 void
3151 free_numbers_of_iterations_estimates (void)
3153 loop_iterator li;
3154 struct loop *loop;
3156 FOR_EACH_LOOP (li, loop, 0)
3158 free_numbers_of_iterations_estimates_loop (loop);
3162 /* Substitute value VAL for ssa name NAME inside expressions held
3163 at LOOP. */
3165 void
3166 substitute_in_loop_info (struct loop *loop, tree name, tree val)
3168 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);