/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "fold-const.h"
#include "calls.h"
#include "intl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"

/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

struct bounds
{
  mpz_t below, up;
};

/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
        break;

      *var = op0;
      /* Always sign extend the offset.  */
      wi::to_mpz (op1, offset, SIGNED);
      if (negate)
        mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (expr, offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}
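
/* For example, given EXPR = a_5 + 16, the function sets *VAR = a_5 and
   OFFSET = 16; given EXPR = b_3 - 7 it sets *VAR = b_3 and OFFSET = -7;
   and given EXPR = 42 it sets *VAR to a zero constant and OFFSET = 42
   (the SSA names here are purely illustrative).  */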

/* From condition C0 CMP C1 derives information regarding the value range
   of VAR, which is of TYPE.  Results are stored into BELOW and UP.  */

static void
refine_value_range_using_guard (tree type, tree var,
                                tree c0, enum tree_code cmp, tree c1,
                                mpz_t below, mpz_t up)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1;
  mpz_t mint, maxt, minc1, maxc1;
  wide_int minv, maxv;
  bool no_wrap = nowrap_type_p (type);
  bool c0_ok, c1_ok;
  signop sgn = TYPE_SIGN (type);

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however,
         such a guard is unlikely to appear, so we do not bother with
         handling it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information,
         except for the cases of comparing with bounds.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same
         type as VAR.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (operand_equal_p (var, c0, 0))
        {
          mpz_t valc1;

          /* Case of comparing VAR with its below/up bounds.  */
          mpz_init (valc1);
          wi::to_mpz (c1, valc1, TYPE_SIGN (type));
          if (mpz_cmp (valc1, below) == 0)
            cmp = GT_EXPR;
          if (mpz_cmp (valc1, up) == 0)
            cmp = LT_EXPR;

          mpz_clear (valc1);
        }
      else
        {
          /* Case of comparing with the bounds of the type.  */
          wide_int min = wi::min_value (type);
          wide_int max = wi::max_value (type);

          if (wi::eq_p (c1, min))
            cmp = GT_EXPR;
          if (wi::eq_p (c1, max))
            cmp = LT_EXPR;
        }

      /* Quick return if no useful information.  */
      if (cmp == NE_EXPR)
        return;

      break;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VAR.  */
  if (operand_equal_p (var, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }
  else if (!operand_equal_p (var, varc0, 0))
    {
      mpz_clear (offc0);
      mpz_clear (offc1);
      return;
    }

  mpz_init (mint);
  mpz_init (maxt);
  get_type_static_bounds (type, mint, maxt);
  mpz_init (minc1);
  mpz_init (maxc1);
  /* Setup range information for varc1.  */
  if (integer_zerop (varc1))
    {
      wi::to_mpz (integer_zero_node, minc1, TYPE_SIGN (type));
      wi::to_mpz (integer_zero_node, maxc1, TYPE_SIGN (type));
    }
  else if (TREE_CODE (varc1) == SSA_NAME
           && INTEGRAL_TYPE_P (type)
           && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
    {
      gcc_assert (wi::le_p (minv, maxv, sgn));
      wi::to_mpz (minv, minc1, sgn);
      wi::to_mpz (maxv, maxc1, sgn);
    }
  else
    {
      mpz_set (minc1, mint);
      mpz_set (maxc1, maxt);
    }

  /* Compute valid range information for varc1 + offc1.  Note nothing
     useful can be derived if it overflows or underflows.  Overflow or
     underflow could happen when:

       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
  mpz_add (minc1, minc1, offc1);
  mpz_add (maxc1, maxc1, offc1);
  c1_ok = (no_wrap
           || mpz_sgn (offc1) == 0
           || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
           || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
  if (!c1_ok)
    goto end;

  if (mpz_cmp (minc1, mint) < 0)
    mpz_set (minc1, mint);
  if (mpz_cmp (maxc1, maxt) > 0)
    mpz_set (maxc1, maxt);

  if (cmp == LT_EXPR)
    {
      cmp = LE_EXPR;
      mpz_sub_ui (maxc1, maxc1, 1);
    }
  if (cmp == GT_EXPR)
    {
      cmp = GE_EXPR;
      mpz_add_ui (minc1, minc1, 1);
    }

  /* Compute range information for varc0.  If there is no overflow,
     the condition implied that

       (varc0) cmp (varc1 + offc1 - offc0)

     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
     or the below bound if cmp is GE_EXPR.

     To prove there is no overflow/underflow, we need to check the below
     four cases:
       1) cmp == LE_EXPR && offc0 > 0

            (varc0 + offc0) doesn't overflow
            && (varc1 + offc1 - offc0) doesn't underflow

       2) cmp == LE_EXPR && offc0 < 0

            (varc0 + offc0) doesn't underflow
            && (varc1 + offc1 - offc0) doesn't overflow

          In this case, (varc0 + offc0) will never underflow if we can
          prove (varc1 + offc1 - offc0) doesn't overflow.

       3) cmp == GE_EXPR && offc0 < 0

            (varc0 + offc0) doesn't underflow
            && (varc1 + offc1 - offc0) doesn't overflow

       4) cmp == GE_EXPR && offc0 > 0

            (varc0 + offc0) doesn't overflow
            && (varc1 + offc1 - offc0) doesn't underflow

          In this case, (varc0 + offc0) will never overflow if we can
          prove (varc1 + offc1 - offc0) doesn't underflow.

     Note we only handle cases 2 and 4 in the code below.  */

  mpz_sub (minc1, minc1, offc0);
  mpz_sub (maxc1, maxc1, offc0);
  c0_ok = (no_wrap
           || mpz_sgn (offc0) == 0
           || (cmp == LE_EXPR
               && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
           || (cmp == GE_EXPR
               && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
  if (!c0_ok)
    goto end;

  if (cmp == LE_EXPR)
    {
      if (mpz_cmp (up, maxc1) > 0)
        mpz_set (up, maxc1);
    }
  else
    {
      if (mpz_cmp (below, minc1) < 0)
        mpz_set (below, minc1);
    }

end:
  mpz_clear (mint);
  mpz_clear (maxt);
  mpz_clear (minc1);
  mpz_clear (maxc1);
  mpz_clear (offc0);
  mpz_clear (offc1);
}
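
/* As an illustrative example: for a guard i_1 < n_2 on an unsigned type
   where VRP knows n_2 lies in [0, 100], the LT_EXPR handling above turns
   the comparison into i_1 <= 99 and thus lowers UP for i_1 to 99
   (assuming UP was larger); the SSA names are hypothetical.  */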

/* Stores an estimate of the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
                       mpz_t min, mpz_t max)
{
  int cnt = 0;
  mpz_t minm, maxm;
  basic_block bb;
  wide_int minv, maxv;
  enum value_range_type rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gphi_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
         PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          wide_int minc, maxc;
          if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
              && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
                  == VR_RANGE))
            {
              if (rtype != VR_RANGE)
                {
                  rtype = VR_RANGE;
                  minv = minc;
                  maxv = maxc;
                }
              else
                {
                  minv = wi::max (minv, minc, sgn);
                  maxv = wi::min (maxv, maxc, sgn);
                  /* If the PHI result range is inconsistent with
                     the VAR range, give up on looking at the PHI
                     results.  This can happen if VR_UNDEFINED is
                     involved.  */
                  if (wi::gt_p (minv, maxv, sgn))
                    {
                      rtype = get_range_info (var, &minv, &maxv);
                      break;
                    }
                }
            }
        }
      mpz_init (minm);
      mpz_init (maxm);
      if (rtype != VR_RANGE)
        {
          mpz_set (minm, min);
          mpz_set (maxm, max);
        }
      else
        {
          gcc_assert (wi::le_p (minv, maxv, sgn));
          wi::to_mpz (minv, minm, sgn);
          wi::to_mpz (maxv, maxm, sgn);
        }
      /* Now walk the dominators of the loop header and use the entry
         guards to refine the estimates.  */
      for (bb = loop->header;
           bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
           bb = get_immediate_dominator (CDI_DOMINATORS, bb))
        {
          edge e;
          tree c0, c1;
          gimple *cond;
          enum tree_code cmp;

          if (!single_pred_p (bb))
            continue;
          e = single_pred_edge (bb);

          if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
            continue;

          cond = last_stmt (e->src);
          c0 = gimple_cond_lhs (cond);
          cmp = gimple_cond_code (cond);
          c1 = gimple_cond_rhs (cond);

          if (e->flags & EDGE_FALSE_VALUE)
            cmp = invert_tree_comparison (cmp, false);

          refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
          ++cnt;
        }

      mpz_add (minm, minm, off);
      mpz_add (maxm, maxm, off);
      /* If the computation may not wrap or off is zero, then this
         is always fine.  If off is negative and minv + off isn't
         smaller than type's minimum, or off is positive and
         maxv + off isn't bigger than type's maximum, use the more
         precise range too.  */
      if (nowrap_type_p (type)
          || mpz_sgn (off) == 0
          || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
          || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
        {
          mpz_set (min, minm);
          mpz_set (max, maxm);
          mpz_clear (minm);
          mpz_clear (maxm);
          return;
        }
      mpz_clear (minm);
      mpz_clear (maxm);
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}

/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
                                    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
          them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
          expressions are var + X - M and var + Y, where M is the range of
          the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
          is X - Y - M.
     Therefore, if the arithmetic in the type does not overflow, then the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
        mpz_sub (bnds->below, bnds->below, m);
      else
        mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
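
/* For instance, in an 8-bit wrapping type (M = 256) with X = 10 and
   Y = 3, the difference of var + 10 and var + 3 is bounded by
   (10 - 3 - 256, 10 - 3) = (-249, 7); if the type cannot wrap, the
   bounds collapse to (7, 7).  */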

/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
                           tree vary, mpz_t offy,
                           tree c0, enum tree_code cmp, tree c1,
                           bounds *bnds)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
         a guard is unlikely to appear, so we do not bother with handling
         it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except for
         the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same type
         as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
          && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
        {
          cmp = GT_EXPR;
          break;
        }
      if (TYPE_MAX_VALUE (type)
          && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
        {
          cmp = LT_EXPR;
          break;
        }

      return;
    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      std::swap (varx, vary);
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

       (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
        VARX + OFFC0 overflows, but VARX + OFFX does not.
        This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
        VARY + OFFC1 underflows and VARY + OFFY does not.
        This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
              || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
              || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
        mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
        {
          mpz_neg (bnd, bnd);
          if (mpz_cmp (bnds->below, bnd) < 0)
            mpz_set (bnds->below, bnd);
        }
      else
        {
          if (mpz_cmp (bnd, bnds->up) < 0)
            mpz_set (bnds->up, bnd);
        }
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}

/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   an integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple *cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
         offsets.  Matters are a bit more complicated when the
         addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
         estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
        cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
                                 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}
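
/* E.g., for X = i_1 + 5 and Y = i_1 (a hypothetical SSA name) in a type
   that cannot wrap, the offsetted-base case above yields the exact
   bounds (5, 5); when X and Y have unrelated variable parts, the bounds
   instead come from their value ranges and the entry guards.  */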

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}
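
/* In other words, both bounds are shifted by DELTA and then clamped to
   [-(2^prec - 1), 2^prec - 1], the range in which any difference of two
   values of TYPE must lie.  */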

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}

/* Returns the inverse of X modulo 2^s, where MASK = 2^s - 1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
        {
          irslt *= ix;
          ix *= ix;
        }
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
        {
          rslt = int_const_binop (MULT_EXPR, rslt, x);
          x = int_const_binop (MULT_EXPR, x, x);
        }
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
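
/* The squaring loop above maintains RSLT = X^(2^k - 1) and X = X^(2^k),
   so after CTR = s - 1 steps RSLT = X^(2^(s-1) - 1).  Since
   x^(2^(s-1)) == 1 (mod 2^s) for odd x, that is the modular inverse.
   For example, with MASK = 15 (s = 4) and X = 3, CTR = 3 and the result
   is 3^7 & 15 = 2187 & 15 = 11; indeed 3 * 11 = 33 == 1 (mod 16).  */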

/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable ever
   reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
                             bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
                       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
          && TREE_CODE (s) == INTEGER_CST
          && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
          && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
         the induction variable overflows (unless the loop is exited in some
         other way before).  Note that the actual induction variable in the
         loop (which ranges from base to final instead of from 0 to C) may
         overflow, in which case BNDS.up will not be giving a correct upper
         bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
         the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
        wi::to_mpz (c, bnd, UNSIGNED);
      else if (bnds_u_valid)
        mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (s, d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}

/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   ever reaches the value FINAL (we derived this earlier, and possibly set
   NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (tree type, affine_iv *iv, tree final,
                         struct tree_niter_desc *niter, bool exit_must_be_taken,
                         bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
                        fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv->base),
                       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, final),
                       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
                               exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let gcd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
                               (TYPE_PRECISION (niter_type)
                                - tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
                               build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
         assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
                                assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
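
/* A worked example for the nonunit-step path above: in an 8-bit unsigned
   type with S = 4 and C = 8, BITS = 2, D = 4 and BOUND = 63; C / D = 2,
   S / D = 1 and inverse (1, 63) = 1, so the number of iterations is
   (2 * 1) & 63 = 2, matching the exit test 4 * i != 8 failing after
   i = 0, 1.  */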

/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
                               struct tree_niter_desc *niter,
                               tree *delta, tree step,
                               bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  wi::to_mpz (mod, mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object the pointer points to being
     placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
            (iv0->no_overflow && integer_nonzerop (iv0->step))
            || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
         computation does not overflow, and that
         iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (MINUS_EXPR, type1,
                               TYPE_MAX_VALUE (type1), tmod);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build_pointer_plus (iv1->base, tmod));
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build2 (PLUS_EXPR, type1,
                                           iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
         computation does not overflow, and that
         iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type1), tmod);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build_pointer_plus (iv0->base,
                                                       fold_build1 (NEGATE_EXPR,
                                                                    type1, tmod)),
                              iv1->base);
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build2 (MINUS_EXPR, type1,
                                           iv0->base, tmod),
                              iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions,
                                      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                      niter->may_be_zero,
                                      noloop);
  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}
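
/* For instance, with IV0 = {base 0, step 3} and IV1 = {base 10, step 0}:
   *DELTA = 10 and STEP = 3, so MOD = 3 - (10 mod 3) = 2.  The final
   value is raised to 10 + 2 = 12 and *DELTA becomes 12, letting the
   caller treat the exit as 3 * i != 12, i.e., four iterations
   (i = 0, 3, 6, 9).  */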

/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
        return true;

      /* If iv0->base is a constant, we can determine the last value before
         overflow precisely; otherwise we conservatively assume
         MAX - STEP + 1.  */

      if (TREE_CODE (iv0->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
                           fold_convert (niter_type, iv0->base));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
        return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, iv1->base),
                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}

/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

       -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables
     us to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */

  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
                   /* For pointers, only values lying inside a single object
                      can be compared or manipulated by pointer arithmetic.
                      GCC in general does not allow or handle objects larger
                      than half of the address space, hence the upper bound
                      is satisfied for pointers.  */
                   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
                          iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
         the 0 address never belongs to any object, we can assume this for
         pointers.  */
      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type), diff);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
        }

      /* And then we can compute iv0->base - diff, and compare it with
         iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
                          iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MAX_VALUE (type), diff);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
        }

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);
  if (!rolls_p)
    {
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                        niter->may_be_zero, mbz);
    }
}

/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

static bool
number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                         struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv1->base),
                       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

         or

         for (i = iv1->base; i > iv0->base; i--).

         In both cases # of iterations is iv1->base - iv0->base, assuming that
         iv1->base >= iv0->base.

         First try to derive a lower bound on the value of
         iv1->base - iv0->base, computed in full precision.  If the difference
         is nonnegative, we are done, otherwise we must record the
         condition.  */

      if (mpz_sgn (bnds->below) < 0)
        niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
                                          iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
                                     TYPE_SIGN (niter_type));
      niter->control.no_overflow = true;
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
                         fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
                                     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
         zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
                   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (step, mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
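
/* The final division above rounds up: (DELTA + STEP - 1) / STEP
   = ceil (DELTA / STEP).  E.g., DELTA = 10 and STEP = 3 give
   (10 + 3 - 1) / 3 = 4 iterations.  */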

/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
                         struct tree_niter_desc *niter, bool exit_must_be_taken,
                         bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object the pointer points to being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv1->base, TYPE_MAX_VALUE (type));
      else
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
        return false;
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
        iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
        iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
                                 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
                             iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
                                  bnds);
}
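
/* So a loop with exit test i <= n and a positive step is analysed as
   i < n + 1, after recording (for non-pointer types, unless the exit is
   known to be taken) the assumption n != TYPE_MAX so that n + 1 cannot
   wrap.  */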

/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (dump_file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}

/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1.  Both induction variables must have
   type TYPE, which must be an integer or pointer type.  The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   If EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */

static bool
number_of_iterations_cond (struct loop *loop,
                           tree type, affine_iv *iv0, enum tree_code code,
                           affine_iv *iv1, struct tree_niter_desc *niter,
                           bool only_exit, bool every_iteration)
{
  bool exit_must_be_taken = false, ret;
  bounds bnds;

  /* If the test is not executed every iteration, wrapping may make the test
     pass again.
     TODO: the overflow case can still be used as an unreliable estimate of
     the upper bound.  But we have no API to pass it down to the number of
     iterations code and, at present, it will not use it anyway.  */
  if (!every_iteration
      && (!iv0->no_overflow || !iv1->no_overflow
          || code == NE_EXPR || code == EQ_EXPR))
    return false;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->max = 0;
  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
    {
      std::swap (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
         to the same object.  If they do, the control variable cannot wrap
         (as wrap around the bounds of memory will never return a pointer
         that would be guaranteed to point to the same object, even if we
         avoid undefined behavior by casting to size_t and back).  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow and the only exit
     from the loop is the one that we analyze, we know it must be taken
     eventually.  */
  if (only_exit)
    {
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
        exit_must_be_taken = true;
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
        exit_must_be_taken = true;
    }

  /* We can handle the case when neither of the sides of the comparison is
     invariant, provided that the test is NE_EXPR.  This rarely occurs in
     practice, but it is simple enough to manage.  */
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
      if (code != NE_EXPR)
        return false;

      iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
                                           iv0->step, iv1->step);
      iv0->no_overflow = false;
      iv1->step = build_int_cst (step_type, 0);
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common enough
     to waste time on it.  */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
    return false;

  /* Ignore loops of while (i-- < 10) type.  */
  if (code != NE_EXPR)
    {
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
        return false;

      if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
        return false;
    }

  /* If the loop exits immediately, there is nothing to do.  */
  tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
  if (tem && integer_zerop (tem))
    {
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      niter->max = 0;
      return true;
    }

  /* OK, now we know we have a sensible loop.  Handle several cases, depending
     on what comparison operator is used.  */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, "  exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
               code == NE_EXPR ? "!="
               : code == LT_EXPR ? "<"
               : "<=");
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "  bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
    }

  switch (code)
    {
    case NE_EXPR:
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
                                     exit_must_be_taken, &bnds);
      break;

    case LT_EXPR:
      ret = number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
                                     &bnds);
      break;

    case LE_EXPR:
      ret = number_of_iterations_le (type, iv0, iv1, niter, exit_must_be_taken,
                                     &bnds);
      break;

    default:
      gcc_unreachable ();
    }

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (ret)
        {
          fprintf (dump_file, "  result:\n");
          if (!integer_nonzerop (niter->assumptions))
            {
              fprintf (dump_file, "    under assumptions ");
              print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          if (!integer_zerop (niter->may_be_zero))
            {
              fprintf (dump_file, "    zero if ");
              print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          fprintf (dump_file, "    # of iterations ");
          print_generic_expr (dump_file, niter->niter, TDF_SLIM);
          fprintf (dump_file, ", bounded by ");
          print_decu (niter->max, dump_file);
          fprintf (dump_file, "\n");
        }
      else
        fprintf (dump_file, "  failed\n\n");
    }

  return ret;
}

/* Substitute NEW_TREE for OLD in EXPR and fold the result.  */

static tree
simplify_replace_tree (tree expr, tree old, tree new_tree)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  /* Do not bother to replace constants.  */
  if (CONSTANT_CLASS_P (old))
    return expr;

  if (expr == old
      || operand_equal_p (expr, old, 0))
    return unshare_expr (new_tree);

  if (!EXPR_P (expr))
    return expr;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new_tree);
      if (e == se)
        continue;

      if (!ret)
        ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? fold (ret) : expr);
}

/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  If STOP is specified, stop
   expanding if EXPR equals it.  */

tree
expand_simple_operations (tree expr, tree stop)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, e1;
  enum tree_code code;
  gimple *stmt;

  if (expr == NULL_TREE)
    return expr;

  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
        {
          e = TREE_OPERAND (expr, i);
          ee = expand_simple_operations (e, stop);
          if (e == ee)
            continue;

          if (!ret)
            ret = copy_node (expr);

          TREE_OPERAND (ret, i) = ee;
        }

      if (!ret)
        return expr;

      fold_defer_overflow_warnings ();
      ret = fold (ret);
      fold_undefer_and_ignore_overflow_warnings ();
      return ret;
    }

  /* Stop if it's not an SSA name or it's the one we don't want to expand.  */
  if (TREE_CODE (expr) != SSA_NAME || expr == stop)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      basic_block src, dest;

      if (gimple_phi_num_args (stmt) != 1)
        return expr;
      e = PHI_ARG_DEF (stmt, 0);

      /* Avoid propagating through loop exit phi nodes, which
         could break loop-closed SSA form restrictions.  */
      dest = gimple_bb (stmt);
      src = single_pred (dest);
      if (TREE_CODE (e) == SSA_NAME
          && src->loop_father != dest->loop_father)
        return expr;

      return expand_simple_operations (e, stop);
    }
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return expr;

  /* Avoid expanding to expressions that contain SSA names that need
     to take part in abnormal coalescing.  */
  ssa_op_iter iter;
  FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
    if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
      return expr;

  e = gimple_assign_rhs1 (stmt);
  code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    {
      if (is_gimple_min_invariant (e))
        return e;

      if (code == SSA_NAME)
        return expand_simple_operations (e, stop);

      return expr;
    }

  switch (code)
    {
    CASE_CONVERT:
      /* Casts are simple.  */
      ee = expand_simple_operations (e, stop);
      return fold_build1 (code, TREE_TYPE (expr), ee);

    case PLUS_EXPR:
    case MINUS_EXPR:
      if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
          && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
        return expr;
      /* Fallthru.  */
    case POINTER_PLUS_EXPR:
      /* And increments and decrements by a constant are simple.  */
      e1 = gimple_assign_rhs2 (stmt);
      if (!is_gimple_min_invariant (e1))
        return expr;

      ee = expand_simple_operations (e, stop);
      return fold_build2 (code, TREE_TYPE (expr), ee, e1);

    default:
      return expr;
    }
}
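
/* For example, if i_3 is defined by i_3 = i_1 + 4 and i_1 by
   i_1 = (int) j_2, then expand_simple_operations (i_3) returns
   (int) j_2 + 4 (hypothetical SSA names; only casts, copies and
   increments by invariants are expanded).  */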
1909 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1910 expression (or EXPR unchanged, if no simplification was possible). */
1912 static tree
1913 tree_simplify_using_condition_1 (tree cond, tree expr, tree stop)
1915 bool changed;
1916 tree e, te, e0, e1, e2, notcond;
1917 enum tree_code code = TREE_CODE (expr);
1919 if (code == INTEGER_CST)
1920 return expr;
1922 if (code == TRUTH_OR_EXPR
1923 || code == TRUTH_AND_EXPR
1924 || code == COND_EXPR)
1926 changed = false;
1928 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0), stop);
1929 if (TREE_OPERAND (expr, 0) != e0)
1930 changed = true;
1932 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1), stop);
1933 if (TREE_OPERAND (expr, 1) != e1)
1934 changed = true;
1936 if (code == COND_EXPR)
1938 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2), stop);
1939 if (TREE_OPERAND (expr, 2) != e2)
1940 changed = true;
1942 else
1943 e2 = NULL_TREE;
1945 if (changed)
1947 if (code == COND_EXPR)
1948 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1949 else
1950 expr = fold_build2 (code, boolean_type_node, e0, e1);
1953 return expr;
1956 /* In case COND is an equality, we may be able to simplify EXPR by copy/constant
1957 propagation, and vice versa. Fold does not handle this, since it is
1958 considered too expensive. */
1959 if (TREE_CODE (cond) == EQ_EXPR)
1961 e0 = TREE_OPERAND (cond, 0);
1962 e1 = TREE_OPERAND (cond, 1);
1964 /* We know that e0 == e1. Check whether we can simplify expr
1965 using this fact. */
1966 e = simplify_replace_tree (expr, e0, e1);
1967 if (integer_zerop (e) || integer_nonzerop (e))
1968 return e;
1970 e = simplify_replace_tree (expr, e1, e0);
1971 if (integer_zerop (e) || integer_nonzerop (e))
1972 return e;
1974 if (TREE_CODE (expr) == EQ_EXPR)
1976 e0 = TREE_OPERAND (expr, 0);
1977 e1 = TREE_OPERAND (expr, 1);
1979 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
1980 e = simplify_replace_tree (cond, e0, e1);
1981 if (integer_zerop (e))
1982 return e;
1983 e = simplify_replace_tree (cond, e1, e0);
1984 if (integer_zerop (e))
1985 return e;
1987 if (TREE_CODE (expr) == NE_EXPR)
1989 e0 = TREE_OPERAND (expr, 0);
1990 e1 = TREE_OPERAND (expr, 1);
1992 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
1993 e = simplify_replace_tree (cond, e0, e1);
1994 if (integer_zerop (e))
1995 return boolean_true_node;
1996 e = simplify_replace_tree (cond, e1, e0);
1997 if (integer_zerop (e))
1998 return boolean_true_node;
2001 te = expand_simple_operations (expr, stop);
2003 /* Check whether COND ==> EXPR. */
2004 notcond = invert_truthvalue (cond);
2005 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
2006 if (e && integer_nonzerop (e))
2007 return e;
2009 /* Check whether COND ==> not EXPR. */
2010 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
2011 if (e && integer_zerop (e))
2012 return e;
2014 return expr;
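/* A hypothetical example of the last two checks: with COND n_1 > 10
   and EXPR n_1 > 0, folding !(n_1 > 10) || (n_1 > 0) yields true, so
   COND ==> EXPR and the simplified result is true.  Conversely, with
   COND n_1 > 10 and EXPR n_1 < 5, folding (n_1 > 10) && (n_1 < 5)
   yields false, so EXPR simplifies to false.  */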
2017 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2018 expression (or EXPR unchanged, if no simplification was possible).
2019 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2020 of simple operations in definitions of ssa names in COND are expanded,
2021 so that things like casts or incrementing the value of the bound before
2022 the loop do not cause us to fail. */
2024 static tree
2025 tree_simplify_using_condition (tree cond, tree expr, tree stop)
2027 cond = expand_simple_operations (cond, stop);
2029 return tree_simplify_using_condition_1 (cond, expr, stop);
2032 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2033 Returns the simplified expression (or EXPR unchanged, if no
2034 simplification was possible). */
2036 tree
2037 simplify_using_initial_conditions (struct loop *loop, tree expr, tree stop)
2039 edge e;
2040 basic_block bb;
2041 gimple *stmt;
2042 tree cond;
2043 int cnt = 0;
2045 if (TREE_CODE (expr) == INTEGER_CST)
2046 return expr;
2048 /* Limit walking the dominators to avoid behavior quadratic in
2049 the number of BBs times the number of loops in degenerate
2050 cases. */
2051 for (bb = loop->header;
2052 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2053 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2055 if (!single_pred_p (bb))
2056 continue;
2057 e = single_pred_edge (bb);
2059 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2060 continue;
2062 stmt = last_stmt (e->src);
2063 cond = fold_build2 (gimple_cond_code (stmt),
2064 boolean_type_node,
2065 gimple_cond_lhs (stmt),
2066 gimple_cond_rhs (stmt));
2067 if (e->flags & EDGE_FALSE_VALUE)
2068 cond = invert_truthvalue (cond);
2069 expr = tree_simplify_using_condition (cond, expr, stop);
2070 /* Stop if EXPR has simplified to a constant value. */
2071 if (expr && (integer_zerop (expr) || integer_nonzerop (expr)))
2072 break;
2074 ++cnt;
2077 return expr;
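/* Hypothetical example: for a loop guarded as

     if (n_1 > 0)
       for (i_2 = 0; i_2 < n_1; i_2++)
         ...

   the walk up the dominators finds the guard n_1 > 0, so an assumption
   such as n_1 >= 1 inside the niter expressions simplifies to true.  */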
2080 /* Tries to simplify EXPR using the evolutions of the loop invariants
2081 in the superloops of LOOP. Returns the simplified expression
2082 (or EXPR unchanged, if no simplification was possible). */
2084 static tree
2085 simplify_using_outer_evolutions (struct loop *loop, tree expr)
2087 enum tree_code code = TREE_CODE (expr);
2088 bool changed;
2089 tree e, e0, e1, e2;
2091 if (is_gimple_min_invariant (expr))
2092 return expr;
2094 if (code == TRUTH_OR_EXPR
2095 || code == TRUTH_AND_EXPR
2096 || code == COND_EXPR)
2098 changed = false;
2100 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2101 if (TREE_OPERAND (expr, 0) != e0)
2102 changed = true;
2104 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2105 if (TREE_OPERAND (expr, 1) != e1)
2106 changed = true;
2108 if (code == COND_EXPR)
2110 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2111 if (TREE_OPERAND (expr, 2) != e2)
2112 changed = true;
2114 else
2115 e2 = NULL_TREE;
2117 if (changed)
2119 if (code == COND_EXPR)
2120 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2121 else
2122 expr = fold_build2 (code, boolean_type_node, e0, e1);
2125 return expr;
2128 e = instantiate_parameters (loop, expr);
2129 if (is_gimple_min_invariant (e))
2130 return e;
2132 return expr;
2135 /* Returns true if EXIT is the only possible exit from LOOP. */
2137 bool
2138 loop_only_exit_p (const struct loop *loop, const_edge exit)
2140 basic_block *body;
2141 gimple_stmt_iterator bsi;
2142 unsigned i;
2143 gimple *call;
2145 if (exit != single_exit (loop))
2146 return false;
2148 body = get_loop_body (loop);
2149 for (i = 0; i < loop->num_nodes; i++)
2151 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2153 call = gsi_stmt (bsi);
2154 if (gimple_code (call) != GIMPLE_CALL)
2155 continue;
2157 if (gimple_has_side_effects (call))
2159 free (body);
2160 return false;
2165 free (body);
2166 return true;
2169 /* Stores description of number of iterations of LOOP derived from
2170 EXIT (an exit edge of the LOOP) in NITER. Returns true if some
2171 useful information could be derived (and fields of NITER have the
2172 meaning described in comments at the struct tree_niter_desc
2173 declaration), false otherwise. If WARN is true and
2174 -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
2175 potentially unsafe assumptions.
2176 When EVERY_ITERATION is true, only tests that are known to be executed
2177 every iteration are considered (i.e. only tests that alone bound the loop).
2180 bool
2181 number_of_iterations_exit (struct loop *loop, edge exit,
2182 struct tree_niter_desc *niter,
2183 bool warn, bool every_iteration)
2185 gimple *last;
2186 gcond *stmt;
2187 tree type;
2188 tree op0, op1;
2189 enum tree_code code;
2190 affine_iv iv0, iv1;
2191 bool safe;
2193 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2195 if (every_iteration && !safe)
2196 return false;
2198 niter->assumptions = boolean_false_node;
2199 niter->control.base = NULL_TREE;
2200 niter->control.step = NULL_TREE;
2201 niter->control.no_overflow = false;
2202 last = last_stmt (exit->src);
2203 if (!last)
2204 return false;
2205 stmt = dyn_cast <gcond *> (last);
2206 if (!stmt)
2207 return false;
2209 /* We want the condition for staying inside loop. */
2210 code = gimple_cond_code (stmt);
2211 if (exit->flags & EDGE_TRUE_VALUE)
2212 code = invert_tree_comparison (code, false);
2214 switch (code)
2216 case GT_EXPR:
2217 case GE_EXPR:
2218 case LT_EXPR:
2219 case LE_EXPR:
2220 case NE_EXPR:
2221 break;
2223 default:
2224 return false;
2227 op0 = gimple_cond_lhs (stmt);
2228 op1 = gimple_cond_rhs (stmt);
2229 type = TREE_TYPE (op0);
2231 if (TREE_CODE (type) != INTEGER_TYPE
2232 && !POINTER_TYPE_P (type))
2233 return false;
2235 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
2236 return false;
2237 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
2238 return false;
2240 /* We don't want to see undefined signed overflow warnings while
2241 computing the number of iterations. */
2242 fold_defer_overflow_warnings ();
2244 iv0.base = expand_simple_operations (iv0.base);
2245 iv1.base = expand_simple_operations (iv1.base);
2246 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2247 loop_only_exit_p (loop, exit), safe))
2249 fold_undefer_and_ignore_overflow_warnings ();
2250 return false;
2253 if (optimize >= 3)
2255 niter->assumptions = simplify_using_outer_evolutions (loop,
2256 niter->assumptions);
2257 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2258 niter->may_be_zero);
2259 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2262 niter->assumptions
2263 = simplify_using_initial_conditions (loop,
2264 niter->assumptions);
2265 niter->may_be_zero
2266 = simplify_using_initial_conditions (loop,
2267 niter->may_be_zero);
2269 fold_undefer_and_ignore_overflow_warnings ();
2271 /* If NITER has simplified into a constant, update MAX. */
2272 if (TREE_CODE (niter->niter) == INTEGER_CST)
2273 niter->max = wi::to_widest (niter->niter);
2275 if (integer_onep (niter->assumptions))
2276 return true;
2278 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
2279 But if we can prove that there is overflow or some other source of weird
2280 behavior, ignore the loop even with -funsafe-loop-optimizations. */
2281 if (integer_zerop (niter->assumptions) || !single_exit (loop))
2282 return false;
2284 if (flag_unsafe_loop_optimizations)
2285 niter->assumptions = boolean_true_node;
2287 if (warn)
2289 const char *wording;
2290 location_t loc = gimple_location (stmt);
2292 /* We can provide a more specific warning if one of the operands is
2293 constant and the other advances by +1 or -1. */
2294 if (!integer_zerop (iv1.step)
2295 ? (integer_zerop (iv0.step)
2296 && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
2297 : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
2298 wording =
2299 flag_unsafe_loop_optimizations
2300 ? N_("assuming that the loop is not infinite")
2301 : N_("cannot optimize possibly infinite loops");
2302 else
2303 wording =
2304 flag_unsafe_loop_optimizations
2305 ? N_("assuming that the loop counter does not overflow")
2306 : N_("cannot optimize loop, the loop counter may overflow");
2308 warning_at ((LOCATION_LINE (loc) > 0) ? loc : input_location,
2309 OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
2312 return flag_unsafe_loop_optimizations;
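/* As a rough, hypothetical example: for

     unsigned int i;
     for (i = 0; i < n_1; i++)
       ...

   the exit test is analyzed with iv0 = {0, +, 1} and iv1 = {n_1, +, 0}
   under code LT_EXPR, and the resulting description has niter equal to
   n_1 (which is also correct for n_1 == 0, so no separate may_be_zero
   condition is needed for this unsigned comparison).  */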
2315 /* Try to determine the number of iterations of LOOP. If we succeed,
2316 the expression giving the number of iterations is returned and *EXIT is
2317 set to the edge from which the information is obtained. Otherwise
2318 chrec_dont_know is returned. */
2320 tree
2321 find_loop_niter (struct loop *loop, edge *exit)
2323 unsigned i;
2324 vec<edge> exits = get_loop_exit_edges (loop);
2325 edge ex;
2326 tree niter = NULL_TREE, aniter;
2327 struct tree_niter_desc desc;
2329 *exit = NULL;
2330 FOR_EACH_VEC_ELT (exits, i, ex)
2332 if (!number_of_iterations_exit (loop, ex, &desc, false))
2333 continue;
2335 if (integer_nonzerop (desc.may_be_zero))
2337 /* We exit in the first iteration through this exit.
2338 We won't find anything better. */
2339 niter = build_int_cst (unsigned_type_node, 0);
2340 *exit = ex;
2341 break;
2344 if (!integer_zerop (desc.may_be_zero))
2345 continue;
2347 aniter = desc.niter;
2349 if (!niter)
2351 /* Nothing recorded yet. */
2352 niter = aniter;
2353 *exit = ex;
2354 continue;
2357 /* Prefer constants, the lower the better. */
2358 if (TREE_CODE (aniter) != INTEGER_CST)
2359 continue;
2361 if (TREE_CODE (niter) != INTEGER_CST)
2363 niter = aniter;
2364 *exit = ex;
2365 continue;
2368 if (tree_int_cst_lt (aniter, niter))
2370 niter = aniter;
2371 *exit = ex;
2372 continue;
2375 exits.release ();
2377 return niter ? niter : chrec_dont_know;
2380 /* Return true if the loop is known to have a bounded number of iterations. */
2382 bool
2383 finite_loop_p (struct loop *loop)
2385 widest_int nit;
2386 int flags;
2388 if (flag_unsafe_loop_optimizations)
2389 return true;
2390 flags = flags_from_decl_or_type (current_function_decl);
2391 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2393 if (dump_file && (dump_flags & TDF_DETAILS))
2394 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2395 loop->num);
2396 return true;
2399 if (loop->any_upper_bound
2400 || max_loop_iterations (loop, &nit))
2402 if (dump_file && (dump_flags & TDF_DETAILS))
2403 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2404 loop->num);
2405 return true;
2407 return false;
2412 Analysis of the number of iterations of a loop by brute-force evaluation.
2416 /* Bound on the number of iterations we try to evaluate. */
2418 #define MAX_ITERATIONS_TO_TRACK \
2419 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2421 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2422 result by a chain of operations in which all but exactly one of the
2423 operands are constants. */
2425 static gphi *
2426 chain_of_csts_start (struct loop *loop, tree x)
2428 gimple *stmt = SSA_NAME_DEF_STMT (x);
2429 tree use;
2430 basic_block bb = gimple_bb (stmt);
2431 enum tree_code code;
2433 if (!bb
2434 || !flow_bb_inside_loop_p (loop, bb))
2435 return NULL;
2437 if (gimple_code (stmt) == GIMPLE_PHI)
2439 if (bb == loop->header)
2440 return as_a <gphi *> (stmt);
2442 return NULL;
2445 if (gimple_code (stmt) != GIMPLE_ASSIGN
2446 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2447 return NULL;
2449 code = gimple_assign_rhs_code (stmt);
2450 if (gimple_references_memory_p (stmt)
2451 || TREE_CODE_CLASS (code) == tcc_reference
2452 || (code == ADDR_EXPR
2453 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2454 return NULL;
2456 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2457 if (use == NULL_TREE)
2458 return NULL;
2460 return chain_of_csts_start (loop, use);
2463 /* Determines whether the expression X is derived from a result of a phi node
2464 in the header of LOOP such that
2466 * the derivation of X consists only of operations with constants,
2467 * the initial value of the phi node is constant, and
2468 * the value of the phi node in the next iteration can be derived from the
2469 value in the current iteration by a chain of operations with constants.
2471 If such a phi node exists, it is returned, otherwise NULL is returned. */
2473 static gphi *
2474 get_base_for (struct loop *loop, tree x)
2476 gphi *phi;
2477 tree init, next;
2479 if (is_gimple_min_invariant (x))
2480 return NULL;
2482 phi = chain_of_csts_start (loop, x);
2483 if (!phi)
2484 return NULL;
2486 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2487 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2489 if (TREE_CODE (next) != SSA_NAME)
2490 return NULL;
2492 if (!is_gimple_min_invariant (init))
2493 return NULL;
2495 if (chain_of_csts_start (loop, next) != phi)
2496 return NULL;
2498 return phi;
2501 /* Given an expression X, then
2503 * if X is NULL_TREE, we return the constant BASE.
2504 * otherwise X is an SSA name, whose value in the considered loop is derived
2505 by a chain of operations with constants from the result of a phi node in
2506 the header of the loop. Then we return the value of X when the value of the
2507 result of this phi node is given by the constant BASE. */
2509 static tree
2510 get_val_for (tree x, tree base)
2512 gimple *stmt;
2514 gcc_checking_assert (is_gimple_min_invariant (base));
2516 if (!x)
2517 return base;
2519 stmt = SSA_NAME_DEF_STMT (x);
2520 if (gimple_code (stmt) == GIMPLE_PHI)
2521 return base;
2523 gcc_checking_assert (is_gimple_assign (stmt));
2525 /* STMT must be either an assignment of a single SSA name or an
2526 expression involving an SSA name and a constant. Try to fold that
2527 expression using the value for the SSA name. */
2528 if (gimple_assign_ssa_name_copy_p (stmt))
2529 return get_val_for (gimple_assign_rhs1 (stmt), base);
2530 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2531 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2533 return fold_build1 (gimple_assign_rhs_code (stmt),
2534 gimple_expr_type (stmt),
2535 get_val_for (gimple_assign_rhs1 (stmt), base));
2537 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2539 tree rhs1 = gimple_assign_rhs1 (stmt);
2540 tree rhs2 = gimple_assign_rhs2 (stmt);
2541 if (TREE_CODE (rhs1) == SSA_NAME)
2542 rhs1 = get_val_for (rhs1, base);
2543 else if (TREE_CODE (rhs2) == SSA_NAME)
2544 rhs2 = get_val_for (rhs2, base);
2545 else
2546 gcc_unreachable ();
2547 return fold_build2 (gimple_assign_rhs_code (stmt),
2548 gimple_expr_type (stmt), rhs1, rhs2);
2550 else
2551 gcc_unreachable ();
2555 /* Tries to count the number of iterations of LOOP until it exits through EXIT
2556 by brute force -- i.e. by determining the values of the operands of the
2557 condition at EXIT in the first few iterations of the loop (assuming that
2558 these values are constant) and finding the first iteration in which the
2559 condition is not satisfied. Returns the constant giving the number
2560 of iterations of LOOP if successful, chrec_dont_know otherwise. */
2562 tree
2563 loop_niter_by_eval (struct loop *loop, edge exit)
2565 tree acnd;
2566 tree op[2], val[2], next[2], aval[2];
2567 gphi *phi;
2568 gimple *cond;
2569 unsigned i, j;
2570 enum tree_code cmp;
2572 cond = last_stmt (exit->src);
2573 if (!cond || gimple_code (cond) != GIMPLE_COND)
2574 return chrec_dont_know;
2576 cmp = gimple_cond_code (cond);
2577 if (exit->flags & EDGE_TRUE_VALUE)
2578 cmp = invert_tree_comparison (cmp, false);
2580 switch (cmp)
2582 case EQ_EXPR:
2583 case NE_EXPR:
2584 case GT_EXPR:
2585 case GE_EXPR:
2586 case LT_EXPR:
2587 case LE_EXPR:
2588 op[0] = gimple_cond_lhs (cond);
2589 op[1] = gimple_cond_rhs (cond);
2590 break;
2592 default:
2593 return chrec_dont_know;
2596 for (j = 0; j < 2; j++)
2598 if (is_gimple_min_invariant (op[j]))
2600 val[j] = op[j];
2601 next[j] = NULL_TREE;
2602 op[j] = NULL_TREE;
2604 else
2606 phi = get_base_for (loop, op[j]);
2607 if (!phi)
2608 return chrec_dont_know;
2609 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2610 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2614 /* Don't issue signed overflow warnings. */
2615 fold_defer_overflow_warnings ();
2617 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2619 for (j = 0; j < 2; j++)
2620 aval[j] = get_val_for (op[j], val[j]);
2622 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2623 if (acnd && integer_zerop (acnd))
2625 fold_undefer_and_ignore_overflow_warnings ();
2626 if (dump_file && (dump_flags & TDF_DETAILS))
2627 fprintf (dump_file,
2628 "Proved that loop %d iterates %d times using brute force.\n",
2629 loop->num, i);
2630 return build_int_cst (unsigned_type_node, i);
2633 for (j = 0; j < 2; j++)
2635 val[j] = get_val_for (next[j], val[j]);
2636 if (!is_gimple_min_invariant (val[j]))
2638 fold_undefer_and_ignore_overflow_warnings ();
2639 return chrec_dont_know;
2644 fold_undefer_and_ignore_overflow_warnings ();
2646 return chrec_dont_know;
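/* Hypothetical example of the brute-force evaluation: for

     i_1 = 1;
     do
       i_1 = i_1 * 2;
     while (i_1 != 64);

   the phi for i_1 has constant initial value 1 and its next value is
   derived by a single operation with a constant, so the loop above
   walks the operand values 2, 4, 8, 16, 32, 64 and returns 5 -- the
   number of latch executions before the exit is taken.  */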
2649 /* Finds the exit of LOOP through which the loop exits after a constant
2650 number of iterations and stores the exit edge in *EXIT. The constant
2651 giving the number of iterations of LOOP is returned. The number of
2652 iterations is determined using loop_niter_by_eval (i.e. by brute force
2653 evaluation). If we are unable to find an exit for which loop_niter_by_eval
2654 determines the number of iterations, chrec_dont_know is returned. */
2656 tree
2657 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2659 unsigned i;
2660 vec<edge> exits = get_loop_exit_edges (loop);
2661 edge ex;
2662 tree niter = NULL_TREE, aniter;
2664 *exit = NULL;
2666 /* Loops with multiple exits are expensive to handle and less important. */
2667 if (!flag_expensive_optimizations
2668 && exits.length () > 1)
2670 exits.release ();
2671 return chrec_dont_know;
2674 FOR_EACH_VEC_ELT (exits, i, ex)
2676 if (!just_once_each_iteration_p (loop, ex->src))
2677 continue;
2679 aniter = loop_niter_by_eval (loop, ex);
2680 if (chrec_contains_undetermined (aniter))
2681 continue;
2683 if (niter
2684 && !tree_int_cst_lt (aniter, niter))
2685 continue;
2687 niter = aniter;
2688 *exit = ex;
2690 exits.release ();
2692 return niter ? niter : chrec_dont_know;
2697 Analysis of upper bounds on the number of iterations of a loop.
2701 static widest_int derive_constant_upper_bound_ops (tree, tree,
2702 enum tree_code, tree);
2704 /* Returns a constant upper bound on the value of the right-hand side of
2705 an assignment statement STMT. */
2707 static widest_int
2708 derive_constant_upper_bound_assign (gimple *stmt)
2710 enum tree_code code = gimple_assign_rhs_code (stmt);
2711 tree op0 = gimple_assign_rhs1 (stmt);
2712 tree op1 = gimple_assign_rhs2 (stmt);
2714 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2715 op0, code, op1);
2718 /* Returns a constant upper bound on the value of expression VAL. VAL
2719 is considered to be unsigned. If its type is signed, its value must
2720 be nonnegative. */
2722 static widest_int
2723 derive_constant_upper_bound (tree val)
2725 enum tree_code code;
2726 tree op0, op1;
2728 extract_ops_from_tree (val, &code, &op0, &op1);
2729 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2732 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2733 whose type is TYPE. The expression is considered to be unsigned. If
2734 its type is signed, its value must be nonnegative. */
2736 static widest_int
2737 derive_constant_upper_bound_ops (tree type, tree op0,
2738 enum tree_code code, tree op1)
2740 tree subtype, maxt;
2741 widest_int bnd, max, mmax, cst;
2742 gimple *stmt;
2744 if (INTEGRAL_TYPE_P (type))
2745 maxt = TYPE_MAX_VALUE (type);
2746 else
2747 maxt = upper_bound_in_type (type, type);
2749 max = wi::to_widest (maxt);
2751 switch (code)
2753 case INTEGER_CST:
2754 return wi::to_widest (op0);
2756 CASE_CONVERT:
2757 subtype = TREE_TYPE (op0);
2758 if (!TYPE_UNSIGNED (subtype)
2759 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2760 that OP0 is nonnegative. */
2761 && TYPE_UNSIGNED (type)
2762 && !tree_expr_nonnegative_p (op0))
2764 /* If we cannot prove that the casted expression is nonnegative,
2765 we cannot establish a more useful upper bound than the precision
2766 of the type gives us. */
2767 return max;
2770 /* We now know that op0 is a nonnegative value. Try deriving an upper
2771 bound for it. */
2772 bnd = derive_constant_upper_bound (op0);
2774 /* If the bound does not fit in TYPE, the maximum value of TYPE could be
2775 attained. */
2776 if (wi::ltu_p (max, bnd))
2777 return max;
2779 return bnd;
2781 case PLUS_EXPR:
2782 case POINTER_PLUS_EXPR:
2783 case MINUS_EXPR:
2784 if (TREE_CODE (op1) != INTEGER_CST
2785 || !tree_expr_nonnegative_p (op0))
2786 return max;
2788 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
2789 choose the most logical way to treat this constant regardless
2790 of the signedness of the type. */
2791 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
2792 if (code != MINUS_EXPR)
2793 cst = -cst;
2795 bnd = derive_constant_upper_bound (op0);
2797 if (wi::neg_p (cst))
2799 cst = -cst;
2800 /* Avoid CST == 0x80000... */
2801 if (wi::neg_p (cst))
2802 return max;
2804 /* OP0 + CST. We need to check that
2805 BND <= MAX (type) - CST. */
2807 mmax = max - cst;
2808 if (wi::ltu_p (mmax, bnd))
2809 return max;
2811 return bnd + cst;
2813 else
2815 /* OP0 - CST, where CST >= 0.
2817 If TYPE is signed, we have already verified that OP0 >= 0, and we
2818 know that the result is nonnegative. This implies that
2819 VAL <= BND - CST.
2821 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2822 otherwise the operation underflows.
2825 /* This should only happen if the type is unsigned; however, for
2826 buggy programs that use overflowing signed arithmetic even with
2827 -fno-wrapv, this condition may also be true for signed values. */
2828 if (wi::ltu_p (bnd, cst))
2829 return max;
2831 if (TYPE_UNSIGNED (type))
2833 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2834 wide_int_to_tree (type, cst));
2835 if (!tem || !integer_nonzerop (tem))
2836 return max;
2839 bnd -= cst;
2842 return bnd;
2844 case FLOOR_DIV_EXPR:
2845 case EXACT_DIV_EXPR:
2846 if (TREE_CODE (op1) != INTEGER_CST
2847 || tree_int_cst_sign_bit (op1))
2848 return max;
2850 bnd = derive_constant_upper_bound (op0);
2851 return wi::udiv_floor (bnd, wi::to_widest (op1));
2853 case BIT_AND_EXPR:
2854 if (TREE_CODE (op1) != INTEGER_CST
2855 || tree_int_cst_sign_bit (op1))
2856 return max;
2857 return wi::to_widest (op1);
2859 case SSA_NAME:
2860 stmt = SSA_NAME_DEF_STMT (op0);
2861 if (gimple_code (stmt) != GIMPLE_ASSIGN
2862 || gimple_assign_lhs (stmt) != op0)
2863 return max;
2864 return derive_constant_upper_bound_assign (stmt);
2866 default:
2867 return max;
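/* A few hypothetical examples of the derivation above: b_1 & 0xff is
   bounded by 255; a FLOOR_DIV_EXPR or EXACT_DIV_EXPR b_1 / 4 is bounded
   by bound (b_1) / 4, e.g. 25 if b_1 is known to be at most 100; and a
   cast (unsigned int) s_2 of a signed s_2 that cannot be proven
   nonnegative falls back to the maximum of the type.  */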
2871 /* Emit a -Waggressive-loop-optimizations warning if needed. */
2873 static void
2874 do_warn_aggressive_loop_optimizations (struct loop *loop,
2875 widest_int i_bound, gimple *stmt)
2877 /* Don't warn if the loop doesn't have a known constant bound. */
2878 if (!loop->nb_iterations
2879 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
2880 || !warn_aggressive_loop_optimizations
2881 /* To avoid warning multiple times for the same loop,
2882 only start warning when we preserve loops. */
2883 || (cfun->curr_properties & PROP_loops) == 0
2884 /* Only warn once per loop. */
2885 || loop->warned_aggressive_loop_optimizations
2886 /* Only warn if undefined behavior gives us a lower estimate than the
2887 known constant bound. */
2888 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
2889 /* And undefined behavior happens unconditionally. */
2890 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
2891 return;
2893 edge e = single_exit (loop);
2894 if (e == NULL)
2895 return;
2897 gimple *estmt = last_stmt (e->src);
2898 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2899 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
2900 ? UNSIGNED : SIGNED);
2901 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
2902 "iteration %s invokes undefined behavior", buf))
2903 inform (gimple_location (estmt), "within this loop");
2904 loop->warned_aggressive_loop_optimizations = true;
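/* Hypothetical example of a diagnosed loop:

     int a[4];
     for (int i = 0; i < 10; i++)
       a[i] = i;

   The store is only defined for i in [0, 3], giving an undefined
   behavior bound of 4, smaller than the 10 iterations implied by the
   exit test, so the warning points at iteration 4.  */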
2907 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
2908 is true if the loop is exited immediately after STMT, and this exit
2909 is taken at the latest when STMT is executed BOUND + 1 times.
2910 REALISTIC is true if BOUND is expected to be close to the real number
2911 of iterations. UPPER is true if we are sure the loop iterates at most
2912 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
2914 static void
2915 record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
2916 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
2918 widest_int delta;
2920 if (dump_file && (dump_flags & TDF_DETAILS))
2922 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2923 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
2924 fprintf (dump_file, " is %sexecuted at most ",
2925 upper ? "" : "probably ");
2926 print_generic_expr (dump_file, bound, TDF_SLIM);
2927 fprintf (dump_file, " (bounded by ");
2928 print_decu (i_bound, dump_file);
2929 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2932 /* If the I_BOUND is just an estimate of BOUND, it is rarely close to the
2933 real number of iterations. */
2934 if (TREE_CODE (bound) != INTEGER_CST)
2935 realistic = false;
2936 else
2937 gcc_checking_assert (i_bound == wi::to_widest (bound));
2938 if (!upper && !realistic)
2939 return;
2941 /* If we have a guaranteed upper bound, record it in the appropriate
2942 list, unless this is an !is_exit bound (i.e. undefined behavior in
2943 at_stmt) in a loop with known constant number of iterations. */
2944 if (upper
2945 && (is_exit
2946 || loop->nb_iterations == NULL_TREE
2947 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
2949 struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
2951 elt->bound = i_bound;
2952 elt->stmt = at_stmt;
2953 elt->is_exit = is_exit;
2954 elt->next = loop->bounds;
2955 loop->bounds = elt;
2958 /* Only if the statement is executed on every path to the loop latch can we
2959 directly infer an upper bound on the # of iterations of the loop. */
2960 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
2961 return;
2963 /* Update the number of iteration estimates according to the bound.
2964 If at_stmt is an exit then the loop latch is executed at most BOUND times,
2965 otherwise it can be executed BOUND + 1 times. We will lower the estimate
2966 later if such a statement must be executed on the last iteration. */
2967 if (is_exit)
2968 delta = 0;
2969 else
2970 delta = 1;
2971 widest_int new_i_bound = i_bound + delta;
2973 /* If an overflow occurred, ignore the result. */
2974 if (wi::ltu_p (new_i_bound, delta))
2975 return;
2977 if (upper && !is_exit)
2978 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
2979 record_niter_bound (loop, new_i_bound, realistic, upper);
2982 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
2983 and doesn't overflow. */
2985 static void
2986 record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
2988 struct control_iv *iv;
2990 if (!niter->control.base || !niter->control.step)
2991 return;
2993 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
2994 return;
2996 iv = ggc_alloc<control_iv> ();
2997 iv->base = niter->control.base;
2998 iv->step = niter->control.step;
2999 iv->next = loop->control_ivs;
3000 loop->control_ivs = iv;
3002 return;
3005 /* Record the estimate on the number of iterations of LOOP based on the fact
3006 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3007 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3008 estimated number of iterations is expected to be close to the real one.
3009 UPPER is true if we are sure the induction variable does not wrap. */
3011 static void
3012 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
3013 tree low, tree high, bool realistic, bool upper)
3015 tree niter_bound, extreme, delta;
3016 tree type = TREE_TYPE (base), unsigned_type;
3017 tree orig_base = base;
3019 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3020 return;
3022 if (dump_file && (dump_flags & TDF_DETAILS))
3024 fprintf (dump_file, "Induction variable (");
3025 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3026 fprintf (dump_file, ") ");
3027 print_generic_expr (dump_file, base, TDF_SLIM);
3028 fprintf (dump_file, " + ");
3029 print_generic_expr (dump_file, step, TDF_SLIM);
3030 fprintf (dump_file, " * iteration does not wrap in statement ");
3031 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3032 fprintf (dump_file, " in loop %d.\n", loop->num);
3035 unsigned_type = unsigned_type_for (type);
3036 base = fold_convert (unsigned_type, base);
3037 step = fold_convert (unsigned_type, step);
3039 if (tree_int_cst_sign_bit (step))
3041 wide_int min, max;
3042 extreme = fold_convert (unsigned_type, low);
3043 if (TREE_CODE (orig_base) == SSA_NAME
3044 && TREE_CODE (high) == INTEGER_CST
3045 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3046 && get_range_info (orig_base, &min, &max) == VR_RANGE
3047 && wi::gts_p (high, max))
3048 base = wide_int_to_tree (unsigned_type, max);
3049 else if (TREE_CODE (base) != INTEGER_CST)
3050 base = fold_convert (unsigned_type, high);
3051 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3052 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3054 else
3056 wide_int min, max;
3057 extreme = fold_convert (unsigned_type, high);
3058 if (TREE_CODE (orig_base) == SSA_NAME
3059 && TREE_CODE (low) == INTEGER_CST
3060 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3061 && get_range_info (orig_base, &min, &max) == VR_RANGE
3062 && wi::gts_p (min, low))
3063 base = wide_int_to_tree (unsigned_type, min);
3064 else if (TREE_CODE (base) != INTEGER_CST)
3065 base = fold_convert (unsigned_type, low);
3066 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3069 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3070 would get out of the range. */
3071 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3072 widest_int max = derive_constant_upper_bound (niter_bound);
3073 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
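/* E.g. (hypothetically), for BASE 0, STEP 4 and the range <0, 100>,
   DELTA is 100 and NITER_BOUND becomes floor (100 / 4) = 25, so the
   statement is recorded as executing at most 26 times.  */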
3076 /* Determine information about the number of iterations of LOOP from the index
3077 IDX of a data reference accessed in STMT. Callback for for_each_index. */
3081 struct ilb_data
3083 struct loop *loop;
3084 gimple *stmt;
3087 static bool
3088 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3090 struct ilb_data *data = (struct ilb_data *) dta;
3091 tree ev, init, step;
3092 tree low, high, type, next;
3093 bool sign, upper = true, at_end = false;
3094 struct loop *loop = data->loop;
3095 bool reliable = true;
3097 if (TREE_CODE (base) != ARRAY_REF)
3098 return true;
3100 /* For arrays at the end of the structure, we are not guaranteed that they
3101 do not really extend over their declared size. However, for arrays of
3102 size greater than one, this is unlikely to be intended. */
3103 if (array_at_struct_end_p (base))
3105 at_end = true;
3106 upper = false;
3109 struct loop *dloop = loop_containing_stmt (data->stmt);
3110 if (!dloop)
3111 return true;
3113 ev = analyze_scalar_evolution (dloop, *idx);
3114 ev = instantiate_parameters (loop, ev);
3115 init = initial_condition (ev);
3116 step = evolution_part_in_loop_num (ev, loop->num);
3118 if (!init
3119 || !step
3120 || TREE_CODE (step) != INTEGER_CST
3121 || integer_zerop (step)
3122 || tree_contains_chrecs (init, NULL)
3123 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3124 return true;
3126 low = array_ref_low_bound (base);
3127 high = array_ref_up_bound (base);
3129 /* The case of nonconstant bounds could be handled, but it would be
3130 complicated. */
3131 if (TREE_CODE (low) != INTEGER_CST
3132 || !high
3133 || TREE_CODE (high) != INTEGER_CST)
3134 return true;
3135 sign = tree_int_cst_sign_bit (step);
3136 type = TREE_TYPE (step);
3138 /* The array of length 1 at the end of a structure most likely extends
3139 beyond its bounds. */
3140 if (at_end
3141 && operand_equal_p (low, high, 0))
3142 return true;
3144 /* In case the relevant bound of the array does not fit in type, or
3145 it does, but bound + step (in type) still falls within the range of the
3146 array, the index may wrap and still stay within the range of the array
3147 (consider e.g. if the array is indexed by the full range of
3148 unsigned char).
3150 To make things simpler, we require both bounds to fit into type, although
3151 there are cases where this would not be strictly necessary. */
3152 if (!int_fits_type_p (high, type)
3153 || !int_fits_type_p (low, type))
3154 return true;
3155 low = fold_convert (type, low);
3156 high = fold_convert (type, high);
3158 if (sign)
3159 next = fold_binary (PLUS_EXPR, type, low, step);
3160 else
3161 next = fold_binary (PLUS_EXPR, type, high, step);
3163 if (tree_int_cst_compare (low, next) <= 0
3164 && tree_int_cst_compare (next, high) <= 0)
3165 return true;
3167 /* If the access is not executed on every iteration, we must ensure that
3168 overflow cannot make the access valid later. */
3169 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3170 && scev_probably_wraps_p (initial_condition_in_loop_num (ev, loop->num),
3171 step, data->stmt, loop, true))
3172 reliable = false;
3174 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, reliable, upper);
3175 return true;
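/* As a hypothetical example, in

     int a[10];
     for (i_1 = 0; ; i_1++)
       a[i_1] = 0;

   the index has evolution {0, +, 1} and the array range is [0, 9],
   so the call above records that the store runs at most 10 times,
   which bounds the loop even though it has no exit test of its own.  */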
3178 /* Determine information about the number of iterations of LOOP from the
3179 bounds of arrays in the data reference REF accessed in STMT. */
3182 static void
3183 infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
3185 struct ilb_data data;
3187 data.loop = loop;
3188 data.stmt = stmt;
3189 for_each_index (&ref, idx_infer_loop_bounds, &data);
3192 /* Determine information about the number of iterations of LOOP from the way
3193 arrays are used in STMT. */
3196 static void
3197 infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
3199 if (is_gimple_assign (stmt))
3201 tree op0 = gimple_assign_lhs (stmt);
3202 tree op1 = gimple_assign_rhs1 (stmt);
3204 /* For each memory access, analyze its access function
3205 and record a bound on the loop iteration domain. */
3206 if (REFERENCE_CLASS_P (op0))
3207 infer_loop_bounds_from_ref (loop, stmt, op0);
3209 if (REFERENCE_CLASS_P (op1))
3210 infer_loop_bounds_from_ref (loop, stmt, op1);
3212 else if (is_gimple_call (stmt))
3214 tree arg, lhs;
3215 unsigned i, n = gimple_call_num_args (stmt);
3217 lhs = gimple_call_lhs (stmt);
3218 if (lhs && REFERENCE_CLASS_P (lhs))
3219 infer_loop_bounds_from_ref (loop, stmt, lhs);
3221 for (i = 0; i < n; i++)
3223 arg = gimple_call_arg (stmt, i);
3224 if (REFERENCE_CLASS_P (arg))
3225 infer_loop_bounds_from_ref (loop, stmt, arg);
3230 /* Determine information about the number of iterations of LOOP from the fact
3231 that pointer arithmetic in STMT does not overflow. */
3233 static void
3234 infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
3236 tree def, base, step, scev, type, low, high;
3237 tree var, ptr;
3239 if (!is_gimple_assign (stmt)
3240 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3241 return;
3243 def = gimple_assign_lhs (stmt);
3244 if (TREE_CODE (def) != SSA_NAME)
3245 return;
3247 type = TREE_TYPE (def);
3248 if (!nowrap_type_p (type))
3249 return;
3251 ptr = gimple_assign_rhs1 (stmt);
3252 if (!expr_invariant_in_loop_p (loop, ptr))
3253 return;
3255 var = gimple_assign_rhs2 (stmt);
3256 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3257 return;
3259 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3260 if (chrec_contains_undetermined (scev))
3261 return;
3263 base = initial_condition_in_loop_num (scev, loop->num);
3264 step = evolution_part_in_loop_num (scev, loop->num);
3266 if (!base || !step
3267 || TREE_CODE (step) != INTEGER_CST
3268 || tree_contains_chrecs (base, NULL)
3269 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3270 return;
3272 low = lower_bound_in_type (type, type);
3273 high = upper_bound_in_type (type, type);
3275 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3276 produce a NULL pointer. The contrary would mean NULL points to an object,
3277 while NULL is supposed to compare unequal with the address of all objects.
3278 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3279 NULL pointer since that would mean wrapping, which we assume here not to
3280 happen. So, we can exclude NULL from the valid range of pointer
3281 arithmetic. */
3282 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3283 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3285 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3288 /* Determine information about the number of iterations of LOOP from the fact
3289 that signed arithmetic in STMT does not overflow. */
3291 static void
3292 infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
3294 tree def, base, step, scev, type, low, high;
3296 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3297 return;
3299 def = gimple_assign_lhs (stmt);
3301 if (TREE_CODE (def) != SSA_NAME)
3302 return;
3304 type = TREE_TYPE (def);
3305 if (!INTEGRAL_TYPE_P (type)
3306 || !TYPE_OVERFLOW_UNDEFINED (type))
3307 return;
3309 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3310 if (chrec_contains_undetermined (scev))
3311 return;
3313 base = initial_condition_in_loop_num (scev, loop->num);
3314 step = evolution_part_in_loop_num (scev, loop->num);
3316 if (!base || !step
3317 || TREE_CODE (step) != INTEGER_CST
3318 || tree_contains_chrecs (base, NULL)
3319 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3320 return;
3322 low = lower_bound_in_type (type, type);
3323 high = upper_bound_in_type (type, type);
3325 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
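/* Hypothetical example: if a variable of type signed char starts at 0
   and is incremented by 1 in each iteration, its evolution may not
   leave [-128, 127] without undefined overflow, so the call above
   records NITER_BOUND = 127, i.e. the increment executes at most 128
   times.  */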
3328 /* The following analyzers extract information on the bounds
3329 of LOOP from the following undefined behaviors:
3331 - data references should not access elements beyond the statically
3332 allocated size,
3334 - signed variables should not overflow when flag_wrapv is not set. */
3337 static void
3338 infer_loop_bounds_from_undefined (struct loop *loop)
3340 unsigned i;
3341 basic_block *bbs;
3342 gimple_stmt_iterator bsi;
3343 basic_block bb;
3344 bool reliable;
3346 bbs = get_loop_body (loop);
3348 for (i = 0; i < loop->num_nodes; i++)
3350 bb = bbs[i];
3352 /* If BB is not executed in each iteration of the loop, we cannot
3353 use the operations in it to infer a reliable upper bound on the
3354 # of iterations of the loop. However, we can use it as a guess.
3355 Reliable guesses come only from array bounds. */
3356 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3358 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3360 gimple *stmt = gsi_stmt (bsi);
3362 infer_loop_bounds_from_array (loop, stmt);
3364 if (reliable)
3366 infer_loop_bounds_from_signedness (loop, stmt);
3367 infer_loop_bounds_from_pointer_arith (loop, stmt);
3373 free (bbs);
3376 /* Compare wide ints, callback for qsort. */
3378 static int
3379 wide_int_cmp (const void *p1, const void *p2)
3381 const widest_int *d1 = (const widest_int *) p1;
3382 const widest_int *d2 = (const widest_int *) p2;
3383 return wi::cmpu (*d1, *d2);
3386 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3387 Lookup by binary search. */
3389 static int
3390 bound_index (vec<widest_int> bounds, const widest_int &bound)
3392 unsigned int end = bounds.length ();
3393 unsigned int begin = 0;
3395 /* Find a matching index by means of a binary search. */
3396 while (begin != end)
3398 unsigned int middle = (begin + end) / 2;
3399 widest_int index = bounds[middle];
3401 if (index == bound)
3402 return middle;
3403 else if (wi::ltu_p (index, bound))
3404 begin = middle + 1;
3405 else
3406 end = middle;
3408 gcc_unreachable ();
3411 /* We recorded loop bounds only for statements dominating loop latch (and thus
3412 executed in each loop iteration). If there are any bounds on statements not
3413 dominating the loop latch, we can improve the estimate by walking the loop
3414 body and seeing if every path from loop header to loop latch contains
3415 some bounded statement. */
3417 static void
3418 discover_iteration_bound_by_body_walk (struct loop *loop)
3420 struct nb_iter_bound *elt;
3421 vec<widest_int> bounds = vNULL;
3422 vec<vec<basic_block> > queues = vNULL;
3423 vec<basic_block> queue = vNULL;
3424 ptrdiff_t queue_index;
3425 ptrdiff_t latch_index = 0;
3427 /* Discover what bounds may interest us. */
3428 for (elt = loop->bounds; elt; elt = elt->next)
3430 widest_int bound = elt->bound;
3432 /* An exit terminates the loop at a given iteration, while a non-exit bound
3433 produces an undefined effect on the next iteration. */
3434 if (!elt->is_exit)
3436 bound += 1;
3437 /* If an overflow occurred, ignore the result. */
3438 if (bound == 0)
3439 continue;
3442 if (!loop->any_upper_bound
3443 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3444 bounds.safe_push (bound);
3447 /* Exit early if there is nothing to do. */
3448 if (!bounds.exists ())
3449 return;
3451 if (dump_file && (dump_flags & TDF_DETAILS))
3452 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3454 /* Sort the bounds in increasing order. */
3455 bounds.qsort (wide_int_cmp);
3457 /* For every basic block record the lowest bound that is guaranteed to
3458 terminate the loop. */
3460 hash_map<basic_block, ptrdiff_t> bb_bounds;
3461 for (elt = loop->bounds; elt; elt = elt->next)
3463 widest_int bound = elt->bound;
3464 if (!elt->is_exit)
3466 bound += 1;
3467 /* If an overflow occurred, ignore the result. */
3468 if (bound == 0)
3469 continue;
3472 if (!loop->any_upper_bound
3473 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3475 ptrdiff_t index = bound_index (bounds, bound);
3476 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3477 if (!entry)
3478 bb_bounds.put (gimple_bb (elt->stmt), index);
3479 else if ((ptrdiff_t)*entry > index)
3480 *entry = index;
3484 hash_map<basic_block, ptrdiff_t> block_priority;
3486 /* Perform shortest path discovery loop->header ... loop->latch.
3488 The "distance" is given by the smallest loop bound of a basic block
3489 present in the path, and we look for the path with the largest smallest
3490 bound on it.
3492 To avoid the need for a Fibonacci heap on the bounds we simply compress
3493 them into indexes into the BOUNDS array and then represent the queue
3494 as arrays of queues for every index.
3495 An index of BOUNDS.length () means that the execution of the given BB has
3496 no bound determined.
3498 BLOCK_PRIORITY is a map translating a basic block into the smallest index
3499 it was inserted into the priority queue with. */
3500 latch_index = -1;
3502 /* Start walk in loop header with index set to infinite bound. */
3503 queue_index = bounds.length ();
3504 queues.safe_grow_cleared (queue_index + 1);
3505 queue.safe_push (loop->header);
3506 queues[queue_index] = queue;
3507 block_priority.put (loop->header, queue_index);
3509 for (; queue_index >= 0; queue_index--)
3511 if (latch_index < queue_index)
3513 while (queues[queue_index].length ())
3515 basic_block bb;
3516 ptrdiff_t bound_index = queue_index;
3517 edge e;
3518 edge_iterator ei;
3520 queue = queues[queue_index];
3521 bb = queue.pop ();
3523 /* OK, we later inserted the BB with lower priority, skip it. */
3524 if (*block_priority.get (bb) > queue_index)
3525 continue;
3527 /* See if we can improve the bound. */
3528 ptrdiff_t *entry = bb_bounds.get (bb);
3529 if (entry && *entry < bound_index)
3530 bound_index = *entry;
3532 /* Insert successors into the queue, watch for the latch edge
3533 and record the greatest index we saw. */
3534 FOR_EACH_EDGE (e, ei, bb->succs)
3536 bool insert = false;
3538 if (loop_exit_edge_p (loop, e))
3539 continue;
3541 if (e == loop_latch_edge (loop)
3542 && latch_index < bound_index)
3543 latch_index = bound_index;
3544 else if (!(entry = block_priority.get (e->dest)))
3546 insert = true;
3547 block_priority.put (e->dest, bound_index);
3549 else if (*entry < bound_index)
3551 insert = true;
3552 *entry = bound_index;
3555 if (insert)
3556 queues[bound_index].safe_push (e->dest);
3560 queues[queue_index].release ();
3563 gcc_assert (latch_index >= 0);
3564 if ((unsigned)latch_index < bounds.length ())
3566 if (dump_file && (dump_flags & TDF_DETAILS))
3568 fprintf (dump_file, "Found better loop bound ");
3569 print_decu (bounds[latch_index], dump_file);
3570 fprintf (dump_file, "\n");
3572 record_niter_bound (loop, bounds[latch_index], false, true);
3575 queues.release ();
3576 bounds.release ();
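/* Hypothetical illustration: if the loop body forks into an if/else
   diamond and the two arms contain statements bounded by 10 and 20
   iterations respectively, neither statement dominates the latch, yet
   every header-to-latch path crosses one of them, so the walk above
   derives an upper bound of 20 for the whole loop.  */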
3579 /* See if every path through the loop goes through a statement that is known
3580 not to execute in the last iteration. In that case we can decrease the
3581 iteration count by 1. */
3583 static void
3584 maybe_lower_iteration_bound (struct loop *loop)
3586 hash_set<gimple *> *not_executed_last_iteration = NULL;
3587 struct nb_iter_bound *elt;
3588 bool found_exit = false;
3589 vec<basic_block> queue = vNULL;
3590 bitmap visited;
3592 /* Collect all statements with an interesting (i.e. lower than
3593 nb_iterations_upper_bound) bound on them.
3595 TODO: Due to the way record_estimate chooses estimates to store, the bounds
3596 will always be nb_iterations_upper_bound-1. We can change this to also
3597 record statements not dominating the loop latch and update the walk below
3598 to the shortest path algorithm. */
3599 for (elt = loop->bounds; elt; elt = elt->next)
3601 if (!elt->is_exit
3602 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
3604 if (!not_executed_last_iteration)
3605 not_executed_last_iteration = new hash_set<gimple *>;
3606 not_executed_last_iteration->add (elt->stmt);
3609 if (!not_executed_last_iteration)
3610 return;
3612 /* Start DFS walk in the loop header and see if we can reach the
3613 loop latch or any of the exits (including statements with side
3614 effects that may terminate the loop otherwise) without visiting
3615 any of the statements known to have undefined effect on the last
3616 iteration. */
3617 queue.safe_push (loop->header);
3618 visited = BITMAP_ALLOC (NULL);
3619 bitmap_set_bit (visited, loop->header->index);
3620 found_exit = false;
3624 basic_block bb = queue.pop ();
3625 gimple_stmt_iterator gsi;
3626 bool stmt_found = false;
3628 /* Look for possible exits and statements bounding the execution. */
3629 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3631 gimple *stmt = gsi_stmt (gsi);
3632 if (not_executed_last_iteration->contains (stmt))
3634 stmt_found = true;
3635 break;
3637 if (gimple_has_side_effects (stmt))
3639 found_exit = true;
3640 break;
3643 if (found_exit)
3644 break;
3646 /* If no bounding statement is found, continue the walk. */
3647 if (!stmt_found)
3649 edge e;
3650 edge_iterator ei;
3652 FOR_EACH_EDGE (e, ei, bb->succs)
3654 if (loop_exit_edge_p (loop, e)
3655 || e == loop_latch_edge (loop))
3657 found_exit = true;
3658 break;
3660 if (bitmap_set_bit (visited, e->dest->index))
3661 queue.safe_push (e->dest);
3665 while (queue.length () && !found_exit);
3667 /* If every path through the loop reaches a bounding statement before an
3668 exit, then we know the last iteration of the loop will have an undefined
3669 effect and we can decrease the number of iterations. */
3671 if (!found_exit)
3673 if (dump_file && (dump_flags & TDF_DETAILS))
3674 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
3675 "undefined statement must be executed at the last iteration.\n");
3676 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
3677 false, true);
3680 BITMAP_FREE (visited);
3681 queue.release ();
3682 delete not_executed_last_iteration;
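/* Hypothetical case where this helps: in a bottom-test loop

     do
       {
         a[i_1] = 0;
         i_1 = i_1 + 1;
       }
     while (i_1 != n_2);

   with int a[10], the store is known to execute at most 10 times, and
   it is reached before the exit test on every path through the body.
   The walk above therefore never reaches the latch or an exit without
   crossing the store, and the upper bound on latch executions drops
   from 10 to 9.  */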
3685 /* Records estimates on the numbers of iterations of LOOP. */
3688 static void
3689 estimate_numbers_of_iterations_loop (struct loop *loop)
3691 vec<edge> exits;
3692 tree niter, type;
3693 unsigned i;
3694 struct tree_niter_desc niter_desc;
3695 edge ex;
3696 widest_int bound;
3697 edge likely_exit;
3699 /* Give up if we have already tried to compute an estimate. */
3700 if (loop->estimate_state != EST_NOT_COMPUTED)
3701 return;
3703 loop->estimate_state = EST_AVAILABLE;
3704 /* Force estimate computation but leave any existing upper bound in place. */
3705 loop->any_estimate = false;
3707 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
3708 to be constant, we avoid bounds implied by undefined behavior and instead
3709 diagnose those loops with -Waggressive-loop-optimizations. */
3710 number_of_latch_executions (loop);
3712 exits = get_loop_exit_edges (loop);
3713 likely_exit = single_likely_exit (loop);
3714 FOR_EACH_VEC_ELT (exits, i, ex)
3716 if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
3717 continue;
3719 niter = niter_desc.niter;
3720 type = TREE_TYPE (niter);
3721 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
3722 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
3723 build_int_cst (type, 0),
3724 niter);
3725 record_estimate (loop, niter, niter_desc.max,
3726 last_stmt (ex->src),
3727 true, ex == likely_exit, true);
3728 record_control_iv (loop, &niter_desc);
3730 exits.release ();
3732 if (flag_aggressive_loop_optimizations)
3733 infer_loop_bounds_from_undefined (loop);
3735 discover_iteration_bound_by_body_walk (loop);
3737 maybe_lower_iteration_bound (loop);
3739 /* If we have a measured profile, use it to estimate the number of
3740 iterations. */
3741 if (loop->header->count != 0)
3743 gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
3744 bound = gcov_type_to_wide_int (nit);
3745 record_niter_bound (loop, bound, true, false);
3748 /* If we know the exact number of iterations of this loop, try to
3749 not break code with undefined behavior by not recording a smaller
3750 maximum number of iterations. */
3751 if (loop->nb_iterations
3752 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
3754 loop->any_upper_bound = true;
3755 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
3759 /* Sets NIT to the estimated number of executions of the latch of the
3760 LOOP. If we have no reliable estimate, the function returns false,
3761 otherwise returns true. */
3764 bool
3765 estimated_loop_iterations (struct loop *loop, widest_int *nit)
3767 /* When SCEV information is available, try to update loop iterations
3768 estimate. Otherwise just return whatever we recorded earlier. */
3769 if (scev_initialized_p ())
3770 estimate_numbers_of_iterations_loop (loop);
3772 return (get_estimated_loop_iterations (loop, nit));
3775 /* Similar to estimated_loop_iterations, but returns the estimate only
3776 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3777 on the number of iterations of LOOP could not be derived, returns -1. */
3779 HOST_WIDE_INT
3780 estimated_loop_iterations_int (struct loop *loop)
3782 widest_int nit;
3783 HOST_WIDE_INT hwi_nit;
3785 if (!estimated_loop_iterations (loop, &nit))
3786 return -1;
3788 if (!wi::fits_shwi_p (nit))
3789 return -1;
3790 hwi_nit = nit.to_shwi ();
3792 return hwi_nit < 0 ? -1 : hwi_nit;
3796 /* Sets NIT to an upper bound for the maximum number of executions of the
3797 latch of the LOOP. If we have no reliable estimate, the function returns
3798 false, otherwise returns true. */
3800 bool
3801 max_loop_iterations (struct loop *loop, widest_int *nit)
3803 /* When SCEV information is available, try to update loop iterations
3804 estimate. Otherwise just return whatever we recorded earlier. */
3805 if (scev_initialized_p ())
3806 estimate_numbers_of_iterations_loop (loop);
3808 return get_max_loop_iterations (loop, nit);
3811 /* Similar to max_loop_iterations, but returns the estimate only
3812 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3813 on the number of iterations of LOOP could not be derived, returns -1. */
3815 HOST_WIDE_INT
3816 max_loop_iterations_int (struct loop *loop)
3818 widest_int nit;
3819 HOST_WIDE_INT hwi_nit;
3821 if (!max_loop_iterations (loop, &nit))
3822 return -1;
3824 if (!wi::fits_shwi_p (nit))
3825 return -1;
3826 hwi_nit = nit.to_shwi ();
3828 return hwi_nit < 0 ? -1 : hwi_nit;
3831 /* Returns an estimate for the number of executions of statements
3832 in the LOOP. For statements before the loop exit, this exceeds
3833 the number of executions of the latch by one. */
3835 HOST_WIDE_INT
3836 estimated_stmt_executions_int (struct loop *loop)
3838 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
3839 HOST_WIDE_INT snit;
3841 if (nit == -1)
3842 return -1;
3844 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3846 /* If the computation overflows, return -1. */
3847 return snit < 0 ? -1 : snit;
3850 /* Sets NIT to the estimated maximum number of executions of the latch of the
3851 LOOP, plus one. If we have no reliable estimate, the function returns
3852 false, otherwise returns true. */
3854 bool
3855 max_stmt_executions (struct loop *loop, widest_int *nit)
3857 widest_int nit_minus_one;
3859 if (!max_loop_iterations (loop, nit))
3860 return false;
3862 nit_minus_one = *nit;
3864 *nit += 1;
3866 return wi::gtu_p (*nit, nit_minus_one);
3869 /* Sets NIT to the estimated number of executions of the latch of the
3870 LOOP, plus one. If we have no reliable estimate, the function returns
3871 false, otherwise returns true. */
3873 bool
3874 estimated_stmt_executions (struct loop *loop, widest_int *nit)
3876 widest_int nit_minus_one;
3878 if (!estimated_loop_iterations (loop, nit))
3879 return false;
3881 nit_minus_one = *nit;
3883 *nit += 1;
3885 return wi::gtu_p (*nit, nit_minus_one);
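/* In both functions above the wi::gtu_p test is an overflow check: if the
   recorded bound is the maximal widest_int value, *NIT + 1 wraps around,
   the unsigned comparison with the saved NIT_MINUS_ONE fails, and we
   report failure instead of returning a wrapped count.  A minimal sketch
   of the pattern:

     widest_int before = x;
     x += 1;
     if (!wi::gtu_p (x, before))
       ...                      ;; x wrapped around
*/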
3888 /* Records estimates on numbers of iterations of loops. */
3890 void
3891 estimate_numbers_of_iterations (void)
3893 struct loop *loop;
3895 /* We don't want to issue signed overflow warnings while getting
3896 loop iteration estimates. */
3897 fold_defer_overflow_warnings ();
3899 FOR_EACH_LOOP (loop, 0)
3901 estimate_numbers_of_iterations_loop (loop);
3904 fold_undefer_and_ignore_overflow_warnings ();
3907 /* Returns true if statement S1 dominates statement S2. */
3909 bool
3910 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
3912 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
3914 if (!bb1
3915 || s1 == s2)
3916 return true;
3918 if (bb1 == bb2)
3920 gimple_stmt_iterator bsi;
3922 if (gimple_code (s2) == GIMPLE_PHI)
3923 return false;
3925 if (gimple_code (s1) == GIMPLE_PHI)
3926 return true;
3928 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
3929 if (gsi_stmt (bsi) == s1)
3930 return true;
3932 return false;
3935 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
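/* For example, with both statements in one basic block

     s1:  a_1 = b_2 + 1;
     s2:  c_3 = a_1 * 2;

   the linear walk above finds S1 before S2 and returns true.  PHI nodes
   are treated as coming before all other statements of their block, and
   for distinct blocks the query falls back to the dominator tree.
   (Illustrative note, not part of the original source.)  */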
3938 /* Returns true when we can prove that the number of executions of
3939 STMT in the loop is at most NITER, according to the bound on
3940 the number of executions of the statement NITER_BOUND->stmt recorded in
3941 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
3943 ??? This code can become quite a CPU hog - we can have many bounds,
3944 and a large basic block forcing stmt_dominates_stmt_p to be queried
3945 many times on it, so the whole thing is O(n^2) for a single
3946 scev_probably_wraps_p invocation (and that can be done n times).
3948 It would make more sense (and give better answers) to remember BB
3949 bounds computed by discover_iteration_bound_by_body_walk. */
3951 static bool
3952 n_of_executions_at_most (gimple *stmt,
3953 struct nb_iter_bound *niter_bound,
3954 tree niter)
3956 widest_int bound = niter_bound->bound;
3957 tree nit_type = TREE_TYPE (niter), e;
3958 enum tree_code cmp;
3960 gcc_assert (TYPE_UNSIGNED (nit_type));
3962 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
3963 the number of iterations is small. */
3964 if (!wi::fits_to_tree_p (bound, nit_type))
3965 return false;
3967 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
3968 times. This means that:
3970 -- if NITER_BOUND->is_exit is true, then everything after
3971 it is executed at most NITER_BOUND->bound times.
3973 -- If NITER_BOUND->is_exit is false, and we can prove that whenever STMT
3974 is executed, NITER_BOUND->stmt is executed as well in the same
3975 iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
3977 If we can determine that NITER_BOUND->stmt is always executed
3978 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
3979 We conclude that this is the case if both statements belong to the same
3980 basic block, STMT comes before NITER_BOUND->stmt, and there are no
3981 statements with side effects in between. */
3983 if (niter_bound->is_exit)
3985 if (stmt == niter_bound->stmt
3986 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
3987 return false;
3988 cmp = GE_EXPR;
3990 else
3992 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
3994 gimple_stmt_iterator bsi;
3995 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
3996 || gimple_code (stmt) == GIMPLE_PHI
3997 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
3998 return false;
4000 /* By stmt_dominates_stmt_p we already know that STMT appears
4001 before NITER_BOUND->STMT. We still need to test that the loop
4002 cannot be terminated by a side effect in between. */
4003 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4004 gsi_next (&bsi))
4005 if (gimple_has_side_effects (gsi_stmt (bsi)))
4006 return false;
4007 bound += 1;
4008 if (bound == 0
4009 || !wi::fits_to_tree_p (bound, nit_type))
4010 return false;
4012 cmp = GT_EXPR;
4015 e = fold_binary (cmp, boolean_type_node,
4016 niter, wide_int_to_tree (nit_type, bound));
4017 return e && integer_nonzerop (e);
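/* A worked instance of the comparison above (illustrative only): for an
   exit bound with BOUND == 7 whose statement dominates STMT, we test
   NITER >= 7, since everything after the exit test runs at most 7 times.
   For a non-exit bound we test NITER > BOUND instead, after BOUND has been
   bumped by one in the same-block case to account for one extra possible
   execution of STMT.  */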
4020 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
4022 bool
4023 nowrap_type_p (tree type)
4025 if (INTEGRAL_TYPE_P (type)
4026 && TYPE_OVERFLOW_UNDEFINED (type))
4027 return true;
4029 if (POINTER_TYPE_P (type))
4030 return true;
4032 return false;
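/* For example (illustrative, assuming the default overflow semantics,
   i.e. no -fwrapv): a signed `int' IV has TYPE_OVERFLOW_UNDEFINED set and
   is treated as non-wrapping, as is any pointer type, while an
   `unsigned int' IV wraps modulo 2^32 and makes this return false.  */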
4035 /* Return true if we can prove LOOP is exited before the evolution of the
4036 induction variable {BASE, STEP} overflows with respect to its type bound. */
4038 static bool
4039 loop_exits_before_overflow (tree base, tree step,
4040 gimple *at_stmt, struct loop *loop)
4042 widest_int niter;
4043 struct control_iv *civ;
4044 struct nb_iter_bound *bound;
4045 tree e, delta, step_abs, unsigned_base;
4046 tree type = TREE_TYPE (step);
4047 tree unsigned_type, valid_niter;
4049 /* Don't issue signed overflow warnings. */
4050 fold_defer_overflow_warnings ();
4052 /* Compute the number of iterations before we reach the bound of the
4053 type, and verify that the loop is exited before this occurs. */
4054 unsigned_type = unsigned_type_for (type);
4055 unsigned_base = fold_convert (unsigned_type, base);
4057 if (tree_int_cst_sign_bit (step))
4059 tree extreme = fold_convert (unsigned_type,
4060 lower_bound_in_type (type, type));
4061 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4062 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4063 fold_convert (unsigned_type, step));
4065 else
4067 tree extreme = fold_convert (unsigned_type,
4068 upper_bound_in_type (type, type));
4069 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4070 step_abs = fold_convert (unsigned_type, step);
4073 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
4075 estimate_numbers_of_iterations_loop (loop);
4077 if (max_loop_iterations (loop, &niter)
4078 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4079 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4080 wide_int_to_tree (TREE_TYPE (valid_niter),
4081 niter))) != NULL
4082 && integer_nonzerop (e))
4084 fold_undefer_and_ignore_overflow_warnings ();
4085 return true;
4087 if (at_stmt)
4088 for (bound = loop->bounds; bound; bound = bound->next)
4090 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4092 fold_undefer_and_ignore_overflow_warnings ();
4093 return true;
4096 fold_undefer_and_ignore_overflow_warnings ();
4098 /* Try to prove the loop is exited before {base, step} overflows with the
4099 help of the analyzed loop control IVs. This is done only for IVs with
4100 constant step because otherwise we don't have the information. */
4101 if (TREE_CODE (step) == INTEGER_CST)
4103 tree stop = (TREE_CODE (base) == SSA_NAME) ? base : NULL;
4105 for (civ = loop->control_ivs; civ; civ = civ->next)
4107 enum tree_code code;
4108 tree stepped, extreme, civ_type = TREE_TYPE (civ->step);
4110 /* Have to consider type difference because operand_equal_p ignores
4111 that for constants. */
4112 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4113 || element_precision (type) != element_precision (civ_type))
4114 continue;
4116 /* Only consider control IVs with the same step. */
4117 if (!operand_equal_p (step, civ->step, 0))
4118 continue;
4120 /* Done proving if this is a no-overflow control IV. */
4121 if (operand_equal_p (base, civ->base, 0))
4122 return true;
4124 /* If this is a before-stepping control IV, in other words, we have
4126 {civ_base, step} = {base + step, step}
4128 Because civ {base + step, step} doesn't overflow during loop
4129 iterations, {base, step} will not overflow if we can prove the
4130 operation "base + step" does not overflow. Specifically, we try
4131 to prove the conditions below are satisfied:
4133 base <= UPPER_BOUND (type) - step ;;step > 0
4134 base >= LOWER_BOUND (type) - step ;;step < 0
4136 by proving the reverse conditions are false using loop's initial
4137 condition. */
4138 if (POINTER_TYPE_P (TREE_TYPE (base)))
4139 code = POINTER_PLUS_EXPR;
4140 else
4141 code = PLUS_EXPR;
4143 stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4144 if (operand_equal_p (stepped, civ->base, 0))
4146 if (tree_int_cst_sign_bit (step))
4148 code = LT_EXPR;
4149 extreme = lower_bound_in_type (type, type);
4151 else
4153 code = GT_EXPR;
4154 extreme = upper_bound_in_type (type, type);
4156 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4157 e = fold_build2 (code, boolean_type_node, base, extreme);
4158 e = simplify_using_initial_conditions (loop, e, stop);
4159 if (integer_zerop (e))
4160 return true;
4165 return false;
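/* A worked instance of the first strategy above (illustrative only): for
   a signed char IV {100, +, 10}, UNSIGNED_BASE is 100 and the upper
   extreme is 127, so DELTA = 27, STEP_ABS = 10 and VALID_NITER = 2; the
   IV provably does not overflow if the loop's maximum number of latch
   iterations is smaller than that.  */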
4168 /* Return false only when the induction variable BASE + STEP * I is
4169 known to not overflow: i.e. when the number of iterations is small
4170 enough with respect to the step and initial condition in order to
4171 keep the evolution confined in TYPE's bounds. Return true when the
4172 iv is known to overflow or when the property is not computable.
4174 USE_OVERFLOW_SEMANTICS is true if this function should assume that
4175 the rules for overflow of the given language apply (e.g., that signed
4176 arithmetic in C does not overflow). */
4178 bool
4179 scev_probably_wraps_p (tree base, tree step,
4180 gimple *at_stmt, struct loop *loop,
4181 bool use_overflow_semantics)
4183 /* FIXME: We really need something like
4184 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4186 We used to test for the following situation that frequently appears
4187 during address arithmetic:
4189 D.1621_13 = (long unsigned intD.4) D.1620_12;
4190 D.1622_14 = D.1621_13 * 8;
4191 D.1623_15 = (doubleD.29 *) D.1622_14;
4193 And derived that the sequence corresponding to D_14
4194 can be proved to not wrap because it is used for computing a
4195 memory access; however, this is not really the case -- for example,
4196 if D_12 = (unsigned char) [254,+,1], then D_14 has values
4197 2032, 2040, 0, 8, ..., but the code is still legal. */
4199 if (chrec_contains_undetermined (base)
4200 || chrec_contains_undetermined (step))
4201 return true;
4203 if (integer_zerop (step))
4204 return false;
4206 /* If we can use the fact that signed and pointer arithmetic does not
4207 wrap, we are done. */
4208 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4209 return false;
4211 /* To be able to use estimates on number of iterations of the loop,
4212 we must have an upper bound on the absolute value of the step. */
4213 if (TREE_CODE (step) != INTEGER_CST)
4214 return true;
4216 if (loop_exits_before_overflow (base, step, at_stmt, loop))
4217 return false;
4219 /* At this point we still don't have a proof that the iv does not
4220 overflow: give up. */
4221 return true;
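/* A minimal caller sketch (hypothetical, not part of this file):

     if (!scev_probably_wraps_p (base, step, use_stmt, loop, true))
       ...                      ;; {base, +, step} stays within its type

   Passing true for USE_OVERFLOW_SEMANTICS lets the check exploit the
   undefinedness of signed overflow in the source language.  */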
4224 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
4226 void
4227 free_numbers_of_iterations_estimates_loop (struct loop *loop)
4229 struct control_iv *civ;
4230 struct nb_iter_bound *bound;
4232 loop->nb_iterations = NULL;
4233 loop->estimate_state = EST_NOT_COMPUTED;
4234 for (bound = loop->bounds; bound;)
4236 struct nb_iter_bound *next = bound->next;
4237 ggc_free (bound);
4238 bound = next;
4240 loop->bounds = NULL;
4242 for (civ = loop->control_ivs; civ;)
4244 struct control_iv *next = civ->next;
4245 ggc_free (civ);
4246 civ = next;
4248 loop->control_ivs = NULL;
4251 /* Frees the information on upper bounds on numbers of iterations of loops. */
4253 void
4254 free_numbers_of_iterations_estimates (function *fn)
4256 struct loop *loop;
4258 FOR_EACH_LOOP_FN (fn, loop, 0)
4260 free_numbers_of_iterations_estimates_loop (loop);
4264 /* Substitute value VAL for ssa name NAME inside expressions held
4265 at LOOP. */
4267 void
4268 substitute_in_loop_info (struct loop *loop, tree name, tree val)
4270 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);