gcc/tree-ssa-loop-niter.c

/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "fold-const.h"
#include "calls.h"
#include "intl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-dfa.h"

/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

struct bounds
{
  mpz_t below, up;
};

static bool number_of_iterations_popcount (loop_p loop, edge exit,
					   enum tree_code code,
					   class tree_niter_desc *niter);

/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
	break;

      *var = op0;
      /* Always sign extend the offset.  */
      wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
      if (negate)
	mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}
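
/* Illustrative examples (added; hypothetical SSA names, not part of the
   original source): for EXPR = i_4 + 7 the function above stores
   VAR = i_4 and OFFSET = 7; for EXPR = i_4 - 7 it stores OFFSET = -7;
   and for a plain INTEGER_CST such as 42, VAR becomes the zero constant
   of the type and OFFSET = 42.  */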

/* From condition C0 CMP C1 derives information regarding the value range
   of VAR, which is of TYPE.  Results are stored to BELOW and UP.  */

static void
refine_value_range_using_guard (tree type, tree var,
				tree c0, enum tree_code cmp, tree c1,
				mpz_t below, mpz_t up)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1;
  mpz_t mint, maxt, minc1, maxc1;
  wide_int minv, maxv;
  bool no_wrap = nowrap_type_p (type);
  bool c0_ok, c1_ok;
  signop sgn = TYPE_SIGN (type);

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however,
	 such a guard is unlikely to appear, so we do not bother with
	 handling it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information,
	 except for cases of comparing with bounds.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same
	 type as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (operand_equal_p (var, c0, 0))
	{
	  mpz_t valc1;

	  /* Case of comparing VAR with its below/up bounds.  */
	  mpz_init (valc1);
	  wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
	  if (mpz_cmp (valc1, below) == 0)
	    cmp = GT_EXPR;
	  if (mpz_cmp (valc1, up) == 0)
	    cmp = LT_EXPR;

	  mpz_clear (valc1);
	}
      else
	{
	  /* Case of comparing with the bounds of the type.  */
	  wide_int min = wi::min_value (type);
	  wide_int max = wi::max_value (type);

	  if (wi::to_wide (c1) == min)
	    cmp = GT_EXPR;
	  if (wi::to_wide (c1) == max)
	    cmp = LT_EXPR;
	}

      /* Quick return if no useful information.  */
      if (cmp == NE_EXPR)
	return;

      break;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VAR.  */
  if (operand_equal_p (var, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }
  else if (!operand_equal_p (var, varc0, 0))
    {
      mpz_clear (offc0);
      mpz_clear (offc1);
      return;
    }

  mpz_init (mint);
  mpz_init (maxt);
  get_type_static_bounds (type, mint, maxt);
  mpz_init (minc1);
  mpz_init (maxc1);
  /* Setup range information for varc1.  */
  if (integer_zerop (varc1))
    {
      wi::to_mpz (0, minc1, TYPE_SIGN (type));
      wi::to_mpz (0, maxc1, TYPE_SIGN (type));
    }
  else if (TREE_CODE (varc1) == SSA_NAME
	   && INTEGRAL_TYPE_P (type)
	   && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
    {
      gcc_assert (wi::le_p (minv, maxv, sgn));
      wi::to_mpz (minv, minc1, sgn);
      wi::to_mpz (maxv, maxc1, sgn);
    }
  else
    {
      mpz_set (minc1, mint);
      mpz_set (maxc1, maxt);
    }

  /* Compute valid range information for varc1 + offc1.  Note nothing
     useful can be derived if it overflows or underflows.  Overflow or
     underflow could happen when:

       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
  mpz_add (minc1, minc1, offc1);
  mpz_add (maxc1, maxc1, offc1);
  c1_ok = (no_wrap
	   || mpz_sgn (offc1) == 0
	   || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
	   || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
  if (!c1_ok)
    goto end;

  if (mpz_cmp (minc1, mint) < 0)
    mpz_set (minc1, mint);
  if (mpz_cmp (maxc1, maxt) > 0)
    mpz_set (maxc1, maxt);

  if (cmp == LT_EXPR)
    {
      cmp = LE_EXPR;
      mpz_sub_ui (maxc1, maxc1, 1);
    }
  if (cmp == GT_EXPR)
    {
      cmp = GE_EXPR;
      mpz_add_ui (minc1, minc1, 1);
    }

  /* Compute range information for varc0.  If there is no overflow,
     the condition implied that

       (varc0) cmp (varc1 + offc1 - offc0)

     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
     or the below bound if cmp is GE_EXPR.

     To prove there is no overflow/underflow, we need to check the
     following four cases:
       1) cmp == LE_EXPR && offc0 > 0

	    (varc0 + offc0) doesn't overflow
	    && (varc1 + offc1 - offc0) doesn't underflow

       2) cmp == LE_EXPR && offc0 < 0

	    (varc0 + offc0) doesn't underflow
	    && (varc1 + offc1 - offc0) doesn't overflow

	  In this case, (varc0 + offc0) will never underflow if we can
	  prove (varc1 + offc1 - offc0) doesn't overflow.

       3) cmp == GE_EXPR && offc0 < 0

	    (varc0 + offc0) doesn't underflow
	    && (varc1 + offc1 - offc0) doesn't overflow

       4) cmp == GE_EXPR && offc0 > 0

	    (varc0 + offc0) doesn't overflow
	    && (varc1 + offc1 - offc0) doesn't underflow

	  In this case, (varc0 + offc0) will never overflow if we can
	  prove (varc1 + offc1 - offc0) doesn't underflow.

     Note we only handle cases 2 and 4 in the code below.  */

  mpz_sub (minc1, minc1, offc0);
  mpz_sub (maxc1, maxc1, offc0);
  c0_ok = (no_wrap
	   || mpz_sgn (offc0) == 0
	   || (cmp == LE_EXPR
	       && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
	   || (cmp == GE_EXPR
	       && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
  if (!c0_ok)
    goto end;

  if (cmp == LE_EXPR)
    {
      if (mpz_cmp (up, maxc1) > 0)
	mpz_set (up, maxc1);
    }
  else
    {
      if (mpz_cmp (below, minc1) < 0)
	mpz_set (below, minc1);
    }

end:
  mpz_clear (mint);
  mpz_clear (maxt);
  mpz_clear (minc1);
  mpz_clear (maxc1);
  mpz_clear (offc0);
  mpz_clear (offc1);
}
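
/* Illustrative example (added; hypothetical SSA names): with
   TYPE = unsigned int and a dominating guard i_1 < n_2 where VRP knows
   n_2 is in [1, 100], the function above strengthens the condition to
   i_1 <= n_2 - 1 and tightens UP for VAR = i_1 to 99.  */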

/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (class loop *loop, tree type, tree var, mpz_t off,
		       mpz_t min, mpz_t max)
{
  int cnt = 0;
  mpz_t minm, maxm;
  basic_block bb;
  wide_int minv, maxv;
  enum value_range_kind rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gphi_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
	 PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  wide_int minc, maxc;
	  if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
	      && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
		  == VR_RANGE))
	    {
	      if (rtype != VR_RANGE)
		{
		  rtype = VR_RANGE;
		  minv = minc;
		  maxv = maxc;
		}
	      else
		{
		  minv = wi::max (minv, minc, sgn);
		  maxv = wi::min (maxv, maxc, sgn);
		  /* If the PHI result range is inconsistent with
		     the VAR range, give up on looking at the PHI
		     results.  This can happen if VR_UNDEFINED is
		     involved.  */
		  if (wi::gt_p (minv, maxv, sgn))
		    {
		      rtype = get_range_info (var, &minv, &maxv);
		      break;
		    }
		}
	    }
	}
      mpz_init (minm);
      mpz_init (maxm);
      if (rtype != VR_RANGE)
	{
	  mpz_set (minm, min);
	  mpz_set (maxm, max);
	}
      else
	{
	  gcc_assert (wi::le_p (minv, maxv, sgn));
	  wi::to_mpz (minv, minm, sgn);
	  wi::to_mpz (maxv, maxm, sgn);
	}
      /* Now walk the dominators of the loop header and use the entry
	 guards to refine the estimates.  */
      for (bb = loop->header;
	   bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
	   bb = get_immediate_dominator (CDI_DOMINATORS, bb))
	{
	  edge e;
	  tree c0, c1;
	  gimple *cond;
	  enum tree_code cmp;

	  if (!single_pred_p (bb))
	    continue;
	  e = single_pred_edge (bb);

	  if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	    continue;

	  cond = last_stmt (e->src);
	  c0 = gimple_cond_lhs (cond);
	  cmp = gimple_cond_code (cond);
	  c1 = gimple_cond_rhs (cond);

	  if (e->flags & EDGE_FALSE_VALUE)
	    cmp = invert_tree_comparison (cmp, false);

	  refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
	  ++cnt;
	}

      mpz_add (minm, minm, off);
      mpz_add (maxm, maxm, off);
      /* If the computation may not wrap or off is zero, then this
	 is always fine.  If off is negative and minv + off isn't
	 smaller than type's minimum, or off is positive and
	 maxv + off isn't bigger than type's maximum, use the more
	 precise range too.  */
      if (nowrap_type_p (type)
	  || mpz_sgn (off) == 0
	  || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
	  || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
	{
	  mpz_set (min, minm);
	  mpz_set (max, maxm);
	  mpz_clear (minm);
	  mpz_clear (maxm);
	  return;
	}
      mpz_clear (minm);
      mpz_clear (maxm);
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}

/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
				    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
	  them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
	  expressions are var + X - M and var + Y, where M is the range of
	  the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
	  is X - Y - M.
     Therefore, if the arithmetic in the type does not overflow, then the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
	mpz_sub (bnds->below, bnds->below, m);
      else
	mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
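
/* Worked example (added): in an 8-bit unsigned type (M = 256) with X = 10
   and Y = 3, the difference of var + X and var + Y is 7 unless exactly one
   of the additions wraps, so the stored bounds are
   (X - Y - M, X - Y) = (-249, 7).  */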

/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
			   tree vary, mpz_t offy,
			   tree c0, enum tree_code cmp, tree c1,
			   bounds *bnds)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
	 a guard is unlikely to appear, so we do not bother with handling
	 it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except for
	 the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same type
	 as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
	  && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
	{
	  cmp = GT_EXPR;
	  break;
	}
      if (TYPE_MAX_VALUE (type)
	  && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
	{
	  cmp = LT_EXPR;
	  break;
	}

      return;
    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      std::swap (varx, vary);
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
	VARX + OFFC0 overflows, but VARX + OFFX does not.
	This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
	VARY + OFFC1 underflows and VARY + OFFY does not.
	This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
	      || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
	      || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
	mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
	{
	  mpz_neg (bnd, bnd);
	  if (mpz_cmp (bnds->below, bnd) < 0)
	    mpz_set (bnds->below, bnd);
	}
      else
	{
	  if (mpz_cmp (bnd, bnds->up) < 0)
	    mpz_set (bnds->up, bnd);
	}
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}

/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (class loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple *cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
	 offsets.  Matters are a bit more complicated when the
	 addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
	 estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
	cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
				 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}

/* Returns inverse of X modulo 2^s, where MASK = 2^s - 1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
	{
	  irslt *= ix;
	  ix *= ix;
	}
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
	{
	  rslt = int_const_binop (MULT_EXPR, rslt, x);
	  x = int_const_binop (MULT_EXPR, x, x);
	}
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
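
/* Worked example (added): for MASK = 15 (s = 4) and X = 3, CTR = 3 and
   the loop above computes X^(2^3 - 1) = 3^7 = 2187 == 11 (mod 16);
   indeed 3 * 11 == 33 == 1 (mod 16).  For odd x, x^(2^(s-1) - 1) is the
   inverse of x modulo 2^s, since the multiplicative order of x divides
   2^(s-1).  */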

/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable
   eventually reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
			     bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
		       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
	  && TREE_CODE (s) == INTEGER_CST
	  && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
			    TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
	  && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
	 the induction variable overflows (unless the loop is exited in some
	 other way before).  Note that the actual induction variable in the
	 loop (which ranges from base to final instead of from 0 to C) may
	 overflow, in which case BNDS.up will not be giving a correct upper
	 bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type)
				   - wi::ctz (wi::to_wide (s)), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ...  */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
	 the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
	wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
      else if (bnds_u_valid)
	mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}
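
/* Illustrative example (added): for an 8-bit control variable with step
   S = 4, wi::ctz (S) = 2, so when the variable may overflow the bound
   used above is the period 2^(8 - 2) - 1 = 63; after that many further
   iterations the value of S * i repeats.  */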

/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   eventually reaches the value FINAL (we derived this earlier, and possibly
   set NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (class loop *loop, tree type, affine_iv *iv,
			 tree final, class tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
			fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv->base),
		       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, final),
		       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
			       exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* Compute no-overflow information for the control iv.  This can be
     proven when the two conditions below are satisfied:

       1) IV evaluates toward FINAL at beginning, i.e.:
	    base <= FINAL ; step > 0
	    base >= FINAL ; step < 0

       2) |FINAL - base| is an exact multiple of step.

     Unfortunately, it's hard to prove the above conditions after pass loop-ch
     because a loop with exit condition (IV != FINAL) usually will be guarded
     by the initial condition (IV.base - IV.step != FINAL).  In this case, we
     can alternatively try to prove the conditions below:

       1') IV evaluates toward FINAL at beginning, i.e.:
	    new_base = base - step < FINAL ; step > 0
					     && base - step doesn't underflow
	    new_base = base - step > FINAL ; step < 0
					     && base - step doesn't overflow

       2') |FINAL - new_base| is an exact multiple of step.

     Please refer to PR34114 as an example of loop-ch's impact, also refer
     to PR72817 as an example why condition 2') is necessary.

     Note, for NE_EXPR, base equal to FINAL is a special case, in
     which the loop exits immediately, and the iv does not overflow.  */
  if (!niter->control.no_overflow
      && (integer_onep (s) || multiple_of_p (type, c, s)))
    {
      tree t, cond, new_c, relaxed_cond = boolean_false_node;

      if (tree_int_cst_sign_bit (iv->step))
	{
	  cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
	  if (TREE_CODE (type) == INTEGER_TYPE)
	    {
	      /* Only when base - step doesn't overflow.  */
	      t = TYPE_MAX_VALUE (type);
	      t = fold_build2 (PLUS_EXPR, type, t, iv->step);
	      t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
	      if (integer_nonzerop (t))
		{
		  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
		  new_c = fold_build2 (MINUS_EXPR, niter_type,
				       fold_convert (niter_type, t),
				       fold_convert (niter_type, final));
		  if (multiple_of_p (type, new_c, s))
		    relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
						t, final);
		}
	    }
	}
      else
	{
	  cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
	  if (TREE_CODE (type) == INTEGER_TYPE)
	    {
	      /* Only when base - step doesn't underflow.  */
	      t = TYPE_MIN_VALUE (type);
	      t = fold_build2 (PLUS_EXPR, type, t, iv->step);
	      t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
	      if (integer_nonzerop (t))
		{
		  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
		  new_c = fold_build2 (MINUS_EXPR, niter_type,
				       fold_convert (niter_type, final),
				       fold_convert (niter_type, t));
		  if (multiple_of_p (type, new_c, s))
		    relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
						t, final);
		}
	    }
	}

      t = simplify_using_initial_conditions (loop, cond);
      if (!t || !integer_onep (t))
	t = simplify_using_initial_conditions (loop, relaxed_cond);

      if (t && integer_onep (t))
	niter->control.no_overflow = true;
    }

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }
  if (niter->control.no_overflow && multiple_of_p (type, c, s))
    {
      niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
      return true;
    }

  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
			       (TYPE_PRECISION (niter_type)
				- tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
			       build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
	 assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
				assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  if (integer_onep (s))
    {
      niter->niter = c;
    }
  else
    {
      tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
      niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
    }
  return true;
}
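
/* Worked example (added): in an 8-bit type with S = 12 and C = 8, we get
   d = nsd (12, 256) = 4, so the exit can only be taken if 4 divides C;
   then c = 8 / 4 = 2, s = 12 / 4 = 3, and the iteration count is
   inverse (3) * 2 mod 2^6 = 43 * 2 mod 64 = 22.  Indeed
   12 * 22 == 264 == 8 (mod 256).  */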

/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
			       class tree_niter_desc *niter,
			       tree *delta, tree step,
			       bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points
     being placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
      (iv0->no_overflow && integer_nonzerop (iv0->step))
      || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (MINUS_EXPR, type1,
			       TYPE_MAX_VALUE (type1), tmod);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build_pointer_plus (iv1->base, tmod));
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build2 (PLUS_EXPR, type1,
					   iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type1), tmod);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build_pointer_plus (iv0->base,
						       fold_build1 (NEGATE_EXPR,
								    type1, tmod)),
			      iv1->base);
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build2 (MINUS_EXPR, type1,
					   iv0->base, tmod),
			      iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions,
				      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
				      niter->may_be_zero,
				      noloop);
  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}
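
/* Worked example (added): for the exit test i < n with step 4 and
   DELTA = n - base = 9, MOD = 9 mod 4 = 1 is replaced by 4 - 1 = 3, so
   the final value is n + 3 and DELTA becomes 12; the test is then
   equivalent to i != n + 3, with DELTA an exact multiple of the step
   (12 / 4 = 3 iterations).  */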

/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		       class tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
	return true;

      /* If iv0->base is a constant, we can determine the last value before
	 overflow precisely; otherwise we conservatively assume
	 MAX - STEP + 1.  */

      if (TREE_CODE (iv0->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, TYPE_MAX_VALUE (type)),
			   fold_convert (niter_type, iv0->base));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
			   TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
				iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
	return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, iv1->base),
			   fold_convert (niter_type, TYPE_MIN_VALUE (type)));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
			   TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
				iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}

/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		      class tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

       -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables
     us to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */

  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
		   /* For pointers, only values lying inside a single object
		      can be compared or manipulated by pointer arithmetics.
		      GCC in general does not allow or handle objects larger
		      than half of the address space, hence the upper bound
		      is satisfied for pointers.  */
		   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
			  iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
	 the 0 address never belongs to any object, we can assume this for
	 pointers.  */
      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type), diff);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	}

      /* And then we can compute iv0->base - diff, and compare it with
	 iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
			  iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MAX_VALUE (type), diff);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	}

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);
  if (!rolls_p)
    {
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
					niter->may_be_zero, mbz);
    }
}

/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

static bool
number_of_iterations_lt (class loop *loop, tree type, affine_iv *iv0,
			 affine_iv *iv1, class tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv1->base),
		       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

	 or

	 for (i = iv1->base; i > iv0->base; i--).

	 In both cases # of iterations is iv1->base - iv0->base, assuming that
	 iv1->base >= iv0->base.

	 First try to derive a lower bound on the value of
	 iv1->base - iv0->base, computed in full precision.  If the difference
	 is nonnegative, we are done, otherwise we must record the
	 condition.  */

      if (mpz_sgn (bnds->below) < 0)
	niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
					  iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
				     TYPE_SIGN (niter_type));
      niter->control.no_overflow = true;
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
			 fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
				     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
	 zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (loop, type, &zps,
				      delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
		   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
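
/* Worked example (added): for (i = 3; i < 10; i += 4) gives delta = 7 and
   step = 4, so the general formula above yields
   niter = (7 + 4 - 1) / 4 = 2, matching the two iterations executed with
   i = 3 and i = 7.  */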

/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (class loop *loop, tree type, affine_iv *iv0,
			 affine_iv *iv1, class tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv1->base, TYPE_MAX_VALUE (type));
      else
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
	return false;
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
	iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
	iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
				 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
			     iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (loop, type, iv0, iv1, niter,
				  exit_must_be_taken, bnds);
}
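
/* Illustrative example (added): for (i = 0; i <= n; i++) is handled above
   as i < n + 1, recording the assumption n != TYPE_MAX when the exit is
   not already known to be taken, since for n == TYPE_MAX the loop would
   roll forever.  */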

/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}

/* Given exit condition IV0 CODE IV1 in TYPE, this function adjusts
   the condition for loop-until-wrap cases.  For example:
     (unsigned){8, -1}_loop < 10        => {0, 1} != 9
     10 < (unsigned){0, max - 7}_loop   => {0, 1} != 8
   Return true if condition is successfully adjusted.  */

static bool
adjust_cond_for_loop_until_wrap (tree type, affine_iv *iv0, tree_code *code,
				 affine_iv *iv1)
{
  /* Only support simple cases for the moment.  */
  if (TREE_CODE (iv0->base) != INTEGER_CST
      || TREE_CODE (iv1->base) != INTEGER_CST)
    return false;

  tree niter_type = unsigned_type_for (type), high, low;
  /* Case: i-- < 10.  */
  if (integer_zerop (iv1->step))
    {
      /* TODO: Should handle case in which abs(step) != 1.  */
      if (!integer_minus_onep (iv0->step))
	return false;
      /* Give up on infinite loop.  */
      if (*code == LE_EXPR
	  && tree_int_cst_equal (iv1->base, TYPE_MAX_VALUE (type)))
	return false;
      high = fold_build2 (PLUS_EXPR, niter_type,
			  fold_convert (niter_type, iv0->base),
			  build_int_cst (niter_type, 1));
      low = fold_convert (niter_type, TYPE_MIN_VALUE (type));
    }
  else if (integer_zerop (iv0->step))
    {
      /* TODO: Should handle case in which abs(step) != 1.  */
      if (!integer_onep (iv1->step))
	return false;
      /* Give up on infinite loop.  */
      if (*code == LE_EXPR
	  && tree_int_cst_equal (iv0->base, TYPE_MIN_VALUE (type)))
	return false;
      high = fold_convert (niter_type, TYPE_MAX_VALUE (type));
      low = fold_build2 (MINUS_EXPR, niter_type,
			 fold_convert (niter_type, iv1->base),
			 build_int_cst (niter_type, 1));
    }
  else
    gcc_unreachable ();

  iv0->base = low;
  iv0->step = fold_convert (niter_type, integer_one_node);
  iv1->base = high;
  iv1->step = build_int_cst (niter_type, 0);
  *code = NE_EXPR;
  return true;
}

/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1.  Both induction variables must have
   type TYPE, which must be an integer or pointer type.  The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   If EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at class tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */

static bool
number_of_iterations_cond (class loop *loop,
			   tree type, affine_iv *iv0, enum tree_code code,
			   affine_iv *iv1, class tree_niter_desc *niter,
			   bool only_exit, bool every_iteration)
{
  bool exit_must_be_taken = false, ret;
  bounds bnds;

  /* If the test is not executed every iteration, wrapping may make the test
     pass again.
     TODO: the overflow case can still be used as an unreliable estimate of
     the upper bound.  But we have no API to pass it down to number of
     iterations code and, at present, it will not use it anyway.  */
  if (!every_iteration
      && (!iv0->no_overflow || !iv1->no_overflow
	  || code == NE_EXPR || code == EQ_EXPR))
    return false;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->max = 0;
  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
    {
      std::swap (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
	 to the same object.  If they do, the control variable cannot wrap
	 (as wrap around the bounds of memory will never return a pointer
	 that would be guaranteed to point to the same object, even if we
	 avoid undefined behavior by casting to size_t and back).  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow and the only exit
     from the loop is the one that we analyze, we know it must be taken
     eventually.  */
  if (only_exit)
    {
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
	exit_must_be_taken = true;
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
	exit_must_be_taken = true;
    }

  /* We can handle cases in which neither of the sides of the comparison is
     invariant:

       {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
     as if:
       {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}

     provided that either condition below is satisfied:

       a) the test is NE_EXPR;
       b) iv0.step - iv1.step is integer and iv0/iv1 don't overflow.

     This rarely occurs in practice, but it is simple enough to manage.  */
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
      tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
					   iv0->step, iv1->step);

      /* No need to check sign of the new step since the code below takes
	 care of this well.  */
      if (code != NE_EXPR
	  && (TREE_CODE (step) != INTEGER_CST
	      || !iv0->no_overflow || !iv1->no_overflow))
	return false;

      iv0->step = step;
      if (!POINTER_TYPE_P (type))
	iv0->no_overflow = false;

      iv1->step = build_int_cst (step_type, 0);
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common enough
     to waste time on it.  */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
    return false;

  /* If the loop exits immediately, there is nothing to do.  */
  tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
  if (tem && integer_zerop (tem))
    {
      if (!every_iteration)
	return false;
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      niter->max = 0;
      return true;
    }

  /* Handle special case loops: while (i-- < 10) and while (10 < i++) by
     adjusting iv0, iv1 and code.  */
  if (code != NE_EXPR
      && (tree_int_cst_sign_bit (iv0->step)
	  || (!integer_zerop (iv1->step)
	      && !tree_int_cst_sign_bit (iv1->step)))
      && !adjust_cond_for_loop_until_wrap (type, iv0, &code, iv1))
    return false;

  /* OK, now we know we have a sensible loop.  Handle several cases, depending
     on what comparison operator is used.  */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
	       "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, "  exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
	       code == NE_EXPR ? "!="
	       : code == LT_EXPR ? "<"
	       : "<=");
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "  bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
    }

  switch (code)
    {
    case NE_EXPR:
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
				     exit_must_be_taken, &bnds);
      break;

    case LT_EXPR:
      ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
				     exit_must_be_taken, &bnds);
      break;

    case LE_EXPR:
      ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
				     exit_must_be_taken, &bnds);
      break;

    default:
      gcc_unreachable ();
    }

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (ret)
	{
	  fprintf (dump_file, "  result:\n");
	  if (!integer_nonzerop (niter->assumptions))
	    {
	      fprintf (dump_file, "    under assumptions ");
	      print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }

	  if (!integer_zerop (niter->may_be_zero))
	    {
	      fprintf (dump_file, "    zero if ");
	      print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }

	  fprintf (dump_file, "    # of iterations ");
	  print_generic_expr (dump_file, niter->niter, TDF_SLIM);
	  fprintf (dump_file, ", bounded by ");
	  print_decu (niter->max, dump_file);
	  fprintf (dump_file, "\n");
	}
      else
	fprintf (dump_file, "  failed\n\n");
    }

  return ret;
}
1930 /* Substitute NEW_TREE for OLD in EXPR and fold the result.
1931 If VALUEIZE is non-NULL then OLD and NEW_TREE are ignored and instead
1932 all SSA names are replaced with the result of calling the VALUEIZE
1933 function with the SSA name as argument. */
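/* A usage sketch added by the editor; the trees involved are hypothetical.
   Replacing OLD = n_1 by NEW_TREE = 7 in the expression n_1 * 2 + 1
   rebuilds the affected operands and folds the result to the constant 15:

     tree seven = build_int_cst (TREE_TYPE (n_1), 7);
     tree r = simplify_replace_tree (expr, n_1, seven, NULL, NULL);  */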
1935 tree
1936 simplify_replace_tree (tree expr, tree old, tree new_tree,
1937 tree (*valueize) (tree, void*), void *context)
1939 unsigned i, n;
1940 tree ret = NULL_TREE, e, se;
1942 if (!expr)
1943 return NULL_TREE;
1945 /* Do not bother to replace constants. */
1946 if (CONSTANT_CLASS_P (expr))
1947 return expr;
1949 if (valueize)
1951 if (TREE_CODE (expr) == SSA_NAME)
1953 new_tree = valueize (expr, context);
1954 if (new_tree != expr)
1955 return new_tree;
1958 else if (expr == old
1959 || operand_equal_p (expr, old, 0))
1960 return unshare_expr (new_tree);
1962 if (!EXPR_P (expr))
1963 return expr;
1965 n = TREE_OPERAND_LENGTH (expr);
1966 for (i = 0; i < n; i++)
1968 e = TREE_OPERAND (expr, i);
1969 se = simplify_replace_tree (e, old, new_tree, valueize, context);
1970 if (e == se)
1971 continue;
1973 if (!ret)
1974 ret = copy_node (expr);
1976 TREE_OPERAND (ret, i) = se;
1979 return (ret ? fold (ret) : expr);
1982 /* Expand definitions of ssa names in EXPR as long as they are simple
1983 enough, and return the new expression. If STOP is specified, stop
1984 expanding when EXPR equals it. */
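/* For example (an editorial sketch with made-up GIMPLE): given

     i_2 = i_1 + 1;
     j_3 = (long) i_2;

   expanding j_3 should yield (long) (i_1 + 1), which lets callers fold
   conditions expressed in terms of i_1.  */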
1986 static tree
1987 expand_simple_operations (tree expr, tree stop, hash_map<tree, tree> &cache)
1989 unsigned i, n;
1990 tree ret = NULL_TREE, e, ee, e1;
1991 enum tree_code code;
1992 gimple *stmt;
1994 if (expr == NULL_TREE)
1995 return expr;
1997 if (is_gimple_min_invariant (expr))
1998 return expr;
2000 code = TREE_CODE (expr);
2001 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
2003 n = TREE_OPERAND_LENGTH (expr);
2004 for (i = 0; i < n; i++)
2006 e = TREE_OPERAND (expr, i);
2007 /* SCEV analysis feeds us a proper expression
2008 graph matching the SSA graph. Avoid turning it
2009 into a tree here, thus handle tree sharing
2010 properly.
2011 ??? The SSA walk below still turns the SSA graph
2012 into a tree but until we find a testcase do not
2013 introduce additional tree sharing here. */
2014 bool existed_p;
2015 tree &cee = cache.get_or_insert (e, &existed_p);
2016 if (existed_p)
2017 ee = cee;
2018 else
2020 cee = e;
2021 ee = expand_simple_operations (e, stop, cache);
2022 if (ee != e)
2023 *cache.get (e) = ee;
2025 if (e == ee)
2026 continue;
2028 if (!ret)
2029 ret = copy_node (expr);
2031 TREE_OPERAND (ret, i) = ee;
2034 if (!ret)
2035 return expr;
2037 fold_defer_overflow_warnings ();
2038 ret = fold (ret);
2039 fold_undefer_and_ignore_overflow_warnings ();
2040 return ret;
2043 /* Stop if it's not an SSA name, or if it's the one we don't want to expand. */
2044 if (TREE_CODE (expr) != SSA_NAME || expr == stop)
2045 return expr;
2047 stmt = SSA_NAME_DEF_STMT (expr);
2048 if (gimple_code (stmt) == GIMPLE_PHI)
2050 basic_block src, dest;
2052 if (gimple_phi_num_args (stmt) != 1)
2053 return expr;
2054 e = PHI_ARG_DEF (stmt, 0);
2056 /* Avoid propagating through loop exit phi nodes, which
2057 could break loop-closed SSA form restrictions. */
2058 dest = gimple_bb (stmt);
2059 src = single_pred (dest);
2060 if (TREE_CODE (e) == SSA_NAME
2061 && src->loop_father != dest->loop_father)
2062 return expr;
2064 return expand_simple_operations (e, stop, cache);
2066 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2067 return expr;
2069 /* Avoid expanding to expressions that contain SSA names that need
2070 to take part in abnormal coalescing. */
2071 ssa_op_iter iter;
2072 FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
2073 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
2074 return expr;
2076 e = gimple_assign_rhs1 (stmt);
2077 code = gimple_assign_rhs_code (stmt);
2078 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2080 if (is_gimple_min_invariant (e))
2081 return e;
2083 if (code == SSA_NAME)
2084 return expand_simple_operations (e, stop, cache);
2085 else if (code == ADDR_EXPR)
2087 poly_int64 offset;
2088 tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
2089 &offset);
2090 if (base
2091 && TREE_CODE (base) == MEM_REF)
2093 ee = expand_simple_operations (TREE_OPERAND (base, 0), stop,
2094 cache);
2095 return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
2096 wide_int_to_tree (sizetype,
2097 mem_ref_offset (base)
2098 + offset));
2102 return expr;
2105 switch (code)
2107 CASE_CONVERT:
2108 /* Casts are simple. */
2109 ee = expand_simple_operations (e, stop, cache);
2110 return fold_build1 (code, TREE_TYPE (expr), ee);
2112 case PLUS_EXPR:
2113 case MINUS_EXPR:
2114 if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
2115 && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
2116 return expr;
2117 /* Fallthru. */
2118 case POINTER_PLUS_EXPR:
2119 /* And increments and decrements by a constant are simple. */
2120 e1 = gimple_assign_rhs2 (stmt);
2121 if (!is_gimple_min_invariant (e1))
2122 return expr;
2124 ee = expand_simple_operations (e, stop, cache);
2125 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
2127 default:
2128 return expr;
2132 tree
2133 expand_simple_operations (tree expr, tree stop)
2135 hash_map<tree, tree> cache;
2136 return expand_simple_operations (expr, stop, cache);
2139 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2140 expression (or EXPR unchanged, if no simplification was possible). */
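/* Example (an editorial sketch): with COND = (n_1 == 10), the EQ_EXPR
   branch below substitutes 10 for n_1, so EXPR = (n_1 < 20) folds to
   true; the implication checks at the end may likewise prove
   EXPR = (n_1 < 20) from COND = (n_1 < 8) by folding !COND || EXPR.  */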
2142 static tree
2143 tree_simplify_using_condition_1 (tree cond, tree expr)
2145 bool changed;
2146 tree e, e0, e1, e2, notcond;
2147 enum tree_code code = TREE_CODE (expr);
2149 if (code == INTEGER_CST)
2150 return expr;
2152 if (code == TRUTH_OR_EXPR
2153 || code == TRUTH_AND_EXPR
2154 || code == COND_EXPR)
2156 changed = false;
2158 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
2159 if (TREE_OPERAND (expr, 0) != e0)
2160 changed = true;
2162 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
2163 if (TREE_OPERAND (expr, 1) != e1)
2164 changed = true;
2166 if (code == COND_EXPR)
2168 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
2169 if (TREE_OPERAND (expr, 2) != e2)
2170 changed = true;
2172 else
2173 e2 = NULL_TREE;
2175 if (changed)
2177 if (code == COND_EXPR)
2178 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2179 else
2180 expr = fold_build2 (code, boolean_type_node, e0, e1);
2183 return expr;
2186 /* In case COND is an equality, we may be able to simplify EXPR by copy/constant
2187 propagation, and vice versa. Fold does not handle this, since it is
2188 considered too expensive. */
2189 if (TREE_CODE (cond) == EQ_EXPR)
2191 e0 = TREE_OPERAND (cond, 0);
2192 e1 = TREE_OPERAND (cond, 1);
2194 /* We know that e0 == e1. Check whether we can simplify expr
2195 using this fact. */
2196 e = simplify_replace_tree (expr, e0, e1);
2197 if (integer_zerop (e) || integer_nonzerop (e))
2198 return e;
2200 e = simplify_replace_tree (expr, e1, e0);
2201 if (integer_zerop (e) || integer_nonzerop (e))
2202 return e;
2204 if (TREE_CODE (expr) == EQ_EXPR)
2206 e0 = TREE_OPERAND (expr, 0);
2207 e1 = TREE_OPERAND (expr, 1);
2209 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
2210 e = simplify_replace_tree (cond, e0, e1);
2211 if (integer_zerop (e))
2212 return e;
2213 e = simplify_replace_tree (cond, e1, e0);
2214 if (integer_zerop (e))
2215 return e;
2217 if (TREE_CODE (expr) == NE_EXPR)
2219 e0 = TREE_OPERAND (expr, 0);
2220 e1 = TREE_OPERAND (expr, 1);
2222 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2223 e = simplify_replace_tree (cond, e0, e1);
2224 if (integer_zerop (e))
2225 return boolean_true_node;
2226 e = simplify_replace_tree (cond, e1, e0);
2227 if (integer_zerop (e))
2228 return boolean_true_node;
2231 /* Check whether COND ==> EXPR. */
2232 notcond = invert_truthvalue (cond);
2233 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
2234 if (e && integer_nonzerop (e))
2235 return e;
2237 /* Check whether COND ==> not EXPR. */
2238 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
2239 if (e && integer_zerop (e))
2240 return e;
2242 return expr;
2245 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2246 expression (or EXPR unchanged, if no simplification was possible).
2247 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2248 of simple operations in definitions of ssa names in COND are expanded,
2249 so that things like casts or incrementing the value of the bound before
2250 the loop do not cause us to fail. */
2252 static tree
2253 tree_simplify_using_condition (tree cond, tree expr)
2255 cond = expand_simple_operations (cond);
2257 return tree_simplify_using_condition_1 (cond, expr);
2260 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2261 Returns the simplified expression (or EXPR unchanged, if no
2262 simplification was possible). */
2264 tree
2265 simplify_using_initial_conditions (class loop *loop, tree expr)
2267 edge e;
2268 basic_block bb;
2269 gimple *stmt;
2270 tree cond, expanded, backup;
2271 int cnt = 0;
2273 if (TREE_CODE (expr) == INTEGER_CST)
2274 return expr;
2276 backup = expanded = expand_simple_operations (expr);
2278 /* Limit walking the dominators to avoid quadratic behavior in
2279 the number of BBs times the number of loops in degenerate
2280 cases. */
2281 for (bb = loop->header;
2282 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2283 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2285 if (!single_pred_p (bb))
2286 continue;
2287 e = single_pred_edge (bb);
2289 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2290 continue;
2292 stmt = last_stmt (e->src);
2293 cond = fold_build2 (gimple_cond_code (stmt),
2294 boolean_type_node,
2295 gimple_cond_lhs (stmt),
2296 gimple_cond_rhs (stmt));
2297 if (e->flags & EDGE_FALSE_VALUE)
2298 cond = invert_truthvalue (cond);
2299 expanded = tree_simplify_using_condition (cond, expanded);
2300 /* Stop if EXPR has been simplified to a constant. */
2301 if (expanded
2302 && (integer_zerop (expanded) || integer_nonzerop (expanded)))
2303 return expanded;
2305 ++cnt;
2308 /* Return the original expression if no simplification is done. */
2309 return operand_equal_p (backup, expanded, 0) ? expr : expanded;
2312 /* Tries to simplify EXPR using the evolutions of the loop invariants
2313 in the superloops of LOOP. Returns the simplified expression
2314 (or EXPR unchanged, if no simplification was possible). */
2316 static tree
2317 simplify_using_outer_evolutions (class loop *loop, tree expr)
2319 enum tree_code code = TREE_CODE (expr);
2320 bool changed;
2321 tree e, e0, e1, e2;
2323 if (is_gimple_min_invariant (expr))
2324 return expr;
2326 if (code == TRUTH_OR_EXPR
2327 || code == TRUTH_AND_EXPR
2328 || code == COND_EXPR)
2330 changed = false;
2332 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2333 if (TREE_OPERAND (expr, 0) != e0)
2334 changed = true;
2336 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2337 if (TREE_OPERAND (expr, 1) != e1)
2338 changed = true;
2340 if (code == COND_EXPR)
2342 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2343 if (TREE_OPERAND (expr, 2) != e2)
2344 changed = true;
2346 else
2347 e2 = NULL_TREE;
2349 if (changed)
2351 if (code == COND_EXPR)
2352 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2353 else
2354 expr = fold_build2 (code, boolean_type_node, e0, e1);
2357 return expr;
2360 e = instantiate_parameters (loop, expr);
2361 if (is_gimple_min_invariant (e))
2362 return e;
2364 return expr;
2367 /* Returns true if EXIT is the only possible exit from LOOP. */
2369 bool
2370 loop_only_exit_p (const class loop *loop, basic_block *body, const_edge exit)
2372 gimple_stmt_iterator bsi;
2373 unsigned i;
2375 if (exit != single_exit (loop))
2376 return false;
2378 for (i = 0; i < loop->num_nodes; i++)
2379 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2380 if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
2381 return false;
2383 return true;
2386 /* Stores description of number of iterations of LOOP derived from
2387 EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
2388 information could be derived (and fields of NITER have meaning described
2389 in comments at class tree_niter_desc declaration), false otherwise.
2390 When EVERY_ITERATION is true, only tests that are known to be executed
2391 every iteration are considered (i.e. only tests that alone bound the loop).
2392 If AT_STMT is not NULL, this function stores LOOP's condition statement in
2393 it when returning true. */
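/* A worked example (added by the editor): for

     for (i = 0; i != n; i++)
       ...

   the condition for staying in the loop is i != n, with iv0 = {0, +1}
   and iv1 = {n, +0}; the NE_EXPR handling should then produce
   niter->niter = (unsigned) n.  */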
2395 bool
2396 number_of_iterations_exit_assumptions (class loop *loop, edge exit,
2397 class tree_niter_desc *niter,
2398 gcond **at_stmt, bool every_iteration,
2399 basic_block *body)
2401 gimple *last;
2402 gcond *stmt;
2403 tree type;
2404 tree op0, op1;
2405 enum tree_code code;
2406 affine_iv iv0, iv1;
2407 bool safe;
2409 /* Nothing to analyze if the loop is known to be infinite. */
2410 if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
2411 return false;
2413 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2415 if (every_iteration && !safe)
2416 return false;
2418 niter->assumptions = boolean_false_node;
2419 niter->control.base = NULL_TREE;
2420 niter->control.step = NULL_TREE;
2421 niter->control.no_overflow = false;
2422 last = last_stmt (exit->src);
2423 if (!last)
2424 return false;
2425 stmt = dyn_cast <gcond *> (last);
2426 if (!stmt)
2427 return false;
2429 /* We want the condition for staying inside the loop. */
2430 code = gimple_cond_code (stmt);
2431 if (exit->flags & EDGE_TRUE_VALUE)
2432 code = invert_tree_comparison (code, false);
2434 switch (code)
2436 case GT_EXPR:
2437 case GE_EXPR:
2438 case LT_EXPR:
2439 case LE_EXPR:
2440 case NE_EXPR:
2441 break;
2443 default:
2444 return false;
2447 op0 = gimple_cond_lhs (stmt);
2448 op1 = gimple_cond_rhs (stmt);
2449 type = TREE_TYPE (op0);
2451 if (TREE_CODE (type) != INTEGER_TYPE
2452 && !POINTER_TYPE_P (type))
2453 return false;
2455 tree iv0_niters = NULL_TREE;
2456 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2457 op0, &iv0, safe ? &iv0_niters : NULL, false))
2458 return number_of_iterations_popcount (loop, exit, code, niter);
2459 tree iv1_niters = NULL_TREE;
2460 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2461 op1, &iv1, safe ? &iv1_niters : NULL, false))
2462 return false;
2463 /* Give up on the complicated case. */
2464 if (iv0_niters && iv1_niters)
2465 return false;
2467 /* We don't want to see undefined signed overflow warnings while
2468 computing the number of iterations. */
2469 fold_defer_overflow_warnings ();
2471 iv0.base = expand_simple_operations (iv0.base);
2472 iv1.base = expand_simple_operations (iv1.base);
2473 bool body_from_caller = true;
2474 if (!body)
2476 body = get_loop_body (loop);
2477 body_from_caller = false;
2479 bool only_exit_p = loop_only_exit_p (loop, body, exit);
2480 if (!body_from_caller)
2481 free (body);
2482 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2483 only_exit_p, safe))
2485 fold_undefer_and_ignore_overflow_warnings ();
2486 return false;
2489 /* Incorporate additional assumption implied by control iv. */
2490 tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
2491 if (iv_niters)
2493 tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
2494 fold_convert (TREE_TYPE (niter->niter),
2495 iv_niters));
2497 if (!integer_nonzerop (assumption))
2498 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2499 niter->assumptions, assumption);
2501 /* Refine upper bound if possible. */
2502 if (TREE_CODE (iv_niters) == INTEGER_CST
2503 && niter->max > wi::to_widest (iv_niters))
2504 niter->max = wi::to_widest (iv_niters);
2507 /* There are no assumptions if the loop is known to be finite. */
2508 if (!integer_zerop (niter->assumptions)
2509 && loop_constraint_set_p (loop, LOOP_C_FINITE))
2510 niter->assumptions = boolean_true_node;
2512 if (optimize >= 3)
2514 niter->assumptions = simplify_using_outer_evolutions (loop,
2515 niter->assumptions);
2516 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2517 niter->may_be_zero);
2518 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2521 niter->assumptions
2522 = simplify_using_initial_conditions (loop,
2523 niter->assumptions);
2524 niter->may_be_zero
2525 = simplify_using_initial_conditions (loop,
2526 niter->may_be_zero);
2528 fold_undefer_and_ignore_overflow_warnings ();
2530 /* If NITER has simplified into a constant, update MAX. */
2531 if (TREE_CODE (niter->niter) == INTEGER_CST)
2532 niter->max = wi::to_widest (niter->niter);
2534 if (at_stmt)
2535 *at_stmt = stmt;
2537 return (!integer_zerop (niter->assumptions));
2541 /* Utility function to check if OP is defined by a stmt
2542 that computes VAL - 1. */
2544 static bool
2545 ssa_defined_by_minus_one_stmt_p (tree op, tree val)
2547 gimple *stmt;
2548 return (TREE_CODE (op) == SSA_NAME
2549 && (stmt = SSA_NAME_DEF_STMT (op))
2550 && is_gimple_assign (stmt)
2551 && (gimple_assign_rhs_code (stmt) == PLUS_EXPR)
2552 && val == gimple_assign_rhs1 (stmt)
2553 && integer_minus_onep (gimple_assign_rhs2 (stmt)));
2557 /* See if LOOP is a popcount implementation and determine NITER for the loop.
2559 We match:
2560 <bb 2>
2561 goto <bb 4>
2563 <bb 3>
2564 _1 = b_11 + -1
2565 b_6 = _1 & b_11
2567 <bb 4>
2568 b_11 = PHI <b_5(D)(2), b_6(3)>
2570 exit block
2571 if (b_11 != 0)
2572 goto <bb 3>
2573 else
2574 goto <bb 5>
2576 OR we match copy-header version:
2577 if (b_5 != 0)
2578 goto <bb 3>
2579 else
2580 goto <bb 4>
2582 <bb 3>
2583 b_11 = PHI <b_5(2), b_6(3)>
2584 _1 = b_11 + -1
2585 b_6 = _1 & b_11
2587 exit block
2588 if (b_6 != 0)
2589 goto <bb 3>
2590 else
2591 goto <bb 4>
2593 If the popcount pattern matches, update NITER accordingly,
2594 i.e., set NITER to __builtin_popcount (b),
2595 and return true if we did, false otherwise. */
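/* At the source level both shapes above correspond roughly to the classic
   lowest-set-bit loop (an editorial illustration):

     while (b)
       b &= b - 1;	clears the lowest set bit of b

   whose body executes exactly __builtin_popcount (b) times.  */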
2599 static bool
2600 number_of_iterations_popcount (loop_p loop, edge exit,
2601 enum tree_code code,
2602 class tree_niter_desc *niter)
2604 bool adjust = true;
2605 tree iter;
2606 HOST_WIDE_INT max;
2608 tree fn = NULL_TREE;
2610 /* Check that the loop-terminating branch is of the form
2611 if (b != 0). */
2612 gimple *stmt = last_stmt (exit->src);
2613 if (!stmt
2614 || gimple_code (stmt) != GIMPLE_COND
2615 || code != NE_EXPR
2616 || !integer_zerop (gimple_cond_rhs (stmt))
2617 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME)
2618 return false;
2620 gimple *and_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2622 /* Depending on whether copy-header was performed, the feeding PHI stmts
2623 might be in the loop header or the loop latch; handle this. */
2624 if (gimple_code (and_stmt) == GIMPLE_PHI
2625 && gimple_bb (and_stmt) == loop->header
2626 && gimple_phi_num_args (and_stmt) == 2
2627 && (TREE_CODE (gimple_phi_arg_def (and_stmt,
2628 loop_latch_edge (loop)->dest_idx))
2629 == SSA_NAME))
2631 /* The SSA name used in the exit condition is defined by the PHI stmt
2632 b_11 = PHI <b_5(D)(2), b_6(3)>;
2633 from the PHI stmt, get the and_stmt
2634 b_6 = _1 & b_11. */
2635 tree t = gimple_phi_arg_def (and_stmt, loop_latch_edge (loop)->dest_idx);
2636 and_stmt = SSA_NAME_DEF_STMT (t);
2637 adjust = false;
2640 /* Make sure it is indeed an and stmt (b_6 = _1 & b_11). */
2641 if (!is_gimple_assign (and_stmt)
2642 || gimple_assign_rhs_code (and_stmt) != BIT_AND_EXPR)
2643 return false;
2645 tree b_11 = gimple_assign_rhs1 (and_stmt);
2646 tree _1 = gimple_assign_rhs2 (and_stmt);
2648 /* Check that _1 is defined by b_11 + -1 (_1 = b_11 + -1).
2649 Also make sure that b_11 is the same in and_stmt and the stmt defining _1.
2650 Also canonicalize if _1 and b_11 are reversed. */
2651 if (ssa_defined_by_minus_one_stmt_p (b_11, _1))
2652 std::swap (b_11, _1);
2653 else if (ssa_defined_by_minus_one_stmt_p (_1, b_11))
2655 else
2656 return false;
2657 /* Check the recurrence:
2658 ... = PHI <b_5(2), b_6(3)>. */
2659 gimple *phi = SSA_NAME_DEF_STMT (b_11);
2660 if (gimple_code (phi) != GIMPLE_PHI
2661 || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2662 || (gimple_assign_lhs (and_stmt)
2663 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2664 return false;
2666 /* We found a match. Get the corresponding popcount builtin. */
2667 tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2668 if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION (integer_type_node))
2669 fn = builtin_decl_implicit (BUILT_IN_POPCOUNT);
2670 else if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION
2671 (long_integer_type_node))
2672 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTL);
2673 else if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION
2674 (long_long_integer_type_node))
2675 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTLL);
2677 /* ??? Support promoting char/short to int. */
2678 if (!fn)
2679 return false;
2681 /* Update the NITER params accordingly. */
2682 tree utype = unsigned_type_for (TREE_TYPE (src));
2683 src = fold_convert (utype, src);
2684 tree call = fold_convert (utype, build_call_expr (fn, 1, src));
2685 if (adjust)
2686 iter = fold_build2 (MINUS_EXPR, utype,
2687 call,
2688 build_int_cst (utype, 1));
2689 else
2690 iter = call;
2692 if (TREE_CODE (call) == INTEGER_CST)
2693 max = tree_to_uhwi (call);
2694 else
2695 max = TYPE_PRECISION (TREE_TYPE (src));
2696 if (adjust)
2697 max = max - 1;
2699 niter->niter = iter;
2700 niter->assumptions = boolean_true_node;
2702 if (adjust)
2704 tree may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
2705 build_zero_cst
2706 (TREE_TYPE (src)));
2707 niter->may_be_zero =
2708 simplify_using_initial_conditions (loop, may_be_zero);
2710 else
2711 niter->may_be_zero = boolean_false_node;
2713 niter->max = max;
2714 niter->bound = NULL_TREE;
2715 niter->cmp = ERROR_MARK;
2716 return true;
2720 /* Like number_of_iterations_exit_assumptions, but return TRUE only if
2721 the niter information holds unconditionally. */
2723 bool
2724 number_of_iterations_exit (class loop *loop, edge exit,
2725 class tree_niter_desc *niter,
2726 bool warn, bool every_iteration,
2727 basic_block *body)
2729 gcond *stmt;
2730 if (!number_of_iterations_exit_assumptions (loop, exit, niter,
2731 &stmt, every_iteration, body))
2732 return false;
2734 if (integer_nonzerop (niter->assumptions))
2735 return true;
2737 if (warn && dump_enabled_p ())
2738 dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
2739 "missed loop optimization: niters analysis ends up "
2740 "with assumptions.\n");
2742 return false;
2745 /* Try to determine the number of iterations of LOOP. If we succeed,
2746 an expression giving the number of iterations is returned and *EXIT is
2747 set to the edge from which the information is obtained. Otherwise
2748 chrec_dont_know is returned. */
2750 tree
2751 find_loop_niter (class loop *loop, edge *exit)
2753 unsigned i;
2754 vec<edge> exits = get_loop_exit_edges (loop);
2755 edge ex;
2756 tree niter = NULL_TREE, aniter;
2757 class tree_niter_desc desc;
2759 *exit = NULL;
2760 FOR_EACH_VEC_ELT (exits, i, ex)
2762 if (!number_of_iterations_exit (loop, ex, &desc, false))
2763 continue;
2765 if (integer_nonzerop (desc.may_be_zero))
2767 /* We exit in the first iteration through this exit.
2768 We won't find anything better. */
2769 niter = build_int_cst (unsigned_type_node, 0);
2770 *exit = ex;
2771 break;
2774 if (!integer_zerop (desc.may_be_zero))
2775 continue;
2777 aniter = desc.niter;
2779 if (!niter)
2781 /* Nothing recorded yet. */
2782 niter = aniter;
2783 *exit = ex;
2784 continue;
2787 /* Prefer constants, the lower the better. */
2788 if (TREE_CODE (aniter) != INTEGER_CST)
2789 continue;
2791 if (TREE_CODE (niter) != INTEGER_CST)
2793 niter = aniter;
2794 *exit = ex;
2795 continue;
2798 if (tree_int_cst_lt (aniter, niter))
2800 niter = aniter;
2801 *exit = ex;
2802 continue;
2805 exits.release ();
2807 return niter ? niter : chrec_dont_know;
2810 /* Return true if the loop is known to have a bounded number of iterations. */
2812 bool
2813 finite_loop_p (class loop *loop)
2815 widest_int nit;
2816 int flags;
2818 flags = flags_from_decl_or_type (current_function_decl);
2819 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2821 if (dump_file && (dump_flags & TDF_DETAILS))
2822 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2823 loop->num);
2824 return true;
2827 if (loop->any_upper_bound
2828 || max_loop_iterations (loop, &nit))
2830 if (dump_file && (dump_flags & TDF_DETAILS))
2831 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2832 loop->num);
2833 return true;
2836 if (flag_finite_loops)
2838 unsigned i;
2839 vec<edge> exits = get_loop_exit_edges (loop);
2840 edge ex;
2842 /* If the loop has a normal exit, we can assume it will terminate. */
2843 FOR_EACH_VEC_ELT (exits, i, ex)
2844 if (!(ex->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_FAKE)))
2846 exits.release ();
2847 if (dump_file)
2848 fprintf (dump_file, "Assume loop %i to be finite: it has an exit "
2849 "and -ffinite-loops is on.\n", loop->num);
2850 return true;
2853 exits.release ();
2856 return false;
2861 /* Analysis of the number of iterations of a loop by brute-force evaluation. */
2865 /* Bound on the number of iterations we try to evaluate. */
2867 #define MAX_ITERATIONS_TO_TRACK \
2868 ((unsigned) param_max_iterations_to_track)
2870 /* Returns the loop phi node of LOOP from whose result the ssa name X is
2871 derived by a chain of operations in which all but exactly one of the
2872 operands are constants. */
2874 static gphi *
2875 chain_of_csts_start (class loop *loop, tree x)
2877 gimple *stmt = SSA_NAME_DEF_STMT (x);
2878 tree use;
2879 basic_block bb = gimple_bb (stmt);
2880 enum tree_code code;
2882 if (!bb
2883 || !flow_bb_inside_loop_p (loop, bb))
2884 return NULL;
2886 if (gimple_code (stmt) == GIMPLE_PHI)
2888 if (bb == loop->header)
2889 return as_a <gphi *> (stmt);
2891 return NULL;
2894 if (gimple_code (stmt) != GIMPLE_ASSIGN
2895 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2896 return NULL;
2898 code = gimple_assign_rhs_code (stmt);
2899 if (gimple_references_memory_p (stmt)
2900 || TREE_CODE_CLASS (code) == tcc_reference
2901 || (code == ADDR_EXPR
2902 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2903 return NULL;
2905 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2906 if (use == NULL_TREE)
2907 return NULL;
2909 return chain_of_csts_start (loop, use);
2912 /* Determines whether the expression X is derived from a result of a phi node
2913 in the header of LOOP such that
2915 * the derivation of X consists only from operations with constants
2916 * the initial value of the phi node is constant
2917 * the value of the phi node in the next iteration can be derived from the
2918 value in the current iteration by a chain of operations with constants,
2919 or is also a constant
2921 If such a phi node exists, it is returned, otherwise NULL is returned. */
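/* Example (an editorial sketch): for

     i_1 = PHI <2(preheader), i_2(latch)>
     i_2 = i_1 * 3;

   the derivation consists of one operation with a single non-constant
   operand and the initial value 2 is constant, so the PHI defining i_1
   is returned.  */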
2923 static gphi *
2924 get_base_for (class loop *loop, tree x)
2926 gphi *phi;
2927 tree init, next;
2929 if (is_gimple_min_invariant (x))
2930 return NULL;
2932 phi = chain_of_csts_start (loop, x);
2933 if (!phi)
2934 return NULL;
2936 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2937 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2939 if (!is_gimple_min_invariant (init))
2940 return NULL;
2942 if (TREE_CODE (next) == SSA_NAME
2943 && chain_of_csts_start (loop, next) != phi)
2944 return NULL;
2946 return phi;
2949 /* Given an expression X, then
2951 * if X is NULL_TREE, we return the constant BASE.
2952 * if X is a constant, we return the constant X.
2953 * otherwise X is an SSA name, whose value in the considered loop is derived
2954 by a chain of operations with constants from a result of a phi node in
2955 the header of the loop. Then we return the value of X when the value of the
2956 result of this phi node is given by the constant BASE. */
2958 static tree
2959 get_val_for (tree x, tree base)
2961 gimple *stmt;
2963 gcc_checking_assert (is_gimple_min_invariant (base));
2965 if (!x)
2966 return base;
2967 else if (is_gimple_min_invariant (x))
2968 return x;
2970 stmt = SSA_NAME_DEF_STMT (x);
2971 if (gimple_code (stmt) == GIMPLE_PHI)
2972 return base;
2974 gcc_checking_assert (is_gimple_assign (stmt));
2976 /* STMT must be either an assignment of a single SSA name or an
2977 expression involving an SSA name and a constant. Try to fold that
2978 expression using the value for the SSA name. */
2979 if (gimple_assign_ssa_name_copy_p (stmt))
2980 return get_val_for (gimple_assign_rhs1 (stmt), base);
2981 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2982 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2983 return fold_build1 (gimple_assign_rhs_code (stmt),
2984 gimple_expr_type (stmt),
2985 get_val_for (gimple_assign_rhs1 (stmt), base));
2986 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2988 tree rhs1 = gimple_assign_rhs1 (stmt);
2989 tree rhs2 = gimple_assign_rhs2 (stmt);
2990 if (TREE_CODE (rhs1) == SSA_NAME)
2991 rhs1 = get_val_for (rhs1, base);
2992 else if (TREE_CODE (rhs2) == SSA_NAME)
2993 rhs2 = get_val_for (rhs2, base);
2994 else
2995 gcc_unreachable ();
2996 return fold_build2 (gimple_assign_rhs_code (stmt),
2997 gimple_expr_type (stmt), rhs1, rhs2);
2999 else
3000 gcc_unreachable ();
3004 /* Tries to count the number of iterations of LOOP till it exits by EXIT
3005 by brute force -- i.e. by determining the value of the operands of the
3006 condition at EXIT in the first few iterations of the loop (assuming that
3007 these values are constant) and determining the first one in which the
3008 condition is not satisfied. Returns the constant giving the number
3009 of the iterations of LOOP if successful, chrec_dont_know otherwise. */
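/* Example (an editorial sketch): the exit test of

     for (i = 37; i != 1; i /= 2)
       ...

   does not form an affine induction variable, but evaluating i through
   the first iterations (37, 18, 9, 4, 2) shows the condition first fails
   on the sixth test, so 5 is returned.  */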
3011 tree
3012 loop_niter_by_eval (class loop *loop, edge exit)
3014 tree acnd;
3015 tree op[2], val[2], next[2], aval[2];
3016 gphi *phi;
3017 gimple *cond;
3018 unsigned i, j;
3019 enum tree_code cmp;
3021 cond = last_stmt (exit->src);
3022 if (!cond || gimple_code (cond) != GIMPLE_COND)
3023 return chrec_dont_know;
3025 cmp = gimple_cond_code (cond);
3026 if (exit->flags & EDGE_TRUE_VALUE)
3027 cmp = invert_tree_comparison (cmp, false);
3029 switch (cmp)
3031 case EQ_EXPR:
3032 case NE_EXPR:
3033 case GT_EXPR:
3034 case GE_EXPR:
3035 case LT_EXPR:
3036 case LE_EXPR:
3037 op[0] = gimple_cond_lhs (cond);
3038 op[1] = gimple_cond_rhs (cond);
3039 break;
3041 default:
3042 return chrec_dont_know;
3045 for (j = 0; j < 2; j++)
3047 if (is_gimple_min_invariant (op[j]))
3049 val[j] = op[j];
3050 next[j] = NULL_TREE;
3051 op[j] = NULL_TREE;
3053 else
3055 phi = get_base_for (loop, op[j]);
3056 if (!phi)
3057 return chrec_dont_know;
3058 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3059 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3063 /* Don't issue signed overflow warnings. */
3064 fold_defer_overflow_warnings ();
3066 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
3068 for (j = 0; j < 2; j++)
3069 aval[j] = get_val_for (op[j], val[j]);
3071 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
3072 if (acnd && integer_zerop (acnd))
3074 fold_undefer_and_ignore_overflow_warnings ();
3075 if (dump_file && (dump_flags & TDF_DETAILS))
3076 fprintf (dump_file,
3077 "Proved that loop %d iterates %d times using brute force.\n",
3078 loop->num, i);
3079 return build_int_cst (unsigned_type_node, i);
3082 for (j = 0; j < 2; j++)
3084 aval[j] = val[j];
3085 val[j] = get_val_for (next[j], val[j]);
3086 if (!is_gimple_min_invariant (val[j]))
3088 fold_undefer_and_ignore_overflow_warnings ();
3089 return chrec_dont_know;
3093 /* If the next iteration would use the same base values
3094 as the current one, there is no point looping further,
3095 all following iterations will be the same as this one. */
3096 if (val[0] == aval[0] && val[1] == aval[1])
3097 break;
3100 fold_undefer_and_ignore_overflow_warnings ();
3102 return chrec_dont_know;
3105 /* Finds the exit of LOOP through which the loop exits after a constant
3106 number of iterations and stores the exit edge to *EXIT. The constant
3107 giving the number of iterations of LOOP is returned. The number of
3108 iterations is determined using loop_niter_by_eval (i.e. by brute force
3109 evaluation). If we are unable to find an exit for which loop_niter_by_eval
3110 determines the number of iterations, chrec_dont_know is returned. */
3112 tree
3113 find_loop_niter_by_eval (class loop *loop, edge *exit)
3115 unsigned i;
3116 vec<edge> exits = get_loop_exit_edges (loop);
3117 edge ex;
3118 tree niter = NULL_TREE, aniter;
3120 *exit = NULL;
3122 /* Loops with multiple exits are expensive to handle and less important. */
3123 if (!flag_expensive_optimizations
3124 && exits.length () > 1)
3126 exits.release ();
3127 return chrec_dont_know;
3130 FOR_EACH_VEC_ELT (exits, i, ex)
3132 if (!just_once_each_iteration_p (loop, ex->src))
3133 continue;
3135 aniter = loop_niter_by_eval (loop, ex);
3136 if (chrec_contains_undetermined (aniter))
3137 continue;
3139 if (niter
3140 && !tree_int_cst_lt (aniter, niter))
3141 continue;
3143 niter = aniter;
3144 *exit = ex;
3146 exits.release ();
3148 return niter ? niter : chrec_dont_know;
3153 /* Analysis of upper bounds on the number of iterations of a loop. */
3157 static widest_int derive_constant_upper_bound_ops (tree, tree,
3158 enum tree_code, tree);
3160 /* Returns a constant upper bound on the value of the right-hand side of
3161 an assignment statement STMT. */
3163 static widest_int
3164 derive_constant_upper_bound_assign (gimple *stmt)
3166 enum tree_code code = gimple_assign_rhs_code (stmt);
3167 tree op0 = gimple_assign_rhs1 (stmt);
3168 tree op1 = gimple_assign_rhs2 (stmt);
3170 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
3171 op0, code, op1);
3174 /* Returns a constant upper bound on the value of expression VAL. VAL
3175 is considered to be unsigned. If its type is signed, its value must
3176 be nonnegative. */
3178 static widest_int
3179 derive_constant_upper_bound (tree val)
3181 enum tree_code code;
3182 tree op0, op1, op2;
3184 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
3185 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
3188 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
3189 whose type is TYPE. The expression is considered to be unsigned. If
3190 its type is signed, its value must be nonnegative. */
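/* Examples (added by the editor): for OP0 & 0xff the BIT_AND_EXPR case
   below returns 255 whatever OP0 is; for OP0 - 2 with OP0 known to be
   bounded by 100, the result should be 98, unless the type is unsigned
   and OP0 >= 2 cannot be shown, in which case the maximum of the type
   is returned.  */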
3192 static widest_int
3193 derive_constant_upper_bound_ops (tree type, tree op0,
3194 enum tree_code code, tree op1)
3196 tree subtype, maxt;
3197 widest_int bnd, max, cst;
3198 gimple *stmt;
3200 if (INTEGRAL_TYPE_P (type))
3201 maxt = TYPE_MAX_VALUE (type);
3202 else
3203 maxt = upper_bound_in_type (type, type);
3205 max = wi::to_widest (maxt);
3207 switch (code)
3209 case INTEGER_CST:
3210 return wi::to_widest (op0);
3212 CASE_CONVERT:
3213 subtype = TREE_TYPE (op0);
3214 if (!TYPE_UNSIGNED (subtype)
3215 /* If TYPE is also signed, the fact that VAL is nonnegative implies
3216 that OP0 is nonnegative. */
3217 && TYPE_UNSIGNED (type)
3218 && !tree_expr_nonnegative_p (op0))
3220 /* If we cannot prove that the casted expression is nonnegative,
3221 we cannot establish more useful upper bound than the precision
3222 of the type gives us. */
3223 return max;
3226 /* We now know that op0 is a nonnegative value. Try deriving an upper
3227 bound for it. */
3228 bnd = derive_constant_upper_bound (op0);
3230 /* If the bound does not fit in TYPE, the max. value of TYPE could be
3231 attained. */
3232 if (wi::ltu_p (max, bnd))
3233 return max;
3235 return bnd;
3237 case PLUS_EXPR:
3238 case POINTER_PLUS_EXPR:
3239 case MINUS_EXPR:
3240 if (TREE_CODE (op1) != INTEGER_CST
3241 || !tree_expr_nonnegative_p (op0))
3242 return max;
3244 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
3245 choose the most logical way to treat this constant regardless
3246 of the signedness of the type. */
3247 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
3248 if (code != MINUS_EXPR)
3249 cst = -cst;
3251 bnd = derive_constant_upper_bound (op0);
3253 if (wi::neg_p (cst))
3255 cst = -cst;
3256 /* Avoid CST == 0x80000... */
3257 if (wi::neg_p (cst))
3258 return max;
3260 /* OP0 + CST. We need to check that
3261 BND <= MAX (type) - CST. */
3263 widest_int mmax = max - cst;
3264 if (wi::leu_p (bnd, mmax))
3265 return max;
3267 return bnd + cst;
3269 else
3271 /* OP0 - CST, where CST >= 0.
3273 If TYPE is signed, we have already verified that OP0 >= 0, and we
3274 know that the result is nonnegative. This implies that
3275 VAL <= BND - CST.
3277 If TYPE is unsigned, we must additionally know that OP0 >= CST,
3278 otherwise the operation underflows. */
3281 /* This should only happen if the type is unsigned; however, for
3282 buggy programs that use overflowing signed arithmetic even with
3283 -fno-wrapv, this condition may also be true for signed values. */
3284 if (wi::ltu_p (bnd, cst))
3285 return max;
3287 if (TYPE_UNSIGNED (type))
3289 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
3290 wide_int_to_tree (type, cst));
3291 if (!tem || integer_nonzerop (tem))
3292 return max;
3295 bnd -= cst;
3298 return bnd;
3300 case FLOOR_DIV_EXPR:
3301 case EXACT_DIV_EXPR:
3302 if (TREE_CODE (op1) != INTEGER_CST
3303 || tree_int_cst_sign_bit (op1))
3304 return max;
3306 bnd = derive_constant_upper_bound (op0);
3307 return wi::udiv_floor (bnd, wi::to_widest (op1));
3309 case BIT_AND_EXPR:
3310 if (TREE_CODE (op1) != INTEGER_CST
3311 || tree_int_cst_sign_bit (op1))
3312 return max;
3313 return wi::to_widest (op1);
3315 case SSA_NAME:
3316 stmt = SSA_NAME_DEF_STMT (op0);
3317 if (gimple_code (stmt) != GIMPLE_ASSIGN
3318 || gimple_assign_lhs (stmt) != op0)
3319 return max;
3320 return derive_constant_upper_bound_assign (stmt);
3322 default:
3323 return max;
3327 /* Emit a -Waggressive-loop-optimizations warning if needed. */
3329 static void
3330 do_warn_aggressive_loop_optimizations (class loop *loop,
3331 widest_int i_bound, gimple *stmt)
3333 /* Don't warn if the loop doesn't have a known constant bound. */
3334 if (!loop->nb_iterations
3335 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
3336 || !warn_aggressive_loop_optimizations
3337 /* To avoid warning multiple times for the same loop,
3338 only start warning when we preserve loops. */
3339 || (cfun->curr_properties & PROP_loops) == 0
3340 /* Only warn once per loop. */
3341 || loop->warned_aggressive_loop_optimizations
3342 /* Only warn if undefined behavior gives us lower estimate than the
3343 known constant bound. */
3344 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
3345 /* And undefined behavior happens unconditionally. */
3346 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
3347 return;
3349 edge e = single_exit (loop);
3350 if (e == NULL)
3351 return;
3353 gimple *estmt = last_stmt (e->src);
3354 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
3355 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
3356 ? UNSIGNED : SIGNED);
3357 auto_diagnostic_group d;
3358 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
3359 "iteration %s invokes undefined behavior", buf))
3360 inform (gimple_location (estmt), "within this loop");
3361 loop->warned_aggressive_loop_optimizations = true;
3364 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
3365 is true if the loop is exited immediately after STMT, and this exit
3366 is taken at the latest when STMT is executed BOUND + 1 times.
3367 REALISTIC is true if BOUND is expected to be close to the real number
3368 of iterations. UPPER is true if we are sure the loop iterates at most
3369 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
3371 static void
3372 record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
3373 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
3375 widest_int delta;
3377 if (dump_file && (dump_flags & TDF_DETAILS))
3379 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
3380 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
3381 fprintf (dump_file, " is %sexecuted at most ",
3382 upper ? "" : "probably ");
3383 print_generic_expr (dump_file, bound, TDF_SLIM);
3384 fprintf (dump_file, " (bounded by ");
3385 print_decu (i_bound, dump_file);
3386 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
3389 /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
3390 real number of iterations. */
3391 if (TREE_CODE (bound) != INTEGER_CST)
3392 realistic = false;
3393 else
3394 gcc_checking_assert (i_bound == wi::to_widest (bound));
3396 /* If we have a guaranteed upper bound, record it in the appropriate
3397 list, unless this is an !is_exit bound (i.e. undefined behavior in
3398 at_stmt) in a loop with known constant number of iterations. */
3399 if (upper
3400 && (is_exit
3401 || loop->nb_iterations == NULL_TREE
3402 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
3404 class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
3406 elt->bound = i_bound;
3407 elt->stmt = at_stmt;
3408 elt->is_exit = is_exit;
3409 elt->next = loop->bounds;
3410 loop->bounds = elt;
3413 /* If statement is executed on every path to the loop latch, we can directly
3414 infer the upper bound on the # of iterations of the loop. */
3415 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
3416 upper = false;
3418 /* Update the number of iteration estimates according to the bound.
3419 If at_stmt is an exit then the loop latch is executed at most BOUND times,
3420 otherwise it can be executed BOUND + 1 times. We will lower the estimate
3421 later if such a statement must be executed on the last iteration. */
3422 if (is_exit)
3423 delta = 0;
3424 else
3425 delta = 1;
3426 widest_int new_i_bound = i_bound + delta;
3428 /* If an overflow occurred, ignore the result. */
3429 if (wi::ltu_p (new_i_bound, delta))
3430 return;
3432 if (upper && !is_exit)
3433 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3434 record_niter_bound (loop, new_i_bound, realistic, upper);
3437 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3438 and doesn't overflow. */
3440 static void
3441 record_control_iv (class loop *loop, class tree_niter_desc *niter)
3443 struct control_iv *iv;
3445 if (!niter->control.base || !niter->control.step)
3446 return;
3448 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3449 return;
3451 iv = ggc_alloc<control_iv> ();
3452 iv->base = niter->control.base;
3453 iv->step = niter->control.step;
3454 iv->next = loop->control_ivs;
3455 loop->control_ivs = iv;
3457 return;
3460 /* This function returns TRUE if the conditions below are satisfied:
3461 1) VAR is an SSA variable.
3462 2) VAR is an IV:{base, step} in its defining loop.
3463 3) IV doesn't overflow.
3464 4) Both base and step are integer constants.
3465 5) Base is the MIN/MAX value depending on IS_MIN.
3466 The value of base is stored to INIT correspondingly. */
3468 static bool
3469 get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
3471 if (TREE_CODE (var) != SSA_NAME)
3472 return false;
3474 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
3475 class loop *loop = loop_containing_stmt (def_stmt);
3477 if (loop == NULL)
3478 return false;
3480 affine_iv iv;
3481 if (!simple_iv (loop, loop, var, &iv, false))
3482 return false;
3484 if (!iv.no_overflow)
3485 return false;
3487 if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
3488 return false;
3490 if (is_min == tree_int_cst_sign_bit (iv.step))
3491 return false;
3493 *init = wi::to_wide (iv.base);
3494 return true;
3497 /* Record the estimate on number of iterations of LOOP based on the fact that
3498 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3499 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3500 estimated number of iterations is expected to be close to the real one.
3501 UPPER is true if we are sure the induction variable does not wrap. */
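/* Example (added by the editor): for BASE = 0, STEP = 4 and values known
   to lie in <0, 100>, the code below computes DELTA = 100 - 0 and records
   the bound 100 / 4 = 25, i.e. STMT is executed at most 25 + 1 times.  */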
3503 static void
3504 record_nonwrapping_iv (class loop *loop, tree base, tree step, gimple *stmt,
3505 tree low, tree high, bool realistic, bool upper)
3507 tree niter_bound, extreme, delta;
3508 tree type = TREE_TYPE (base), unsigned_type;
3509 tree orig_base = base;
3511 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3512 return;
3514 if (dump_file && (dump_flags & TDF_DETAILS))
3516 fprintf (dump_file, "Induction variable (");
3517 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3518 fprintf (dump_file, ") ");
3519 print_generic_expr (dump_file, base, TDF_SLIM);
3520 fprintf (dump_file, " + ");
3521 print_generic_expr (dump_file, step, TDF_SLIM);
3522 fprintf (dump_file, " * iteration does not wrap in statement ");
3523 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3524 fprintf (dump_file, " in loop %d.\n", loop->num);
3527 unsigned_type = unsigned_type_for (type);
3528 base = fold_convert (unsigned_type, base);
3529 step = fold_convert (unsigned_type, step);
3531 if (tree_int_cst_sign_bit (step))
3533 wide_int min, max;
3534 extreme = fold_convert (unsigned_type, low);
3535 if (TREE_CODE (orig_base) == SSA_NAME
3536 && TREE_CODE (high) == INTEGER_CST
3537 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3538 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3539 || get_cst_init_from_scev (orig_base, &max, false))
3540 && wi::gts_p (wi::to_wide (high), max))
3541 base = wide_int_to_tree (unsigned_type, max);
3542 else if (TREE_CODE (base) != INTEGER_CST
3543 && dominated_by_p (CDI_DOMINATORS,
3544 loop->latch, gimple_bb (stmt)))
3545 base = fold_convert (unsigned_type, high);
3546 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3547 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3549 else
3551 wide_int min, max;
3552 extreme = fold_convert (unsigned_type, high);
3553 if (TREE_CODE (orig_base) == SSA_NAME
3554 && TREE_CODE (low) == INTEGER_CST
3555 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3556 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3557 || get_cst_init_from_scev (orig_base, &min, true))
3558 && wi::gts_p (min, wi::to_wide (low)))
3559 base = wide_int_to_tree (unsigned_type, min);
3560 else if (TREE_CODE (base) != INTEGER_CST
3561 && dominated_by_p (CDI_DOMINATORS,
3562 loop->latch, gimple_bb (stmt)))
3563 base = fold_convert (unsigned_type, low);
3564 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3567 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3568 would get out of the range. */
3569 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3570 widest_int max = derive_constant_upper_bound (niter_bound);
3571 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3574 /* Determine information about the number of iterations of LOOP from the
3575 index IDX of a data reference accessed in STMT. Callback for
3576 for_each_index. */
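/* Example (an editorial sketch): in

     int a[128];
     for (i = 0; i != n; i++)
       a[i] = 0;

   the index of a[i] has initial value 0 and step 1 within the array
   bounds <0, 127>, so the non-wrapping-iv machinery below should bound
   the loop by 128 iterations (unless the array might really extend past
   its declared size at the end of a structure).  */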
3579 struct ilb_data
3581 class loop *loop;
3582 gimple *stmt;
3585 static bool
3586 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3588 struct ilb_data *data = (struct ilb_data *) dta;
3589 tree ev, init, step;
3590 tree low, high, type, next;
3591 bool sign, upper = true, at_end = false;
3592 class loop *loop = data->loop;
3594 if (TREE_CODE (base) != ARRAY_REF)
3595 return true;
3597 /* For arrays at the end of the structure, we are not guaranteed that they
3598 do not really extend over their declared size. However, for arrays of
3599 size greater than one, this is unlikely to be intended. */
3600 if (array_at_struct_end_p (base))
3602 at_end = true;
3603 upper = false;
3606 class loop *dloop = loop_containing_stmt (data->stmt);
3607 if (!dloop)
3608 return true;
3610 ev = analyze_scalar_evolution (dloop, *idx);
3611 ev = instantiate_parameters (loop, ev);
3612 init = initial_condition (ev);
3613 step = evolution_part_in_loop_num (ev, loop->num);
3615 if (!init
3616 || !step
3617 || TREE_CODE (step) != INTEGER_CST
3618 || integer_zerop (step)
3619 || tree_contains_chrecs (init, NULL)
3620 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3621 return true;
3623 low = array_ref_low_bound (base);
3624 high = array_ref_up_bound (base);
3626 /* The case of nonconstant bounds could be handled, but it would be
3627 complicated. */
3628 if (TREE_CODE (low) != INTEGER_CST
3629 || !high
3630 || TREE_CODE (high) != INTEGER_CST)
3631 return true;
3632 sign = tree_int_cst_sign_bit (step);
3633 type = TREE_TYPE (step);
3635 /* An array of length 1 at the end of a structure most likely extends
3636 beyond its bounds. */
3637 if (at_end
3638 && operand_equal_p (low, high, 0))
3639 return true;
3641 /* In case the relevant bound of the array does not fit in type, or
3642 it does, but bound + step (in type) still belongs to the range of the
3643 array, the index may wrap and still stay within the range of the array
3644 (consider e.g. if the array is indexed by the full range of
3645 unsigned char).
3647 To make things simpler, we require both bounds to fit into type, although
3648 there are cases where this would not be strictly necessary. */
3649 if (!int_fits_type_p (high, type)
3650 || !int_fits_type_p (low, type))
3651 return true;
3652 low = fold_convert (type, low);
3653 high = fold_convert (type, high);
3655 if (sign)
3656 next = fold_binary (PLUS_EXPR, type, low, step);
3657 else
3658 next = fold_binary (PLUS_EXPR, type, high, step);
3660 if (tree_int_cst_compare (low, next) <= 0
3661 && tree_int_cst_compare (next, high) <= 0)
3662 return true;
3664 /* If the access is not executed on every iteration, we must ensure that
3665 overflow cannot make the access valid later. */
3666 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3667 && scev_probably_wraps_p (NULL_TREE,
3668 initial_condition_in_loop_num (ev, loop->num),
3669 step, data->stmt, loop, true))
3670 upper = false;
3672 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
3673 return true;
3676 /* Determine information about the number of iterations of LOOP from the
3677 bounds of arrays in the data reference REF accessed in STMT. */
3680 static void
3681 infer_loop_bounds_from_ref (class loop *loop, gimple *stmt, tree ref)
3683 struct ilb_data data;
3685 data.loop = loop;
3686 data.stmt = stmt;
3687 for_each_index (&ref, idx_infer_loop_bounds, &data);
3690 /* Determine information about the number of iterations of LOOP from the
3691 way arrays are used in STMT. */
3694 static void
3695 infer_loop_bounds_from_array (class loop *loop, gimple *stmt)
3697 if (is_gimple_assign (stmt))
3699 tree op0 = gimple_assign_lhs (stmt);
3700 tree op1 = gimple_assign_rhs1 (stmt);
3702 /* For each memory access, analyze its access function
3703 and record a bound on the loop iteration domain. */
3704 if (REFERENCE_CLASS_P (op0))
3705 infer_loop_bounds_from_ref (loop, stmt, op0);
3707 if (REFERENCE_CLASS_P (op1))
3708 infer_loop_bounds_from_ref (loop, stmt, op1);
3710 else if (is_gimple_call (stmt))
3712 tree arg, lhs;
3713 unsigned i, n = gimple_call_num_args (stmt);
3715 lhs = gimple_call_lhs (stmt);
3716 if (lhs && REFERENCE_CLASS_P (lhs))
3717 infer_loop_bounds_from_ref (loop, stmt, lhs);
3719 for (i = 0; i < n; i++)
3721 arg = gimple_call_arg (stmt, i);
3722 if (REFERENCE_CLASS_P (arg))
3723 infer_loop_bounds_from_ref (loop, stmt, arg);
3728 /* Determine information about the number of iterations of LOOP from the
3729 fact that pointer arithmetic in STMT does not overflow. */
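/* Example (an editorial sketch): for

     q_1 = p_2 + i_3;

   in a loop where p_2 is invariant, the fact that the POINTER_PLUS_EXPR
   may neither wrap nor produce NULL (when -fdelete-null-pointer-checks
   is in effect) lets us record a non-wrapping iv bounded by the extent
   of the address space.  */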
3731 static void
3732 infer_loop_bounds_from_pointer_arith (class loop *loop, gimple *stmt)
3734 tree def, base, step, scev, type, low, high;
3735 tree var, ptr;
3737 if (!is_gimple_assign (stmt)
3738 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3739 return;
3741 def = gimple_assign_lhs (stmt);
3742 if (TREE_CODE (def) != SSA_NAME)
3743 return;
3745 type = TREE_TYPE (def);
3746 if (!nowrap_type_p (type))
3747 return;
3749 ptr = gimple_assign_rhs1 (stmt);
3750 if (!expr_invariant_in_loop_p (loop, ptr))
3751 return;
3753 var = gimple_assign_rhs2 (stmt);
3754 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3755 return;
3757 class loop *uloop = loop_containing_stmt (stmt);
3758 scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
3759 if (chrec_contains_undetermined (scev))
3760 return;
3762 base = initial_condition_in_loop_num (scev, loop->num);
3763 step = evolution_part_in_loop_num (scev, loop->num);
3765 if (!base || !step
3766 || TREE_CODE (step) != INTEGER_CST
3767 || tree_contains_chrecs (base, NULL)
3768 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3769 return;
3771 low = lower_bound_in_type (type, type);
3772 high = upper_bound_in_type (type, type);
3774 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3775 produce a NULL pointer. The contrary would mean NULL points to an object,
3776 while NULL is supposed to compare unequal with the address of all objects.
3777 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3778 NULL pointer since that would mean wrapping, which we assume here not to
3779 happen. So, we can exclude NULL from the valid range of pointer
3780 arithmetic. */
3781 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3782 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3784 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3787 /* Determine information about the number of iterations of LOOP from the
3788 fact that signed arithmetic in STMT does not overflow. */
3790 static void
3791 infer_loop_bounds_from_signedness (class loop *loop, gimple *stmt)
3793 tree def, base, step, scev, type, low, high;
3795 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3796 return;
3798 def = gimple_assign_lhs (stmt);
3800 if (TREE_CODE (def) != SSA_NAME)
3801 return;
3803 type = TREE_TYPE (def);
3804 if (!INTEGRAL_TYPE_P (type)
3805 || !TYPE_OVERFLOW_UNDEFINED (type))
3806 return;
3808 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3809 if (chrec_contains_undetermined (scev))
3810 return;
3812 base = initial_condition_in_loop_num (scev, loop->num);
3813 step = evolution_part_in_loop_num (scev, loop->num);
3815 if (!base || !step
3816 || TREE_CODE (step) != INTEGER_CST
3817 || tree_contains_chrecs (base, NULL)
3818 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3819 return;
3821 low = lower_bound_in_type (type, type);
3822 high = upper_bound_in_type (type, type);
3823 wide_int minv, maxv;
3824 if (get_range_info (def, &minv, &maxv) == VR_RANGE)
3826 low = wide_int_to_tree (type, minv);
3827 high = wide_int_to_tree (type, maxv);
3830 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3833 /* The following analyzers extract information on the bounds
3834 of LOOP from the following undefined behaviors:
3836 - data references should not access elements beyond the statically
3837 allocated size,
3839 - signed variables should not overflow when flag_wrapv is not set. */
3842 static void
3843 infer_loop_bounds_from_undefined (class loop *loop, basic_block *bbs)
3845 unsigned i;
3846 gimple_stmt_iterator bsi;
3847 basic_block bb;
3848 bool reliable;
3850 for (i = 0; i < loop->num_nodes; i++)
3852 bb = bbs[i];
3854 /* If BB is not executed in each iteration of the loop, we cannot
3855 use the operations in it to infer a reliable upper bound on the
3856 number of iterations of the loop. However, we can use it as a guess.
3857 Reliable guesses come only from array bounds. */
3858 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3860 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3862 gimple *stmt = gsi_stmt (bsi);
3864 infer_loop_bounds_from_array (loop, stmt);
3866 if (reliable)
3868 infer_loop_bounds_from_signedness (loop, stmt);
3869 infer_loop_bounds_from_pointer_arith (loop, stmt);
3876 /* Compare widest_ints, callback for qsort. */
3878 static int
3879 wide_int_cmp (const void *p1, const void *p2)
3881 const widest_int *d1 = (const widest_int *) p1;
3882 const widest_int *d2 = (const widest_int *) p2;
3883 return wi::cmpu (*d1, *d2);
3886 /* Return the index of BOUND in the BOUNDS array, which is sorted in
3887 increasing order. Lookup is by binary search. */
3889 static int
3890 bound_index (vec<widest_int> bounds, const widest_int &bound)
3892 unsigned int end = bounds.length ();
3893 unsigned int begin = 0;
3895 /* Find a matching index by means of a binary search. */
3896 while (begin != end)
3898 unsigned int middle = (begin + end) / 2;
3899 widest_int index = bounds[middle];
3901 if (index == bound)
3902 return middle;
3903 else if (wi::ltu_p (index, bound))
3904 begin = middle + 1;
3905 else
3906 end = middle;
3908 gcc_unreachable ();
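/* For example, with BOUNDS = {2, 5, 9}, bound_index (bounds, 5) returns 1.
   BOUND is assumed to be present in BOUNDS; otherwise the search above
   falls through to gcc_unreachable.  */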
3911 /* We recorded loop bounds only for statements dominating the loop latch (and
3912 thus executed in each loop iteration). If there are any bounds on statements
3913 not dominating the loop latch, we can improve the estimate by walking the
3914 loop body and seeing whether every path from the loop header to the loop
3915 latch contains some bounded statement. */
3917 static void
3918 discover_iteration_bound_by_body_walk (class loop *loop)
3920 class nb_iter_bound *elt;
3921 auto_vec<widest_int> bounds;
3922 vec<vec<basic_block> > queues = vNULL;
3923 vec<basic_block> queue = vNULL;
3924 ptrdiff_t queue_index;
3925 ptrdiff_t latch_index = 0;
3927 /* Discover what bounds may interest us. */
3928 for (elt = loop->bounds; elt; elt = elt->next)
3930 widest_int bound = elt->bound;
3932 /* An exit terminates the loop at a given iteration, while a non-exit
3933 produces an undefined effect on the next iteration. */
3934 if (!elt->is_exit)
3936 bound += 1;
3937 /* If an overflow occurred, ignore the result. */
3938 if (bound == 0)
3939 continue;
3942 if (!loop->any_upper_bound
3943 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3944 bounds.safe_push (bound);
3947 /* Exit early if there is nothing to do. */
3948 if (!bounds.exists ())
3949 return;
3951 if (dump_file && (dump_flags & TDF_DETAILS))
3952 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3954 /* Sort the bounds in increasing order, as required by bound_index. */
3955 bounds.qsort (wide_int_cmp);
3957 /* For every basic block record the lowest bound that is guaranteed to
3958 terminate the loop. */
3960 hash_map<basic_block, ptrdiff_t> bb_bounds;
3961 for (elt = loop->bounds; elt; elt = elt->next)
3963 widest_int bound = elt->bound;
3964 if (!elt->is_exit)
3966 bound += 1;
3967 /* If an overflow occurred, ignore the result. */
3968 if (bound == 0)
3969 continue;
3972 if (!loop->any_upper_bound
3973 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3975 ptrdiff_t index = bound_index (bounds, bound);
3976 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3977 if (!entry)
3978 bb_bounds.put (gimple_bb (elt->stmt), index);
3979 else if ((ptrdiff_t)*entry > index)
3980 *entry = index;
3984 hash_map<basic_block, ptrdiff_t> block_priority;
3986 /* Perform shortest path discovery loop->header ... loop->latch.
3988 The "distance" is given by the smallest loop bound of a basic block
3989 present in the path, and we look for the path with the largest such
3990 smallest bound on it.
3992 To avoid the need for a Fibonacci heap on widest_ints we simply compress
3993 the bounds into indexes into the BOUNDS array and then represent the
3994 queue as an array of queues, one for every index.
3995 An index of BOUNDS.length () means that the execution of the given BB
3996 has no bound determined.
3998 BLOCK_PRIORITY is a pointer map translating a basic block into the
3999 smallest index it was inserted into the priority queue with. */
4000 latch_index = -1;
4002 /* Start walk in loop header with index set to infinite bound. */
4003 queue_index = bounds.length ();
4004 queues.safe_grow_cleared (queue_index + 1);
4005 queue.safe_push (loop->header);
4006 queues[queue_index] = queue;
4007 block_priority.put (loop->header, queue_index);
4009 for (; queue_index >= 0; queue_index--)
4011 if (latch_index < queue_index)
4013 while (queues[queue_index].length ())
4015 basic_block bb;
4016 ptrdiff_t bound_index = queue_index;
4017 edge e;
4018 edge_iterator ei;
4020 queue = queues[queue_index];
4021 bb = queue.pop ();
4023 /* OK, we later inserted the BB with lower priority, skip it. */
4024 if (*block_priority.get (bb) > queue_index)
4025 continue;
4027 /* See if we can improve the bound. */
4028 ptrdiff_t *entry = bb_bounds.get (bb);
4029 if (entry && *entry < bound_index)
4030 bound_index = *entry;
4032 /* Insert successors into the queue, watch for the latch edge
4033 and record the greatest index we saw. */
4034 FOR_EACH_EDGE (e, ei, bb->succs)
4036 bool insert = false;
4038 if (loop_exit_edge_p (loop, e))
4039 continue;
4041 if (e == loop_latch_edge (loop)
4042 && latch_index < bound_index)
4043 latch_index = bound_index;
4044 else if (!(entry = block_priority.get (e->dest)))
4046 insert = true;
4047 block_priority.put (e->dest, bound_index);
4049 else if (*entry < bound_index)
4051 insert = true;
4052 *entry = bound_index;
4055 if (insert)
4056 queues[bound_index].safe_push (e->dest);
4060 queues[queue_index].release ();
4063 gcc_assert (latch_index >= 0);
4064 if ((unsigned)latch_index < bounds.length ())
4066 if (dump_file && (dump_flags & TDF_DETAILS))
4068 fprintf (dump_file, "Found better loop bound ");
4069 print_decu (bounds[latch_index], dump_file);
4070 fprintf (dump_file, "\n");
4072 record_niter_bound (loop, bounds[latch_index], false, true);
4075 queues.release ();
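/* For illustration, consider a (hypothetical) loop such as

     for (i = 0; i < n; i++)
       if (i & 1)
         a[i] = 0;    where a has 100 elements
       else
         b[i] = 0;    where b has 100 elements

   Neither store dominates the latch, so neither bound is reliable on its
   own; but every header-to-latch path crosses one of the two stores, so
   the walk above can still derive an upper bound of roughly 100
   iterations.  */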
4078 /* See if every path across the loop goes through a statement that is known
4079 to not execute at the last iteration. In that case we can decrease the
4080 iteration count by 1. */
4082 static void
4083 maybe_lower_iteration_bound (class loop *loop)
4085 hash_set<gimple *> *not_executed_last_iteration = NULL;
4086 class nb_iter_bound *elt;
4087 bool found_exit = false;
4088 auto_vec<basic_block> queue;
4089 bitmap visited;
4091 /* Collect all statements with interesting (i.e. lower than
4092 nb_iterations_upper_bound) bound on them.
4094 TODO: Due to the way record_estimate chooses estimates to store, the bounds
4095 will always be nb_iterations_upper_bound-1. We can change this to record
4096 also statements not dominating the loop latch and update the walk below
4097 to the shortest path algorithm. */
4098 for (elt = loop->bounds; elt; elt = elt->next)
4100 if (!elt->is_exit
4101 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
4103 if (!not_executed_last_iteration)
4104 not_executed_last_iteration = new hash_set<gimple *>;
4105 not_executed_last_iteration->add (elt->stmt);
4108 if (!not_executed_last_iteration)
4109 return;
4111 /* Start DFS walk in the loop header and see if we can reach the
4112 loop latch or any of the exits (including statements with side
4113 effects that may terminate the loop otherwise) without visiting
4114 any of the statements known to have undefined effect on the last
4115 iteration. */
4116 queue.safe_push (loop->header);
4117 visited = BITMAP_ALLOC (NULL);
4118 bitmap_set_bit (visited, loop->header->index);
4119 found_exit = false;
4123 basic_block bb = queue.pop ();
4124 gimple_stmt_iterator gsi;
4125 bool stmt_found = false;
4127 /* Scan for possible exits and statements bounding the execution. */
4128 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4130 gimple *stmt = gsi_stmt (gsi);
4131 if (not_executed_last_iteration->contains (stmt))
4133 stmt_found = true;
4134 break;
4136 if (gimple_has_side_effects (stmt))
4138 found_exit = true;
4139 break;
4142 if (found_exit)
4143 break;
4145 /* If no bounding statement is found, continue the walk. */
4146 if (!stmt_found)
4148 edge e;
4149 edge_iterator ei;
4151 FOR_EACH_EDGE (e, ei, bb->succs)
4153 if (loop_exit_edge_p (loop, e)
4154 || e == loop_latch_edge (loop))
4156 found_exit = true;
4157 break;
4159 if (bitmap_set_bit (visited, e->dest->index))
4160 queue.safe_push (e->dest);
4164 while (queue.length () && !found_exit);
4166 /* If every path through the loop reaches a bounding statement before an
4167 exit, then we know the last iteration of the loop will have an undefined
4168 effect and we can decrease the number of iterations. */
4170 if (!found_exit)
4172 if (dump_file && (dump_flags & TDF_DETAILS))
4173 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
4174 "undefined statement must be executed at the last iteration.\n");
4175 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
4176 false, true);
4179 BITMAP_FREE (visited);
4180 delete not_executed_last_iteration;
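/* Sketch of the effect (illustrative): if the recorded upper bound on latch
   executions is N, but some statement on every header-to-latch path is
   known to execute at most N - 1 times, the would-be N-th iteration
   necessarily hits such a statement, whose effect is undefined there,
   before reaching any exit; hence the bound can safely drop to N - 1.  */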
4183 /* Get the expected upper bound on the number of loop iterations derived
4184 from a BUILT_IN_EXPECT_WITH_PROBABILITY call feeding the condition COND. */
4186 static tree
4187 get_upper_bound_based_on_builtin_expr_with_prob (gcond *cond)
4189 if (cond == NULL)
4190 return NULL_TREE;
4192 tree lhs = gimple_cond_lhs (cond);
4193 if (TREE_CODE (lhs) != SSA_NAME)
4194 return NULL_TREE;
4196 gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
4197 gcall *def = dyn_cast<gcall *> (stmt);
4198 if (def == NULL)
4199 return NULL_TREE;
4201 tree decl = gimple_call_fndecl (def);
4202 if (!decl
4203 || !fndecl_built_in_p (decl, BUILT_IN_EXPECT_WITH_PROBABILITY)
4204 || gimple_call_num_args (stmt) != 3)
4205 return NULL_TREE;
4207 tree c = gimple_call_arg (def, 1);
4208 tree condt = TREE_TYPE (lhs);
4209 tree res = fold_build2 (gimple_cond_code (cond),
4210 condt, c,
4211 gimple_cond_rhs (cond));
4212 if (TREE_CODE (res) != INTEGER_CST)
4213 return NULL_TREE;
4216 tree prob = gimple_call_arg (def, 2);
4217 tree t = TREE_TYPE (prob);
4218 tree one
4219 = build_real_from_int_cst (t,
4220 integer_one_node);
4221 if (integer_zerop (res))
4222 prob = fold_build2 (MINUS_EXPR, t, one, prob);
4223 tree r = fold_build2 (RDIV_EXPR, t, one, prob);
4224 if (TREE_CODE (r) != REAL_CST)
4225 return NULL_TREE;
4227 HOST_WIDE_INT probi
4228 = real_to_integer (TREE_REAL_CST_PTR (r));
4229 return build_int_cst (condt, probi);
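/* For illustration (hypothetical source): an exit condition written as

     if (__builtin_expect_with_probability (i == n, 0, 0.999))
       break;

   folds to false at the expected value, so the exit is taken with
   probability 1 - 0.999 = 0.001 per iteration, and the function returns
   1000, the reciprocal of that probability, as the expected iteration
   bound.  */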
4232 /* Records estimates on the number of iterations of LOOP. Estimates derived
4233 from undefined behavior are used only with -faggressive-loop-optimizations. */
4235 void
4236 estimate_numbers_of_iterations (class loop *loop)
4238 vec<edge> exits;
4239 tree niter, type;
4240 unsigned i;
4241 class tree_niter_desc niter_desc;
4242 edge ex;
4243 widest_int bound;
4244 edge likely_exit;
4246 /* Give up if we have already tried to compute an estimate. */
4247 if (loop->estimate_state != EST_NOT_COMPUTED)
4248 return;
4250 loop->estimate_state = EST_AVAILABLE;
4252 /* If we have a measured profile, use it to estimate the number of
4253 iterations. Normally this is recorded by branch_prob right after
4254 reading the profile. However, in case we have found a new loop, record
4255 the information here.
4257 Explicitly check for the profile status so we do not report
4258 wrong prediction hit rates for the guessed loop iteration heuristics.
4259 Do not recompute already recorded bounds - we ought to be better at
4260 updating iteration bounds than at updating the profile in general, and thus
4261 recomputing iteration bounds later in the compilation process would just
4262 introduce random roundoff errors. */
4263 if (!loop->any_estimate
4264 && loop->header->count.reliable_p ())
4266 gcov_type nit = expected_loop_iterations_unbounded (loop);
4267 bound = gcov_type_to_wide_int (nit);
4268 record_niter_bound (loop, bound, true, false);
4271 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
4272 to be constant, we avoid bounds implied by undefined behavior and instead
4273 diagnose those loops with -Waggressive-loop-optimizations. */
4274 number_of_latch_executions (loop);
4276 basic_block *body = get_loop_body (loop);
4277 exits = get_loop_exit_edges (loop, body);
4278 likely_exit = single_likely_exit (loop, exits);
4279 FOR_EACH_VEC_ELT (exits, i, ex)
4281 if (ex == likely_exit)
4283 gimple *stmt = last_stmt (ex->src);
4284 if (stmt != NULL)
4286 gcond *cond = dyn_cast<gcond *> (stmt);
4287 tree niter_bound
4288 = get_upper_bound_based_on_builtin_expr_with_prob (cond);
4289 if (niter_bound != NULL_TREE)
4291 widest_int max = derive_constant_upper_bound (niter_bound);
4292 record_estimate (loop, niter_bound, max, cond,
4293 true, true, false);
4298 if (!number_of_iterations_exit (loop, ex, &niter_desc,
4299 false, false, body))
4300 continue;
4302 niter = niter_desc.niter;
4303 type = TREE_TYPE (niter);
4304 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
4305 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
4306 build_int_cst (type, 0),
4307 niter);
4308 record_estimate (loop, niter, niter_desc.max,
4309 last_stmt (ex->src),
4310 true, ex == likely_exit, true);
4311 record_control_iv (loop, &niter_desc);
4313 exits.release ();
4315 if (flag_aggressive_loop_optimizations)
4316 infer_loop_bounds_from_undefined (loop, body);
4318 discover_iteration_bound_by_body_walk (loop);
4320 maybe_lower_iteration_bound (loop);
4322 /* If we know the exact number of iterations of this loop, try not to
4323 break code with undefined behavior by not recording a smaller
4324 maximum number of iterations. */
4325 if (loop->nb_iterations
4326 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
4328 loop->any_upper_bound = true;
4329 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
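/* Usage sketch (hypothetical caller): after the call, the recorded state
   is best queried through the accessors below, e.g.

     estimate_numbers_of_iterations (loop);
     widest_int max_nit;
     if (max_loop_iterations (loop, &max_nit))
       ... max_nit bounds the number of latch executions ...

   rather than by reading loop->nb_iterations_upper_bound directly.  */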
4333 /* Sets NIT to the estimated number of executions of the latch of the
4334 LOOP. If we have no reliable estimate, the function returns false,
4335 otherwise returns true. */
4338 bool
4339 estimated_loop_iterations (class loop *loop, widest_int *nit)
4341 /* When SCEV information is available, try to update the loop iteration
4342 estimate. Otherwise just return whatever we recorded earlier. */
4343 if (scev_initialized_p ())
4344 estimate_numbers_of_iterations (loop);
4346 return (get_estimated_loop_iterations (loop, nit));
4349 /* Similar to estimated_loop_iterations, but returns the estimate only
4350 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
4351 on the number of iterations of LOOP could not be derived, returns -1. */
4353 HOST_WIDE_INT
4354 estimated_loop_iterations_int (class loop *loop)
4356 widest_int nit;
4357 HOST_WIDE_INT hwi_nit;
4359 if (!estimated_loop_iterations (loop, &nit))
4360 return -1;
4362 if (!wi::fits_shwi_p (nit))
4363 return -1;
4364 hwi_nit = nit.to_shwi ();
4366 return hwi_nit < 0 ? -1 : hwi_nit;
4370 /* Sets NIT to an upper bound for the maximum number of executions of the
4371 latch of the LOOP. If we have no reliable estimate, the function returns
4372 false, otherwise returns true. */
4374 bool
4375 max_loop_iterations (class loop *loop, widest_int *nit)
4377 /* When SCEV information is available, try to update the loop iteration
4378 estimate. Otherwise just return whatever we recorded earlier. */
4379 if (scev_initialized_p ())
4380 estimate_numbers_of_iterations (loop);
4382 return get_max_loop_iterations (loop, nit);
4385 /* Similar to max_loop_iterations, but returns the estimate only
4386 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
4387 on the number of iterations of LOOP could not be derived, returns -1. */
4389 HOST_WIDE_INT
4390 max_loop_iterations_int (class loop *loop)
4392 widest_int nit;
4393 HOST_WIDE_INT hwi_nit;
4395 if (!max_loop_iterations (loop, &nit))
4396 return -1;
4398 if (!wi::fits_shwi_p (nit))
4399 return -1;
4400 hwi_nit = nit.to_shwi ();
4402 return hwi_nit < 0 ? -1 : hwi_nit;
4405 /* Sets NIT to a likely upper bound for the maximum number of executions of
4406 the latch of LOOP. If we have no reliable estimate, the function returns
4407 false, otherwise returns true. */
4409 bool
4410 likely_max_loop_iterations (class loop *loop, widest_int *nit)
4412 /* When SCEV information is available, try to update the loop iteration
4413 estimate. Otherwise just return whatever we recorded earlier. */
4414 if (scev_initialized_p ())
4415 estimate_numbers_of_iterations (loop);
4417 return get_likely_max_loop_iterations (loop, nit);
4420 /* Similar to likely_max_loop_iterations, but returns the estimate only
4421 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
4422 on the number of iterations of LOOP could not be derived, returns -1. */
4424 HOST_WIDE_INT
4425 likely_max_loop_iterations_int (class loop *loop)
4427 widest_int nit;
4428 HOST_WIDE_INT hwi_nit;
4430 if (!likely_max_loop_iterations (loop, &nit))
4431 return -1;
4433 if (!wi::fits_shwi_p (nit))
4434 return -1;
4435 hwi_nit = nit.to_shwi ();
4437 return hwi_nit < 0 ? -1 : hwi_nit;
4440 /* Returns an estimate for the number of executions of statements
4441 in LOOP. For statements before the loop exit, this exceeds
4442 the number of executions of the latch by one. */
4444 HOST_WIDE_INT
4445 estimated_stmt_executions_int (class loop *loop)
4447 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
4448 HOST_WIDE_INT snit;
4450 if (nit == -1)
4451 return -1;
4453 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
4455 /* If the computation overflows, return -1. */
4456 return snit < 0 ? -1 : snit;
4459 /* Sets NIT to the maximum number of executions of the latch of the
4460 LOOP, plus one. If we have no reliable estimate, the function returns
4461 false, otherwise returns true. */
4463 bool
4464 max_stmt_executions (class loop *loop, widest_int *nit)
4466 widest_int nit_minus_one;
4468 if (!max_loop_iterations (loop, nit))
4469 return false;
4471 nit_minus_one = *nit;
4473 *nit += 1;
4475 return wi::gtu_p (*nit, nit_minus_one);
4478 /* Sets NIT to the estimated maximum number of executions of the latch of the
4479 LOOP, plus one. If we have no likely estimate, the function returns
4480 false, otherwise returns true. */
4482 bool
4483 likely_max_stmt_executions (class loop *loop, widest_int *nit)
4485 widest_int nit_minus_one;
4487 if (!likely_max_loop_iterations (loop, nit))
4488 return false;
4490 nit_minus_one = *nit;
4492 *nit += 1;
4494 return wi::gtu_p (*nit, nit_minus_one);
4497 /* Sets NIT to the estimated number of executions of the latch of the
4498 LOOP, plus one. If we have no reliable estimate, the function returns
4499 false, otherwise returns true. */
4501 bool
4502 estimated_stmt_executions (class loop *loop, widest_int *nit)
4504 widest_int nit_minus_one;
4506 if (!estimated_loop_iterations (loop, nit))
4507 return false;
4509 nit_minus_one = *nit;
4511 *nit += 1;
4513 return wi::gtu_p (*nit, nit_minus_one);
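/* Usage sketch (hypothetical caller): a transform that only applies to
   short-running loops might combine the accessors above as

     HOST_WIDE_INT n = likely_max_loop_iterations_int (loop);
     if (n >= 0 && n <= 4)
       ... at most 4 latch executions are likely ...

   where -1 signals that no usable estimate was recorded.  */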
4516 /* Records estimates on numbers of iterations of loops. */
4518 void
4519 estimate_numbers_of_iterations (function *fn)
4521 class loop *loop;
4523 /* We don't want to issue signed overflow warnings while getting
4524 loop iteration estimates. */
4525 fold_defer_overflow_warnings ();
4527 FOR_EACH_LOOP_FN (fn, loop, 0)
4528 estimate_numbers_of_iterations (loop);
4530 fold_undefer_and_ignore_overflow_warnings ();
4533 /* Returns true if statement S1 dominates statement S2. */
4535 bool
4536 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
4538 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
4540 if (!bb1
4541 || s1 == s2)
4542 return true;
4544 if (bb1 == bb2)
4546 gimple_stmt_iterator bsi;
4548 if (gimple_code (s2) == GIMPLE_PHI)
4549 return false;
4551 if (gimple_code (s1) == GIMPLE_PHI)
4552 return true;
4554 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4555 if (gsi_stmt (bsi) == s1)
4556 return true;
4558 return false;
4561 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4564 /* Returns true when we can prove that the number of executions of
4565 STMT in the loop is at most NITER, according to the bound on
4566 the number of executions of the statement NITER_BOUND->stmt recorded in
4567 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
4569 ??? This code can become quite a CPU hog - we can have many bounds,
4570 and large basic blocks forcing stmt_dominates_stmt_p to be queried
4571 many times, so the whole thing is O(n^2) per scev_probably_wraps_p
4572 invocation (which can be made n times).
4574 It would make more sense (and give better answers) to remember the BB
4575 bounds computed by discover_iteration_bound_by_body_walk. */
4577 static bool
4578 n_of_executions_at_most (gimple *stmt,
4579 class nb_iter_bound *niter_bound,
4580 tree niter)
4582 widest_int bound = niter_bound->bound;
4583 tree nit_type = TREE_TYPE (niter), e;
4584 enum tree_code cmp;
4586 gcc_assert (TYPE_UNSIGNED (nit_type));
4588 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4589 the number of iterations is small. */
4590 if (!wi::fits_to_tree_p (bound, nit_type))
4591 return false;
4593 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4594 times. This means that:
4596 -- if NITER_BOUND->is_exit is true, then everything after
4597 it is executed at most NITER_BOUND->bound times.
4599 -- if NITER_BOUND->is_exit is false, then if we can prove that when STMT
4600 is executed, NITER_BOUND->stmt is executed as well in the same
4601 iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
4603 If we can determine that NITER_BOUND->stmt is always executed
4604 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4605 We conclude the latter holds if both statements belong to the same
4606 basic block, STMT comes before NITER_BOUND->stmt, and there are no
4607 statements with side effects in between. */
4609 if (niter_bound->is_exit)
4611 if (stmt == niter_bound->stmt
4612 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4613 return false;
4614 cmp = GE_EXPR;
4616 else
4618 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4620 gimple_stmt_iterator bsi;
4621 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4622 || gimple_code (stmt) == GIMPLE_PHI
4623 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4624 return false;
4626 /* By stmt_dominates_stmt_p we already know that STMT appears
4627 before NITER_BOUND->STMT. Still need to test that the loop
4628 cannot be terminated by a side effect in between. */
4629 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4630 gsi_next (&bsi))
4631 if (gimple_has_side_effects (gsi_stmt (bsi)))
4632 return false;
4633 bound += 1;
4634 if (bound == 0
4635 || !wi::fits_to_tree_p (bound, nit_type))
4636 return false;
4638 cmp = GT_EXPR;
4641 e = fold_binary (cmp, boolean_type_node,
4642 niter, wide_int_to_tree (nit_type, bound));
4643 return e && integer_nonzerop (e);
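/* For example, if NITER_BOUND records an exit test executed at most 10
   times and that test dominates STMT, the code above checks NITER >= 10;
   for a non-exit bound on a statement not dominating STMT, the bound is
   bumped by one and the check becomes NITER > bound.  */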
4646 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
4648 bool
4649 nowrap_type_p (tree type)
4651 if (ANY_INTEGRAL_TYPE_P (type)
4652 && TYPE_OVERFLOW_UNDEFINED (type))
4653 return true;
4655 if (POINTER_TYPE_P (type))
4656 return true;
4658 return false;
4661 /* Return true if we can prove that LOOP is exited before the evolution of
4662 the induction variable {BASE, STEP} overflows its type's bounds. */
4664 static bool
4665 loop_exits_before_overflow (tree base, tree step,
4666 gimple *at_stmt, class loop *loop)
4668 widest_int niter;
4669 struct control_iv *civ;
4670 class nb_iter_bound *bound;
4671 tree e, delta, step_abs, unsigned_base;
4672 tree type = TREE_TYPE (step);
4673 tree unsigned_type, valid_niter;
4675 /* Don't issue signed overflow warnings. */
4676 fold_defer_overflow_warnings ();
4678 /* Compute the number of iterations before we reach the bound of the
4679 type, and verify that the loop is exited before this occurs. */
4680 unsigned_type = unsigned_type_for (type);
4681 unsigned_base = fold_convert (unsigned_type, base);
4683 if (tree_int_cst_sign_bit (step))
4685 tree extreme = fold_convert (unsigned_type,
4686 lower_bound_in_type (type, type));
4687 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4688 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4689 fold_convert (unsigned_type, step));
4691 else
4693 tree extreme = fold_convert (unsigned_type,
4694 upper_bound_in_type (type, type));
4695 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4696 step_abs = fold_convert (unsigned_type, step);
4699 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
4701 estimate_numbers_of_iterations (loop);
4703 if (max_loop_iterations (loop, &niter)
4704 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4705 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4706 wide_int_to_tree (TREE_TYPE (valid_niter),
4707 niter))) != NULL
4708 && integer_nonzerop (e))
4710 fold_undefer_and_ignore_overflow_warnings ();
4711 return true;
4713 if (at_stmt)
4714 for (bound = loop->bounds; bound; bound = bound->next)
4716 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4718 fold_undefer_and_ignore_overflow_warnings ();
4719 return true;
4722 fold_undefer_and_ignore_overflow_warnings ();
4724 /* Try to prove the loop is exited before {base, step} overflows with the
4725 help of the analyzed loop control IVs. This is done only for IVs with a
4726 constant step because otherwise we don't have the information. */
4727 if (TREE_CODE (step) == INTEGER_CST)
4729 for (civ = loop->control_ivs; civ; civ = civ->next)
4731 enum tree_code code;
4732 tree civ_type = TREE_TYPE (civ->step);
4734 /* We have to consider the type difference because operand_equal_p
4735 ignores that for constants. */
4736 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4737 || element_precision (type) != element_precision (civ_type))
4738 continue;
4740 /* Only consider control IVs with the same step. */
4741 if (!operand_equal_p (step, civ->step, 0))
4742 continue;
4744 /* Done proving if this is a no-overflow control IV. */
4745 if (operand_equal_p (base, civ->base, 0))
4746 return true;
4748 /* The control IV is recorded after expanding simple operations,
4749 so here we expand BASE and compare it too. */
4750 tree expanded_base = expand_simple_operations (base);
4751 if (operand_equal_p (expanded_base, civ->base, 0))
4752 return true;
4754 /* If this is a before-stepping control IV, in other words, we have
4756 {civ_base, step} = {base + step, step}.
4758 Because the civ {base + step, step} doesn't overflow during loop
4759 iterations, {base, step} will not overflow if we can prove that the
4760 operation "base + step" does not overflow. Specifically, we try
4761 to prove that the conditions below are satisfied:
4763 base <= UPPER_BOUND (type) - step ;; step > 0
4764 base >= LOWER_BOUND (type) - step ;; step < 0
4766 by proving that the reverse conditions are false using the loop's
4767 initial condition. */
4768 if (POINTER_TYPE_P (TREE_TYPE (base)))
4769 code = POINTER_PLUS_EXPR;
4770 else
4771 code = PLUS_EXPR;
4773 tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4774 tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
4775 expanded_base, step);
4776 if (operand_equal_p (stepped, civ->base, 0)
4777 || operand_equal_p (expanded_stepped, civ->base, 0))
4779 tree extreme;
4781 if (tree_int_cst_sign_bit (step))
4783 code = LT_EXPR;
4784 extreme = lower_bound_in_type (type, type);
4786 else
4788 code = GT_EXPR;
4789 extreme = upper_bound_in_type (type, type);
4791 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4792 e = fold_build2 (code, boolean_type_node, base, extreme);
4793 e = simplify_using_initial_conditions (loop, e);
4794 if (integer_zerop (e))
4795 return true;
4800 return false;
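/* For illustration of the last case above (hypothetical source):

     i = start;
     do
       {
         i += 4;          the control IV here is {start + 4, +, 4}
         ...
       }
     while (i != end);

   if the loop's initial conditions refute start > INT_MAX - 4, the
   candidate IV {start, +, 4} cannot overflow either, since its stepped
   form equals the no-overflow control IV.  */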
4803 /* VAR is a SCEV variable whose evolution part is the constant STEP; this
4804 function proves that VAR can't overflow by using value range info. If
4805 VAR's value range is [MIN, MAX], this can be proven by:
4806 MAX + step doesn't overflow ; if step > 0
4808 MIN + step doesn't underflow ; if step < 0.
4810 We can only do this if VAR is computed in every loop iteration, i.e., VAR's
4811 definition has to dominate the loop latch. Consider the example below:
4814 unsigned int i;
4816 <bb 3>:
4818 <bb 4>:
4819 # RANGE [0, 4294967294] NONZERO 65535
4820 # i_21 = PHI <0(3), i_18(9)>
4821 if (i_21 != 0)
4822 goto <bb 6>;
4823 else
4824 goto <bb 8>;
4826 <bb 6>:
4827 # RANGE [0, 65533] NONZERO 65535
4828 _6 = i_21 + 4294967295;
4829 # RANGE [0, 65533] NONZERO 65535
4830 _7 = (long unsigned int) _6;
4831 # RANGE [0, 524264] NONZERO 524280
4832 _8 = _7 * 8;
4833 # PT = nonlocal escaped
4834 _9 = a_14 + _8;
4835 *_9 = 0;
4837 <bb 8>:
4838 # RANGE [1, 65535] NONZERO 65535
4839 i_18 = i_21 + 1;
4840 if (i_18 >= 65535)
4841 goto <bb 10>;
4842 else
4843 goto <bb 9>;
4845 <bb 9>:
4846 goto <bb 4>;
4848 <bb 10>:
4849 return;
4852 VAR _6 doesn't overflow only with the pre-condition (i_21 != 0), so here
4853 we can't use _6 to prove no-overflow for _7. In fact, var _7 takes the
4854 value sequence (4294967295, 0, 1, ..., 65533) during the loop's lifetime,
4855 rather than (4294967295, 4294967296, ...). */
4857 static bool
4858 scev_var_range_cant_overflow (tree var, tree step, class loop *loop)
4860 tree type;
4861 wide_int minv, maxv, diff, step_wi;
4862 enum value_range_kind rtype;
4864 if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
4865 return false;
4867 /* Check if VAR evaluates in every loop iteration. It's not the case
4868 if VAR is a default definition or does not dominate the loop's latch. */
4869 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
4870 if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
4871 return false;
4873 rtype = get_range_info (var, &minv, &maxv);
4874 if (rtype != VR_RANGE)
4875 return false;
4877 /* VAR is a SCEV whose evolution part is STEP and whose value range info
4878 is [MIN, MAX]; we can prove it doesn't overflow using the conditions:
4880 type_MAX - MAX >= step ; if step > 0
4881 MIN - type_MIN >= |step| ; if step < 0.
4883 Otherwise VAR would take a value outside its range, which is not possible. */
4884 step_wi = wi::to_wide (step);
4885 type = TREE_TYPE (var);
4886 if (tree_int_cst_sign_bit (step))
4888 diff = minv - wi::to_wide (lower_bound_in_type (type, type));
4889 step_wi = - step_wi;
4891 else
4892 diff = wi::to_wide (upper_bound_in_type (type, type)) - maxv;
4894 return (wi::geu_p (diff, step_wi));
4897 /* Return false only when the induction variable BASE + STEP * I is
4898 known to not overflow: i.e. when the number of iterations is small
4899 enough with respect to the step and initial condition in order to
4900 keep the evolution confined in TYPE's bounds. Return true when the
4901 iv is known to overflow or when the property is not computable.
4903 USE_OVERFLOW_SEMANTICS is true if this function should assume that
4904 the rules for overflow of the given language apply (e.g., that signed
4905 arithmetic in C does not overflow).
4907 If VAR is an SSA variable, this function also returns false if VAR can
4908 be proven not to overflow using value range info. */
4910 bool
4911 scev_probably_wraps_p (tree var, tree base, tree step,
4912 gimple *at_stmt, class loop *loop,
4913 bool use_overflow_semantics)
4915 /* FIXME: We really need something like
4916 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4918 We used to test for the following situation that frequently appears
4919 during address arithmetic:
4921 D.1621_13 = (long unsigned intD.4) D.1620_12;
4922 D.1622_14 = D.1621_13 * 8;
4923 D.1623_15 = (doubleD.29 *) D.1622_14;
4925 And derived that the sequence corresponding to D_14
4926 can be proved to not wrap because it is used for computing a
4927 memory access; however, this is not really the case -- for example,
4928 if D_12 = (unsigned char) [254,+,1], then D_14 has values
4929 2032, 2040, 0, 8, ..., but the code is still legal. */
4931 if (chrec_contains_undetermined (base)
4932 || chrec_contains_undetermined (step))
4933 return true;
4935 if (integer_zerop (step))
4936 return false;
4938 /* If we can use the fact that signed and pointer arithmetic does not
4939 wrap, we are done. */
4940 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4941 return false;
4943 /* To be able to use estimates on number of iterations of the loop,
4944 we must have an upper bound on the absolute value of the step. */
4945 if (TREE_CODE (step) != INTEGER_CST)
4946 return true;
4948 /* Check if var can be proven not overflow with value range info. */
4949 if (var && TREE_CODE (var) == SSA_NAME
4950 && scev_var_range_cant_overflow (var, step, loop))
4951 return false;
4953 if (loop_exits_before_overflow (base, step, at_stmt, loop))
4954 return false;
4956 /* At this point we still don't have a proof that the iv does not
4957 overflow: give up. */
4958 return true;
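/* Usage sketch (hypothetical caller): passes typically guard IV transforms
   with this predicate, e.g.

     if (!scev_probably_wraps_p (var, base, step, stmt, loop, true))
       ... assume BASE + STEP * I stays within its type ...

   passing USE_OVERFLOW_SEMANTICS as true only where the language-level
   undefined-overflow rules may be relied upon.  */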
4961 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
4963 void
4964 free_numbers_of_iterations_estimates (class loop *loop)
4966 struct control_iv *civ;
4967 class nb_iter_bound *bound;
4969 loop->nb_iterations = NULL;
4970 loop->estimate_state = EST_NOT_COMPUTED;
4971 for (bound = loop->bounds; bound;)
4973 class nb_iter_bound *next = bound->next;
4974 ggc_free (bound);
4975 bound = next;
4977 loop->bounds = NULL;
4979 for (civ = loop->control_ivs; civ;)
4981 struct control_iv *next = civ->next;
4982 ggc_free (civ);
4983 civ = next;
4985 loop->control_ivs = NULL;
4988 /* Frees the information on upper bounds on numbers of iterations of loops. */
4990 void
4991 free_numbers_of_iterations_estimates (function *fn)
4993 class loop *loop;
4995 FOR_EACH_LOOP_FN (fn, loop, 0)
4996 free_numbers_of_iterations_estimates (loop);
4999 /* Substitute the value VAL for the SSA name NAME inside expressions held
5000 in LOOP. */
5002 void
5003 substitute_in_loop_info (class loop *loop, tree name, tree val)
5005 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);