/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "fold-const.h"
#include "calls.h"
#include "intl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-dfa.h"
#include "gimple-range.h"

/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8
/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

struct bounds
{
  mpz_t below, up;
};

static bool number_of_iterations_popcount (loop_p loop, edge exit,
					   enum tree_code code,
					   class tree_niter_desc *niter);
/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
	break;

      *var = op0;
      /* Always sign extend the offset.  */
      wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
      if (negate)
	mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}
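/* For illustration: "a_5 + 7" splits to VAR = a_5, OFFSET = 7; "a_5 - 3"
   splits to VAR = a_5, OFFSET = -3 (the MINUS_EXPR case negates the
   offset); and a plain constant "42" splits to VAR = 0, OFFSET = 42.  */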
/* From condition C0 CMP C1 derives information regarding the value range
   of VAR, which is of TYPE.  Results are stored to BELOW and UP.  */

static void
refine_value_range_using_guard (tree type, tree var,
				tree c0, enum tree_code cmp, tree c1,
				mpz_t below, mpz_t up)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1;
  mpz_t mint, maxt, minc1, maxc1;
  bool no_wrap = nowrap_type_p (type);
  bool c0_ok, c1_ok;
  signop sgn = TYPE_SIGN (type);

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however,
	 such a guard is unlikely to appear, so we do not bother with
	 handling it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information,
	 except for cases of comparing with bounds.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same
	 type as VAR.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (operand_equal_p (var, c0, 0))
	{
	  mpz_t valc1;

	  /* Case of comparing VAR with its below/up bounds.  */
	  mpz_init (valc1);
	  wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
	  if (mpz_cmp (valc1, below) == 0)
	    cmp = GT_EXPR;
	  if (mpz_cmp (valc1, up) == 0)
	    cmp = LT_EXPR;

	  mpz_clear (valc1);
	}
      else
	{
	  /* Case of comparing with the bounds of the type.  */
	  wide_int min = wi::min_value (type);
	  wide_int max = wi::max_value (type);

	  if (wi::to_wide (c1) == min)
	    cmp = GT_EXPR;
	  if (wi::to_wide (c1) == max)
	    cmp = LT_EXPR;
	}

      /* Quick return if no useful information.  */
      if (cmp == NE_EXPR)
	return;

      break;

    default:
      return;
    }
  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VAR.  */
  if (operand_equal_p (var, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }
  else if (!operand_equal_p (var, varc0, 0))
    {
      mpz_clear (offc0);
      mpz_clear (offc1);
      return;
    }

  mpz_init (mint);
  mpz_init (maxt);
  get_type_static_bounds (type, mint, maxt);
  mpz_init (minc1);
  mpz_init (maxc1);
  value_range r;
  /* Setup range information for varc1.  */
  if (integer_zerop (varc1))
    {
      wi::to_mpz (0, minc1, TYPE_SIGN (type));
      wi::to_mpz (0, maxc1, TYPE_SIGN (type));
    }
  else if (TREE_CODE (varc1) == SSA_NAME
	   && INTEGRAL_TYPE_P (type)
	   && get_range_query (cfun)->range_of_expr (r, varc1)
	   && r.kind () == VR_RANGE)
    {
      gcc_assert (wi::le_p (r.lower_bound (), r.upper_bound (), sgn));
      wi::to_mpz (r.lower_bound (), minc1, sgn);
      wi::to_mpz (r.upper_bound (), maxc1, sgn);
    }
  else
    {
      mpz_set (minc1, mint);
      mpz_set (maxc1, maxt);
    }

  /* Compute valid range information for varc1 + offc1.  Note nothing
     useful can be derived if it overflows or underflows.  Overflow or
     underflow could happen when:

       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
  mpz_add (minc1, minc1, offc1);
  mpz_add (maxc1, maxc1, offc1);
  c1_ok = (no_wrap
	   || mpz_sgn (offc1) == 0
	   || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
	   || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
  if (!c1_ok)
    goto end;

  if (mpz_cmp (minc1, mint) < 0)
    mpz_set (minc1, mint);
  if (mpz_cmp (maxc1, maxt) > 0)
    mpz_set (maxc1, maxt);

  if (cmp == LT_EXPR)
    {
      cmp = LE_EXPR;
      mpz_sub_ui (maxc1, maxc1, 1);
    }
  if (cmp == GT_EXPR)
    {
      cmp = GE_EXPR;
      mpz_add_ui (minc1, minc1, 1);
    }
  /* Compute range information for varc0.  If there is no overflow,
     the condition implied that

       (varc0) cmp (varc1 + offc1 - offc0)

     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
     or the below bound if cmp is GE_EXPR.

     To prove there is no overflow/underflow, we need to check below
     four cases:
       1) cmp == LE_EXPR && offc0 > 0

	    (varc0 + offc0) doesn't overflow
	    && (varc1 + offc1 - offc0) doesn't underflow

       2) cmp == LE_EXPR && offc0 < 0

	    (varc0 + offc0) doesn't underflow
	    && (varc1 + offc1 - offc0) doesn't overflow

	  In this case, (varc0 + offc0) will never underflow if we can
	  prove (varc1 + offc1 - offc0) doesn't overflow.

       3) cmp == GE_EXPR && offc0 < 0

	    (varc0 + offc0) doesn't underflow
	    && (varc1 + offc1 - offc0) doesn't overflow

       4) cmp == GE_EXPR && offc0 > 0

	    (varc0 + offc0) doesn't overflow
	    && (varc1 + offc1 - offc0) doesn't underflow

	  In this case, (varc0 + offc0) will never overflow if we can
	  prove (varc1 + offc1 - offc0) doesn't underflow.

     Note we only handle case 2 and 4 in below code.  */
  mpz_sub (minc1, minc1, offc0);
  mpz_sub (maxc1, maxc1, offc0);
  c0_ok = (no_wrap
	   || mpz_sgn (offc0) == 0
	   || (cmp == LE_EXPR
	       && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
	   || (cmp == GE_EXPR
	       && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
  if (!c0_ok)
    goto end;

  if (cmp == LE_EXPR)
    {
      if (mpz_cmp (up, maxc1) > 0)
	mpz_set (up, maxc1);
    }
  else
    {
      if (mpz_cmp (below, minc1) < 0)
	mpz_set (below, minc1);
    }

end:
  mpz_clear (mint);
  mpz_clear (maxt);
  mpz_clear (minc1);
  mpz_clear (maxc1);
  mpz_clear (offc0);
  mpz_clear (offc1);
}
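/* An illustrative example: with a signed "int" VAR = i_1 (no wrapping)
   and a dominating guard "i_1 + 1 <= n_2" where n_2 is known to lie in
   [0, 99], we get varc0 = i_1, offc0 = 1, varc1 = n_2, offc1 = 0; the
   range of n_2 shifted by -offc0 is [-1, 98], so UP is lowered to 98.  */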
/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (class loop *loop, tree type, tree var, mpz_t off,
		       mpz_t min, mpz_t max)
{
  int cnt = 0;
  mpz_t minm, maxm;
  basic_block bb;
  wide_int minv, maxv;
  enum value_range_kind rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gphi_iterator gsi;

      /* Either for VAR itself...  */
      value_range var_range;
      get_range_query (cfun)->range_of_expr (var_range, var);
      rtype = var_range.kind ();
      if (!var_range.undefined_p ())
	{
	  minv = var_range.lower_bound ();
	  maxv = var_range.upper_bound ();
	}

      /* Or for PHI results in loop->header where VAR is used as
	 PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  value_range phi_range;
	  if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
	      && get_range_query (cfun)->range_of_expr (phi_range,
							gimple_phi_result (phi))
	      && phi_range.kind () == VR_RANGE)
	    {
	      if (rtype != VR_RANGE)
		{
		  rtype = VR_RANGE;
		  minv = phi_range.lower_bound ();
		  maxv = phi_range.upper_bound ();
		}
	      else
		{
		  minv = wi::max (minv, phi_range.lower_bound (), sgn);
		  maxv = wi::min (maxv, phi_range.upper_bound (), sgn);
		  /* If the PHI result range is inconsistent with
		     the VAR range, give up on looking at the PHI
		     results.  This can happen if VR_UNDEFINED is
		     involved.  */
		  if (wi::gt_p (minv, maxv, sgn))
		    {
		      value_range vr;
		      get_range_query (cfun)->range_of_expr (vr, var);
		      rtype = vr.kind ();
		      if (!vr.undefined_p ())
			{
			  minv = vr.lower_bound ();
			  maxv = vr.upper_bound ();
			}
		      break;
		    }
		}
	    }
	}
      mpz_init (minm);
      mpz_init (maxm);
      if (rtype != VR_RANGE)
	{
	  mpz_set (minm, min);
	  mpz_set (maxm, max);
	}
      else
	{
	  gcc_assert (wi::le_p (minv, maxv, sgn));
	  wi::to_mpz (minv, minm, sgn);
	  wi::to_mpz (maxv, maxm, sgn);
	}
      /* Now walk the dominators of the loop header and use the entry
	 guards to refine the estimates.  */
      for (bb = loop->header;
	   bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
	   bb = get_immediate_dominator (CDI_DOMINATORS, bb))
	{
	  edge e;
	  tree c0, c1;
	  gimple *cond;
	  enum tree_code cmp;

	  if (!single_pred_p (bb))
	    continue;
	  e = single_pred_edge (bb);

	  if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	    continue;

	  cond = last_stmt (e->src);
	  c0 = gimple_cond_lhs (cond);
	  cmp = gimple_cond_code (cond);
	  c1 = gimple_cond_rhs (cond);

	  if (e->flags & EDGE_FALSE_VALUE)
	    cmp = invert_tree_comparison (cmp, false);

	  refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
	  ++cnt;
	}

      mpz_add (minm, minm, off);
      mpz_add (maxm, maxm, off);
      /* If the computation may not wrap or off is zero, then this
	 is always fine.  If off is negative and minv + off isn't
	 smaller than type's minimum, or off is positive and
	 maxv + off isn't bigger than type's maximum, use the more
	 precise range too.  */
      if (nowrap_type_p (type)
	  || mpz_sgn (off) == 0
	  || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
	  || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
	{
	  mpz_set (min, minm);
	  mpz_set (max, maxm);
	  mpz_clear (minm);
	  mpz_clear (maxm);
	  return;
	}
      mpz_clear (minm);
      mpz_clear (maxm);
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}
/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
				    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
	  them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
	  expressions are var + X - M and var + Y, where M is the range of
	  the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Then the values are
	  var + X and var + Y + M, and their difference again is X - Y - M.
     Therefore, if the arithmetic in the type does not overflow, then the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */
  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
	mpz_sub (bnds->below, bnds->below, m);
      else
	mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
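/* For example, in an 8-bit unsigned type (M = 256), X = 10 and Y = 3
   give BNDS->up = 7; since the type may wrap, BNDS->below becomes
   7 - 256 = -249, covering case b) above where var + 10 overflows but
   var + 3 does not.  */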
/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
			   tree vary, mpz_t offy,
			   tree c0, enum tree_code cmp, tree c1,
			   bounds *bnds)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
	 a guard is unlikely to appear, so we do not bother with handling
	 it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except for
	 the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same type
	 as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
	  && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
	{
	  cmp = GT_EXPR;
	  break;
	}
      if (TYPE_MAX_VALUE (type)
	  && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
	{
	  cmp = LT_EXPR;
	  break;
	}

      return;
    default:
      return;
    }
  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      std::swap (varx, vary);
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
	VARX + OFFC0 overflows, but VARX + OFFX does not.
	This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
	VARY + OFFC1 underflows and VARY + OFFY does not.
	This may only happen if OFFY > OFFC1.  */
  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
	      || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
	      || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
	mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
	{
	  mpz_neg (bnd, bnd);
	  if (mpz_cmp (bnds->below, bnd) < 0)
	    mpz_set (bnds->below, bnd);
	}
      else
	{
	  if (mpz_cmp (bnd, bnds->up) < 0)
	    mpz_set (bnds->up, bnd);
	}
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}
/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (class loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple *cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
	 offsets.  Things are a bit more complicated when the
	 addition of offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
	 estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
	cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
				 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}
/* Returns inverse of X modulo 2^s, where MASK = 2^s-1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
	{
	  irslt *= ix;
	  ix *= ix;
	}
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
	{
	  rslt = int_const_binop (MULT_EXPR, rslt, x);
	  x = int_const_binop (MULT_EXPR, x, x);
	}
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
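/* The repeated squaring above computes rslt = x^(2^ctr - 1), and for odd
   x we have x^(2^(s-1)) == 1 (mod 2^s), so rslt is the inverse.  For
   example, with x = 3 and mask = 15 (s = 4): ctr = 3, rslt = 3^7 = 2187,
   2187 & 15 = 11, and indeed 3 * 11 = 33 == 1 (mod 16).  */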
/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable ever
   reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
			     bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
		       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
	  && TREE_CODE (s) == INTEGER_CST
	  && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
			    TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
	  && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
	 the induction variable overflows (unless the loop is exited in some
	 other way before).  Note that the actual induction variable in the
	 loop (which ranges from base to final instead of from 0 to C) may
	 overflow, in which case BNDS.up will not be giving a correct upper
	 bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
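  /* For example, s = 2 in an 8-bit type has period 2^7, so the bound
     computed below is 2^7 - 1 = 127.  */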
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type)
				   - wi::ctz (wi::to_wide (s)), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
	 the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
	wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
      else if (bnds_u_valid)
	mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}
/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   ever reaches the value FINAL (we derived this earlier, and possibly set
   NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (class loop *loop, tree type, affine_iv *iv,
			 tree final, class tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
			fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv->base),
		       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, final),
		       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
			       exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* Compute no-overflow information for the control iv.  This can be
     proven when below two conditions are satisfied:

       1) IV evaluates toward FINAL at beginning, i.e:
	    base <= FINAL ; step > 0
	    base >= FINAL ; step < 0

       2) |FINAL - base| is an exact multiple of step.

     Unfortunately, it's hard to prove above conditions after pass loop-ch
     because loop with exit condition (IV != FINAL) usually will be guarded
     by initial-condition (IV.base - IV.step != FINAL).  In this case, we
     can alternatively try to prove below conditions:

       1') IV evaluates toward FINAL at beginning, i.e:
	    new_base = base - step < FINAL ; step > 0
					     && base - step doesn't underflow
	    new_base = base - step > FINAL ; step < 0
					     && base - step doesn't overflow

     Please refer to PR34114 as an example of loop-ch's impact.

     Note, for NE_EXPR, the case where base equals FINAL is a special one,
     in which the loop exits immediately, and the iv does not overflow.

     Also note, we prove condition 2) by checking base and final separately
     along with condition 1) or 1').  Since we ensure the difference
     computation of c does not wrap with cond below and the adjusted s
     will fit a signed type as well as an unsigned we can safely do
     this using the type of the IV if it is not pointer typed.  */
  tree mtype = type;
  if (POINTER_TYPE_P (type))
    mtype = niter_type;
  if (!niter->control.no_overflow
      && (integer_onep (s)
	  || (multiple_of_p (mtype, fold_convert (mtype, iv->base),
			     fold_convert (mtype, s), false)
	      && multiple_of_p (mtype, fold_convert (mtype, final),
				fold_convert (mtype, s), false))))
    {
      tree t, cond, relaxed_cond = boolean_false_node;

      if (tree_int_cst_sign_bit (iv->step))
	{
	  cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
	  if (TREE_CODE (type) == INTEGER_TYPE)
	    {
	      /* Only when base - step doesn't overflow.  */
	      t = TYPE_MAX_VALUE (type);
	      t = fold_build2 (PLUS_EXPR, type, t, iv->step);
	      t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
	      if (integer_nonzerop (t))
		{
		  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
		  relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node, t,
					      final);
		}
	    }
	}
      else
	{
	  cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
	  if (TREE_CODE (type) == INTEGER_TYPE)
	    {
	      /* Only when base - step doesn't underflow.  */
	      t = TYPE_MIN_VALUE (type);
	      t = fold_build2 (PLUS_EXPR, type, t, iv->step);
	      t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
	      if (integer_nonzerop (t))
		{
		  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
		  relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node, t,
					      final);
		}
	    }
	}

      t = simplify_using_initial_conditions (loop, cond);
      if (!t || !integer_onep (t))
	t = simplify_using_initial_conditions (loop, relaxed_cond);

      if (t && integer_onep (t))
	{
	  niter->control.no_overflow = true;
	  niter->niter = fold_build2 (EXACT_DIV_EXPR, niter_type, c, s);
	  return true;
	}
    }

  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
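  /* For example, with "for (i = 0; i != 18; i += 6)" in an 8-bit type:
     s = 6, c = 18, d = 2, so c/d = 9 and s/d = 3; inverse(3) modulo 2^7
     is 43, and 9 * 43 = 387 == 3 (mod 2^7), the correct number of
     iterations (i = 0, 6, 12).  */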
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
			       (TYPE_PRECISION (niter_type)
				- tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
			       build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
	 assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
				assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  if (integer_onep (s))
    {
      niter->niter = c;
    }
  else
    {
      tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
      niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
    }
  return true;
}
/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */
static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
			       class tree_niter_desc *niter,
			       tree *delta, tree step,
			       bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points
     being placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
      (iv0->no_overflow && integer_nonzerop (iv0->step))
      || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (MINUS_EXPR, type1,
			       TYPE_MAX_VALUE (type1), tmod);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build_pointer_plus (iv1->base, tmod));
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build2 (PLUS_EXPR, type1,
					   iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type1), tmod);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build_pointer_plus (iv0->base,
						       fold_build1 (NEGATE_EXPR,
								    type1, tmod)),
			      iv1->base);
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build2 (MINUS_EXPR, type1,
					   iv0->base, tmod),
			      iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions,
				      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
				      niter->may_be_zero,
				      noloop);
  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}
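/* For example, for "i < 10" with i = {0, +, 4}: MOD = (4 - 10 mod 4)
   mod 4 = 2, so the final value is adjusted to 12 and DELTA to 12,
   turning the test into "i != 12", which runs for i = 0, 4, 8, i.e.,
   12 / 4 = 3 iterations.  */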
/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		       class tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
	return true;

      /* If iv0->base is a constant, we can determine the last value before
	 overflow precisely; otherwise we conservatively assume
	 MAX - STEP + 1.  */

      if (TREE_CODE (iv0->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, TYPE_MAX_VALUE (type)),
			   fold_convert (niter_type, iv0->base));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
			   TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
				iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
	return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, iv1->base),
			   fold_convert (niter_type, TYPE_MIN_VALUE (type)));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
			   TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
				iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}
/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		      class tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

       -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the loop,
     and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables us
     to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */
  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
		   /* For pointers, only values lying inside a single object
		      can be compared or manipulated by pointer arithmetics.
		      Gcc in general does not allow or handle objects larger
		      than half of the address space, hence the upper bound
		      is satisfied for pointers.  */
		   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
			  iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
	 0 address never belongs to any object, we can assume this for
	 pointers.  */
      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type), diff);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	}

      /* And then we can compute iv0->base - diff, and compare it with
	 iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
			  iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MAX_VALUE (type), diff);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	}

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);
  if (!rolls_p)
    {
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
					niter->may_be_zero, mbz);
    }
}
/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1 which looks like: {base, -C} < n, or n < {base, C}.
   The number of iterations is stored to NITER.  */
static bool
number_of_iterations_until_wrap (class loop *loop, tree type, affine_iv *iv0,
				 affine_iv *iv1, class tree_niter_desc *niter)
{
  tree niter_type = unsigned_type_for (type);
  tree step, num, assumptions, may_be_zero, span;
  wide_int high, low, max, min;

  may_be_zero = fold_build2 (LE_EXPR, boolean_type_node, iv1->base, iv0->base);
  if (integer_onep (may_be_zero))
    return false;

  int prec = TYPE_PRECISION (type);
  signop sgn = TYPE_SIGN (type);
  min = wi::min_value (prec, sgn);
  max = wi::max_value (prec, sgn);

  /* n < {base, C}.  */
  if (integer_zerop (iv0->step) && !tree_int_cst_sign_bit (iv1->step))
    {
      step = iv1->step;
      /* MIN + C - 1 <= n.  */
      tree last = wide_int_to_tree (type, min + wi::to_wide (step) - 1);
      assumptions = fold_build2 (LE_EXPR, boolean_type_node, last, iv0->base);
      if (integer_zerop (assumptions))
	return false;

      num = fold_build2 (MINUS_EXPR, niter_type, wide_int_to_tree (type, max),
			 iv1->base);

      /* When base has the form iv + 1, if we know iv >= n, then iv + 1 < n
	 only when iv + 1 overflows, i.e. when iv == TYPE_VALUE_MAX.  */
      if (sgn == UNSIGNED
	  && integer_onep (step)
	  && TREE_CODE (iv1->base) == PLUS_EXPR
	  && integer_onep (TREE_OPERAND (iv1->base, 1)))
	{
	  tree cond = fold_build2 (GE_EXPR, boolean_type_node,
				   TREE_OPERAND (iv1->base, 0), iv0->base);
	  cond = simplify_using_initial_conditions (loop, cond);
	  if (integer_onep (cond))
	    may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node,
				       TREE_OPERAND (iv1->base, 0),
				       TYPE_MAX_VALUE (type));
	}

      high = max;
      if (TREE_CODE (iv1->base) == INTEGER_CST)
	low = wi::to_wide (iv1->base) - 1;
      else if (TREE_CODE (iv0->base) == INTEGER_CST)
	low = wi::to_wide (iv0->base);
      else
	low = min;
    }
  /* {base, -C} < n.  */
  else if (tree_int_cst_sign_bit (iv0->step) && integer_zerop (iv1->step))
    {
      step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv0->step), iv0->step);
      /* MAX - C + 1 >= n.  */
      tree last = wide_int_to_tree (type, max - wi::to_wide (step) + 1);
      assumptions = fold_build2 (GE_EXPR, boolean_type_node, last, iv1->base);
      if (integer_zerop (assumptions))
	return false;

      num = fold_build2 (MINUS_EXPR, niter_type, iv0->base,
			 wide_int_to_tree (type, min));
      low = min;
      if (TREE_CODE (iv0->base) == INTEGER_CST)
	high = wi::to_wide (iv0->base) + 1;
      else if (TREE_CODE (iv1->base) == INTEGER_CST)
	high = wi::to_wide (iv1->base);
      else
	high = max;
    }
  else
    return false;

  /* (delta + step - 1) / step */
  step = fold_convert (niter_type, step);
  num = fold_convert (niter_type, num);
  num = fold_build2 (PLUS_EXPR, niter_type, num, step);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, num, step);

  widest_int delta, s;
  delta = widest_int::from (high, sgn) - widest_int::from (low, sgn);
  s = wi::to_widest (step);
  delta = delta + s - 1;
  niter->max = wi::udiv_floor (delta, s);

  niter->may_be_zero = may_be_zero;

  if (!integer_nonzerop (assumptions))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumptions);

  niter->control.no_overflow = false;

  /* Update bound and exit condition as:
     bound = niter * STEP + (IVbase - STEP).
     { IVbase - STEP, +, STEP } != bound
     Here, biasing IVbase by 1 step makes 'bound' be the value before wrap.
     */
  tree base_type = TREE_TYPE (niter->control.base);
  if (POINTER_TYPE_P (base_type))
    {
      tree utype = unsigned_type_for (base_type);
      niter->control.base
	= fold_build2 (MINUS_EXPR, utype,
		       fold_convert (utype, niter->control.base),
		       fold_convert (utype, niter->control.step));
      niter->control.base = fold_convert (base_type, niter->control.base);
    }
  else
    niter->control.base
      = fold_build2 (MINUS_EXPR, base_type, niter->control.base,
		     niter->control.step);

  span = fold_build2 (MULT_EXPR, niter_type, niter->niter,
		      fold_convert (niter_type, niter->control.step));
  niter->bound = fold_build2 (PLUS_EXPR, niter_type, span,
			      fold_convert (niter_type, niter->control.base));
  niter->bound = fold_convert (type, niter->bound);
  niter->cmp = NE_EXPR;

  return true;
}
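/* For example, in an 8-bit unsigned type, {10, -7} < 20 runs for
   i = 10, 3, then i wraps to 252 and the loop exits: num = 10 - 0,
   and (10 + 7) / 7 = 2 iterations.  */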
/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */
static bool
number_of_iterations_lt (class loop *loop, tree type, affine_iv *iv0,
			 affine_iv *iv1, class tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  /* {base, -C} < n,  or n < {base, C} */
  if (tree_int_cst_sign_bit (iv0->step)
      || (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step)))
    return number_of_iterations_until_wrap (loop, type, iv0, iv1, niter);

  delta = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv1->base),
		       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

	 or

	 for (i = iv1->base; i > iv0->base; i--).

	 In both cases # of iterations is iv1->base - iv0->base, assuming that
	 iv1->base >= iv0->base.

	 First try to derive a lower bound on the value of
	 iv1->base - iv0->base, computed in full precision.  If the difference
	 is nonnegative, we are done, otherwise we must record the
	 condition.  */

      if (mpz_sgn (bnds->below) < 0)
	niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
					  iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
				     TYPE_SIGN (niter_type));
      niter->control.no_overflow = true;
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
			 fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
				     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
	 zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (loop, type, &zps,
				      delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
		   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
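/* For example, "for (i = 0; i < 10; i += 3)" gives delta = 10, step = 3,
   and niter = (10 + 3 - 1) / 3 = 4, matching i = 0, 3, 6, 9.  */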
/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (class loop *loop, tree type, affine_iv *iv0,
			 affine_iv *iv1, class tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv1->base, TYPE_MAX_VALUE (type));
      else
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
	return false;
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
	iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
	iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
				 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
			     iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
				  bnds);
}
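/* For example, "i <= 9" with positive step is rewritten here as
   "i < 10" (by adding 1 to IV1->base), with the bounds on the
   difference shifted by 1 to match.  */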
/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}
/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1.  Both induction variables must have
   type TYPE, which must be an integer or pointer type.  The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   if EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at class tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */
1819 static bool
1820 number_of_iterations_cond (class loop *loop,
1821 tree type, affine_iv *iv0, enum tree_code code,
1822 affine_iv *iv1, class tree_niter_desc *niter,
1823 bool only_exit, bool every_iteration)
1825 bool exit_must_be_taken = false, ret;
1826 bounds bnds;
1828 /* If the test is not executed every iteration, wrapping may make the test
1829 to pass again.
1830 TODO: the overflow case can be still used as unreliable estimate of upper
1831 bound. But we have no API to pass it down to number of iterations code
1832 and, at present, it will not use it anyway. */
1833 if (!every_iteration
1834 && (!iv0->no_overflow || !iv1->no_overflow
1835 || code == NE_EXPR || code == EQ_EXPR))
1836 return false;
1838 /* The meaning of these assumptions is this:
1839 if !assumptions
1840 then the rest of information does not have to be valid
1841 if may_be_zero then the loop does not roll, even if
1842 niter != 0. */
1843 niter->assumptions = boolean_true_node;
1844 niter->may_be_zero = boolean_false_node;
1845 niter->niter = NULL_TREE;
1846 niter->max = 0;
1847 niter->bound = NULL_TREE;
1848 niter->cmp = ERROR_MARK;
1850 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1851 the control variable is on lhs. */
1852 if (code == GE_EXPR || code == GT_EXPR
1853 || (code == NE_EXPR && integer_zerop (iv0->step)))
1855 std::swap (iv0, iv1);
1856 code = swap_tree_comparison (code);
1859 if (POINTER_TYPE_P (type))
1861 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1862 to the same object. If they do, the control variable cannot wrap
1863 (as wrapping around the bounds of memory will never return a pointer
1864 that would be guaranteed to point to the same object, even if we
1865 avoid undefined behavior by casting to size_t and back). */
1866 iv0->no_overflow = true;
1867 iv1->no_overflow = true;
1870 /* If the control induction variable does not overflow and the only exit
1871 from the loop is the one that we analyze, we know it must be taken
1872 eventually. */
1873 if (only_exit)
1875 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1876 exit_must_be_taken = true;
1877 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1878 exit_must_be_taken = true;
1881 /* We can handle cases in which neither side of the comparison is
1882 invariant:
1884 {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
1885 as if:
1886 {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}
1888 provided that either below condition is satisfied:
1890 a) the test is NE_EXPR;
1891 b) iv0 and iv1 do not overflow and iv0.step - iv1.step is of
1892 the same sign as, and of magnitude less than or equal to, iv0.step.
1894 This rarely occurs in practice, but it is simple enough to manage. */
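/* Editor's illustration (hypothetical ivs, not from the sources): the exit
   test {0, +, 2} < {n, +, 1} is handled as {0, +, 1} < {n, +, 0}; the new
   step 2 - 1 = 1 has the same sign as, and smaller magnitude than, the
   original iv0.step of 2, so condition b) above is satisfied. */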
1895 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1897 tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1898 tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
1899 iv0->step, iv1->step);
1901 /* For code other than NE_EXPR we have to ensure moving the evolution
1902 of IV1 to that of IV0 does not introduce overflow. */
1903 if (TREE_CODE (step) != INTEGER_CST
1904 || !iv0->no_overflow || !iv1->no_overflow)
1906 if (code != NE_EXPR)
1907 return false;
1908 iv0->no_overflow = false;
1910 /* If the new step of IV0 has changed sign or is of greater
1911 magnitude, then we do not know whether IV0 overflows, and thus
1912 the transform is not valid for code other than NE_EXPR. */
1913 else if (tree_int_cst_sign_bit (step) != tree_int_cst_sign_bit (iv0->step)
1914 || wi::gtu_p (wi::abs (wi::to_widest (step)),
1915 wi::abs (wi::to_widest (iv0->step))))
1917 if (POINTER_TYPE_P (type) && code != NE_EXPR)
1918 /* For relational pointer compares we have further guarantees
1919 that the pointers always point to the same object (or one
1920 after it) and that objects do not cross the zero page. So
1921 not only is the transform always valid for relational
1922 pointer compares, we also know the resulting IV does not
1923 overflow. */
1925 else if (code != NE_EXPR)
1926 return false;
1927 else
1928 iv0->no_overflow = false;
1931 iv0->step = step;
1932 iv1->step = build_int_cst (step_type, 0);
1933 iv1->no_overflow = true;
1936 /* If the result of the comparison is a constant, the loop is weird. More
1937 precise handling would be possible, but the situation is not common enough
1938 to waste time on it. */
1939 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1940 return false;
1942 /* If the loop exits immediately, there is nothing to do. */
1943 tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1944 if (tem && integer_zerop (tem))
1946 if (!every_iteration)
1947 return false;
1948 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1949 niter->max = 0;
1950 return true;
1953 /* OK, now we know we have a sensible loop. Handle several cases, depending
1954 on which comparison operator is used. */
1955 bound_difference (loop, iv1->base, iv0->base, &bnds);
1957 if (dump_file && (dump_flags & TDF_DETAILS))
1959 fprintf (dump_file,
1960 "Analyzing # of iterations of loop %d\n", loop->num);
1962 fprintf (dump_file, " exit condition ");
1963 dump_affine_iv (dump_file, iv0);
1964 fprintf (dump_file, " %s ",
1965 code == NE_EXPR ? "!="
1966 : code == LT_EXPR ? "<"
1967 : "<=");
1968 dump_affine_iv (dump_file, iv1);
1969 fprintf (dump_file, "\n");
1971 fprintf (dump_file, " bounds on difference of bases: ");
1972 mpz_out_str (dump_file, 10, bnds.below);
1973 fprintf (dump_file, " ... ");
1974 mpz_out_str (dump_file, 10, bnds.up);
1975 fprintf (dump_file, "\n");
1978 switch (code)
1980 case NE_EXPR:
1981 gcc_assert (integer_zerop (iv1->step));
1982 ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
1983 exit_must_be_taken, &bnds);
1984 break;
1986 case LT_EXPR:
1987 ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1988 exit_must_be_taken, &bnds);
1989 break;
1991 case LE_EXPR:
1992 ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1993 exit_must_be_taken, &bnds);
1994 break;
1996 default:
1997 gcc_unreachable ();
2000 mpz_clear (bnds.up);
2001 mpz_clear (bnds.below);
2003 if (dump_file && (dump_flags & TDF_DETAILS))
2005 if (ret)
2007 fprintf (dump_file, " result:\n");
2008 if (!integer_nonzerop (niter->assumptions))
2010 fprintf (dump_file, " under assumptions ");
2011 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
2012 fprintf (dump_file, "\n");
2015 if (!integer_zerop (niter->may_be_zero))
2017 fprintf (dump_file, " zero if ");
2018 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
2019 fprintf (dump_file, "\n");
2022 fprintf (dump_file, " # of iterations ");
2023 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
2024 fprintf (dump_file, ", bounded by ");
2025 print_decu (niter->max, dump_file);
2026 fprintf (dump_file, "\n");
2028 else
2029 fprintf (dump_file, " failed\n\n");
2031 return ret;
2034 /* Substitute NEW_TREE for OLD in EXPR and fold the result.
2035 If VALUEIZE is non-NULL then OLD and NEW_TREE are ignored and instead
2036 all SSA names are replaced with the result of calling the VALUEIZE
2037 function with the SSA name as argument. */
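/* Editor's illustration (hypothetical SSA name a_1): substituting the
   constant 3 for OLD = a_1 in EXPR = a_1 * 2 + a_1 replaces both
   occurrences and folds the result, yielding the constant 9. */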
2039 tree
2040 simplify_replace_tree (tree expr, tree old, tree new_tree,
2041 tree (*valueize) (tree, void*), void *context,
2042 bool do_fold)
2044 unsigned i, n;
2045 tree ret = NULL_TREE, e, se;
2047 if (!expr)
2048 return NULL_TREE;
2050 /* Do not bother to replace constants. */
2051 if (CONSTANT_CLASS_P (expr))
2052 return expr;
2054 if (valueize)
2056 if (TREE_CODE (expr) == SSA_NAME)
2058 new_tree = valueize (expr, context);
2059 if (new_tree != expr)
2060 return new_tree;
2063 else if (expr == old
2064 || operand_equal_p (expr, old, 0))
2065 return unshare_expr (new_tree);
2067 if (!EXPR_P (expr))
2068 return expr;
2070 n = TREE_OPERAND_LENGTH (expr);
2071 for (i = 0; i < n; i++)
2073 e = TREE_OPERAND (expr, i);
2074 se = simplify_replace_tree (e, old, new_tree, valueize, context, do_fold);
2075 if (e == se)
2076 continue;
2078 if (!ret)
2079 ret = copy_node (expr);
2081 TREE_OPERAND (ret, i) = se;
2084 return (ret ? (do_fold ? fold (ret) : ret) : expr);
2087 /* Expand definitions of ssa names in EXPR as long as they are simple
2088 enough, and return the new expression. If STOP is specified, stop
2089 expanding if EXPR equals it. */
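/* Editor's illustration (hypothetical GIMPLE): given
     b_2 = a_1 + 4;
     c_3 = b_2 + 1;
   expanding c_3 follows the simple plus-constant definitions and returns
   the folded expression a_1 + 5. */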
2091 static tree
2092 expand_simple_operations (tree expr, tree stop, hash_map<tree, tree> &cache)
2094 unsigned i, n;
2095 tree ret = NULL_TREE, e, ee, e1;
2096 enum tree_code code;
2097 gimple *stmt;
2099 if (expr == NULL_TREE)
2100 return expr;
2102 if (is_gimple_min_invariant (expr))
2103 return expr;
2105 code = TREE_CODE (expr);
2106 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
2108 n = TREE_OPERAND_LENGTH (expr);
2109 for (i = 0; i < n; i++)
2111 e = TREE_OPERAND (expr, i);
2112 /* SCEV analysis feeds us with a proper expression
2113 graph matching the SSA graph. Avoid turning it
2114 into a tree here, thus handle tree sharing
2115 properly.
2116 ??? The SSA walk below still turns the SSA graph
2117 into a tree but until we find a testcase do not
2118 introduce additional tree sharing here. */
2119 bool existed_p;
2120 tree &cee = cache.get_or_insert (e, &existed_p);
2121 if (existed_p)
2122 ee = cee;
2123 else
2125 cee = e;
2126 ee = expand_simple_operations (e, stop, cache);
2127 if (ee != e)
2128 *cache.get (e) = ee;
2130 if (e == ee)
2131 continue;
2133 if (!ret)
2134 ret = copy_node (expr);
2136 TREE_OPERAND (ret, i) = ee;
2139 if (!ret)
2140 return expr;
2142 fold_defer_overflow_warnings ();
2143 ret = fold (ret);
2144 fold_undefer_and_ignore_overflow_warnings ();
2145 return ret;
2148 /* Stop if it's not an SSA name, or it is the one we don't want to expand. */
2149 if (TREE_CODE (expr) != SSA_NAME || expr == stop)
2150 return expr;
2152 stmt = SSA_NAME_DEF_STMT (expr);
2153 if (gimple_code (stmt) == GIMPLE_PHI)
2155 basic_block src, dest;
2157 if (gimple_phi_num_args (stmt) != 1)
2158 return expr;
2159 e = PHI_ARG_DEF (stmt, 0);
2161 /* Avoid propagating through loop exit phi nodes, which
2162 could break loop-closed SSA form restrictions. */
2163 dest = gimple_bb (stmt);
2164 src = single_pred (dest);
2165 if (TREE_CODE (e) == SSA_NAME
2166 && src->loop_father != dest->loop_father)
2167 return expr;
2169 return expand_simple_operations (e, stop, cache);
2171 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2172 return expr;
2174 /* Avoid expanding to expressions that contain SSA names that need
2175 to take part in abnormal coalescing. */
2176 ssa_op_iter iter;
2177 FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
2178 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
2179 return expr;
2181 e = gimple_assign_rhs1 (stmt);
2182 code = gimple_assign_rhs_code (stmt);
2183 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2185 if (is_gimple_min_invariant (e))
2186 return e;
2188 if (code == SSA_NAME)
2189 return expand_simple_operations (e, stop, cache);
2190 else if (code == ADDR_EXPR)
2192 poly_int64 offset;
2193 tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
2194 &offset);
2195 if (base
2196 && TREE_CODE (base) == MEM_REF)
2198 ee = expand_simple_operations (TREE_OPERAND (base, 0), stop,
2199 cache);
2200 return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
2201 wide_int_to_tree (sizetype,
2202 mem_ref_offset (base)
2203 + offset));
2207 return expr;
2210 switch (code)
2212 CASE_CONVERT:
2213 /* Casts are simple. */
2214 ee = expand_simple_operations (e, stop, cache);
2215 return fold_build1 (code, TREE_TYPE (expr), ee);
2217 case PLUS_EXPR:
2218 case MINUS_EXPR:
2219 if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
2220 && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
2221 return expr;
2222 /* Fallthru. */
2223 case POINTER_PLUS_EXPR:
2224 /* And increments and decrements by a constant are simple. */
2225 e1 = gimple_assign_rhs2 (stmt);
2226 if (!is_gimple_min_invariant (e1))
2227 return expr;
2229 ee = expand_simple_operations (e, stop, cache);
2230 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
2232 default:
2233 return expr;
2237 tree
2238 expand_simple_operations (tree expr, tree stop)
2240 hash_map<tree, tree> cache;
2241 return expand_simple_operations (expr, stop, cache);
2244 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2245 expression (or EXPR unchanged, if no simplification was possible). */
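/* Editor's illustration (hypothetical names): with COND n_1 == 0 and
   EXPR n_1 < 5, substituting 0 for n_1 in EXPR gives 0 < 5, which folds
   to a nonzero constant, so EXPR simplifies to true. */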
2247 static tree
2248 tree_simplify_using_condition_1 (tree cond, tree expr)
2250 bool changed;
2251 tree e, e0, e1, e2, notcond;
2252 enum tree_code code = TREE_CODE (expr);
2254 if (code == INTEGER_CST)
2255 return expr;
2257 if (code == TRUTH_OR_EXPR
2258 || code == TRUTH_AND_EXPR
2259 || code == COND_EXPR)
2261 changed = false;
2263 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
2264 if (TREE_OPERAND (expr, 0) != e0)
2265 changed = true;
2267 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
2268 if (TREE_OPERAND (expr, 1) != e1)
2269 changed = true;
2271 if (code == COND_EXPR)
2273 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
2274 if (TREE_OPERAND (expr, 2) != e2)
2275 changed = true;
2277 else
2278 e2 = NULL_TREE;
2280 if (changed)
2282 if (code == COND_EXPR)
2283 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2284 else
2285 expr = fold_build2 (code, boolean_type_node, e0, e1);
2288 return expr;
2291 /* In case COND is an equality, we may be able to simplify EXPR by copy/constant
2292 propagation, and vice versa. Fold does not handle this, since it is
2293 considered too expensive. */
2294 if (TREE_CODE (cond) == EQ_EXPR)
2296 e0 = TREE_OPERAND (cond, 0);
2297 e1 = TREE_OPERAND (cond, 1);
2299 /* We know that e0 == e1. Check whether we can simplify expr
2300 using this fact. */
2301 e = simplify_replace_tree (expr, e0, e1);
2302 if (integer_zerop (e) || integer_nonzerop (e))
2303 return e;
2305 e = simplify_replace_tree (expr, e1, e0);
2306 if (integer_zerop (e) || integer_nonzerop (e))
2307 return e;
2309 if (TREE_CODE (expr) == EQ_EXPR)
2311 e0 = TREE_OPERAND (expr, 0);
2312 e1 = TREE_OPERAND (expr, 1);
2314 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
2315 e = simplify_replace_tree (cond, e0, e1);
2316 if (integer_zerop (e))
2317 return e;
2318 e = simplify_replace_tree (cond, e1, e0);
2319 if (integer_zerop (e))
2320 return e;
2322 if (TREE_CODE (expr) == NE_EXPR)
2324 e0 = TREE_OPERAND (expr, 0);
2325 e1 = TREE_OPERAND (expr, 1);
2327 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2328 e = simplify_replace_tree (cond, e0, e1);
2329 if (integer_zerop (e))
2330 return boolean_true_node;
2331 e = simplify_replace_tree (cond, e1, e0);
2332 if (integer_zerop (e))
2333 return boolean_true_node;
2336 /* Check whether COND ==> EXPR. */
2337 notcond = invert_truthvalue (cond);
2338 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
2339 if (e && integer_nonzerop (e))
2340 return e;
2342 /* Check whether COND ==> not EXPR. */
2343 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
2344 if (e && integer_zerop (e))
2345 return e;
2347 return expr;
2350 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2351 expression (or EXPR unchanged, if no simplification was possible).
2352 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2353 of simple operations in definitions of ssa names in COND are expanded,
2354 so that things like casts or incrementing the value of the bound before
2355 the loop do not cause us to fail. */
2357 static tree
2358 tree_simplify_using_condition (tree cond, tree expr)
2360 cond = expand_simple_operations (cond);
2362 return tree_simplify_using_condition_1 (cond, expr);
2365 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2366 Returns the simplified expression (or EXPR unchanged, if no
2367 simplification was possible). */
2369 tree
2370 simplify_using_initial_conditions (class loop *loop, tree expr)
2372 edge e;
2373 basic_block bb;
2374 gimple *stmt;
2375 tree cond, expanded, backup;
2376 int cnt = 0;
2378 if (TREE_CODE (expr) == INTEGER_CST)
2379 return expr;
2381 backup = expanded = expand_simple_operations (expr);
2383 /* Limit walking the dominators to avoid quadratic behavior in
2384 the number of BBs times the number of loops in degenerate
2385 cases. */
2386 for (bb = loop->header;
2387 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2388 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2390 if (!single_pred_p (bb))
2391 continue;
2392 e = single_pred_edge (bb);
2394 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2395 continue;
2397 stmt = last_stmt (e->src);
2398 cond = fold_build2 (gimple_cond_code (stmt),
2399 boolean_type_node,
2400 gimple_cond_lhs (stmt),
2401 gimple_cond_rhs (stmt));
2402 if (e->flags & EDGE_FALSE_VALUE)
2403 cond = invert_truthvalue (cond);
2404 expanded = tree_simplify_using_condition (cond, expanded);
2405 /* Break if EXPR has been simplified to a constant value. */
2406 if (expanded
2407 && (integer_zerop (expanded) || integer_nonzerop (expanded)))
2408 return expanded;
2410 ++cnt;
2413 /* Return the original expression if no simplification is done. */
2414 return operand_equal_p (backup, expanded, 0) ? expr : expanded;
2417 /* Tries to simplify EXPR using the evolutions of the loop invariants
2418 in the superloops of LOOP. Returns the simplified expression
2419 (or EXPR unchanged, if no simplification was possible). */
2421 static tree
2422 simplify_using_outer_evolutions (class loop *loop, tree expr)
2424 enum tree_code code = TREE_CODE (expr);
2425 bool changed;
2426 tree e, e0, e1, e2;
2428 if (is_gimple_min_invariant (expr))
2429 return expr;
2431 if (code == TRUTH_OR_EXPR
2432 || code == TRUTH_AND_EXPR
2433 || code == COND_EXPR)
2435 changed = false;
2437 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2438 if (TREE_OPERAND (expr, 0) != e0)
2439 changed = true;
2441 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2442 if (TREE_OPERAND (expr, 1) != e1)
2443 changed = true;
2445 if (code == COND_EXPR)
2447 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2448 if (TREE_OPERAND (expr, 2) != e2)
2449 changed = true;
2451 else
2452 e2 = NULL_TREE;
2454 if (changed)
2456 if (code == COND_EXPR)
2457 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2458 else
2459 expr = fold_build2 (code, boolean_type_node, e0, e1);
2462 return expr;
2465 e = instantiate_parameters (loop, expr);
2466 if (is_gimple_min_invariant (e))
2467 return e;
2469 return expr;
2472 /* Returns true if EXIT is the only possible exit from LOOP. */
2474 bool
2475 loop_only_exit_p (const class loop *loop, basic_block *body, const_edge exit)
2477 gimple_stmt_iterator bsi;
2478 unsigned i;
2480 if (exit != single_exit (loop))
2481 return false;
2483 for (i = 0; i < loop->num_nodes; i++)
2484 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2485 if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
2486 return false;
2488 return true;
2491 /* Stores a description of the number of iterations of LOOP derived from
2492 EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
2493 information could be derived (and the fields of NITER have the meaning
2494 described in the comments at the class tree_niter_desc declaration),
2495 false otherwise. When EVERY_ITERATION is true, only tests that are known
2496 to be executed every iteration are considered (i.e. only tests that alone
2497 bound the loop). If AT_STMT is not NULL, this function stores LOOP's
2498 condition statement in it when returning true. */
2500 bool
2501 number_of_iterations_exit_assumptions (class loop *loop, edge exit,
2502 class tree_niter_desc *niter,
2503 gcond **at_stmt, bool every_iteration,
2504 basic_block *body)
2506 gimple *last;
2507 gcond *stmt;
2508 tree type;
2509 tree op0, op1;
2510 enum tree_code code;
2511 affine_iv iv0, iv1;
2512 bool safe;
2514 /* The condition at a fake exit (if it exists) does not control its
2515 execution. */
2516 if (exit->flags & EDGE_FAKE)
2517 return false;
2519 /* Nothing to analyze if the loop is known to be infinite. */
2520 if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
2521 return false;
2523 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2525 if (every_iteration && !safe)
2526 return false;
2528 niter->assumptions = boolean_false_node;
2529 niter->control.base = NULL_TREE;
2530 niter->control.step = NULL_TREE;
2531 niter->control.no_overflow = false;
2532 last = last_stmt (exit->src);
2533 if (!last)
2534 return false;
2535 stmt = dyn_cast <gcond *> (last);
2536 if (!stmt)
2537 return false;
2539 /* We want the condition for staying inside the loop. */
2540 code = gimple_cond_code (stmt);
2541 if (exit->flags & EDGE_TRUE_VALUE)
2542 code = invert_tree_comparison (code, false);
2544 switch (code)
2546 case GT_EXPR:
2547 case GE_EXPR:
2548 case LT_EXPR:
2549 case LE_EXPR:
2550 case NE_EXPR:
2551 break;
2553 default:
2554 return false;
2557 op0 = gimple_cond_lhs (stmt);
2558 op1 = gimple_cond_rhs (stmt);
2559 type = TREE_TYPE (op0);
2561 if (TREE_CODE (type) != INTEGER_TYPE
2562 && !POINTER_TYPE_P (type))
2563 return false;
2565 tree iv0_niters = NULL_TREE;
2566 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2567 op0, &iv0, safe ? &iv0_niters : NULL, false))
2568 return number_of_iterations_popcount (loop, exit, code, niter);
2569 tree iv1_niters = NULL_TREE;
2570 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2571 op1, &iv1, safe ? &iv1_niters : NULL, false))
2572 return false;
2573 /* Give up on the complicated case. */
2574 if (iv0_niters && iv1_niters)
2575 return false;
2577 /* We don't want to see undefined signed overflow warnings while
2578 computing the number of iterations. */
2579 fold_defer_overflow_warnings ();
2581 iv0.base = expand_simple_operations (iv0.base);
2582 iv1.base = expand_simple_operations (iv1.base);
2583 bool body_from_caller = true;
2584 if (!body)
2586 body = get_loop_body (loop);
2587 body_from_caller = false;
2589 bool only_exit_p = loop_only_exit_p (loop, body, exit);
2590 if (!body_from_caller)
2591 free (body);
2592 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2593 only_exit_p, safe))
2595 fold_undefer_and_ignore_overflow_warnings ();
2596 return false;
2599 /* Incorporate the additional assumption implied by the control iv. */
2600 tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
2601 if (iv_niters)
2603 tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
2604 fold_convert (TREE_TYPE (niter->niter),
2605 iv_niters));
2607 if (!integer_nonzerop (assumption))
2608 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2609 niter->assumptions, assumption);
2611 /* Refine upper bound if possible. */
2612 if (TREE_CODE (iv_niters) == INTEGER_CST
2613 && niter->max > wi::to_widest (iv_niters))
2614 niter->max = wi::to_widest (iv_niters);
2617 /* There are no assumptions if the loop is known to be finite. */
2618 if (!integer_zerop (niter->assumptions)
2619 && loop_constraint_set_p (loop, LOOP_C_FINITE))
2620 niter->assumptions = boolean_true_node;
2622 if (optimize >= 3)
2624 niter->assumptions = simplify_using_outer_evolutions (loop,
2625 niter->assumptions);
2626 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2627 niter->may_be_zero);
2628 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2631 niter->assumptions
2632 = simplify_using_initial_conditions (loop,
2633 niter->assumptions);
2634 niter->may_be_zero
2635 = simplify_using_initial_conditions (loop,
2636 niter->may_be_zero);
2638 fold_undefer_and_ignore_overflow_warnings ();
2640 /* If NITER has simplified into a constant, update MAX. */
2641 if (TREE_CODE (niter->niter) == INTEGER_CST)
2642 niter->max = wi::to_widest (niter->niter);
2644 if (at_stmt)
2645 *at_stmt = stmt;
2647 return (!integer_zerop (niter->assumptions));
2651 /* Utility function to check if OP is defined by a stmt
2652 that computes VAL - 1. */
2654 static bool
2655 ssa_defined_by_minus_one_stmt_p (tree op, tree val)
2657 gimple *stmt;
2658 return (TREE_CODE (op) == SSA_NAME
2659 && (stmt = SSA_NAME_DEF_STMT (op))
2660 && is_gimple_assign (stmt)
2661 && (gimple_assign_rhs_code (stmt) == PLUS_EXPR)
2662 && val == gimple_assign_rhs1 (stmt)
2663 && integer_minus_onep (gimple_assign_rhs2 (stmt)));
2667 /* See if LOOP is a popcount implementation; if so, determine NITER for the loop.
2669 We match:
2670 <bb 2>
2671 goto <bb 4>
2673 <bb 3>
2674 _1 = b_11 + -1
2675 b_6 = _1 & b_11
2677 <bb 4>
2678 b_11 = PHI <b_5(D)(2), b_6(3)>
2680 exit block
2681 if (b_11 != 0)
2682 goto <bb 3>
2683 else
2684 goto <bb 5>
2686 OR we match copy-header version:
2687 if (b_5 != 0)
2688 goto <bb 3>
2689 else
2690 goto <bb 4>
2692 <bb 3>
2693 b_11 = PHI <b_5(2), b_6(3)>
2694 _1 = b_11 + -1
2695 b_6 = _1 & b_11
2697 exit block
2698 if (b_6 != 0)
2699 goto <bb 3>
2700 else
2701 goto <bb 4>
2703 If the popcount pattern matches, update NITER accordingly,
2704 i.e., set NITER based on __builtin_popcount (b), and
2705 return true if we did, false otherwise.
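/* Editor's note -- an illustrative source form, not from the sources: both
   shapes above correspond to the classic Kernighan bit-counting loop
     while (b)
       b &= b - 1;
   where each iteration clears the lowest set bit of b, so the loop body
   executes exactly __builtin_popcount (b) times. */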
2709 static bool
2710 number_of_iterations_popcount (loop_p loop, edge exit,
2711 enum tree_code code,
2712 class tree_niter_desc *niter)
2714 bool adjust = true;
2715 tree iter;
2716 HOST_WIDE_INT max;
2718 tree fn = NULL_TREE;
2720 /* Check that the loop-terminating branch is of the form
2721 if (b != 0). */
2722 gimple *stmt = last_stmt (exit->src);
2723 if (!stmt
2724 || gimple_code (stmt) != GIMPLE_COND
2725 || code != NE_EXPR
2726 || !integer_zerop (gimple_cond_rhs (stmt))
2727 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME)
2728 return false;
2730 gimple *and_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2732 /* Depending on whether copy-header was performed, the feeding PHI stmt
2733 might be in the loop header or the loop latch; handle both. */
2734 if (gimple_code (and_stmt) == GIMPLE_PHI
2735 && gimple_bb (and_stmt) == loop->header
2736 && gimple_phi_num_args (and_stmt) == 2
2737 && (TREE_CODE (gimple_phi_arg_def (and_stmt,
2738 loop_latch_edge (loop)->dest_idx))
2739 == SSA_NAME))
2741 /* The SSA name used in the exit condition is defined by the PHI stmt
2742 b_11 = PHI <b_5(D)(2), b_6(3)>;
2743 from the PHI stmt, get the and_stmt
2744 b_6 = _1 & b_11. */
2745 tree t = gimple_phi_arg_def (and_stmt, loop_latch_edge (loop)->dest_idx);
2746 and_stmt = SSA_NAME_DEF_STMT (t);
2747 adjust = false;
2750 /* Make sure it is indeed an and stmt (b_6 = _1 & b_11). */
2751 if (!is_gimple_assign (and_stmt)
2752 || gimple_assign_rhs_code (and_stmt) != BIT_AND_EXPR)
2753 return false;
2755 tree b_11 = gimple_assign_rhs1 (and_stmt);
2756 tree _1 = gimple_assign_rhs2 (and_stmt);
2758 /* Check that _1 is defined by b_11 + -1 (_1 = b_11 + -1).
2759 Also make sure that b_11 is the same in the and_stmt and in the stmt defining _1.
2760 Also canonicalize if _1 and b_11 are reversed. */
2761 if (ssa_defined_by_minus_one_stmt_p (b_11, _1))
2762 std::swap (b_11, _1);
2763 else if (ssa_defined_by_minus_one_stmt_p (_1, b_11))
2765 else
2766 return false;
2767 /* Check the recurrence:
2768 ... = PHI <b_5(2), b_6(3)>. */
2769 gimple *phi = SSA_NAME_DEF_STMT (b_11);
2770 if (gimple_code (phi) != GIMPLE_PHI
2771 || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2772 || (gimple_assign_lhs (and_stmt)
2773 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2774 return false;
2776 /* We found a match. Get the corresponding popcount builtin. */
2777 tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2778 if (TYPE_PRECISION (TREE_TYPE (src)) <= TYPE_PRECISION (integer_type_node))
2779 fn = builtin_decl_implicit (BUILT_IN_POPCOUNT);
2780 else if (TYPE_PRECISION (TREE_TYPE (src))
2781 == TYPE_PRECISION (long_integer_type_node))
2782 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTL);
2783 else if (TYPE_PRECISION (TREE_TYPE (src))
2784 == TYPE_PRECISION (long_long_integer_type_node)
2785 || (TYPE_PRECISION (TREE_TYPE (src))
2786 == 2 * TYPE_PRECISION (long_long_integer_type_node)))
2787 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTLL);
2789 if (!fn)
2790 return false;
2792 /* Update the NITER fields accordingly. */
2793 tree utype = unsigned_type_for (TREE_TYPE (src));
2794 src = fold_convert (utype, src);
2795 if (TYPE_PRECISION (TREE_TYPE (src)) < TYPE_PRECISION (integer_type_node))
2796 src = fold_convert (unsigned_type_node, src);
2797 tree call;
2798 if (TYPE_PRECISION (TREE_TYPE (src))
2799 == 2 * TYPE_PRECISION (long_long_integer_type_node))
2801 int prec = TYPE_PRECISION (long_long_integer_type_node);
2802 tree src1 = fold_convert (long_long_unsigned_type_node,
2803 fold_build2 (RSHIFT_EXPR, TREE_TYPE (src),
2804 unshare_expr (src),
2805 build_int_cst (integer_type_node,
2806 prec)));
2807 tree src2 = fold_convert (long_long_unsigned_type_node, src);
2808 call = build_call_expr (fn, 1, src1);
2809 call = fold_build2 (PLUS_EXPR, TREE_TYPE (call), call,
2810 build_call_expr (fn, 1, src2));
2811 call = fold_convert (utype, call);
2813 else
2814 call = fold_convert (utype, build_call_expr (fn, 1, src));
2815 if (adjust)
2816 iter = fold_build2 (MINUS_EXPR, utype, call, build_int_cst (utype, 1));
2817 else
2818 iter = call;
2820 if (TREE_CODE (call) == INTEGER_CST)
2821 max = tree_to_uhwi (call);
2822 else
2823 max = TYPE_PRECISION (TREE_TYPE (src));
2824 if (adjust)
2825 max = max - 1;
2827 niter->niter = iter;
2828 niter->assumptions = boolean_true_node;
2830 if (adjust)
2832 tree may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
2833 build_zero_cst (TREE_TYPE (src)));
2834 niter->may_be_zero
2835 = simplify_using_initial_conditions (loop, may_be_zero);
2837 else
2838 niter->may_be_zero = boolean_false_node;
2840 niter->max = max;
2841 niter->bound = NULL_TREE;
2842 niter->cmp = ERROR_MARK;
2843 return true;
2847 /* Like number_of_iterations_exit_assumptions, but return TRUE only if
2848 the niter information holds unconditionally. */
2850 bool
2851 number_of_iterations_exit (class loop *loop, edge exit,
2852 class tree_niter_desc *niter,
2853 bool warn, bool every_iteration,
2854 basic_block *body)
2856 gcond *stmt;
2857 if (!number_of_iterations_exit_assumptions (loop, exit, niter,
2858 &stmt, every_iteration, body))
2859 return false;
2861 if (integer_nonzerop (niter->assumptions))
2862 return true;
2864 if (warn && dump_enabled_p ())
2865 dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
2866 "missed loop optimization: niters analysis ends up "
2867 "with assumptions.\n");
2869 return false;
2872 /* Try to determine the number of iterations of LOOP. If we succeed,
2873 the expression giving the number of iterations is returned and *EXIT is
2874 set to the edge from which the information was obtained. Otherwise
2875 chrec_dont_know is returned. */
2877 tree
2878 find_loop_niter (class loop *loop, edge *exit)
2880 unsigned i;
2881 auto_vec<edge> exits = get_loop_exit_edges (loop);
2882 edge ex;
2883 tree niter = NULL_TREE, aniter;
2884 class tree_niter_desc desc;
2886 *exit = NULL;
2887 FOR_EACH_VEC_ELT (exits, i, ex)
2889 if (!number_of_iterations_exit (loop, ex, &desc, false))
2890 continue;
2892 if (integer_nonzerop (desc.may_be_zero))
2894 /* We exit in the first iteration through this exit.
2895 We won't find anything better. */
2896 niter = build_int_cst (unsigned_type_node, 0);
2897 *exit = ex;
2898 break;
2901 if (!integer_zerop (desc.may_be_zero))
2902 continue;
2904 aniter = desc.niter;
2906 if (!niter)
2908 /* Nothing recorded yet. */
2909 niter = aniter;
2910 *exit = ex;
2911 continue;
2914 /* Prefer constants, the lower the better. */
2915 if (TREE_CODE (aniter) != INTEGER_CST)
2916 continue;
2918 if (TREE_CODE (niter) != INTEGER_CST)
2920 niter = aniter;
2921 *exit = ex;
2922 continue;
2925 if (tree_int_cst_lt (aniter, niter))
2927 niter = aniter;
2928 *exit = ex;
2929 continue;
2933 return niter ? niter : chrec_dont_know;
2936 /* Return true if the loop is known to have a bounded number of iterations. */
2938 bool
2939 finite_loop_p (class loop *loop)
2941 widest_int nit;
2942 int flags;
2944 flags = flags_from_decl_or_type (current_function_decl);
2945 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2947 if (dump_file && (dump_flags & TDF_DETAILS))
2948 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2949 loop->num);
2950 return true;
2953 if (loop->any_upper_bound
2954 || max_loop_iterations (loop, &nit))
2956 if (dump_file && (dump_flags & TDF_DETAILS))
2957 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2958 loop->num);
2959 return true;
2962 if (loop->finite_p)
2964 unsigned i;
2965 auto_vec<edge> exits = get_loop_exit_edges (loop);
2966 edge ex;
2968 /* If the loop has a normal exit, we can assume it will terminate. */
2969 FOR_EACH_VEC_ELT (exits, i, ex)
2970 if (!(ex->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_FAKE)))
2972 if (dump_file)
2973 fprintf (dump_file, "Assume loop %i to be finite: it has an exit "
2974 "and -ffinite-loops is on.\n", loop->num);
2975 return true;
2979 return false;
2984 Analysis of the number of iterations of a loop by brute-force evaluation.
2988 /* Bound on the number of iterations we try to evaluate. */
2990 #define MAX_ITERATIONS_TO_TRACK \
2991 ((unsigned) param_max_iterations_to_track)
2993 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2994 result by a chain of operations in which all but exactly one of the
2995 operands of each operation are constants. */
2997 static gphi *
2998 chain_of_csts_start (class loop *loop, tree x)
3000 gimple *stmt = SSA_NAME_DEF_STMT (x);
3001 tree use;
3002 basic_block bb = gimple_bb (stmt);
3003 enum tree_code code;
3005 if (!bb
3006 || !flow_bb_inside_loop_p (loop, bb))
3007 return NULL;
3009 if (gimple_code (stmt) == GIMPLE_PHI)
3011 if (bb == loop->header)
3012 return as_a <gphi *> (stmt);
3014 return NULL;
3017 if (gimple_code (stmt) != GIMPLE_ASSIGN
3018 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
3019 return NULL;
3021 code = gimple_assign_rhs_code (stmt);
3022 if (gimple_references_memory_p (stmt)
3023 || TREE_CODE_CLASS (code) == tcc_reference
3024 || (code == ADDR_EXPR
3025 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
3026 return NULL;
3028 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
3029 if (use == NULL_TREE)
3030 return NULL;
3032 return chain_of_csts_start (loop, use);
3035 /* Determines whether the expression X is derived from a result of a phi node
3036 in the header of LOOP such that
3038 * the derivation of X consists only of operations with constants,
3039 * the initial value of the phi node is constant, and
3040 * the value of the phi node in the next iteration can be derived from the
3041 value in the current iteration by a chain of operations with constants,
3042 or is also a constant.
3044 If such a phi node exists, it is returned, otherwise NULL is returned. */
3046 static gphi *
3047 get_base_for (class loop *loop, tree x)
3049 gphi *phi;
3050 tree init, next;
3052 if (is_gimple_min_invariant (x))
3053 return NULL;
3055 phi = chain_of_csts_start (loop, x);
3056 if (!phi)
3057 return NULL;
3059 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3060 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3062 if (!is_gimple_min_invariant (init))
3063 return NULL;
3065 if (TREE_CODE (next) == SSA_NAME
3066 && chain_of_csts_start (loop, next) != phi)
3067 return NULL;
3069 return phi;
3072 /* Given an expression X, then
3074 * if X is NULL_TREE, we return the constant BASE.
3075 * if X is a constant, we return the constant X.
3076 * otherwise X is an SSA name, whose value in the considered loop is derived
3077 by a chain of operations with constants from a result of a phi node in
3078 the header of the loop. Then we return the value of X when the value of
3079 the result of this phi node is given by the constant BASE. */
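/* Editor's illustration (hypothetical SSA names): with the defining chain
     x_2 = x_1 * 2;
     y_3 = x_2 + 1;
   where x_1 is the result of the header phi node, get_val_for (y_3, 3)
   evaluates the chain recursively and returns the constant 3 * 2 + 1 = 7. */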
3081 static tree
3082 get_val_for (tree x, tree base)
3084 gimple *stmt;
3086 gcc_checking_assert (is_gimple_min_invariant (base));
3088 if (!x)
3089 return base;
3090 else if (is_gimple_min_invariant (x))
3091 return x;
3093 stmt = SSA_NAME_DEF_STMT (x);
3094 if (gimple_code (stmt) == GIMPLE_PHI)
3095 return base;
3097 gcc_checking_assert (is_gimple_assign (stmt));
3099 /* STMT must be either an assignment of a single SSA name or an
3100 expression involving an SSA name and a constant. Try to fold that
3101 expression using the value for the SSA name. */
3102 if (gimple_assign_ssa_name_copy_p (stmt))
3103 return get_val_for (gimple_assign_rhs1 (stmt), base);
3104 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
3105 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
3106 return fold_build1 (gimple_assign_rhs_code (stmt),
3107 TREE_TYPE (gimple_assign_lhs (stmt)),
3108 get_val_for (gimple_assign_rhs1 (stmt), base));
3109 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
3111 tree rhs1 = gimple_assign_rhs1 (stmt);
3112 tree rhs2 = gimple_assign_rhs2 (stmt);
3113 if (TREE_CODE (rhs1) == SSA_NAME)
3114 rhs1 = get_val_for (rhs1, base);
3115 else if (TREE_CODE (rhs2) == SSA_NAME)
3116 rhs2 = get_val_for (rhs2, base);
3117 else
3118 gcc_unreachable ();
3119 return fold_build2 (gimple_assign_rhs_code (stmt),
3120 TREE_TYPE (gimple_assign_lhs (stmt)), rhs1, rhs2);
3122 else
3123 gcc_unreachable ();
3127 /* Tries to count the number of iterations of LOOP till it exits by EXIT
3128 by brute force -- i.e. by determining the values of the operands of the
3129 condition at EXIT in the first few iterations of the loop (assuming that
3130 these values are constant) and finding the first iteration in which the
3131 condition is not satisfied. Returns the constant giving the number
3132 of iterations of LOOP if successful, chrec_dont_know otherwise. */
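/* Editor's illustration (a sketch, not from the sources): for
     for (i = 1; i != 16; i *= 2)
       body ();
   scalar evolution cannot represent the geometric induction variable, but
   brute-force evaluation computes the successive values 1, 2, 4, 8, 16 and
   finds that the condition for staying in the loop first fails in
   iteration 4, so the constant 4 is returned. */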
3134 tree
3135 loop_niter_by_eval (class loop *loop, edge exit)
3137 tree acnd;
3138 tree op[2], val[2], next[2], aval[2];
3139 gphi *phi;
3140 gimple *cond;
3141 unsigned i, j;
3142 enum tree_code cmp;
3144 cond = last_stmt (exit->src);
3145 if (!cond || gimple_code (cond) != GIMPLE_COND)
3146 return chrec_dont_know;
3148 cmp = gimple_cond_code (cond);
3149 if (exit->flags & EDGE_TRUE_VALUE)
3150 cmp = invert_tree_comparison (cmp, false);
3152 switch (cmp)
3154 case EQ_EXPR:
3155 case NE_EXPR:
3156 case GT_EXPR:
3157 case GE_EXPR:
3158 case LT_EXPR:
3159 case LE_EXPR:
3160 op[0] = gimple_cond_lhs (cond);
3161 op[1] = gimple_cond_rhs (cond);
3162 break;
3164 default:
3165 return chrec_dont_know;
3168 for (j = 0; j < 2; j++)
3170 if (is_gimple_min_invariant (op[j]))
3172 val[j] = op[j];
3173 next[j] = NULL_TREE;
3174 op[j] = NULL_TREE;
3176 else
3178 phi = get_base_for (loop, op[j]);
3179 if (!phi)
3180 return chrec_dont_know;
3181 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3182 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3186 /* Don't issue signed overflow warnings. */
3187 fold_defer_overflow_warnings ();
3189 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
3191 for (j = 0; j < 2; j++)
3192 aval[j] = get_val_for (op[j], val[j]);
3194 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
3195 if (acnd && integer_zerop (acnd))
3197 fold_undefer_and_ignore_overflow_warnings ();
3198 if (dump_file && (dump_flags & TDF_DETAILS))
3199 fprintf (dump_file,
3200 "Proved that loop %d iterates %d times using brute force.\n",
3201 loop->num, i);
3202 return build_int_cst (unsigned_type_node, i);
3205 for (j = 0; j < 2; j++)
3207 aval[j] = val[j];
3208 val[j] = get_val_for (next[j], val[j]);
3209 if (!is_gimple_min_invariant (val[j]))
3211 fold_undefer_and_ignore_overflow_warnings ();
3212 return chrec_dont_know;
3216 /* If the next iteration would use the same base values
3217 as the current one, there is no point looping further;
3218 all following iterations will be the same as this one. */
3219 if (val[0] == aval[0] && val[1] == aval[1])
3220 break;
3223 fold_undefer_and_ignore_overflow_warnings ();
3225 return chrec_dont_know;
3228 /* Finds the exit of LOOP through which the loop exits after a constant
3229 number of iterations and stores the exit edge to *EXIT. The constant
3230 giving the number of iterations of LOOP is returned. The number of
3231 iterations is determined using loop_niter_by_eval (i.e. by brute-force
3232 evaluation). If we are unable to find an exit for which loop_niter_by_eval
3233 determines the number of iterations, chrec_dont_know is returned. */
3235 tree
3236 find_loop_niter_by_eval (class loop *loop, edge *exit)
3238 unsigned i;
3239 auto_vec<edge> exits = get_loop_exit_edges (loop);
3240 edge ex;
3241 tree niter = NULL_TREE, aniter;
3243 *exit = NULL;
3245 /* Loops with multiple exits are expensive to handle and less important. */
3246 if (!flag_expensive_optimizations
3247 && exits.length () > 1)
3248 return chrec_dont_know;
3250 FOR_EACH_VEC_ELT (exits, i, ex)
3252 if (!just_once_each_iteration_p (loop, ex->src))
3253 continue;
3255 aniter = loop_niter_by_eval (loop, ex);
3256 if (chrec_contains_undetermined (aniter))
3257 continue;
3259 if (niter
3260 && !tree_int_cst_lt (aniter, niter))
3261 continue;
3263 niter = aniter;
3264 *exit = ex;
3267 return niter ? niter : chrec_dont_know;
3272 Analysis of upper bounds on the number of iterations of a loop.
3276 static widest_int derive_constant_upper_bound_ops (tree, tree,
3277 enum tree_code, tree);
3279 /* Returns a constant upper bound on the value of the right-hand side of
3280 an assignment statement STMT. */
3282 static widest_int
3283 derive_constant_upper_bound_assign (gimple *stmt)
3285 enum tree_code code = gimple_assign_rhs_code (stmt);
3286 tree op0 = gimple_assign_rhs1 (stmt);
3287 tree op1 = gimple_assign_rhs2 (stmt);
3289 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
3290 op0, code, op1);
3293 /* Returns a constant upper bound on the value of expression VAL. VAL
3294 is considered to be unsigned. If its type is signed, its value must
3295 be nonnegative. */
3297 static widest_int
3298 derive_constant_upper_bound (tree val)
3300 enum tree_code code;
3301 tree op0, op1, op2;
3303 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
3304 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
3307 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
3308 whose type is TYPE. The expression is considered to be unsigned. If
3309 its type is signed, its value must be nonnegative. */
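/* Editor's illustration (hypothetical operands): for the expression
   (x & 255) + 3, the SSA_NAME and BIT_AND_EXPR cases below bound x & 255
   by 255, and the PLUS_EXPR case then yields the constant upper bound
   255 + 3 = 258, provided that value fits in TYPE. */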
3311 static widest_int
3312 derive_constant_upper_bound_ops (tree type, tree op0,
3313 enum tree_code code, tree op1)
3315 tree subtype, maxt;
3316 widest_int bnd, max, cst;
3317 gimple *stmt;
3319 if (INTEGRAL_TYPE_P (type))
3320 maxt = TYPE_MAX_VALUE (type);
3321 else
3322 maxt = upper_bound_in_type (type, type);
3324 max = wi::to_widest (maxt);
3326 switch (code)
3328 case INTEGER_CST:
3329 return wi::to_widest (op0);
3331 CASE_CONVERT:
3332 subtype = TREE_TYPE (op0);
3333 if (!TYPE_UNSIGNED (subtype)
3334 /* If TYPE is also signed, the fact that VAL is nonnegative implies
3335 that OP0 is nonnegative. */
3336 && TYPE_UNSIGNED (type)
3337 && !tree_expr_nonnegative_p (op0))
3339 /* If we cannot prove that the casted expression is nonnegative,
3340 we cannot establish a more useful upper bound than the precision
3341 of the type gives us. */
3342 return max;
3345 /* We now know that op0 is a nonnegative value. Try deriving an upper
3346 bound for it. */
3347 bnd = derive_constant_upper_bound (op0);
3349 /* If the bound does not fit in TYPE, max. value of TYPE could be
3350 attained. */
3351 if (wi::ltu_p (max, bnd))
3352 return max;
3354 return bnd;
3356 case PLUS_EXPR:
3357 case POINTER_PLUS_EXPR:
3358 case MINUS_EXPR:
3359 if (TREE_CODE (op1) != INTEGER_CST
3360 || !tree_expr_nonnegative_p (op0))
3361 return max;
3363 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
3364 choose the most logical way to treat this constant regardless
3365 of the signedness of the type. */
3366 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
3367 if (code != MINUS_EXPR)
3368 cst = -cst;
3370 bnd = derive_constant_upper_bound (op0);
3372 if (wi::neg_p (cst))
3374 cst = -cst;
3375 /* Avoid CST == 0x80000... */
3376 if (wi::neg_p (cst))
3377 return max;
3379 /* OP0 + CST. We need to check that
3380 BND <= MAX (type) - CST. */
3382 widest_int mmax = max - cst;
3383 if (wi::leu_p (bnd, mmax))
3384 return max;
3386 return bnd + cst;
3388 else
3390 /* OP0 - CST, where CST >= 0.
3392 If TYPE is signed, we have already verified that OP0 >= 0, and we
3393 know that the result is nonnegative. This implies that
3394 VAL <= BND - CST.
3396 If TYPE is unsigned, we must additionally know that OP0 >= CST,
3397 otherwise the operation underflows.
3400 /* This should only happen if the type is unsigned; however, for
3401 buggy programs that use overflowing signed arithmetic even with
3402 -fno-wrapv, this condition may also be true for signed values. */
3403 if (wi::ltu_p (bnd, cst))
3404 return max;
3406 if (TYPE_UNSIGNED (type))
3408 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
3409 wide_int_to_tree (type, cst));
3410 if (!tem || integer_nonzerop (tem))
3411 return max;
3414 bnd -= cst;
3417 return bnd;
3419 case FLOOR_DIV_EXPR:
3420 case EXACT_DIV_EXPR:
3421 if (TREE_CODE (op1) != INTEGER_CST
3422 || tree_int_cst_sign_bit (op1))
3423 return max;
3425 bnd = derive_constant_upper_bound (op0);
3426 return wi::udiv_floor (bnd, wi::to_widest (op1));
3428 case BIT_AND_EXPR:
3429 if (TREE_CODE (op1) != INTEGER_CST
3430 || tree_int_cst_sign_bit (op1))
3431 return max;
3432 return wi::to_widest (op1);
3434 case SSA_NAME:
3435 stmt = SSA_NAME_DEF_STMT (op0);
3436 if (gimple_code (stmt) != GIMPLE_ASSIGN
3437 || gimple_assign_lhs (stmt) != op0)
3438 return max;
3439 return derive_constant_upper_bound_assign (stmt);
3441 default:
3442 return max;
3446 /* Emit a -Waggressive-loop-optimizations warning if needed. */
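/* Editor's illustration (a hypothetical source loop; the diagnostic text is
   the one used by the warning_at call below): for
     int a[4];
     for (int i = 0; i < 8; i++)
       a[i] = i;
   the access a[4] invokes undefined behavior, which gives a smaller
   iteration bound than the known constant bound, so the compiler may emit
   "iteration 4 invokes undefined behavior". */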
3448 static void
3449 do_warn_aggressive_loop_optimizations (class loop *loop,
3450 widest_int i_bound, gimple *stmt)
3452 /* Don't warn if the loop doesn't have known constant bound. */
3453 if (!loop->nb_iterations
3454 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
3455 || !warn_aggressive_loop_optimizations
3456 /* To avoid warning multiple times for the same loop,
3457 only start warning when we preserve loops. */
3458 || (cfun->curr_properties & PROP_loops) == 0
3459 /* Only warn once per loop. */
3460 || loop->warned_aggressive_loop_optimizations
3461 /* Only warn if undefined behavior gives us lower estimate than the
3462 known constant bound. */
3463 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
3464 /* And undefined behavior happens unconditionally. */
3465 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
3466 return;
3468 edge e = single_exit (loop);
3469 if (e == NULL)
3470 return;
3472 gimple *estmt = last_stmt (e->src);
3473 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
3474 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
3475 ? UNSIGNED : SIGNED);
3476 auto_diagnostic_group d;
3477 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
3478 "iteration %s invokes undefined behavior", buf))
3479 inform (gimple_location (estmt), "within this loop");
3480 loop->warned_aggressive_loop_optimizations = true;
3483 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
3484 is true if the loop is exited immediately after STMT, and this exit
3485 is taken at the latest when the STMT is executed BOUND + 1 times.
3486 REALISTIC is true if BOUND is expected to be close to the real number
3487 of iterations. UPPER is true if we are sure the loop iterates at most
3488 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
3490 static void
3491 record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
3492 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
3494 widest_int delta;
3496 if (dump_file && (dump_flags & TDF_DETAILS))
3498 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
3499 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
3500 fprintf (dump_file, " is %sexecuted at most ",
3501 upper ? "" : "probably ");
3502 print_generic_expr (dump_file, bound, TDF_SLIM);
3503 fprintf (dump_file, " (bounded by ");
3504 print_decu (i_bound, dump_file);
3505 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
3508 /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
3509 real number of iterations. */
3510 if (TREE_CODE (bound) != INTEGER_CST)
3511 realistic = false;
3512 else
3513 gcc_checking_assert (i_bound == wi::to_widest (bound));
3515 /* If we have a guaranteed upper bound, record it in the appropriate
3516 list, unless this is an !is_exit bound (i.e. undefined behavior in
3517 at_stmt) in a loop with known constant number of iterations. */
3518 if (upper
3519 && (is_exit
3520 || loop->nb_iterations == NULL_TREE
3521 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
3523 class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
3525 elt->bound = i_bound;
3526 elt->stmt = at_stmt;
3527 elt->is_exit = is_exit;
3528 elt->next = loop->bounds;
3529 loop->bounds = elt;
3532 /* If the statement is executed on every path to the loop latch, we can
3533 directly infer the upper bound on the # of iterations of the loop. */
3534 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
3535 upper = false;
3537 /* Update the number of iteration estimates according to the bound.
3538 If at_stmt is an exit then the loop latch is executed at most BOUND times,
3539 otherwise it can be executed BOUND + 1 times. We will lower the estimate
3540 later if such a statement must be executed on the last iteration. */
3541 if (is_exit)
3542 delta = 0;
3543 else
3544 delta = 1;
3545 widest_int new_i_bound = i_bound + delta;
3547 /* If an overflow occurred, ignore the result. */
3548 if (wi::ltu_p (new_i_bound, delta))
3549 return;
3551 if (upper && !is_exit)
3552 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3553 record_niter_bound (loop, new_i_bound, realistic, upper);
3556 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3557 and doesn't overflow. */
3559 static void
3560 record_control_iv (class loop *loop, class tree_niter_desc *niter)
3562 struct control_iv *iv;
3564 if (!niter->control.base || !niter->control.step)
3565 return;
3567 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3568 return;
3570 iv = ggc_alloc<control_iv> ();
3571 iv->base = niter->control.base;
3572 iv->step = niter->control.step;
3573 iv->next = loop->control_ivs;
3574 loop->control_ivs = iv;
3576 return;
3579 /* This function returns TRUE if the conditions below are satisfied:
3580 1) VAR is an SSA variable.
3581 2) VAR is an IV:{base, step} in its defining loop.
3582 3) The IV doesn't overflow.
3583 4) Both base and step are integer constants.
3584 5) Base is the MIN/MAX value, depending on IS_MIN.
3585 Stores the value of base to INIT correspondingly. */
3587 static bool
3588 get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
3590 if (TREE_CODE (var) != SSA_NAME)
3591 return false;
3593 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
3594 class loop *loop = loop_containing_stmt (def_stmt);
3596 if (loop == NULL)
3597 return false;
3599 affine_iv iv;
3600 if (!simple_iv (loop, loop, var, &iv, false))
3601 return false;
3603 if (!iv.no_overflow)
3604 return false;
3606 if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
3607 return false;
3609 if (is_min == tree_int_cst_sign_bit (iv.step))
3610 return false;
3612 *init = wi::to_wide (iv.base);
3613 return true;
3616 /* Record the estimate of the number of iterations of LOOP based on the fact that
3617 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3618 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3619 estimated number of iterations is expected to be close to the real one.
3620 UPPER is true if we are sure the induction variable does not wrap. */
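/* Editor's illustration (hypothetical numbers): for BASE 0, STEP 4 and the
   range <0, 100>, the code below computes delta = 100 - 0 = 100 and
   niter_bound = 100 / 4 = 25, so STMT is recorded as being executed at
   most 25 + 1 = 26 times. */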
3622 static void
3623 record_nonwrapping_iv (class loop *loop, tree base, tree step, gimple *stmt,
3624 tree low, tree high, bool realistic, bool upper)
3626 tree niter_bound, extreme, delta;
3627 tree type = TREE_TYPE (base), unsigned_type;
3628 tree orig_base = base;
3630 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3631 return;
3633 if (dump_file && (dump_flags & TDF_DETAILS))
3635 fprintf (dump_file, "Induction variable (");
3636 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3637 fprintf (dump_file, ") ");
3638 print_generic_expr (dump_file, base, TDF_SLIM);
3639 fprintf (dump_file, " + ");
3640 print_generic_expr (dump_file, step, TDF_SLIM);
3641 fprintf (dump_file, " * iteration does not wrap in statement ");
3642 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3643 fprintf (dump_file, " in loop %d.\n", loop->num);
3646 unsigned_type = unsigned_type_for (type);
3647 base = fold_convert (unsigned_type, base);
3648 step = fold_convert (unsigned_type, step);
3650 if (tree_int_cst_sign_bit (step))
3652 wide_int max;
3653 value_range base_range;
3654 if (get_range_query (cfun)->range_of_expr (base_range, orig_base)
3655 && !base_range.undefined_p ())
3656 max = base_range.upper_bound ();
3657 extreme = fold_convert (unsigned_type, low);
3658 if (TREE_CODE (orig_base) == SSA_NAME
3659 && TREE_CODE (high) == INTEGER_CST
3660 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3661 && (base_range.kind () == VR_RANGE
3662 || get_cst_init_from_scev (orig_base, &max, false))
3663 && wi::gts_p (wi::to_wide (high), max))
3664 base = wide_int_to_tree (unsigned_type, max);
3665 else if (TREE_CODE (base) != INTEGER_CST
3666 && dominated_by_p (CDI_DOMINATORS,
3667 loop->latch, gimple_bb (stmt)))
3668 base = fold_convert (unsigned_type, high);
3669 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3670 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3672 else
3674 wide_int min;
3675 value_range base_range;
3676 if (get_range_query (cfun)->range_of_expr (base_range, orig_base)
3677 && !base_range.undefined_p ())
3678 min = base_range.lower_bound ();
3679 extreme = fold_convert (unsigned_type, high);
3680 if (TREE_CODE (orig_base) == SSA_NAME
3681 && TREE_CODE (low) == INTEGER_CST
3682 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3683 && (base_range.kind () == VR_RANGE
3684 || get_cst_init_from_scev (orig_base, &min, true))
3685 && wi::gts_p (min, wi::to_wide (low)))
3686 base = wide_int_to_tree (unsigned_type, min);
3687 else if (TREE_CODE (base) != INTEGER_CST
3688 && dominated_by_p (CDI_DOMINATORS,
3689 loop->latch, gimple_bb (stmt)))
3690 base = fold_convert (unsigned_type, low);
3691 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3694 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3695 would get out of the range. */
3696 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3697 widest_int max = derive_constant_upper_bound (niter_bound);
3698 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3701 /* Determine information about the number of iterations of LOOP from the index
3702 IDX of a data reference accessed in STMT. RELIABLE is true if STMT is
3703 guaranteed to be executed in every iteration of LOOP. Callback for
3704 for_each_index. */
3706 struct ilb_data
3708 class loop *loop;
3709 gimple *stmt;
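/* Editor's illustration (a sketch, not from the sources): given
     int a[100];
     for (i = 0; ; i++)
       a[i] = 0;
   the index i must stay within [0, 99] for the accesses to be valid, so
   the callback below records the non-wrapping IV {0, +, 1} against those
   bounds, which in turn bounds the number of iterations of the loop. */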
3712 static bool
3713 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3715 struct ilb_data *data = (struct ilb_data *) dta;
3716 tree ev, init, step;
3717 tree low, high, type, next;
3718 bool sign, upper = true, at_end = false;
3719 class loop *loop = data->loop;
3721 if (TREE_CODE (base) != ARRAY_REF)
3722 return true;
3724 /* For arrays at the end of a structure, we are not guaranteed that they
3725 do not really extend beyond their declared size. However, for arrays of
3726 size greater than one, this is unlikely to be intended. */
3727 if (array_at_struct_end_p (base))
3729 at_end = true;
3730 upper = false;
3733 class loop *dloop = loop_containing_stmt (data->stmt);
3734 if (!dloop)
3735 return true;
3737 ev = analyze_scalar_evolution (dloop, *idx);
3738 ev = instantiate_parameters (loop, ev);
3739 init = initial_condition (ev);
3740 step = evolution_part_in_loop_num (ev, loop->num);
3742 if (!init
3743 || !step
3744 || TREE_CODE (step) != INTEGER_CST
3745 || integer_zerop (step)
3746 || tree_contains_chrecs (init, NULL)
3747 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3748 return true;
3750 low = array_ref_low_bound (base);
3751 high = array_ref_up_bound (base);
3753 /* The case of nonconstant bounds could be handled, but it would be
3754 complicated. */
3755 if (TREE_CODE (low) != INTEGER_CST
3756 || !high
3757 || TREE_CODE (high) != INTEGER_CST)
3758 return true;
3759 sign = tree_int_cst_sign_bit (step);
3760 type = TREE_TYPE (step);
3762 /* An array of length 1 at the end of a structure most likely extends
3763 beyond its bounds. */
3764 if (at_end
3765 && operand_equal_p (low, high, 0))
3766 return true;
3768 /* In case the relevant bound of the array does not fit in type, or
3769 it does, but bound + step (in type) still falls within the range of the
3770 array, the index may wrap and still stay within the range of the array
3771 (consider e.g. if the array is indexed by the full range of
3772 unsigned char).
3774 To make things simpler, we require both bounds to fit into type, although
3775 there are cases where this would not be strictly necessary. */
3776 if (!int_fits_type_p (high, type)
3777 || !int_fits_type_p (low, type))
3778 return true;
3779 low = fold_convert (type, low);
3780 high = fold_convert (type, high);
3782 if (sign)
3783 next = fold_binary (PLUS_EXPR, type, low, step);
3784 else
3785 next = fold_binary (PLUS_EXPR, type, high, step);
3787 if (tree_int_cst_compare (low, next) <= 0
3788 && tree_int_cst_compare (next, high) <= 0)
3789 return true;
3791 /* If the access is not executed on every iteration, we must ensure that
3792 overflow cannot make the access valid later. */
3793 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3794 && scev_probably_wraps_p (NULL_TREE,
3795 initial_condition_in_loop_num (ev, loop->num),
3796 step, data->stmt, loop, true))
3797 upper = false;
3799 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
3800 return true;
3803 /* Determine information about the number of iterations of a LOOP from the
3804 bounds of arrays in the data reference REF accessed in STMT. */
3807 static void
3808 infer_loop_bounds_from_ref (class loop *loop, gimple *stmt, tree ref)
3810 struct ilb_data data;
3812 data.loop = loop;
3813 data.stmt = stmt;
3814 for_each_index (&ref, idx_infer_loop_bounds, &data);
3817 /* Determine information about the number of iterations of a LOOP from the
3818 way arrays are used in STMT. */
3821 static void
3822 infer_loop_bounds_from_array (class loop *loop, gimple *stmt)
3824 if (is_gimple_assign (stmt))
3826 tree op0 = gimple_assign_lhs (stmt);
3827 tree op1 = gimple_assign_rhs1 (stmt);
3829 /* For each memory access, analyze its access function
3830 and record a bound on the loop iteration domain. */
3831 if (REFERENCE_CLASS_P (op0))
3832 infer_loop_bounds_from_ref (loop, stmt, op0);
3834 if (REFERENCE_CLASS_P (op1))
3835 infer_loop_bounds_from_ref (loop, stmt, op1);
3837 else if (is_gimple_call (stmt))
3839 tree arg, lhs;
3840 unsigned i, n = gimple_call_num_args (stmt);
3842 lhs = gimple_call_lhs (stmt);
3843 if (lhs && REFERENCE_CLASS_P (lhs))
3844 infer_loop_bounds_from_ref (loop, stmt, lhs);
3846 for (i = 0; i < n; i++)
3848 arg = gimple_call_arg (stmt, i);
3849 if (REFERENCE_CLASS_P (arg))
3850 infer_loop_bounds_from_ref (loop, stmt, arg);
3855 /* Determine information about the number of iterations of a LOOP from the
3856 fact that pointer arithmetic in STMT does not overflow. */
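/* An illustrative example (hypothetical source):

     double *q = ...;   (invariant in the loop)
     for (i = 0; i != n; i++)
       *(q + i) = 0.0;

   The address computation q + i is a POINTER_PLUS_EXPR with a loop
   invariant base; since pointer arithmetic is assumed here not to wrap,
   the evolution of the address bounds the number of iterations.  */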
3858 static void
3859 infer_loop_bounds_from_pointer_arith (class loop *loop, gimple *stmt)
3861 tree def, base, step, scev, type, low, high;
3862 tree var, ptr;
3864 if (!is_gimple_assign (stmt)
3865 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3866 return;
3868 def = gimple_assign_lhs (stmt);
3869 if (TREE_CODE (def) != SSA_NAME)
3870 return;
3872 type = TREE_TYPE (def);
3873 if (!nowrap_type_p (type))
3874 return;
3876 ptr = gimple_assign_rhs1 (stmt);
3877 if (!expr_invariant_in_loop_p (loop, ptr))
3878 return;
3880 var = gimple_assign_rhs2 (stmt);
3881 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3882 return;
3884 class loop *uloop = loop_containing_stmt (stmt);
3885 scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
3886 if (chrec_contains_undetermined (scev))
3887 return;
3889 base = initial_condition_in_loop_num (scev, loop->num);
3890 step = evolution_part_in_loop_num (scev, loop->num);
3892 if (!base || !step
3893 || TREE_CODE (step) != INTEGER_CST
3894 || tree_contains_chrecs (base, NULL)
3895 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3896 return;
3898 low = lower_bound_in_type (type, type);
3899 high = upper_bound_in_type (type, type);
3901 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3902 produce a NULL pointer. The contrary would mean NULL points to an object,
3903 while NULL is supposed to compare unequal with the address of all objects.
3904 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3905 NULL pointer since that would mean wrapping, which we assume here not to
3906 happen. So, we can exclude NULL from the valid range of pointer
3907 arithmetic. */
3908 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3909 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3911 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3914 /* Determine information about the number of iterations of a LOOP from the
3915 fact that signed arithmetic in STMT does not overflow. */
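/* An illustrative example (hypothetical source):

     for (int i = base; i != n; i += 3)
       ...

   When signed overflow is undefined, i may not wrap, so i stays within
   [INT_MIN, INT_MAX] and the number of iterations is bounded by roughly
   (INT_MAX - base) / 3.  */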
3917 static void
3918 infer_loop_bounds_from_signedness (class loop *loop, gimple *stmt)
3920 tree def, base, step, scev, type, low, high;
3922 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3923 return;
3925 def = gimple_assign_lhs (stmt);
3927 if (TREE_CODE (def) != SSA_NAME)
3928 return;
3930 type = TREE_TYPE (def);
3931 if (!INTEGRAL_TYPE_P (type)
3932 || !TYPE_OVERFLOW_UNDEFINED (type))
3933 return;
3935 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3936 if (chrec_contains_undetermined (scev))
3937 return;
3939 base = initial_condition_in_loop_num (scev, loop->num);
3940 step = evolution_part_in_loop_num (scev, loop->num);
3942 if (!base || !step
3943 || TREE_CODE (step) != INTEGER_CST
3944 || tree_contains_chrecs (base, NULL)
3945 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3946 return;
3948 low = lower_bound_in_type (type, type);
3949 high = upper_bound_in_type (type, type);
3950 value_range r;
3951 get_range_query (cfun)->range_of_expr (r, def);
3952 if (r.kind () == VR_RANGE)
3954 low = wide_int_to_tree (type, r.lower_bound ());
3955 high = wide_int_to_tree (type, r.upper_bound ());
3958 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3961 /* The following analyzers extract information on the bounds of LOOP from
3962 the following undefined behaviors:
3964 - data references should not access elements beyond the statically
3965 allocated size,
3967 - signed variables should not overflow when flag_wrapv is not set. */
3970 static void
3971 infer_loop_bounds_from_undefined (class loop *loop, basic_block *bbs)
3973 unsigned i;
3974 gimple_stmt_iterator bsi;
3975 basic_block bb;
3976 bool reliable;
3978 for (i = 0; i < loop->num_nodes; i++)
3980 bb = bbs[i];
3982 /* If BB is not executed in each iteration of the loop, we cannot
3983 use the operations in it to infer a reliable upper bound on the
3984 # of iterations of the loop. However, we can use it as a guess.
3985 Reliable guesses come only from array bounds. */
3986 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3988 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3990 gimple *stmt = gsi_stmt (bsi);
3992 infer_loop_bounds_from_array (loop, stmt);
3994 if (reliable)
3996 infer_loop_bounds_from_signedness (loop, stmt);
3997 infer_loop_bounds_from_pointer_arith (loop, stmt);
4004 /* Compare wide ints, callback for qsort. */
4006 static int
4007 wide_int_cmp (const void *p1, const void *p2)
4009 const widest_int *d1 = (const widest_int *) p1;
4010 const widest_int *d2 = (const widest_int *) p2;
4011 return wi::cmpu (*d1, *d2);
4014 /* Return index of BOUND in BOUNDS array sorted in increasing order.
4015 Lookup by binary search. */
4017 static int
4018 bound_index (const vec<widest_int> &bounds, const widest_int &bound)
4020 unsigned int end = bounds.length ();
4021 unsigned int begin = 0;
4023 /* Find a matching index by means of a binary search. */
4024 while (begin != end)
4026 unsigned int middle = (begin + end) / 2;
4027 widest_int index = bounds[middle];
4029 if (index == bound)
4030 return middle;
4031 else if (wi::ltu_p (index, bound))
4032 begin = middle + 1;
4033 else
4034 end = middle;
4036 gcc_unreachable ();
4039 /* We recorded loop bounds only for statements dominating the loop latch (and
4040 thus executed in each loop iteration).  If there are any bounds on statements not
4041 dominating the loop latch we can improve the estimate by walking the loop
4042 body and seeing if every path from loop header to loop latch contains
4043 some bounded statement. */
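/* An illustrative example (hypothetical CFG):

     for (i = 0; i < n; i++)
       if (c)
         a[i] = ...;   <- bounds the loop to about 100 iterations
       else
         b[i] = ...;   <- bounds the loop to about 50 iterations

   Neither statement dominates the latch, so neither bound was recorded
   as an upper bound on its own.  Yet every path from the header to the
   latch passes through one of them, so the walk below can still bound
   the loop by about max (100, 50) == 100 iterations.  */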
4045 static void
4046 discover_iteration_bound_by_body_walk (class loop *loop)
4048 class nb_iter_bound *elt;
4049 auto_vec<widest_int> bounds;
4050 vec<vec<basic_block> > queues = vNULL;
4051 vec<basic_block> queue = vNULL;
4052 ptrdiff_t queue_index;
4053 ptrdiff_t latch_index = 0;
4055 /* Discover what bounds may interest us. */
4056 for (elt = loop->bounds; elt; elt = elt->next)
4058 widest_int bound = elt->bound;
4060 /* An exit terminates the loop at the given iteration, while a non-exit
4061 bound produces an undefined effect on the next iteration. */
4062 if (!elt->is_exit)
4064 bound += 1;
4065 /* If an overflow occurred, ignore the result. */
4066 if (bound == 0)
4067 continue;
4070 if (!loop->any_upper_bound
4071 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
4072 bounds.safe_push (bound);
4075 /* Exit early if there is nothing to do. */
4076 if (!bounds.exists ())
4077 return;
4079 if (dump_file && (dump_flags & TDF_DETAILS))
4080 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
4082 /* Sort the bounds in increasing order. */
4083 bounds.qsort (wide_int_cmp);
4085 /* For every basic block record the lowest bound that is guaranteed to
4086 terminate the loop. */
4088 hash_map<basic_block, ptrdiff_t> bb_bounds;
4089 for (elt = loop->bounds; elt; elt = elt->next)
4091 widest_int bound = elt->bound;
4092 if (!elt->is_exit)
4094 bound += 1;
4095 /* If an overflow occurred, ignore the result. */
4096 if (bound == 0)
4097 continue;
4100 if (!loop->any_upper_bound
4101 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
4103 ptrdiff_t index = bound_index (bounds, bound);
4104 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
4105 if (!entry)
4106 bb_bounds.put (gimple_bb (elt->stmt), index);
4107 else if ((ptrdiff_t)*entry > index)
4108 *entry = index;
4112 hash_map<basic_block, ptrdiff_t> block_priority;
4114 /* Perform shortest path discovery loop->header ... loop->latch.
4116 The "distance" is the smallest loop bound of any basic block on the
4117 path, and we look for the path whose smallest bound is largest.
4120 To avoid the need for a Fibonacci heap on widest_ints we simply compress
4121 the bounds into indexes into the BOUNDS array and then represent the
4122 queue as an array of queues, one for every index.
4123 An index of BOUNDS.length () means that no bound has been determined
4124 for the execution of the given BB.
4126 BLOCK_PRIORITY is a map recording for each basic block the best (largest)
4127 index it was inserted into the priority queue with. */
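/* For instance (illustrative values), with BOUNDS == {5, 30}, index 0
   stands for the bound 5, index 1 for the bound 30, and index 2
   == BOUNDS.length () for "no bound determined yet"; the walk below
   starts with the header in queues[2].  */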
4128 latch_index = -1;
4130 /* Start walk in loop header with index set to infinite bound. */
4131 queue_index = bounds.length ();
4132 queues.safe_grow_cleared (queue_index + 1, true);
4133 queue.safe_push (loop->header);
4134 queues[queue_index] = queue;
4135 block_priority.put (loop->header, queue_index);
4137 for (; queue_index >= 0; queue_index--)
4139 if (latch_index < queue_index)
4141 while (queues[queue_index].length ())
4143 basic_block bb;
4144 ptrdiff_t bound_index = queue_index;
4145 edge e;
4146 edge_iterator ei;
4148 queue = queues[queue_index];
4149 bb = queue.pop ();
4151 /* Stale entry: we later re-inserted the BB with a higher priority, skip it. */
4152 if (*block_priority.get (bb) > queue_index)
4153 continue;
4155 /* See if we can improve the bound. */
4156 ptrdiff_t *entry = bb_bounds.get (bb);
4157 if (entry && *entry < bound_index)
4158 bound_index = *entry;
4160 /* Insert successors into the queue, watch for the latch edge
4161 and record the greatest index we saw. */
4162 FOR_EACH_EDGE (e, ei, bb->succs)
4164 bool insert = false;
4166 if (loop_exit_edge_p (loop, e))
4167 continue;
4169 if (e == loop_latch_edge (loop)
4170 && latch_index < bound_index)
4171 latch_index = bound_index;
4172 else if (!(entry = block_priority.get (e->dest)))
4174 insert = true;
4175 block_priority.put (e->dest, bound_index);
4177 else if (*entry < bound_index)
4179 insert = true;
4180 *entry = bound_index;
4183 if (insert)
4184 queues[bound_index].safe_push (e->dest);
4188 queues[queue_index].release ();
4191 gcc_assert (latch_index >= 0);
4192 if ((unsigned)latch_index < bounds.length ())
4194 if (dump_file && (dump_flags & TDF_DETAILS))
4196 fprintf (dump_file, "Found better loop bound ");
4197 print_decu (bounds[latch_index], dump_file);
4198 fprintf (dump_file, "\n");
4200 record_niter_bound (loop, bounds[latch_index], false, true);
4203 queues.release ();
4206 /* See if every path across the loop goes through a statement that is known
4207 to not execute at the last iteration.  In that case we can decrease the
4208 iteration count by 1. */
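/* An illustrative example (hypothetical source):

     int a[15], b[15];
     for (i = 0; i < 16; i++)
       if (c)
         a[i] = ...;
       else
         b[i] = ...;

   Both stores are undefined at i == 15 and every path from the header
   to the latch executes one of them, so the last iteration cannot run
   to completion and the upper bound 16 can be lowered to 15.  */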
4210 static void
4211 maybe_lower_iteration_bound (class loop *loop)
4213 hash_set<gimple *> *not_executed_last_iteration = NULL;
4214 class nb_iter_bound *elt;
4215 bool found_exit = false;
4216 auto_vec<basic_block> queue;
4217 bitmap visited;
4219 /* Collect all statements with interesting (i.e. lower than
4220 nb_iterations_upper_bound) bound on them.
4222 TODO: Due to the way record_estimate chooses estimates to store, the bounds
4223 will always be nb_iterations_upper_bound-1.  We can change this to also
4224 record statements not dominating the loop latch and update the walk below
4225 to the shortest path algorithm. */
4226 for (elt = loop->bounds; elt; elt = elt->next)
4228 if (!elt->is_exit
4229 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
4231 if (!not_executed_last_iteration)
4232 not_executed_last_iteration = new hash_set<gimple *>;
4233 not_executed_last_iteration->add (elt->stmt);
4236 if (!not_executed_last_iteration)
4237 return;
4239 /* Start DFS walk in the loop header and see if we can reach the
4240 loop latch or any of the exits (including statements with side
4241 effects that may terminate the loop otherwise) without visiting
4242 any of the statements known to have undefined effect on the last
4243 iteration. */
4244 queue.safe_push (loop->header);
4245 visited = BITMAP_ALLOC (NULL);
4246 bitmap_set_bit (visited, loop->header->index);
4247 found_exit = false;
4251 basic_block bb = queue.pop ();
4252 gimple_stmt_iterator gsi;
4253 bool stmt_found = false;
4255 /* Look for possible exits and statements bounding the execution. */
4256 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4258 gimple *stmt = gsi_stmt (gsi);
4259 if (not_executed_last_iteration->contains (stmt))
4261 stmt_found = true;
4262 break;
4264 if (gimple_has_side_effects (stmt))
4266 found_exit = true;
4267 break;
4270 if (found_exit)
4271 break;
4273 /* If no bounding statement is found, continue the walk. */
4274 if (!stmt_found)
4276 edge e;
4277 edge_iterator ei;
4279 FOR_EACH_EDGE (e, ei, bb->succs)
4281 if (loop_exit_edge_p (loop, e)
4282 || e == loop_latch_edge (loop))
4284 found_exit = true;
4285 break;
4287 if (bitmap_set_bit (visited, e->dest->index))
4288 queue.safe_push (e->dest);
4292 while (queue.length () && !found_exit);
4294 /* If every path through the loop reaches a bounding statement before the
4295 exit, then we know the last iteration of the loop will have undefined
4296 effect and we can decrease the number of iterations. */
4298 if (!found_exit)
4300 if (dump_file && (dump_flags & TDF_DETAILS))
4301 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
4302 "undefined statement must be executed at the last iteration.\n");
4303 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
4304 false, true);
4307 BITMAP_FREE (visited);
4308 delete not_executed_last_iteration;
4311 /* Get the expected upper bound on the number of loop iterations from a
4312 BUILT_IN_EXPECT_WITH_PROBABILITY call feeding the condition COND. */
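/* An illustrative sketch (hypothetical source):

     if (__builtin_expect_with_probability (i >= n, 0, 0.999))
       break;

   The exit condition is expected to be false with probability 0.999,
   i.e. the exit is taken with probability about 0.001 per iteration, so
   the loop is expected to run about 1 / 0.001 == 1000 iterations.  */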
4314 static tree
4315 get_upper_bound_based_on_builtin_expr_with_prob (gcond *cond)
4317 if (cond == NULL)
4318 return NULL_TREE;
4320 tree lhs = gimple_cond_lhs (cond);
4321 if (TREE_CODE (lhs) != SSA_NAME)
4322 return NULL_TREE;
4324 gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
4325 gcall *def = dyn_cast<gcall *> (stmt);
4326 if (def == NULL)
4327 return NULL_TREE;
4329 tree decl = gimple_call_fndecl (def);
4330 if (!decl
4331 || !fndecl_built_in_p (decl, BUILT_IN_EXPECT_WITH_PROBABILITY)
4332 || gimple_call_num_args (stmt) != 3)
4333 return NULL_TREE;
4335 tree c = gimple_call_arg (def, 1);
4336 tree condt = TREE_TYPE (lhs);
4337 tree res = fold_build2 (gimple_cond_code (cond),
4338 condt, c,
4339 gimple_cond_rhs (cond));
4340 if (TREE_CODE (res) != INTEGER_CST)
4341 return NULL_TREE;
4344 tree prob = gimple_call_arg (def, 2);
4345 tree t = TREE_TYPE (prob);
4346 tree one
4347 = build_real_from_int_cst (t,
4348 integer_one_node);
4349 if (integer_zerop (res))
4350 prob = fold_build2 (MINUS_EXPR, t, one, prob);
4351 tree r = fold_build2 (RDIV_EXPR, t, one, prob);
4352 if (TREE_CODE (r) != REAL_CST)
4353 return NULL_TREE;
4355 HOST_WIDE_INT probi
4356 = real_to_integer (TREE_REAL_CST_PTR (r));
4357 return build_int_cst (condt, probi);
4360 /* Records estimates on the number of iterations of LOOP.  Estimates derived
4361 from undefined behavior are used only if flag_aggressive_loop_optimizations is set. */
4363 void
4364 estimate_numbers_of_iterations (class loop *loop)
4366 tree niter, type;
4367 unsigned i;
4368 class tree_niter_desc niter_desc;
4369 edge ex;
4370 widest_int bound;
4371 edge likely_exit;
4373 /* Give up if we have already tried to compute an estimate. */
4374 if (loop->estimate_state != EST_NOT_COMPUTED)
4375 return;
4377 if (dump_file && (dump_flags & TDF_DETAILS))
4378 fprintf (dump_file, "Estimating # of iterations of loop %d\n", loop->num);
4380 loop->estimate_state = EST_AVAILABLE;
4382 /* If we have a measured profile, use it to estimate the number of
4383 iterations.  Normally this is recorded by branch_prob right after
4384 reading the profile.  If, however, we found a new loop, record the
4385 information here.
4387 Explicitly check for profile status so we do not report
4388 wrong prediction hitrates for guessed loop iteration heuristics.
4389 Do not recompute already recorded bounds - we ought to be better at
4390 updating iteration bounds than at updating the profile in general, and
4391 thus recomputing iteration bounds later in the compilation process will
4392 just introduce random roundoff errors. */
4393 if (!loop->any_estimate
4394 && loop->header->count.reliable_p ())
4396 gcov_type nit = expected_loop_iterations_unbounded (loop);
4397 bound = gcov_type_to_wide_int (nit);
4398 record_niter_bound (loop, bound, true, false);
4401 /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
4402 to be constant, we avoid bounds implied by undefined behavior and instead
4403 diagnose those loops with -Waggressive-loop-optimizations. */
4404 number_of_latch_executions (loop);
4406 basic_block *body = get_loop_body (loop);
4407 auto_vec<edge> exits = get_loop_exit_edges (loop, body);
4408 likely_exit = single_likely_exit (loop, exits);
4409 FOR_EACH_VEC_ELT (exits, i, ex)
4411 if (ex == likely_exit)
4413 gimple *stmt = last_stmt (ex->src);
4414 if (stmt != NULL)
4416 gcond *cond = dyn_cast<gcond *> (stmt);
4417 tree niter_bound
4418 = get_upper_bound_based_on_builtin_expr_with_prob (cond);
4419 if (niter_bound != NULL_TREE)
4421 widest_int max = derive_constant_upper_bound (niter_bound);
4422 record_estimate (loop, niter_bound, max, cond,
4423 true, true, false);
4428 if (!number_of_iterations_exit (loop, ex, &niter_desc,
4429 false, false, body))
4430 continue;
4432 niter = niter_desc.niter;
4433 type = TREE_TYPE (niter);
4434 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
4435 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
4436 build_int_cst (type, 0),
4437 niter);
4438 record_estimate (loop, niter, niter_desc.max,
4439 last_stmt (ex->src),
4440 true, ex == likely_exit, true);
4441 record_control_iv (loop, &niter_desc);
4444 if (flag_aggressive_loop_optimizations)
4445 infer_loop_bounds_from_undefined (loop, body);
4446 free (body);
4448 discover_iteration_bound_by_body_walk (loop);
4450 maybe_lower_iteration_bound (loop);
4452 /* If we know the exact number of iterations of this loop, try not to
4453 break code with undefined behavior by not recording a smaller
4454 maximum number of iterations. */
4455 if (loop->nb_iterations
4456 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
4458 loop->any_upper_bound = true;
4459 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
4463 /* Sets NIT to the estimated number of executions of the latch of the
4464 LOOP.  If we have no reliable estimate, the function returns false,
4465 otherwise returns true. */
4468 bool
4469 estimated_loop_iterations (class loop *loop, widest_int *nit)
4471 /* When SCEV information is available, try to update the loop iteration
4472 estimate. Otherwise just return whatever we recorded earlier. */
4473 if (scev_initialized_p ())
4474 estimate_numbers_of_iterations (loop);
4476 return (get_estimated_loop_iterations (loop, nit));
4479 /* Similar to estimated_loop_iterations, but returns the estimate only
4480 if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
4481 on the number of iterations of LOOP could not be derived, returns -1. */
4483 HOST_WIDE_INT
4484 estimated_loop_iterations_int (class loop *loop)
4486 widest_int nit;
4487 HOST_WIDE_INT hwi_nit;
4489 if (!estimated_loop_iterations (loop, &nit))
4490 return -1;
4492 if (!wi::fits_shwi_p (nit))
4493 return -1;
4494 hwi_nit = nit.to_shwi ();
4496 return hwi_nit < 0 ? -1 : hwi_nit;
4500 /* Sets NIT to an upper bound for the maximum number of executions of the
4501 latch of the LOOP. If we have no reliable estimate, the function returns
4502 false, otherwise returns true. */
4504 bool
4505 max_loop_iterations (class loop *loop, widest_int *nit)
4507 /* When SCEV information is available, try to update the loop iteration
4508 estimate. Otherwise just return whatever we recorded earlier. */
4509 if (scev_initialized_p ())
4510 estimate_numbers_of_iterations (loop);
4512 return get_max_loop_iterations (loop, nit);
4515 /* Similar to max_loop_iterations, but returns the estimate only
4516 if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
4517 on the number of iterations of LOOP could not be derived, returns -1. */
4519 HOST_WIDE_INT
4520 max_loop_iterations_int (class loop *loop)
4522 widest_int nit;
4523 HOST_WIDE_INT hwi_nit;
4525 if (!max_loop_iterations (loop, &nit))
4526 return -1;
4528 if (!wi::fits_shwi_p (nit))
4529 return -1;
4530 hwi_nit = nit.to_shwi ();
4532 return hwi_nit < 0 ? -1 : hwi_nit;
4535 /* Sets NIT to a likely upper bound for the maximum number of executions of
4536 the latch of the LOOP.  If we have no reliable estimate, the function
4537 returns false, otherwise returns true. */
4539 bool
4540 likely_max_loop_iterations (class loop *loop, widest_int *nit)
4542 /* When SCEV information is available, try to update the loop iteration
4543 estimate. Otherwise just return whatever we recorded earlier. */
4544 if (scev_initialized_p ())
4545 estimate_numbers_of_iterations (loop);
4547 return get_likely_max_loop_iterations (loop, nit);
4550 /* Similar to likely_max_loop_iterations, but returns the estimate only
4551 if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
4552 on the number of iterations of LOOP could not be derived, returns -1. */
4554 HOST_WIDE_INT
4555 likely_max_loop_iterations_int (class loop *loop)
4557 widest_int nit;
4558 HOST_WIDE_INT hwi_nit;
4560 if (!likely_max_loop_iterations (loop, &nit))
4561 return -1;
4563 if (!wi::fits_shwi_p (nit))
4564 return -1;
4565 hwi_nit = nit.to_shwi ();
4567 return hwi_nit < 0 ? -1 : hwi_nit;
4570 /* Returns an estimate for the number of executions of statements
4571 in the LOOP. For statements before the loop exit, this exceeds
4572 the number of executions of the latch by one. */
4574 HOST_WIDE_INT
4575 estimated_stmt_executions_int (class loop *loop)
4577 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
4578 HOST_WIDE_INT snit;
4580 if (nit == -1)
4581 return -1;
4583 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
4585 /* If the computation overflows, return -1. */
4586 return snit < 0 ? -1 : snit;
4589 /* Sets NIT to the maximum number of executions of the latch of the
4590 LOOP, plus one. If we have no reliable estimate, the function returns
4591 false, otherwise returns true. */
4593 bool
4594 max_stmt_executions (class loop *loop, widest_int *nit)
4596 widest_int nit_minus_one;
4598 if (!max_loop_iterations (loop, nit))
4599 return false;
4601 nit_minus_one = *nit;
4603 *nit += 1;
4605 return wi::gtu_p (*nit, nit_minus_one);
4608 /* Sets NIT to the estimated maximum number of executions of the latch of the
4609 LOOP, plus one. If we have no likely estimate, the function returns
4610 false, otherwise returns true. */
4612 bool
4613 likely_max_stmt_executions (class loop *loop, widest_int *nit)
4615 widest_int nit_minus_one;
4617 if (!likely_max_loop_iterations (loop, nit))
4618 return false;
4620 nit_minus_one = *nit;
4622 *nit += 1;
4624 return wi::gtu_p (*nit, nit_minus_one);
4627 /* Sets NIT to the estimated number of executions of the latch of the
4628 LOOP, plus one. If we have no reliable estimate, the function returns
4629 false, otherwise returns true. */
4631 bool
4632 estimated_stmt_executions (class loop *loop, widest_int *nit)
4634 widest_int nit_minus_one;
4636 if (!estimated_loop_iterations (loop, nit))
4637 return false;
4639 nit_minus_one = *nit;
4641 *nit += 1;
4643 return wi::gtu_p (*nit, nit_minus_one);
4646 /* Records estimates on the number of iterations of loops. */
4648 void
4649 estimate_numbers_of_iterations (function *fn)
4651 /* We don't want to issue signed overflow warnings while getting
4652 loop iteration estimates. */
4653 fold_defer_overflow_warnings ();
4655 for (auto loop : loops_list (fn, 0))
4656 estimate_numbers_of_iterations (loop);
4658 fold_undefer_and_ignore_overflow_warnings ();
4661 /* Returns true if statement S1 dominates statement S2. */
4663 bool
4664 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
4666 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
4668 if (!bb1
4669 || s1 == s2)
4670 return true;
4672 if (bb1 == bb2)
4674 gimple_stmt_iterator bsi;
4676 if (gimple_code (s2) == GIMPLE_PHI)
4677 return false;
4679 if (gimple_code (s1) == GIMPLE_PHI)
4680 return true;
4682 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4683 if (gsi_stmt (bsi) == s1)
4684 return true;
4686 return false;
4689 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4692 /* Returns true when we can prove that the number of executions of
4693 STMT in the loop is at most NITER, according to the bound on
4694 the number of executions of the statement NITER_BOUND->stmt recorded in
4695 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
4697 ??? This code can become quite a CPU hog - we can have many bounds,
4698 and large basic blocks force stmt_dominates_stmt_p to be queried
4699 many times, so the whole thing becomes O(n^2) per
4700 scev_probably_wraps_p invocation (which can be done n times).
4702 It would make more sense (and give better answers) to remember the BB
4703 bounds computed by discover_iteration_bound_by_body_walk. */
4705 static bool
4706 n_of_executions_at_most (gimple *stmt,
4707 class nb_iter_bound *niter_bound,
4708 tree niter)
4710 widest_int bound = niter_bound->bound;
4711 tree nit_type = TREE_TYPE (niter), e;
4712 enum tree_code cmp;
4714 gcc_assert (TYPE_UNSIGNED (nit_type));
4716 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4717 the number of iterations is small. */
4718 if (!wi::fits_to_tree_p (bound, nit_type))
4719 return false;
4721 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4722 times.  This means that:
4724 -- if NITER_BOUND->is_exit is true, then everything after it is executed
4725 at most NITER_BOUND->bound times.
4727 -- if NITER_BOUND->is_exit is false, and we can prove that when STMT is
4728 executed NITER_BOUND->stmt is executed as well in the same iteration,
4729 then STMT is executed at most NITER_BOUND->bound + 1 times.
4731 If we can determine that NITER_BOUND->stmt is always executed
4732 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4733 We conclude this is the case if both statements belong to the same
4734 basic block, STMT comes before NITER_BOUND->stmt, and there are no
4735 statements with side effects in between. */
4737 if (niter_bound->is_exit)
4739 if (stmt == niter_bound->stmt
4740 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4741 return false;
4742 cmp = GE_EXPR;
4744 else
4746 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4748 gimple_stmt_iterator bsi;
4749 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4750 || gimple_code (stmt) == GIMPLE_PHI
4751 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4752 return false;
4754 /* By stmt_dominates_stmt_p we already know that STMT appears
4755 before NITER_BOUND->STMT. Still need to test that the loop
4756 cannot be terminated by a side effect in between. */
4757 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4758 gsi_next (&bsi))
4759 if (gimple_has_side_effects (gsi_stmt (bsi)))
4760 return false;
4761 bound += 1;
4762 if (bound == 0
4763 || !wi::fits_to_tree_p (bound, nit_type))
4764 return false;
4766 cmp = GT_EXPR;
4769 e = fold_binary (cmp, boolean_type_node,
4770 niter, wide_int_to_tree (nit_type, bound));
4771 return e && integer_nonzerop (e);
4774 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
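/* E.g. a signed int qualifies when signed overflow is undefined (no
   -fwrapv), and pointer types always qualify; unsigned types may
   legitimately wrap and therefore do not.  */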
4776 bool
4777 nowrap_type_p (tree type)
4779 if (ANY_INTEGRAL_TYPE_P (type)
4780 && TYPE_OVERFLOW_UNDEFINED (type))
4781 return true;
4783 if (POINTER_TYPE_P (type))
4784 return true;
4786 return false;
4789 /* Return true if we can prove LOOP is exited before evolution of induction
4790 variable {BASE, STEP} overflows with respect to its type bound. */
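/* A worked example (hypothetical values): for {100, +, 10} in unsigned
   char, the distance to the type bound is 255 - 100 == 155, so 155 / 10
   == 15 further steps are safe; if the loop provably runs fewer than 15
   iterations, the IV cannot wrap.  */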
4792 static bool
4793 loop_exits_before_overflow (tree base, tree step,
4794 gimple *at_stmt, class loop *loop)
4796 widest_int niter;
4797 struct control_iv *civ;
4798 class nb_iter_bound *bound;
4799 tree e, delta, step_abs, unsigned_base;
4800 tree type = TREE_TYPE (step);
4801 tree unsigned_type, valid_niter;
4803 /* Don't issue signed overflow warnings. */
4804 fold_defer_overflow_warnings ();
4806 /* Compute the number of iterations before we reach the bound of the
4807 type, and verify that the loop is exited before this occurs. */
4808 unsigned_type = unsigned_type_for (type);
4809 unsigned_base = fold_convert (unsigned_type, base);
4811 if (tree_int_cst_sign_bit (step))
4813 tree extreme = fold_convert (unsigned_type,
4814 lower_bound_in_type (type, type));
4815 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4816 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4817 fold_convert (unsigned_type, step));
4819 else
4821 tree extreme = fold_convert (unsigned_type,
4822 upper_bound_in_type (type, type));
4823 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4824 step_abs = fold_convert (unsigned_type, step);
4827 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
4829 estimate_numbers_of_iterations (loop);
4831 if (max_loop_iterations (loop, &niter)
4832 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4833 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4834 wide_int_to_tree (TREE_TYPE (valid_niter),
4835 niter))) != NULL
4836 && integer_nonzerop (e))
4838 fold_undefer_and_ignore_overflow_warnings ();
4839 return true;
4841 if (at_stmt)
4842 for (bound = loop->bounds; bound; bound = bound->next)
4844 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4846 fold_undefer_and_ignore_overflow_warnings ();
4847 return true;
4850 fold_undefer_and_ignore_overflow_warnings ();
4852 /* Try to prove the loop is exited before {base, step} overflows with the
4853 help of the analyzed loop control IVs.  This is done only for IVs with
4854 constant step because otherwise we don't have the information. */
4855 if (TREE_CODE (step) == INTEGER_CST)
4857 for (civ = loop->control_ivs; civ; civ = civ->next)
4859 enum tree_code code;
4860 tree civ_type = TREE_TYPE (civ->step);
4862 /* Have to consider type difference because operand_equal_p ignores
4863 that for constants. */
4864 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4865 || element_precision (type) != element_precision (civ_type))
4866 continue;
4868 /* Only consider control IVs with the same step. */
4869 if (!operand_equal_p (step, civ->step, 0))
4870 continue;
4872 /* Done proving if this is a no-overflow control IV. */
4873 if (operand_equal_p (base, civ->base, 0))
4874 return true;
4876 /* The control IV is recorded after expanding simple operations,
4877 so here we expand the base and compare it too. */
4878 tree expanded_base = expand_simple_operations (base);
4879 if (operand_equal_p (expanded_base, civ->base, 0))
4880 return true;
4882 /* If this is a before-stepping control IV, in other words, we have
4884 {civ_base, step} = {base + step, step}
4886 Because civ {base + step, step} doesn't overflow during loop
4887 iterations, {base, step} will not overflow if we can prove the
4888 operation "base + step" does not overflow.  Specifically, we try
4889 to prove the conditions below are satisfied:
4891 base <= UPPER_BOUND (type) - step ;; step > 0
4892 base >= LOWER_BOUND (type) - step ;; step < 0
4894 by proving the reverse conditions are false using the loop's initial
4895 condition. */
4896 if (POINTER_TYPE_P (TREE_TYPE (base)))
4897 code = POINTER_PLUS_EXPR;
4898 else
4899 code = PLUS_EXPR;
4901 tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4902 tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
4903 expanded_base, step);
4904 if (operand_equal_p (stepped, civ->base, 0)
4905 || operand_equal_p (expanded_stepped, civ->base, 0))
4907 tree extreme;
4909 if (tree_int_cst_sign_bit (step))
4911 code = LT_EXPR;
4912 extreme = lower_bound_in_type (type, type);
4914 else
4916 code = GT_EXPR;
4917 extreme = upper_bound_in_type (type, type);
4919 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4920 e = fold_build2 (code, boolean_type_node, base, extreme);
4921 e = simplify_using_initial_conditions (loop, e);
4922 if (integer_zerop (e))
4923 return true;
4928 return false;
4931 /* VAR is a SCEV variable whose evolution part is the constant STEP; this
4932 function proves that VAR can't overflow by using value range info.  If
4933 VAR's value range is [MIN, MAX], it can be proven by:
4934 MAX + step doesn't overflow ; if step > 0
4936 MIN + step doesn't underflow ; if step < 0.
4938 We can only do this if VAR is computed in every loop iteration, i.e., VAR's
4939 definition has to dominate the loop latch.  Consider the example below:
4942 unsigned int i;
4944 <bb 3>:
4946 <bb 4>:
4947 # RANGE [0, 4294967294] NONZERO 65535
4948 # i_21 = PHI <0(3), i_18(9)>
4949 if (i_21 != 0)
4950 goto <bb 6>;
4951 else
4952 goto <bb 8>;
4954 <bb 6>:
4955 # RANGE [0, 65533] NONZERO 65535
4956 _6 = i_21 + 4294967295;
4957 # RANGE [0, 65533] NONZERO 65535
4958 _7 = (long unsigned int) _6;
4959 # RANGE [0, 524264] NONZERO 524280
4960 _8 = _7 * 8;
4961 # PT = nonlocal escaped
4962 _9 = a_14 + _8;
4963 *_9 = 0;
4965 <bb 8>:
4966 # RANGE [1, 65535] NONZERO 65535
4967 i_18 = i_21 + 1;
4968 if (i_18 >= 65535)
4969 goto <bb 10>;
4970 else
4971 goto <bb 9>;
4973 <bb 9>:
4974 goto <bb 4>;
4976 <bb 10>:
4977 return;
4980 VAR _6 doesn't overflow only with the pre-condition (i_21 != 0), so here
4981 we can't use _6 to prove no-overflow for _7.  In fact, var _7 takes the
4982 value sequence (4294967295, 0, 1, ..., 65533) during the loop's lifetime,
4983 rather than (4294967295, 4294967296, ...). */
4985 static bool
4986 scev_var_range_cant_overflow (tree var, tree step, class loop *loop)
4988 tree type;
4989 wide_int minv, maxv, diff, step_wi;
4991 if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
4992 return false;
4994 /* Check if VAR is computed in every loop iteration.  It's not the case if
4995 VAR is a default definition or its definition does not dominate the loop's latch. */
4996 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
4997 if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
4998 return false;
5000 value_range r;
5001 get_range_query (cfun)->range_of_expr (r, var);
5002 if (r.kind () != VR_RANGE)
5003 return false;
5005 /* VAR is a SCEV whose evolution part is STEP and whose value range info
5006 is [MIN, MAX]; we can prove it does not overflow under the conditions:
5008 type_MAX - MAX >= step ; if step > 0
5009 MIN - type_MIN >= |step| ; if step < 0.
5011 Otherwise VAR would take a value outside of its value range, which is not possible. */
5012 step_wi = wi::to_wide (step);
5013 type = TREE_TYPE (var);
5014 if (tree_int_cst_sign_bit (step))
5016 diff = r.lower_bound () - wi::to_wide (lower_bound_in_type (type, type));
5017 step_wi = - step_wi;
5019 else
5020 diff = wi::to_wide (upper_bound_in_type (type, type)) - r.upper_bound ();
5022 return (wi::geu_p (diff, step_wi));
5025 /* Return false only when the induction variable BASE + STEP * I is
5026 known to not overflow: i.e. when the number of iterations is small
5027 enough with respect to the step and initial condition in order to
5028 keep the evolution confined in TYPE's bounds.  Return true when the
5029 iv is known to overflow or when the property is not computable.
5031 USE_OVERFLOW_SEMANTICS is true if this function should assume that
5032 the rules for overflow of the given language apply (e.g., that signed
5033 arithmetic in C does not overflow).
5035 If VAR is an SSA variable, this function also returns false if VAR can
5036 be proven not to overflow using value range info. */
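/* A typical query (illustrative): for an unsigned char IV {0, +, 1} in
   a loop known to run at most 100 iterations, the IV cannot reach the
   type bound, so the function returns false, which lets callers such as
   the IV optimizations use the IV in a wider type without guarding
   against wraparound.  */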
5038 bool
5039 scev_probably_wraps_p (tree var, tree base, tree step,
5040 gimple *at_stmt, class loop *loop,
5041 bool use_overflow_semantics)
5043 /* FIXME: We really need something like
5044 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
5046 We used to test for the following situation that frequently appears
5047 during address arithmetic:
5049 D.1621_13 = (long unsigned intD.4) D.1620_12;
5050 D.1622_14 = D.1621_13 * 8;
5051 D.1623_15 = (doubleD.29 *) D.1622_14;
5053 And derived that the sequence corresponding to D_14
5054 can be proved to not wrap because it is used for computing a
5055 memory access; however, this is not really the case -- for example,
5056 if D_12 = (unsigned char) [254,+,1], then D_14 has values
5057 2032, 2040, 0, 8, ..., but the code is still legal. */
5059 if (chrec_contains_undetermined (base)
5060 || chrec_contains_undetermined (step))
5061 return true;
5063 if (integer_zerop (step))
5064 return false;
5066 /* If we can use the fact that signed and pointer arithmetic does not
5067 wrap, we are done. */
5068 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
5069 return false;
5071 /* To be able to use estimates on the number of iterations of the loop,
5072 we must have an upper bound on the absolute value of the step. */
5073 if (TREE_CODE (step) != INTEGER_CST)
5074 return true;
5076 /* Check if VAR can be proven not to overflow using value range info. */
5077 if (var && TREE_CODE (var) == SSA_NAME
5078 && scev_var_range_cant_overflow (var, step, loop))
5079 return false;
5081 if (loop_exits_before_overflow (base, step, at_stmt, loop))
5082 return false;
5084 /* At this point we still don't have a proof that the iv does not
5085 overflow: give up. */
5086 return true;
5089 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
5091 void
5092 free_numbers_of_iterations_estimates (class loop *loop)
5094 struct control_iv *civ;
5095 class nb_iter_bound *bound;
5097 loop->nb_iterations = NULL;
5098 loop->estimate_state = EST_NOT_COMPUTED;
5099 for (bound = loop->bounds; bound;)
5101 class nb_iter_bound *next = bound->next;
5102 ggc_free (bound);
5103 bound = next;
5105 loop->bounds = NULL;
5107 for (civ = loop->control_ivs; civ;)
5109 struct control_iv *next = civ->next;
5110 ggc_free (civ);
5111 civ = next;
5113 loop->control_ivs = NULL;
5116 /* Frees the information on upper bounds on numbers of iterations of loops. */
5118 void
5119 free_numbers_of_iterations_estimates (function *fn)
5121 for (auto loop : loops_list (fn, 0))
5122 free_numbers_of_iterations_estimates (loop);
5125 /* Substitute the value VAL for the SSA name NAME inside expressions held
5126 in LOOP. */
5128 void
5129 substitute_in_loop_info (class loop *loop, tree name, tree val)
5131 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);