Print cgraph_uid in function header
[official-gcc.git] / gcc-4_6-mobile-vtable-security / gcc / tree-ssa-loop-ivcanon.c
blob: eed7b6fd65a0d4a9746582aa2b7085fb21d4965c
1 /* Induction variable canonicalization.
2 Copyright (C) 2004, 2005, 2007, 2008, 2010
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass detects the loops that iterate a constant number of times,
22 adds a canonical induction variable (step -1, tested against 0)
23 and replaces the exit test. This enables the less powerful rtl
24 level analysis to use this information.
26 This might spoil the code in some cases (by increasing register pressure).
27 Note that in the case the new variable is not needed, ivopts will get rid
28 of it, so it might only be a problem when there are no other linear induction
29 variables. In that case the created optimization possibilities are likely
30 to pay up.
32 Additionally in case we detect that it is beneficial to unroll the
33 loop completely, we do it right here to expose the optimization
34 possibilities to the following passes. */
36 #include "config.h"
37 #include "system.h"
38 #include "coretypes.h"
39 #include "tm.h"
40 #include "tree.h"
41 #include "tm_p.h"
42 #include "basic-block.h"
43 #include "tree-pretty-print.h"
44 #include "gimple-pretty-print.h"
45 #include "tree-flow.h"
46 #include "tree-dump.h"
47 #include "cfgloop.h"
48 #include "tree-pass.h"
49 #include "tree-chrec.h"
50 #include "tree-scalar-evolution.h"
51 #include "params.h"
52 #include "flags.h"
53 #include "tree-inline.h"
54 #include "target.h"
55 #include "diagnostic.h"
/* Specifies types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,	/* Only loops that exit immediately in the first
			   iteration.  */
  UL_NO_GROWTH,		/* Only loops whose unrolling will not cause increase
			   of code size.  */
  UL_ALL		/* All suitable loops.  */
};
68 /* Adds a canonical induction variable to LOOP iterating NITER times. EXIT
69 is the exit edge whose condition is replaced. */
71 static void
72 create_canonical_iv (struct loop *loop, edge exit, tree niter)
74 edge in;
75 tree type, var;
76 gimple cond;
77 gimple_stmt_iterator incr_at;
78 enum tree_code cmp;
80 if (dump_file && (dump_flags & TDF_DETAILS))
82 fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
83 print_generic_expr (dump_file, niter, TDF_SLIM);
84 fprintf (dump_file, " iterations.\n");
87 cond = last_stmt (exit->src);
88 in = EDGE_SUCC (exit->src, 0);
89 if (in == exit)
90 in = EDGE_SUCC (exit->src, 1);
92 /* Note that we do not need to worry about overflows, since
93 type of niter is always unsigned and all comparisons are
94 just for equality/nonequality -- i.e. everything works
95 with a modulo arithmetics. */
97 type = TREE_TYPE (niter);
98 niter = fold_build2 (PLUS_EXPR, type,
99 niter,
100 build_int_cst (type, 1));
101 incr_at = gsi_last_bb (in->src);
102 create_iv (niter,
103 build_int_cst (type, -1),
104 NULL_TREE, loop,
105 &incr_at, false, NULL, &var);
107 cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
108 gimple_cond_set_code (cond, cmp);
109 gimple_cond_set_lhs (cond, var);
110 gimple_cond_set_rhs (cond, build_int_cst (type, 0));
111 update_stmt (cond);
114 /* Computes an estimated number of insns in LOOP, weighted by WEIGHTS. */
116 unsigned
117 tree_num_loop_insns (struct loop *loop, eni_weights *weights)
119 basic_block *body = get_loop_body (loop);
120 gimple_stmt_iterator gsi;
121 unsigned size = 0, i;
123 for (i = 0; i < loop->num_nodes; i++)
124 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
125 size += estimate_num_insns (gsi_stmt (gsi), weights);
126 free (body);
128 return size;
/* Describe size of loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will be likely optimized out in
     peeled iterations of loop (i.e. computation based on induction
     variable where induction variable starts at known constant.)  */
  int eliminated_by_peeling;

  /* Same statistics for last iteration of loop: it is smaller because
     instructions after exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;
};
148 /* Return true if OP in STMT will be constant after peeling LOOP. */
150 static bool
151 constant_after_peeling (tree op, gimple stmt, struct loop *loop)
153 affine_iv iv;
155 if (is_gimple_min_invariant (op))
156 return true;
158 /* We can still fold accesses to constant arrays when index is known. */
159 if (TREE_CODE (op) != SSA_NAME)
161 tree base = op;
163 /* First make fast look if we see constant array inside. */
164 while (handled_component_p (base))
165 base = TREE_OPERAND (base, 0);
166 if ((DECL_P (base) == VAR_DECL
167 && const_value_known_p (base))
168 || CONSTANT_CLASS_P (base))
170 /* If so, see if we understand all the indices. */
171 base = op;
172 while (handled_component_p (base))
174 if (TREE_CODE (base) == ARRAY_REF
175 && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
176 return false;
177 base = TREE_OPERAND (base, 0);
179 return true;
181 return false;
184 /* Induction variables are constants. */
185 if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
186 return false;
187 if (!is_gimple_min_invariant (iv.base))
188 return false;
189 if (!is_gimple_min_invariant (iv.step))
190 return false;
191 return true;
194 /* Computes an estimated number of insns in LOOP, weighted by WEIGHTS.
195 Return results in SIZE, estimate benefits for complete unrolling exiting by EXIT. */
197 static void
198 tree_estimate_loop_size (struct loop *loop, edge exit, struct loop_size *size)
200 basic_block *body = get_loop_body (loop);
201 gimple_stmt_iterator gsi;
202 unsigned int i;
203 bool after_exit;
205 size->overall = 0;
206 size->eliminated_by_peeling = 0;
207 size->last_iteration = 0;
208 size->last_iteration_eliminated_by_peeling = 0;
210 if (dump_file && (dump_flags & TDF_DETAILS))
211 fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
212 for (i = 0; i < loop->num_nodes; i++)
214 if (exit && body[i] != exit->src
215 && dominated_by_p (CDI_DOMINATORS, body[i], exit->src))
216 after_exit = true;
217 else
218 after_exit = false;
219 if (dump_file && (dump_flags & TDF_DETAILS))
220 fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit);
222 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
224 gimple stmt = gsi_stmt (gsi);
225 int num = estimate_num_insns (stmt, &eni_size_weights);
226 bool likely_eliminated = false;
228 if (dump_file && (dump_flags & TDF_DETAILS))
230 fprintf (dump_file, " size: %3i ", num);
231 print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
234 /* Look for reasons why we might optimize this stmt away. */
236 /* Exit conditional. */
237 if (body[i] == exit->src && stmt == last_stmt (exit->src))
239 if (dump_file && (dump_flags & TDF_DETAILS))
240 fprintf (dump_file, " Exit condition will be eliminated.\n");
241 likely_eliminated = true;
243 /* Sets of IV variables */
244 else if (gimple_code (stmt) == GIMPLE_ASSIGN
245 && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
247 if (dump_file && (dump_flags & TDF_DETAILS))
248 fprintf (dump_file, " Induction variable computation will"
249 " be folded away.\n");
250 likely_eliminated = true;
252 /* Assignments of IV variables. */
253 else if (gimple_code (stmt) == GIMPLE_ASSIGN
254 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
255 && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt,loop)
256 && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
257 || constant_after_peeling (gimple_assign_rhs2 (stmt),
258 stmt, loop)))
260 if (dump_file && (dump_flags & TDF_DETAILS))
261 fprintf (dump_file, " Constant expression will be folded away.\n");
262 likely_eliminated = true;
264 /* Conditionals. */
265 else if (gimple_code (stmt) == GIMPLE_COND
266 && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
267 && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))
269 if (dump_file && (dump_flags & TDF_DETAILS))
270 fprintf (dump_file, " Constant conditional.\n");
271 likely_eliminated = true;
274 size->overall += num;
275 if (likely_eliminated)
276 size->eliminated_by_peeling += num;
277 if (!after_exit)
279 size->last_iteration += num;
280 if (likely_eliminated)
281 size->last_iteration_eliminated_by_peeling += num;
285 if (dump_file && (dump_flags & TDF_DETAILS))
286 fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
287 size->eliminated_by_peeling, size->last_iteration,
288 size->last_iteration_eliminated_by_peeling);
290 free (body);
293 /* Estimate number of insns of completely unrolled loop.
294 It is (NUNROLL + 1) * size of loop body with taking into account
295 the fact that in last copy everything after exit conditional
296 is dead and that some instructions will be eliminated after
297 peeling.
299 Loop body is likely going to simplify futher, this is difficult
300 to guess, we just decrease the result by 1/3. */
302 static unsigned HOST_WIDE_INT
303 estimated_unrolled_size (struct loop_size *size,
304 unsigned HOST_WIDE_INT nunroll)
306 HOST_WIDE_INT unr_insns = ((nunroll)
307 * (HOST_WIDE_INT) (size->overall
308 - size->eliminated_by_peeling));
309 if (!nunroll)
310 unr_insns = 0;
311 unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;
313 unr_insns = unr_insns * 2 / 3;
314 if (unr_insns <= 0)
315 unr_insns = 1;
317 return unr_insns;
320 /* Tries to unroll LOOP completely, i.e. NITER times.
321 UL determines which loops we are allowed to unroll.
322 EXIT is the exit of the loop that should be eliminated. */
324 static bool
325 try_unroll_loop_completely (struct loop *loop,
326 edge exit, tree niter,
327 enum unroll_level ul)
329 unsigned HOST_WIDE_INT n_unroll, ninsns, max_unroll, unr_insns;
330 unsigned HOST_WIDE_INT max_peeled_insns;
331 gimple cond;
332 struct loop_size size;
334 if (loop->inner)
335 return false;
337 if (!host_integerp (niter, 1))
338 return false;
339 n_unroll = tree_low_cst (niter, 1);
341 if (profile_status == PROFILE_READ
342 && optimize_loop_for_speed_p (loop))
343 max_unroll = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES_FEEDBACK);
344 else
345 max_unroll = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
347 if (n_unroll > max_unroll)
349 if (dump_file && (dump_flags & TDF_DETAILS))
351 fprintf (dump_file, " Not unrolling loop %d limited by max unroll"
352 " (%d > %d)\n",
353 loop->num, (int) n_unroll, (int) max_unroll);
355 return false;
358 if (n_unroll)
360 if (ul == UL_SINGLE_ITER)
361 return false;
363 tree_estimate_loop_size (loop, exit, &size);
364 ninsns = size.overall;
366 unr_insns = estimated_unrolled_size (&size, n_unroll);
367 if (dump_file && (dump_flags & TDF_DETAILS))
369 fprintf (dump_file, " Loop size: %d\n", (int) ninsns);
370 fprintf (dump_file, " Estimated size after unrolling: %d\n",
371 (int) unr_insns);
374 if (profile_status == PROFILE_READ
375 && optimize_loop_for_speed_p (loop))
376 max_peeled_insns =
377 PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS_FEEDBACK);
378 else
379 max_peeled_insns = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS);
381 if (unr_insns > max_peeled_insns)
383 if (dump_file && (dump_flags & TDF_DETAILS))
384 fprintf (dump_file, "Not unrolling loop %d "
385 "(--param max-completely-peeled-insns(-feedback) limit. "
386 "(%u > %u)).\n",
387 loop->num, (unsigned) unr_insns, (unsigned) max_peeled_insns);
388 return false;
391 if (ul == UL_NO_GROWTH
392 && unr_insns > ninsns)
394 if (dump_file && (dump_flags & TDF_DETAILS))
395 fprintf (dump_file, "Not unrolling loop %d (NO_GROWTH %d > %d).\n",
396 loop->num, (int) unr_insns, (int) ninsns);
397 return false;
401 if (n_unroll)
403 sbitmap wont_exit;
404 edge e;
405 unsigned i;
406 VEC (edge, heap) *to_remove = NULL;
408 initialize_original_copy_tables ();
409 wont_exit = sbitmap_alloc (n_unroll + 1);
410 sbitmap_ones (wont_exit);
411 RESET_BIT (wont_exit, 0);
413 if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
414 n_unroll, wont_exit,
415 exit, &to_remove,
416 DLTHE_FLAG_UPDATE_FREQ
417 | DLTHE_FLAG_COMPLETTE_PEEL))
419 free_original_copy_tables ();
420 free (wont_exit);
421 return false;
424 FOR_EACH_VEC_ELT (edge, to_remove, i, e)
426 bool ok = remove_path (e);
427 gcc_assert (ok);
430 VEC_free (edge, heap, to_remove);
431 free (wont_exit);
432 free_original_copy_tables ();
435 cond = last_stmt (exit->src);
436 if (exit->flags & EDGE_TRUE_VALUE)
437 gimple_cond_make_true (cond);
438 else
439 gimple_cond_make_false (cond);
440 update_stmt (cond);
441 update_ssa (TODO_update_ssa);
443 if (dump_file)
444 fprintf (dump_file, "Unrolled loop %d completely by factor %d.\n",
445 loop->num, (int) n_unroll);
447 if (flag_opt_info >= OPT_INFO_MIN)
449 location_t locus;
450 locus = gimple_location (cond);
452 inform (locus, "Completely Unroll loop by %d (header execution count %d)",
453 (int) n_unroll,
454 (int) loop->header->count);
457 return true;
460 /* Adds a canonical induction variable to LOOP if suitable.
461 CREATE_IV is true if we may create a new iv. UL determines
462 which loops we are allowed to completely unroll. If TRY_EVAL is true, we try
463 to determine the number of iterations of a loop by direct evaluation.
464 Returns true if cfg is changed. */
466 static bool
467 canonicalize_loop_induction_variables (struct loop *loop,
468 bool create_iv, enum unroll_level ul,
469 bool try_eval)
471 edge exit = NULL;
472 tree niter;
474 niter = number_of_latch_executions (loop);
475 if (TREE_CODE (niter) == INTEGER_CST)
477 exit = single_exit (loop);
478 if (!just_once_each_iteration_p (loop, exit->src))
479 return false;
481 else
483 /* If the loop has more than one exit, try checking all of them
484 for # of iterations determinable through scev. */
485 if (!single_exit (loop))
486 niter = find_loop_niter (loop, &exit);
488 /* Finally if everything else fails, try brute force evaluation. */
489 if (try_eval
490 && (chrec_contains_undetermined (niter)
491 || TREE_CODE (niter) != INTEGER_CST))
492 niter = find_loop_niter_by_eval (loop, &exit);
494 if (chrec_contains_undetermined (niter)
495 || TREE_CODE (niter) != INTEGER_CST)
496 return false;
499 if (dump_file && (dump_flags & TDF_DETAILS))
501 fprintf (dump_file, "Loop %d iterates ", loop->num);
502 print_generic_expr (dump_file, niter, TDF_SLIM);
503 fprintf (dump_file, " times.\n");
506 if (try_unroll_loop_completely (loop, exit, niter, ul))
507 return true;
509 if (create_iv)
510 create_canonical_iv (loop, exit, niter);
512 return false;
515 /* The main entry point of the pass. Adds canonical induction variables
516 to the suitable loops. */
518 unsigned int
519 canonicalize_induction_variables (void)
521 loop_iterator li;
522 struct loop *loop;
523 bool changed = false;
525 FOR_EACH_LOOP (li, loop, 0)
527 changed |= canonicalize_loop_induction_variables (loop,
528 true, UL_SINGLE_ITER,
529 true);
532 /* Clean up the information about numbers of iterations, since brute force
533 evaluation could reveal new information. */
534 scev_reset ();
536 if (changed)
537 return TODO_cleanup_cfg;
538 return 0;
541 /* Unroll LOOPS completely if they iterate just few times. Unless
542 MAY_INCREASE_SIZE is true, perform the unrolling only if the
543 size of the code does not increase. */
545 unsigned int
546 tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
548 loop_iterator li;
549 struct loop *loop;
550 bool changed;
551 enum unroll_level ul;
552 int iteration = 0;
556 changed = false;
558 FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
560 if (may_increase_size && optimize_loop_for_speed_p (loop)
561 /* Unroll outermost loops only if asked to do so or they do
562 not cause code growth. */
563 && (unroll_outer
564 || loop_outer (loop_outer (loop))))
565 ul = UL_ALL;
566 else
567 ul = UL_NO_GROWTH;
568 changed |= canonicalize_loop_induction_variables
569 (loop, false, ul, !flag_tree_loop_ivcanon);
572 if (changed)
574 /* This will take care of removing completely unrolled loops
575 from the loop structures so we can continue unrolling now
576 innermost loops. */
577 if (cleanup_tree_cfg ())
578 update_ssa (TODO_update_ssa_only_virtuals);
580 /* Clean up the information about numbers of iterations, since
581 complete unrolling might have invalidated it. */
582 scev_reset ();
585 while (changed
586 && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));
588 return 0;