/* Natural loop analysis code for GNU compiler.
   Copyright (C) 2002-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "cfgloop.h"
#include "explow.h"
#include "expr.h"
#include "graphds.h"
#include "params.h"
#include "sreal.h"

struct target_cfgloop default_target_cfgloop;
#if SWITCHABLE_TARGET
struct target_cfgloop *this_target_cfgloop = &default_target_cfgloop;
#endif
/* Checks whether BB is executed exactly once in each LOOP iteration.  */

bool
just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
{
  /* It must be executed at least once each iteration.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return false;

  /* And just once.  */
  if (bb->loop_father != loop)
    return false;

  /* But this was not enough.  We might have some irreducible loop here.  */
  if (bb->flags & BB_IRREDUCIBLE_LOOP)
    return false;

  return true;
}
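
/* A minimal usage sketch (not part of the original file): passes that hoist
   or sink code often need to know which blocks run exactly once per
   iteration.  Assuming LOOP is valid and dominators are up to date, one
   might write:

     basic_block *body = get_loop_body (loop);
     for (unsigned i = 0; i < loop->num_nodes; i++)
       if (just_once_each_iteration_p (loop, body[i]))
         note_unconditionally_executed (body[i]);  // hypothetical helper
     free (body);  */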
/* Marks blocks and edges that are part of non-recognized loops; i.e. we
   throw away all latch edges and mark blocks inside any remaining cycle.
   Everything is a bit complicated due to the fact that we do not want to
   do this for parts of cycles that only "pass" through some loop -- i.e.
   for each cycle, we want to mark the blocks that belong directly to the
   innermost loop containing the whole cycle.

   LOOPS is the loop tree.  */

#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block_for_fn (cfun))
#define BB_REPR(BB) ((BB)->index + 1)
bool
mark_irreducible_loops (void)
{
  basic_block act;
  struct graph_edge *ge;
  edge e;
  edge_iterator ei;
  int src, dest;
  unsigned depth;
  struct graph *g;
  int num = number_of_loops (cfun);
  struct loop *cloop;
  bool irred_loop_found = false;
  int i;

  gcc_assert (current_loops != NULL);

  /* Reset the flags.  */
  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      act->flags &= ~BB_IRREDUCIBLE_LOOP;
      FOR_EACH_EDGE (e, ei, act->succs)
	e->flags &= ~EDGE_IRREDUCIBLE_LOOP;
    }

  /* Create the edge lists.  */
  g = new_graph (last_basic_block_for_fn (cfun) + num);

  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    FOR_EACH_EDGE (e, ei, act->succs)
      {
	/* Ignore edges to exit.  */
	if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	  continue;

	src = BB_REPR (act);
	dest = BB_REPR (e->dest);

	/* Ignore latch edges.  */
	if (e->dest->loop_father->header == e->dest
	    && e->dest->loop_father->latch == act)
	  continue;
	/* Edges inside a single loop should be left where they are.  Edges
	   to subloop headers should lead to the representative of the
	   subloop, but from the same place.

	   Edges exiting loops should lead from the representative of the
	   child of the nearest common ancestor of the loops in which ACT
	   lies.  */
	if (e->dest->loop_father->header == e->dest)
	  dest = LOOP_REPR (e->dest->loop_father);

	if (!flow_bb_inside_loop_p (act->loop_father, e->dest))
	  {
	    depth = 1 + loop_depth (find_common_loop (act->loop_father,
						      e->dest->loop_father));
	    if (depth == loop_depth (act->loop_father))
	      cloop = act->loop_father;
	    else
	      cloop = (*act->loop_father->superloops)[depth];

	    src = LOOP_REPR (cloop);
	  }

	add_edge (g, src, dest)->data = e;
      }

  /* Find the strongly connected components.  */
  graphds_scc (g, NULL);

  /* Mark the irreducible loops.  */
  for (i = 0; i < g->n_vertices; i++)
    for (ge = g->vertices[i].succ; ge; ge = ge->succ_next)
      {
	edge real = (edge) ge->data;
	/* edge E in graph G is irreducible if it connects two vertices in the
	   same scc.  */

	/* All edges should lead from a component with higher number to the
	   one with lower one.  */
	gcc_assert (g->vertices[ge->src].component
		    >= g->vertices[ge->dest].component);

	if (g->vertices[ge->src].component != g->vertices[ge->dest].component)
	  continue;

	real->flags |= EDGE_IRREDUCIBLE_LOOP;
	irred_loop_found = true;
	if (flow_bb_inside_loop_p (real->src->loop_father, real->dest))
	  real->src->flags |= BB_IRREDUCIBLE_LOOP;
      }

  free_graph (g);

  loops_state_set (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  return irred_loop_found;
}
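
/* Illustrative note (an assumption-laden sketch, not original code): after
   mark_irreducible_loops returns true, passes can consult the flags it set.
   For instance, a pass might skip blocks inside irreducible regions:

     if (mark_irreducible_loops ())
       FOR_EACH_BB_FN (bb, cfun)
	 if (bb->flags & BB_IRREDUCIBLE_LOOP)
	   bitmap_set_bit (blocks_to_skip, bb->index);  // hypothetical bitmap  */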
/* Counts number of insns inside LOOP.  */
int
num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, ninsns = 0;
  rtx_insn *insn;

  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  ninsns++;
    }
  free (bbs);

  if (!ninsns)
    ninsns = 1; /* To avoid division by zero.  */

  return ninsns;
}
/* Counts number of insns executed on average per iteration of LOOP.  */
int
average_num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, binsns;
  sreal ninsns;
  rtx_insn *insn;

  ninsns = 0;
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      binsns = 0;
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  binsns++;

      ninsns += (sreal) binsns * bb->count.to_sreal_scale (loop->header->count);
      /* Avoid overflows.  */
      if (ninsns > 1000000)
	return 100000;
    }
  free (bbs);

  int64_t ret = ninsns.to_int ();
  if (!ret)
    ret = 1; /* To avoid division by zero.  */

  return ret;
}
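
/* Worked example of the scaling above (illustrative numbers only): if the
   loop header has profile count 1000 and a conditional block inside the
   loop has count 250 with 8 non-debug insns, that block contributes
   8 * 250/1000 = 2 insns to the per-iteration average, whereas an
   unconditional block with count 1000 contributes all 8 of its insns.  */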
/* Returns expected number of iterations of LOOP, according to
   measured or guessed profile.

   This function attempts to return a "sane" value even if the profile
   information is not good enough to derive one.
   If BY_PROFILE_ONLY is set, this logic is bypassed and the function
   returns -1 in those scenarios.  */
gcov_type
expected_loop_iterations_unbounded (const struct loop *loop,
				    bool *read_profile_p,
				    bool by_profile_only)
{
  edge e;
  edge_iterator ei;
  gcov_type expected = -1;

  if (read_profile_p)
    *read_profile_p = false;

  /* If we have no profile at all, use AVG_LOOP_NITER.  */
  if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
    {
      if (by_profile_only)
	return -1;
      expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
    }
  else if (loop->latch && (loop->latch->count.initialized_p ()
			   || loop->header->count.initialized_p ()))
    {
      profile_count count_in = profile_count::zero (),
		    count_latch = profile_count::zero ();

      FOR_EACH_EDGE (e, ei, loop->header->preds)
	if (e->src == loop->latch)
	  count_latch = e->count ();
	else
	  count_in += e->count ();

      if (!count_latch.initialized_p ())
	{
	  if (by_profile_only)
	    return -1;
	  expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
	}
      else if (!count_in.nonzero_p ())
	{
	  if (by_profile_only)
	    return -1;
	  expected = count_latch.to_gcov_type () * 2;
	}
      else
	{
	  expected = (count_latch.to_gcov_type () + count_in.to_gcov_type ()
		      - 1) / count_in.to_gcov_type ();
	  if (read_profile_p
	      && count_latch.reliable_p () && count_in.reliable_p ())
	    *read_profile_p = true;
	}
    }
  else
    {
      if (by_profile_only)
	return -1;
      expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
    }

  if (!by_profile_only)
    {
      HOST_WIDE_INT max = get_max_loop_iterations_int (loop);
      if (max != -1 && max < expected)
	return max;
    }

  return expected;
}
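
/* Worked example of the profile-based estimate above (illustrative only):
   if the loop is entered 10 times (count_in == 10) and the latch edge is
   taken 990 times (count_latch == 990), then

     expected = (990 + 10 - 1) / 10 = 99

   i.e. roughly 99 latch traversals, hence about 100 executions of the
   loop body, per entry into the loop.  */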
/* Returns expected number of LOOP iterations.  The returned value is bounded
   by REG_BR_PROB_BASE.  */

unsigned
expected_loop_iterations (struct loop *loop)
{
  gcov_type expected = expected_loop_iterations_unbounded (loop);
  return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
}
/* Returns the maximum level of nesting of subloops of LOOP.  */

unsigned
get_loop_level (const struct loop *loop)
{
  const struct loop *ploop;
  unsigned mx = 0, l;

  for (ploop = loop->inner; ploop; ploop = ploop->next)
    {
      l = get_loop_level (ploop);
      if (l >= mx)
	mx = l + 1;
    }
  return mx;
}
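
/* Illustrative note (not original code): for a loop nest

     for (i ...)        <- loop A
       for (j ...)      <- loop B
	 for (k ...)    <- loop C

   get_loop_level (C) == 0, get_loop_level (B) == 1 and
   get_loop_level (A) == 2; a loop with no subloops has level 0.  */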
/* Initialize the constants for computing set costs.  */

void
init_set_costs (void)
{
  int speed;
  rtx_insn *seq;
  rtx reg1 = gen_raw_REG (SImode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = gen_raw_REG (SImode, LAST_VIRTUAL_REGISTER + 2);
  rtx addr = gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 3);
  rtx mem = validize_mem (gen_rtx_MEM (SImode, addr));
  unsigned i;

  target_avail_regs = 0;
  target_clobbered_regs = 0;
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i)
	&& !fixed_regs[i])
      {
	target_avail_regs++;
	if (call_used_regs[i])
	  target_clobbered_regs++;
      }

  target_res_regs = 3;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      /* Set up the costs for using extra registers:

	 1) If not many free registers remain, we should prefer having an
	    additional move to decreasing the number of available registers.
	    (TARGET_REG_COST).
	 2) If no registers are available, we need to spill, which may require
	    storing the old value to memory and loading it back
	    (TARGET_SPILL_COST).  */

      start_sequence ();
      emit_move_insn (reg1, reg2);
      seq = get_insns ();
      end_sequence ();
      target_reg_cost [speed] = seq_cost (seq, speed);

      start_sequence ();
      emit_move_insn (mem, reg1);
      emit_move_insn (reg2, mem);
      seq = get_insns ();
      end_sequence ();
      target_spill_cost [speed] = seq_cost (seq, speed);
    }
  default_rtl_profile ();
}
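
/* Illustrative note (a sketch under assumed numbers, not original code):
   the costs measured above feed estimate_reg_pressure_cost below.  On a
   hypothetical target where a reg-reg move costs 4 and a spill/reload
   pair costs 12, the loop optimizer would see

     target_reg_cost[speed]   == 4
     target_spill_cost[speed] == 12

   and would therefore treat keeping a value in a register as three times
   cheaper than spilling it once registers run out.  */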
/* Estimates cost of increased register pressure caused by making N_NEW new
   registers live around the loop.  N_OLD is the number of registers live
   around the loop.  If CALL_P is true, also take into account that
   call-used registers may be clobbered in the loop body, reducing the
   number of available registers before we spill.  */

unsigned
estimate_reg_pressure_cost (unsigned n_new, unsigned n_old, bool speed,
			    bool call_p)
{
  unsigned cost;
  unsigned regs_needed = n_new + n_old;
  unsigned available_regs = target_avail_regs;

  /* If there is a call in the loop body, the call-clobbered registers
     are not available for loop invariants.  */
  if (call_p)
    available_regs = available_regs - target_clobbered_regs;

  /* If we have enough registers, we should use them and not restrict
     the transformations unnecessarily.  */
  if (regs_needed + target_res_regs <= available_regs)
    return 0;

  if (regs_needed <= available_regs)
    /* If we are close to running out of registers, try to preserve
       them.  */
    cost = target_reg_cost [speed] * n_new;
  else
    /* If we run out of registers, it is very expensive to add another
       one.  */
    cost = target_spill_cost [speed] * n_new;

  if (optimize && (flag_ira_region == IRA_REGION_ALL
		   || flag_ira_region == IRA_REGION_MIXED)
      && number_of_loops (cfun) <= (unsigned) IRA_MAX_LOOPS_NUM)
    /* IRA regional allocation deals with high register pressure
       better.  So decrease the cost (to make the cost calculation more
       accurate for IRA, we would need to know how many registers live
       through the loop transparently).  */
    cost /= 2;

  return cost;
}
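
/* Worked example (hypothetical numbers): with target_avail_regs == 16,
   target_res_regs == 3 and no call in the body, making 2 new registers
   live when 10 are already live gives regs_needed == 12; since
   12 + 3 <= 16 the cost is 0.  Making 4 new registers live instead gives
   regs_needed == 14; 14 + 3 > 16 but 14 <= 16, so the estimate is
   target_reg_cost[speed] * 4, the cheaper "preserve registers" case.  */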
/* Sets EDGE_LOOP_EXIT flag for all loop exits.  */

void
mark_loop_exit_edges (void)
{
  basic_block bb;
  edge e;

  if (number_of_loops (cfun) <= 1)
    return;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (loop_outer (bb->loop_father)
	      && loop_exit_edge_p (bb->loop_father, e))
	    e->flags |= EDGE_LOOP_EXIT;
	  else
	    e->flags &= ~EDGE_LOOP_EXIT;
	}
    }
}
/* Return exit edge if loop has only one exit that is likely
   to be executed on runtime (i.e. it is not EH or leading
   to a noreturn call).  */

edge
single_likely_exit (struct loop *loop)
{
  edge found = single_exit (loop);
  vec<edge> exits;
  unsigned i;
  edge ex;

  if (found)
    return found;
  exits = get_loop_exit_edges (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (probably_never_executed_edge_p (cfun, ex)
	  /* We want to rule out paths to noreturns but not low probabilities
	     resulting from adjustments or combining.
	     FIXME: once we have better quality tracking, make this more
	     robust.  */
	  || ex->probability <= profile_probability::very_unlikely ())
	continue;
      if (!found)
	found = ex;
      else
	{
	  exits.release ();
	  return NULL;
	}
    }
  exits.release ();
  return found;
}
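
/* A minimal usage sketch (not part of the original file): callers such as
   loop unrolling typically use this to find the single exit worth
   optimizing for, assuming LOOP has an up-to-date CFG:

     edge exit = single_likely_exit (loop);
     if (exit)
       transform_counted_loop (loop, exit);  // hypothetical transform  */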
/* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
   order against direction of edges from latch.  In particular, if
   header != latch, latch is the 1-st block.  */

vec<basic_block>
get_loop_hot_path (const struct loop *loop)
{
  basic_block bb = loop->header;
  vec<basic_block> path = vNULL;
  bitmap visited = BITMAP_ALLOC (NULL);

  while (true)
    {
      edge_iterator ei;
      edge e;
      edge best = NULL;

      path.safe_push (bb);
      bitmap_set_bit (visited, bb->index);
      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((!best || e->probability > best->probability)
	    && !loop_exit_edge_p (loop, e)
	    && !bitmap_bit_p (visited, e->dest->index))
	  best = e;
      if (!best || best->dest == loop->header)
	break;
      bb = best->dest;
    }
  BITMAP_FREE (visited);
  return path;
}
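
/* A minimal usage sketch (not part of the original file): consumers can
   walk the returned path, e.g. to sum the expected hot-path size,
   remembering to release the vector:

     vec<basic_block> path = get_loop_hot_path (loop);
     unsigned i;
     basic_block bb;
     FOR_EACH_VEC_ELT (path, i, bb)
       size += estimate_bb_size (bb);  // hypothetical size estimate
     path.release ();  */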