/* Natural loop analysis code for GNU compiler.
   Copyright (C) 2002-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "dominance.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "expr.h"
#include "graphds.h"
#include "params.h"

struct target_cfgloop default_target_cfgloop;

struct target_cfgloop *this_target_cfgloop = &default_target_cfgloop;

/* Checks whether BB is executed exactly once in each LOOP iteration.  */

bool
just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
{
  /* It must be executed at least once each iteration.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return false;

  /* And just once.  */
  if (bb->loop_father != loop)
    return false;

  /* But this was not enough.  We might have some irreducible loop here.  */
  if (bb->flags & BB_IRREDUCIBLE_LOOP)
    return false;

  return true;
}
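
/* Illustrative use (hypothetical caller, not part of this file): a pass
   that only wants to transform BB when it is known to run once per
   iteration could guard the transformation with

     if (just_once_each_iteration_p (loop, bb))
       apply_transformation (loop, bb);

   where apply_transformation is a made-up helper.  */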

/* Marks blocks and edges that are part of non-recognized loops; i.e. we
   throw away all latch edges and mark blocks inside any remaining cycle.
   Everything is a bit complicated due to fact we do not want to do this
   for parts of cycles that only "pass" through some loop -- i.e. for
   each cycle, we want to mark blocks that belong directly to innermost
   loop containing the whole cycle.

   LOOPS is the loop tree.  */

#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block_for_fn (cfun))
#define BB_REPR(BB) ((BB)->index + 1)
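
/* For illustration: basic block B is mapped to graph vertex B->index + 1
   and loop L to vertex last_basic_block_for_fn (cfun) + L->num, which is
   why the graph built below is allocated with
   last_basic_block_for_fn (cfun) + number_of_loops (cfun) vertices.  */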

bool
mark_irreducible_loops (void)
{
  basic_block act;
  struct graph_edge *ge;
  edge e;
  edge_iterator ei;
  int src, dest;
  unsigned depth;
  struct graph *g;
  int num = number_of_loops (cfun);
  struct loop *cloop;
  bool irred_loop_found = false;
  int i;

  gcc_assert (current_loops != NULL);

  /* Reset the flags.  */
  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      act->flags &= ~BB_IRREDUCIBLE_LOOP;
      FOR_EACH_EDGE (e, ei, act->succs)
        e->flags &= ~EDGE_IRREDUCIBLE_LOOP;
    }

  /* Create the edge lists.  */
  g = new_graph (last_basic_block_for_fn (cfun) + num);

  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    FOR_EACH_EDGE (e, ei, act->succs)
      {
        /* Ignore edges to exit.  */
        if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
          continue;

        src = BB_REPR (act);
        dest = BB_REPR (e->dest);

        /* Ignore latch edges.  */
        if (e->dest->loop_father->header == e->dest
            && e->dest->loop_father->latch == act)
          continue;

        /* Edges inside a single loop should be left where they are.  Edges
           to subloop headers should lead to representative of the subloop,
           but from the same place.

           Edges exiting loops should lead from representative
           of the son of nearest common ancestor of the loops in that
           act lays.  */

        if (e->dest->loop_father->header == e->dest)
          dest = LOOP_REPR (e->dest->loop_father);

        if (!flow_bb_inside_loop_p (act->loop_father, e->dest))
          {
            depth = 1 + loop_depth (find_common_loop (act->loop_father,
                                                      e->dest->loop_father));
            if (depth == loop_depth (act->loop_father))
              cloop = act->loop_father;
            else
              cloop = (*act->loop_father->superloops)[depth];

            src = LOOP_REPR (cloop);
          }

        add_edge (g, src, dest)->data = e;
      }

  /* Find the strongly connected components.  */
  graphds_scc (g, NULL);

  /* Mark the irreducible loops.  */
  for (i = 0; i < g->n_vertices; i++)
    for (ge = g->vertices[i].succ; ge; ge = ge->succ_next)
      {
        edge real = (edge) ge->data;
        /* edge E in graph G is irreducible if it connects two vertices in
           the same scc.  */

        /* All edges should lead from a component with higher number to the
           one with lower one.  */
        gcc_assert (g->vertices[ge->src].component
                    >= g->vertices[ge->dest].component);

        if (g->vertices[ge->src].component
            != g->vertices[ge->dest].component)
          continue;

        real->flags |= EDGE_IRREDUCIBLE_LOOP;
        irred_loop_found = true;
        if (flow_bb_inside_loop_p (real->src->loop_father, real->dest))
          real->src->flags |= BB_IRREDUCIBLE_LOOP;
      }

  free_graph (g);

  loops_state_set (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  return irred_loop_found;
}
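
/* Illustrative use (hypothetical caller, not part of this file):

     if (mark_irreducible_loops ())
       ...

   after which blocks inside unrecognized cycles carry BB_IRREDUCIBLE_LOOP
   and the corresponding edges carry EDGE_IRREDUCIBLE_LOOP, so passes that
   cannot handle such regions can test the flags and skip them.  */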

/* Counts number of insns inside LOOP.  */

int
num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, ninsns = 0;
  rtx_insn *insn;

  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];
      FOR_BB_INSNS (bb, insn)
        if (NONDEBUG_INSN_P (insn))
          ninsns++;
    }
  free (bbs);

  if (!ninsns)
    ninsns = 1; /* To avoid division by zero.  */

  return ninsns;
}
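
/* For illustration: a two-block loop whose header contains 3 non-debug
   insns and whose latch contains 4 yields a count of 7; debug insns are
   ignored.  */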

/* Counts number of insns executed on average per iteration of LOOP.  */

int
average_num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, binsns, ninsns, ratio;
  rtx_insn *insn;

  ninsns = 0;
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      binsns = 0;
      FOR_BB_INSNS (bb, insn)
        if (NONDEBUG_INSN_P (insn))
          binsns++;

      ratio = loop->header->frequency == 0
              ? BB_FREQ_MAX
              : (bb->frequency * BB_FREQ_MAX) / loop->header->frequency;
      ninsns += binsns * ratio;
    }
  free (bbs);

  ninsns /= BB_FREQ_MAX;
  if (!ninsns)
    ninsns = 1; /* To avoid division by zero.  */

  return ninsns;
}
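
/* Worked example with made-up frequencies: a header with frequency 100 and
   10 non-debug insns plus a conditional block with frequency 50 and 4
   insns accumulate ninsns = 10 * BB_FREQ_MAX + 4 * (BB_FREQ_MAX / 2);
   after the final division by BB_FREQ_MAX the estimate is 10 + 2 = 12
   insns per average iteration.  */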

/* Returns expected number of iterations of LOOP, according to
   measured or guessed profile.  No bounding is done on the
   value.  */

gcov_type
expected_loop_iterations_unbounded (const struct loop *loop)
{
  edge e;
  edge_iterator ei;

  if (loop->latch->count || loop->header->count)
    {
      gcov_type count_in, count_latch, expected;

      count_in = 0;
      count_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src == loop->latch)
          count_latch = e->count;
        else
          count_in += e->count;

      if (count_in == 0)
        expected = count_latch * 2;
      else
        expected = (count_latch + count_in - 1) / count_in;

      return expected;
    }
  else
    {
      int freq_in, freq_latch;

      freq_in = 0;
      freq_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src == loop->latch)
          freq_latch = EDGE_FREQUENCY (e);
        else
          freq_in += EDGE_FREQUENCY (e);

      if (freq_in == 0)
        return freq_latch * 2;

      return (freq_latch + freq_in - 1) / freq_in;
    }
}
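
/* Worked example with made-up profile counts: if the latch edge entering
   the header has count 90 and the only other entry edge has count 10,
   then count_latch = 90, count_in = 10 and the function returns
   (90 + 10 - 1) / 10 = 9 expected iterations; with no entry count at all
   it falls back to count_latch * 2.  */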

/* Returns expected number of LOOP iterations.  The returned value is bounded
   by REG_BR_PROB_BASE.  */

unsigned
expected_loop_iterations (const struct loop *loop)
{
  gcov_type expected = expected_loop_iterations_unbounded (loop);
  return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
}

/* Returns the maximum level of nesting of subloops of LOOP.  */

unsigned
get_loop_level (const struct loop *loop)
{
  const struct loop *ploop;
  unsigned mx = 0, l;

  for (ploop = loop->inner; ploop; ploop = ploop->next)
    {
      l = get_loop_level (ploop);
      if (l >= mx)
        mx = l + 1;
    }
  return mx;
}
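
/* For illustration: a loop with no subloops has level 0, a loop containing
   only simple subloops has level 1, and each further level of nesting adds
   one.  */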

/* Initialize the constants for computing set costs.  */

void
init_set_costs (void)
{
  int speed;
  rtx_insn *seq;
  rtx reg1 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx reg2 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER + 1);
  rtx addr = gen_raw_REG (Pmode, FIRST_PSEUDO_REGISTER + 2);
  rtx mem = validize_mem (gen_rtx_MEM (SImode, addr));
  unsigned i;

  target_avail_regs = 0;
  target_clobbered_regs = 0;
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i)
        && !fixed_regs[i])
      {
        target_avail_regs++;
        if (call_used_regs[i])
          target_clobbered_regs++;
      }

  target_res_regs = 3;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      /* Set up the costs for using extra registers:

         1) If not many free registers remain, we should prefer having an
            additional move to decreasing the number of available registers.
            (TARGET_REG_COST).
         2) If no registers are available, we need to spill, which may require
            storing the old value to memory and loading it back
            (TARGET_SPILL_COST).  */

      start_sequence ();
      emit_move_insn (reg1, reg2);
      seq = get_insns ();
      end_sequence ();
      target_reg_cost [speed] = seq_cost (seq, speed);

      start_sequence ();
      emit_move_insn (mem, reg1);
      emit_move_insn (reg2, mem);
      seq = get_insns ();
      end_sequence ();
      target_spill_cost [speed] = seq_cost (seq, speed);
    }
  default_rtl_profile ();
}
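
/* The globals set up above (target_avail_regs, target_clobbered_regs,
   target_res_regs, target_reg_cost[] and target_spill_cost[]) feed the
   register-pressure estimate below: the first sequence costs one
   register-to-register move, the second a store to memory plus a reload.  */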

/* Estimates cost of increased register pressure caused by making N_NEW new
   registers live around the loop.  N_OLD is the number of registers live
   around the loop.  If CALL_P is true, also take into account that
   call-used registers may be clobbered in the loop body, reducing the
   number of available registers before we spill.  */

unsigned
estimate_reg_pressure_cost (unsigned n_new, unsigned n_old, bool speed,
                            bool call_p)
{
  unsigned cost;
  unsigned regs_needed = n_new + n_old;
  unsigned available_regs = target_avail_regs;

  /* If there is a call in the loop body, the call-clobbered registers
     are not available for loop invariants.  */
  if (call_p)
    available_regs = available_regs - target_clobbered_regs;

  /* If we have enough registers, we should use them and not restrict
     the transformations unnecessarily.  */
  if (regs_needed + target_res_regs <= available_regs)
    return 0;

  if (regs_needed <= available_regs)
    /* If we are close to running out of registers, try to preserve
       them.  */
    cost = target_reg_cost [speed] * n_new;
  else
    /* If we run out of registers, it is very expensive to add another
       one.  */
    cost = target_spill_cost [speed] * n_new;

  if (optimize && (flag_ira_region == IRA_REGION_ALL
                   || flag_ira_region == IRA_REGION_MIXED)
      && number_of_loops (cfun) <= (unsigned) IRA_MAX_LOOPS_NUM)
    /* IRA regional allocation deals with high register pressure
       better.  So decrease the cost (to make the cost calculation more
       accurate for IRA, we would need to know how many registers live
       through the loop transparently).  */
    cost /= 2;

  return cost;
}
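
/* Worked example with made-up numbers: with target_avail_regs = 16,
   target_res_regs = 3, n_old = 10 and n_new = 5, regs_needed = 15;
   15 + 3 exceeds 16 but 15 <= 16, so the estimate is
   target_reg_cost[speed] * 5.  With n_new = 9, regs_needed = 19 exceeds 16
   and target_spill_cost[speed] * 9 is used instead.  */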

/* Sets EDGE_LOOP_EXIT flag for all loop exits.  */

void
mark_loop_exit_edges (void)
{
  basic_block bb;
  edge e;

  if (number_of_loops (cfun) <= 1)
    return;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (loop_outer (bb->loop_father)
              && loop_exit_edge_p (bb->loop_father, e))
            e->flags |= EDGE_LOOP_EXIT;
          else
            e->flags &= ~EDGE_LOOP_EXIT;
        }
    }
}

/* Return exit edge if loop has only one exit that is likely
   to be executed on runtime (i.e. it is not EH or leading
   to a noreturn call).  */

edge
single_likely_exit (struct loop *loop)
{
  edge found = single_exit (loop);
  vec<edge> exits;
  unsigned i;
  edge ex;

  if (found)
    return found;
  exits = get_loop_exit_edges (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (ex->flags & (EDGE_EH | EDGE_ABNORMAL_CALL))
        continue;
      /* The constant of 5 is set in a way so noreturn calls are
         ruled out by this test.  The static branch prediction algorithm
         will not assign such a low probability to conditionals for usual
         purposes.  */
      if (profile_status_for_fn (cfun) != PROFILE_ABSENT
          && ex->probability < 5 && !ex->count)
        continue;
      if (!found)
        found = ex;
      else
        {
          exits.release ();
          return NULL;
        }
    }
  exits.release ();
  return found;
}
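
/* For illustration: edge probabilities are scaled by REG_BR_PROB_BASE
   (10000), so a probability below 5 means the exit is predicted to be
   taken in fewer than 0.05% of executions; guessed profiles only assign
   such values to edges like those leading to noreturn calls, which is why
   those exits are skipped above.  */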

/* Returns the hot path of LOOP: starting from the header, repeatedly follow
   the most probable successor edge that stays inside LOOP and has not been
   visited yet.  The header is the 0-th block of the returned vector.  */

vec<basic_block>
get_loop_hot_path (const struct loop *loop)
{
  basic_block bb = loop->header;
  vec<basic_block> path = vNULL;
  bitmap visited = BITMAP_ALLOC (NULL);

  while (true)
    {
      edge_iterator ei;
      edge e;
      edge best = NULL;

      path.safe_push (bb);
      bitmap_set_bit (visited, bb->index);
      FOR_EACH_EDGE (e, ei, bb->succs)
        if ((!best || e->probability > best->probability)
            && !loop_exit_edge_p (loop, e)
            && !bitmap_bit_p (visited, e->dest->index))
          best = e;
      if (!best || best->dest == loop->header)
        break;
      bb = best->dest;
    }
  BITMAP_FREE (visited);
  return path;
}