/* Natural loop analysis code for GNU compiler.
   Copyright (C) 2002-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "obstack.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "expr.h"
#include "graphds.h"
#include "params.h"

struct target_cfgloop default_target_cfgloop;
#if SWITCHABLE_TARGET
struct target_cfgloop *this_target_cfgloop = &default_target_cfgloop;
#endif

/* Checks whether BB is executed exactly once in each LOOP iteration.  */

bool
just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
{
  /* It must be executed at least once each iteration.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return false;

  /* And just once.  */
  if (bb->loop_father != loop)
    return false;

  /* But this was not enough.  We might have some irreducible loop here.  */
  if (bb->flags & BB_IRREDUCIBLE_LOOP)
    return false;

  return true;
}

/* Marks blocks and edges that are part of non-recognized loops; i.e. we
   throw away all latch edges and mark blocks inside any remaining cycle.
   Everything is a bit complicated due to the fact that we do not want to
   do this for parts of cycles that only "pass" through some loop -- i.e.
   for each cycle, we want to mark blocks that belong directly to the
   innermost loop containing the whole cycle.

   LOOPS is the loop tree.  */

#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block_for_fn (cfun))
#define BB_REPR(BB) ((BB)->index + 1)
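
/* For illustration (numbers invented): if last_basic_block_for_fn (cfun)
   is 10, the basic block with index 3 is represented by vertex
   BB_REPR == 4, while the loop with number 2 is represented by vertex
   LOOP_REPR == 12, so basic blocks and loops occupy disjoint vertex
   ranges of the auxiliary graph built below.  */
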
bool
mark_irreducible_loops (void)
{
  basic_block act;
  struct graph_edge *ge;
  edge e;
  edge_iterator ei;
  int src, dest;
  unsigned depth;
  struct graph *g;
  int num = number_of_loops (cfun);
  struct loop *cloop;
  bool irred_loop_found = false;
  int i;

  gcc_assert (current_loops != NULL);

  /* Reset the flags.  */
  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      act->flags &= ~BB_IRREDUCIBLE_LOOP;
      FOR_EACH_EDGE (e, ei, act->succs)
        e->flags &= ~EDGE_IRREDUCIBLE_LOOP;
    }

  /* Create the edge lists.  */
  g = new_graph (last_basic_block_for_fn (cfun) + num);

  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    FOR_EACH_EDGE (e, ei, act->succs)
      {
        /* Ignore edges to exit.  */
        if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
          continue;

        src = BB_REPR (act);
        dest = BB_REPR (e->dest);

        /* Ignore latch edges.  */
        if (e->dest->loop_father->header == e->dest
            && e->dest->loop_father->latch == act)
          continue;

        /* Edges inside a single loop should be left where they are.  Edges
           to subloop headers should lead to representative of the subloop,
           but from the same place.

           Edges exiting loops should lead from representative
           of the son of nearest common ancestor of the loops in that
           act lays.  */

        if (e->dest->loop_father->header == e->dest)
          dest = LOOP_REPR (e->dest->loop_father);

        if (!flow_bb_inside_loop_p (act->loop_father, e->dest))
          {
            depth = 1 + loop_depth (find_common_loop (act->loop_father,
                                                      e->dest->loop_father));
            if (depth == loop_depth (act->loop_father))
              cloop = act->loop_father;
            else
              cloop = (*act->loop_father->superloops)[depth];

            src = LOOP_REPR (cloop);
          }

        add_edge (g, src, dest)->data = e;
      }

  /* Find the strongly connected components.  */
  graphds_scc (g, NULL);

  /* Mark the irreducible loops.  */
  for (i = 0; i < g->n_vertices; i++)
    for (ge = g->vertices[i].succ; ge; ge = ge->succ_next)
      {
        edge real = (edge) ge->data;
        /* edge E in graph G is irreducible if it connects two vertices in
           the same scc.  */

        /* All edges should lead from a component with higher number to the
           one with lower one.  */
        gcc_assert (g->vertices[ge->src].component
                    >= g->vertices[ge->dest].component);

        if (g->vertices[ge->src].component != g->vertices[ge->dest].component)
          continue;

        real->flags |= EDGE_IRREDUCIBLE_LOOP;
        irred_loop_found = true;
        if (flow_bb_inside_loop_p (real->src->loop_father, real->dest))
          real->src->flags |= BB_IRREDUCIBLE_LOOP;
      }

  free_graph (g);

  loops_state_set (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  return irred_loop_found;
}

/* Counts number of insns inside LOOP.  */
int
num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, ninsns = 0;
  rtx insn;

  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];
      FOR_BB_INSNS (bb, insn)
        if (NONDEBUG_INSN_P (insn))
          ninsns++;
    }
  free (bbs);

  if (!ninsns)
    ninsns = 1; /* To avoid division by zero.  */

  return ninsns;
}

/* Counts number of insns executed on average per iteration of LOOP.  */
int
average_num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, binsns, ninsns, ratio;
  rtx insn;

  ninsns = 0;
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      binsns = 0;
      FOR_BB_INSNS (bb, insn)
        if (NONDEBUG_INSN_P (insn))
          binsns++;

      ratio = loop->header->frequency == 0
              ? BB_FREQ_MAX
              : (bb->frequency * BB_FREQ_MAX) / loop->header->frequency;
      ninsns += binsns * ratio;
    }
  free (bbs);

  ninsns /= BB_FREQ_MAX;
  if (!ninsns)
    ninsns = 1; /* To avoid division by zero.  */

  return ninsns;
}
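
/* Worked example (invented frequencies): with a header frequency of 1000,
   a body block of frequency 500 containing 8 non-debug insns contributes
   8 * ((500 * BB_FREQ_MAX) / 1000) to the sum above; after the final
   division by BB_FREQ_MAX that block accounts for 4 insns per average
   iteration.  */
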
/* Returns expected number of iterations of LOOP, according to
   measured or guessed profile.  No bounding is done on the
   value.  */

gcov_type
expected_loop_iterations_unbounded (const struct loop *loop)
{
  edge e;
  edge_iterator ei;

  if (loop->latch->count || loop->header->count)
    {
      gcov_type count_in, count_latch, expected;

      count_in = 0;
      count_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src == loop->latch)
          count_latch = e->count;
        else
          count_in += e->count;

      if (count_in == 0)
        expected = count_latch * 2;
      else
        expected = (count_latch + count_in - 1) / count_in;

      return expected;
    }
  else
    {
      int freq_in, freq_latch;

      freq_in = 0;
      freq_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src == loop->latch)
          freq_latch = EDGE_FREQUENCY (e);
        else
          freq_in += EDGE_FREQUENCY (e);

      if (freq_in == 0)
        return freq_latch * 2;

      return (freq_latch + freq_in - 1) / freq_in;
    }
}
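
/* Worked example (invented profile): if the header is entered 100 times
   from outside the loop (count_in == 100) and the latch edge is taken
   900 times (count_latch == 900), the estimate is
   (900 + 100 - 1) / 100 == 9 latch traversals per entry, i.e. roughly
   ten executions of the header per entry into the loop.  */
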
/* Returns expected number of LOOP iterations.  The returned value is bounded
   by REG_BR_PROB_BASE.  */

unsigned
expected_loop_iterations (const struct loop *loop)
{
  gcov_type expected = expected_loop_iterations_unbounded (loop);
  return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
}

/* Returns the maximum level of nesting of subloops of LOOP.  */

unsigned
get_loop_level (const struct loop *loop)
{
  const struct loop *ploop;
  unsigned mx = 0, l;

  for (ploop = loop->inner; ploop; ploop = ploop->next)
    {
      l = get_loop_level (ploop);
      if (l >= mx)
        mx = l + 1;
    }
  return mx;
}

/* Initialize the constants for computing set costs.  */

void
init_set_costs (void)
{
  int speed;
  rtx seq;
  rtx reg1 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx reg2 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER + 1);
  rtx addr = gen_raw_REG (Pmode, FIRST_PSEUDO_REGISTER + 2);
  rtx mem = validize_mem (gen_rtx_MEM (SImode, addr));
  unsigned i;

  target_avail_regs = 0;
  target_clobbered_regs = 0;
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i)
        && !fixed_regs[i])
      {
        target_avail_regs++;
        if (call_used_regs[i])
          target_clobbered_regs++;
      }

  target_res_regs = 3;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      /* Set up the costs for using extra registers:

         1) If not many free registers remain, we should prefer having an
            additional move to decreasing the number of available registers
            (TARGET_REG_COST).
         2) If no registers are available, we need to spill, which may require
            storing the old value to memory and loading it back
            (TARGET_SPILL_COST).  */

      start_sequence ();
      emit_move_insn (reg1, reg2);
      seq = get_insns ();
      end_sequence ();
      target_reg_cost[speed] = seq_cost (seq, speed);

      start_sequence ();
      emit_move_insn (mem, reg1);
      emit_move_insn (reg2, mem);
      seq = get_insns ();
      end_sequence ();
      target_spill_cost[speed] = seq_cost (seq, speed);
    }
  default_rtl_profile ();
}
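
/* For illustration (invented costs): on a target where a reg-reg move
   sequence costs 4 and the store-plus-reload sequence costs 8, the loop
   above records target_reg_cost[speed] == 4 and target_spill_cost[speed]
   == 8, so estimate_reg_pressure_cost below charges twice as much per
   new register once spilling would be needed.  */
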
/* Estimates cost of increased register pressure caused by making N_NEW new
   registers live around the loop.  N_OLD is the number of registers live
   around the loop.  If CALL_P is true, also take into account that
   call-used registers may be clobbered in the loop body, reducing the
   number of available registers before we spill.  */

unsigned
estimate_reg_pressure_cost (unsigned n_new, unsigned n_old, bool speed,
                            bool call_p)
{
  unsigned cost;
  unsigned regs_needed = n_new + n_old;
  unsigned available_regs = target_avail_regs;

  /* If there is a call in the loop body, the call-clobbered registers
     are not available for loop invariants.  */
  if (call_p)
    available_regs = available_regs - target_clobbered_regs;

  /* If we have enough registers, we should use them and not restrict
     the transformations unnecessarily.  */
  if (regs_needed + target_res_regs <= available_regs)
    return 0;

  if (regs_needed <= available_regs)
    /* If we are close to running out of registers, try to preserve
       them.  */
    cost = target_reg_cost[speed] * n_new;
  else
    /* If we run out of registers, it is very expensive to add another
       one.  */
    cost = target_spill_cost[speed] * n_new;

  if (optimize && (flag_ira_region == IRA_REGION_ALL
                   || flag_ira_region == IRA_REGION_MIXED)
      && number_of_loops (cfun) <= (unsigned) IRA_MAX_LOOPS_NUM)
    /* IRA regional allocation deals with high register pressure
       better.  So decrease the cost (to make the cost calculation more
       accurate for IRA, we would need to know how many registers live
       through the loop transparently).  */
    cost /= 2;

  return cost;
}
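
/* Worked example (invented numbers): with target_avail_regs == 14,
   target_res_regs == 3, target_reg_cost[speed] == 4 and
   target_spill_cost[speed] == 8, a call-free loop with n_old == 6 pays
   nothing for n_new == 2 (8 + 3 <= 14), pays 4 * 6 == 24 for n_new == 6
   (12 registers still fit, but the reserve no longer does), and pays
   8 * 9 == 72 for n_new == 9 (15 > 14, so we would have to spill); the
   nonzero costs are halved when the IRA regional-allocation condition
   holds.  */
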
/* Sets EDGE_LOOP_EXIT flag for all loop exits.  */

void
mark_loop_exit_edges (void)
{
  basic_block bb;
  edge e;

  if (number_of_loops (cfun) <= 1)
    return;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (loop_outer (bb->loop_father)
              && loop_exit_edge_p (bb->loop_father, e))
            e->flags |= EDGE_LOOP_EXIT;
          else
            e->flags &= ~EDGE_LOOP_EXIT;
        }
    }
}

/* Return exit edge if loop has only one exit that is likely
   to be executed at runtime (i.e. it is not an EH edge or an edge
   leading to a noreturn call).  */

edge
single_likely_exit (struct loop *loop)
{
  edge found = single_exit (loop);
  vec<edge> exits;
  unsigned i;
  edge ex;

  if (found)
    return found;
  exits = get_loop_exit_edges (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (ex->flags & (EDGE_EH | EDGE_ABNORMAL_CALL))
        continue;
      /* The constant of 5 is set so that noreturn calls are
         ruled out by this test.  The static branch prediction algorithm
         will not assign such a low probability to conditionals for usual
         reasons.  */
      if (profile_status_for_fn (cfun) != PROFILE_ABSENT
          && ex->probability < 5 && !ex->count)
        continue;
      if (!found)
        found = ex;
      else
        {
          exits.release ();
          return NULL;
        }
    }
  exits.release ();
  return found;
}
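
/* Note (assuming the usual REG_BR_PROB_BASE scaling of 10000): the "< 5"
   test above filters out exits predicted to be taken less than 0.05% of
   the time, which is how the noreturn-call paths mentioned in the comment
   end up being ignored.  */
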
/* Gets basic blocks of a LOOP.  Header is the 0-th block, rest is in dfs
   order against direction of edges from latch.  Specially, if
   header != latch, latch is the 1st block.  */

vec<basic_block>
get_loop_hot_path (const struct loop *loop)
{
  basic_block bb = loop->header;
  vec<basic_block> path = vNULL;
  bitmap visited = BITMAP_ALLOC (NULL);

  while (true)
    {
      edge_iterator ei;
      edge e;
      edge best = NULL;

      path.safe_push (bb);
      bitmap_set_bit (visited, bb->index);
      FOR_EACH_EDGE (e, ei, bb->succs)
        if ((!best || e->probability > best->probability)
            && !loop_exit_edge_p (loop, e)
            && !bitmap_bit_p (visited, e->dest->index))
          best = e;
      if (!best || best->dest == loop->header)
        break;
      bb = best->dest;
    }
  BITMAP_FREE (visited);
  return path;
}
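
/* Usage sketch (hypothetical caller; process_hot_block is a made-up
   helper): the returned vector is heap allocated, so the caller is
   expected to release it when done:

     vec<basic_block> path = get_loop_hot_path (loop);
     unsigned i;
     basic_block bb;

     FOR_EACH_VEC_ELT (path, i, bb)
       process_hot_block (bb);
     path.release ();  */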