/* Support routines for Splitting Paths to loop backedges
   Copyright (C) 2015-2016 Free Software Foundation, Inc.
   Contributed by Ajit Kumar Agarwal <ajitkum@xilinx.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "tree-cfg.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tracer.h"
#include "predict.h"
#include "params.h"
/* Given LATCH, the latch block in a loop, see if the shape of the
   path reaching LATCH is suitable for being split by duplication.
   If so, return the block that will be duplicated into its predecessor
   paths.  Else return NULL.  */

static basic_block
find_block_to_duplicate_for_splitting_paths (basic_block latch)
{
  /* We should have simple latches at this point.  So the latch should
     have a single successor.  This implies the predecessor of the latch
     likely has the loop exit.  And it's that predecessor we're most
     interested in.  To keep things simple, we're going to require that
     the latch have a single predecessor too.  */
  if (single_succ_p (latch) && single_pred_p (latch))
    {
      basic_block bb = get_immediate_dominator (CDI_DOMINATORS, latch);
      gcc_assert (single_pred_edge (latch)->src == bb);

      /* If BB has been marked as not to be duplicated, then honor that
         request.  */
      if (ignore_bb_p (bb))
        return NULL;

      gimple *last = gsi_stmt (gsi_last_nondebug_bb (bb));
      /* The immediate dominator of the latch must end in a conditional.  */
      if (!last || gimple_code (last) != GIMPLE_COND)
        return NULL;

      /* We're hoping that BB is a join point for an IF-THEN-ELSE diamond
         region.  Verify that it is.

         First, verify that BB has two predecessors (each arm of the
         IF-THEN-ELSE) and two successors (the latch and exit).  */
      if (EDGE_COUNT (bb->preds) == 2 && EDGE_COUNT (bb->succs) == 2)
        {
          /* Now verify that BB's immediate dominator ends in a
             conditional as well.  */
          basic_block bb_idom = get_immediate_dominator (CDI_DOMINATORS, bb);
          gimple *last = gsi_stmt (gsi_last_nondebug_bb (bb_idom));
          if (!last || gimple_code (last) != GIMPLE_COND)
            return NULL;

          /* And that BB's immediate dominator's successors are the
             predecessors of BB or BB itself.  */
          if (!(EDGE_PRED (bb, 0)->src == bb_idom
                || find_edge (bb_idom, EDGE_PRED (bb, 0)->src))
              || !(EDGE_PRED (bb, 1)->src == bb_idom
                   || find_edge (bb_idom, EDGE_PRED (bb, 1)->src)))
            return NULL;

          /* And that the predecessors of BB each have a single successor
             or are BB's immediate dominator itself.  */
          if (!(EDGE_PRED (bb, 0)->src == bb_idom
                || single_succ_p (EDGE_PRED (bb, 0)->src))
              || !(EDGE_PRED (bb, 1)->src == bb_idom
                   || single_succ_p (EDGE_PRED (bb, 1)->src)))
            return NULL;

          /* So at this point we have a simple diamond for an IF-THEN-ELSE
             construct starting at BB_IDOM, with a join point at BB.  BB
             passes control outside the loop or to the loop latch.

             We're going to want to create two duplicates of BB, one for
             each successor of BB_IDOM.  */
          return bb;
        }
    }
  return NULL;
}
/* Return the number of non-debug statements in a block.  */
static unsigned int
count_stmts_in_block (basic_block bb)
{
  gimple_stmt_iterator gsi;
  unsigned int num_stmts = 0;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (!is_gimple_debug (stmt))
        num_stmts++;
    }
  return num_stmts;
}
/* Return TRUE if CODE represents a tree code that is not likely to
   be easily if-convertible because it likely expands into multiple
   insns, FALSE otherwise.  */
static bool
poor_ifcvt_candidate_code (enum tree_code code)
{
  return (code == MIN_EXPR
          || code == MAX_EXPR
          || code == ABS_EXPR
          || code == COND_EXPR
          || code == CALL_EXPR);
}
/* Return TRUE if BB is a reasonable block to duplicate by examining
   its size, FALSE otherwise.  BB will always be a loop latch block.

   Things to consider:

     We do not want to spoil if-conversion if at all possible.

     Most of the benefit seems to be from eliminating the unconditional
     jump rather than CSE/DCE opportunities.  So favor duplicating
     small latches.  A latch with just a conditional branch is ideal.

     CSE/DCE opportunities crop up when statements from the predecessors
     feed statements in the latch and allow statements in the latch to
     simplify.  */
static bool
is_feasible_trace (basic_block bb)
{
  basic_block pred1 = EDGE_PRED (bb, 0)->src;
  basic_block pred2 = EDGE_PRED (bb, 1)->src;
  int num_stmts_in_join = count_stmts_in_block (bb);
  int num_stmts_in_pred1
    = EDGE_COUNT (pred1->succs) == 1 ? count_stmts_in_block (pred1) : 0;
  int num_stmts_in_pred2
    = EDGE_COUNT (pred2->succs) == 1 ? count_stmts_in_block (pred2) : 0;

  /* This is meant to catch cases that are likely opportunities for
     if-conversion.  Essentially we look for the case where
     BB's predecessors are both single-statement blocks where
     the output of that statement feeds the same PHI in BB.  */
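  /* For illustration only (this sketch is not taken from any actual
     dump): the shape rejected below is roughly

        pred1:  x_1 = a_2 + 1;        <- single non-debug statement
        pred2:  x_3 = b_4 - 1;        <- single non-debug statement
        bb:     x_5 = PHI <x_1, x_3>

     where both single-statement arms feed the same PHI in BB and the
     right-hand-side codes are cheap to if-convert.  Duplicating BB in
     that situation would hide the diamond from if-conversion, so we
     decline to split.  The names above are made up for the example.  */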
  if (num_stmts_in_pred1 == 1 && num_stmts_in_pred2 == 1)
    {
      gimple *stmt1 = last_and_only_stmt (pred1);
      gimple *stmt2 = last_and_only_stmt (pred2);

      if (stmt1 && stmt2
          && gimple_code (stmt1) == GIMPLE_ASSIGN
          && gimple_code (stmt2) == GIMPLE_ASSIGN)
        {
          enum tree_code code1 = gimple_assign_rhs_code (stmt1);
          enum tree_code code2 = gimple_assign_rhs_code (stmt2);

          if (!poor_ifcvt_candidate_code (code1)
              && !poor_ifcvt_candidate_code (code2))
            {
              tree lhs1 = gimple_assign_lhs (stmt1);
              tree lhs2 = gimple_assign_lhs (stmt2);
              gimple_stmt_iterator gsi;
              for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
                {
                  gimple *phi = gsi_stmt (gsi);
                  if ((gimple_phi_arg_def (phi, 0) == lhs1
                       && gimple_phi_arg_def (phi, 1) == lhs2)
                      || (gimple_phi_arg_def (phi, 1) == lhs1
                          && gimple_phi_arg_def (phi, 0) == lhs2))
                    {
                      if (dump_file && (dump_flags & TDF_DETAILS))
                        fprintf (dump_file,
                                 "Block %d appears to be a join point for "
                                 "if-convertible diamond.\n",
                                 bb->index);
                      return false;
                    }
                }
            }
        }
    }

  /* We may want something here which looks at dataflow and tries
     to guess if duplication of BB is likely to result in simplification
     of instructions in BB in either the original or the duplicate.  */

  /* Hard upper limit on the number of statements to copy.  */
  if (num_stmts_in_join
      >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS))
    return false;

  return true;
}
/* If the immediate dominator of the loop's latch is a block ending in
   a conditional branch (the join of an IF-THEN-ELSE diamond), then that
   join block is duplicated into its predecessor paths, preserving the
   SSA semantics.

   CFG before transformation:

              2
              |
              |
        +---->3
        |    / \
        |   /   \
        |  4     5
        |   \   /
        |    \ /
        |     6
        |    / \
        |   /   \
        |  8     7
        |  |     |
        ---+     E

   Block 8 is the latch.  We're going to make copies of block 6 (9 & 10)
   and wire things up so they look like this:

              2
              |
              |
        +---->3
        |    / \
        |   /   \
        |  4     5
        |  |     |
        |  |     |
        |  9    10
        |  |\   /|
        |  | \ / |
        |  |  7  |
        |  |  |  |
        |  |  E  |
        |  |     |
        |   \   /
        |    \ /
        +-----8

   Blocks 9 and 10 will get merged into blocks 4 & 5 respectively, which
   enables CSE, DCE and other optimizations to occur on a larger block
   of code.  */
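/* Purely for illustration (this example does not appear in the original
   sources): a source loop of roughly the following shape produces the
   CFG sketched above, with the test on c forming the 4/5 diamond whose
   join block branches to both the latch and the loop exit:

        do
          {
            if (c)              // block 3, loop header
              x = f ();         // block 4
            else
              x = g ();         // block 5
            sum += x;           // block 6, the join we duplicate
          }
        while (--n);            // backedge via block 8, exit via 7/E

   The names c, x, f, g, sum and n are made up for the example.  */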
static bool
split_paths ()
{
  bool changed = false;
  loop_p loop;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  initialize_original_copy_tables ();
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* Only split paths if we are optimizing this loop for speed.  */
      if (!optimize_loop_for_speed_p (loop))
        continue;

      /* See if there is a block that we can duplicate to split the
         path to the loop latch.  */
      basic_block bb
        = find_block_to_duplicate_for_splitting_paths (loop->latch);

      /* BB is the merge point for an IF-THEN-ELSE we want to transform.

         Essentially we want to create a duplicate of BB and redirect the
         first predecessor of BB to the duplicate (leaving the second
         predecessor as is).  This will split the path leading to the latch,
         re-using BB to avoid useless copying.  */
      if (bb && is_feasible_trace (bb))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "Duplicating join block %d into predecessor paths\n",
                     bb->index);
          basic_block pred0 = EDGE_PRED (bb, 0)->src;
          if (EDGE_COUNT (pred0->succs) != 1)
            pred0 = EDGE_PRED (bb, 1)->src;
          transform_duplicate (pred0, bb);
          changed = true;

          /* If BB has an outgoing edge marked as IRREDUCIBLE, then
             duplicating BB may result in an irreducible region turning
             into a natural loop.

             Long term we might want to hook this into the block
             duplication code, but as we've seen with similar changes
             for edge removal, that can be somewhat risky.  */
          if (EDGE_SUCC (bb, 0)->flags & EDGE_IRREDUCIBLE_LOOP
              || EDGE_SUCC (bb, 1)->flags & EDGE_IRREDUCIBLE_LOOP)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file,
                         "Join block %d has EDGE_IRREDUCIBLE_LOOP set.  "
                         "Scheduling loop fixups.\n",
                         bb->index);
              loops_state_set (LOOPS_NEED_FIXUP);
            }
        }
    }

  loop_optimizer_finalize ();
  free_original_copy_tables ();
  return changed;
}
/* Main entry point for splitting paths.  Returns TODO_cleanup_cfg if any
   paths were split, otherwise returns zero.  */

static unsigned int
execute_split_paths ()
{
  /* If we don't have at least 2 real blocks and backedges in the
     CFG, then there's no point in trying to perform path splitting.  */
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || !mark_dfs_back_edges ())
    return 0;

  bool changed = split_paths ();
  if (changed)
    free_dominance_info (CDI_DOMINATORS);

  return changed ? TODO_cleanup_cfg : 0;
}
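/* The pass is gated on the -fsplit-paths command-line flag
   (flag_split_paths).  */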
static bool
gate_split_paths ()
{
  return flag_split_paths;
}
namespace {

const pass_data pass_data_split_paths =
{
  GIMPLE_PASS, /* type */
  "split-paths", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_SPLIT_PATHS, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_split_paths : public gimple_opt_pass
{
public:
  pass_split_paths (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_split_paths, ctxt)
  {}
  /* opt_pass methods: */
  opt_pass * clone () { return new pass_split_paths (m_ctxt); }
  virtual bool gate (function *) { return gate_split_paths (); }
  virtual unsigned int execute (function *) { return execute_split_paths (); }

}; // class pass_split_paths

} // anon namespace
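/* Factory used by the pass manager (declared in tree-pass.h and
   referenced from passes.def) to create an instance of this pass.  */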
gimple_opt_pass *
make_pass_split_paths (gcc::context *ctxt)
{
  return new pass_split_paths (ctxt);
}