gcc/tree-ssa-threadbackward.c
/* SSA Jump Threading
   Copyright (C) 2005-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "predict.h"
#include "tree.h"
#include "gimple.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-loop.h"
#include "cfganal.h"
#include "tree-pass.h"
static int max_threaded_paths;
/* Simple helper to get the last statement from BB, which is assumed
   to be a control statement.  Return NULL if the last statement is
   not a control statement.  */
static gimple *
get_gimple_control_stmt (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);

  if (gsi_end_p (gsi))
    return NULL;

  gimple *stmt = gsi_stmt (gsi);
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_COND || code == GIMPLE_SWITCH || code == GIMPLE_GOTO)
    return stmt;
  return NULL;
}
/* Return true if the CFG contains at least one path from START_BB to
   END_BB.  When a path is found, record in PATH the blocks from
   END_BB to START_BB.  VISITED_BBS is used to make sure we don't fall
   into an infinite loop.  Bound the recursion to basic blocks
   belonging to LOOP.  */
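/* For instance (a purely hypothetical CFG): for blocks A -> B -> C,
   a call with START_BB = A and END_BB = C pushes blocks as the
   recursion unwinds, so PATH ends up as [C, B, A] -- callers see the
   blocks in reverse, from END_BB back to START_BB.  */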
static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
                      vec<basic_block, va_gc> *&path,
                      hash_set<basic_block> *visited_bbs, loop_p loop)
{
  if (loop != start_bb->loop_father)
    return false;

  if (start_bb == end_bb)
    {
      vec_safe_push (path, start_bb);
      return true;
    }

  if (!visited_bbs->add (start_bb))
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, start_bb->succs)
        if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
          {
            vec_safe_push (path, start_bb);
            return true;
          }
    }

  return false;
}
/* Examine jump threading path PATH to which we want to add BBI.

   If the resulting path is profitable to thread, then return the
   final taken edge from the path, NULL otherwise.

   NAME is the SSA_NAME of the variable we found to have a constant
   value on PATH.  ARG is the value of that SSA_NAME.

   BBI will be appended to PATH when we have a profitable jump
   threading path.  Callers are responsible for removing BBI from PATH
   in that case.  */
static edge
profitable_jump_thread_path (vec<basic_block, va_gc> *&path,
                             basic_block bbi, tree name, tree arg)
{
  /* Note BBI is not in the path yet, hence the +1 in the test below
     to make sure BBI is accounted for in the path length test.  */
  int path_length = path->length ();
  if (path_length + 1 > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the number of basic blocks on the path "
                 "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
      return NULL;
    }

  if (max_threaded_paths <= 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the number of previously recorded FSM paths to "
                 "thread exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
      return NULL;
    }

  /* Add BBI to the path.
     From this point onward, if we decide the path is not profitable
     to thread, we must remove BBI from the path.  */
  vec_safe_push (path, bbi);
  ++path_length;
  int n_insns = 0;
  gimple_stmt_iterator gsi;
  int j;
  loop_p loop = (*path)[0]->loop_father;
  bool path_crosses_loops = false;
  bool threaded_through_latch = false;
  bool multiway_branch_in_path = false;
  bool threaded_multiway_branch = false;
  /* Count the number of instructions on the path: as these
     instructions will have to be duplicated, we will not record the
     path if there are too many instructions on the path.  Also check
     that all the blocks in the path belong to a single loop.  */
  for (j = 0; j < path_length; j++)
    {
      basic_block bb = (*path)[j];

      /* Remember, blocks in the path are stored in opposite order in
         the PATH array.  The last entry in the array represents the
         block with an outgoing edge that we will redirect to the jump
         threading path.  Thus we don't care about that block's loop
         father, how many statements are in it (it will not be
         copied), or whether it ends in a multiway branch.  */
      if (j < path_length - 1)
        {
          if (bb->loop_father != loop)
            {
              path_crosses_loops = true;
              break;
            }
          /* PHIs in the path will create degenerate PHIs in the
             copied path which will then get propagated away, so
             looking at just the duplicate path the PHIs would seem
             unimportant.

             But those PHIs, because they're assignments to objects
             typically with lives that exist outside the thread path,
             will tend to generate PHIs (or at least new PHI arguments)
             at points where we leave the thread path and rejoin the
             original blocks.  So we do want to account for them.

             We ignore virtual PHIs.  We also ignore cases where BB has
             a single incoming edge.  That's the most common degenerate
             PHI we'll see here.  Finally we ignore PHIs that are
             associated with the value we're tracking as that object
             likely dies.  */
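          /* A hypothetical illustration: a PHI such as
             x_5 = PHI <x_3(A), x_4(B)> in a block with multiple
             predecessors and successors counts one statement toward
             N_INSNS, unless x_5 shares its underlying variable with
             NAME or is a virtual operand.  */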
          if (EDGE_COUNT (bb->succs) > 1 && EDGE_COUNT (bb->preds) > 1)
            {
              for (gphi_iterator gsip = gsi_start_phis (bb);
                   !gsi_end_p (gsip);
                   gsi_next (&gsip))
                {
                  gphi *phi = gsip.phi ();
                  tree dst = gimple_phi_result (phi);

                  /* Note that if both NAME and DST are anonymous
                     SSA_NAMEs, then we do not have enough information
                     to consider them associated.  */
                  if ((SSA_NAME_VAR (dst) != SSA_NAME_VAR (name)
                       || !SSA_NAME_VAR (dst))
                      && !virtual_operand_p (dst))
                    ++n_insns;
                }
            }
          for (gsi = gsi_after_labels (bb);
               !gsi_end_p (gsi);
               gsi_next_nondebug (&gsi))
            {
              gimple *stmt = gsi_stmt (gsi);
              /* Do not count empty statements and labels.  */
              if (gimple_code (stmt) != GIMPLE_NOP
                  && !(gimple_code (stmt) == GIMPLE_ASSIGN
                       && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
                  && !is_gimple_debug (stmt))
                ++n_insns;
            }
          /* We do not look at the block with the threaded branch in
             this loop.  So if any block with a last statement that is
             a GIMPLE_SWITCH or GIMPLE_GOTO is seen, then we have a
             multiway branch on our path.

             The block in PATH[0] is special, it's the block where
             we're going to be able to eliminate its branch.  */
          gimple *last = last_stmt (bb);
          if (last && (gimple_code (last) == GIMPLE_SWITCH
                       || gimple_code (last) == GIMPLE_GOTO))
            {
              if (j == 0)
                threaded_multiway_branch = true;
              else
                multiway_branch_in_path = true;
            }
        }
      /* Note if we thread through the latch, we will want to include
         the last entry in the array when determining if we thread
         through the loop latch.  */
      if (loop->latch == bb)
        threaded_through_latch = true;
    }
  /* We are going to remove the control statement at the end of the
     last block in the threading path.  So don't count it against our
     statement count.  */
  n_insns--;

  gimple *stmt = get_gimple_control_stmt ((*path)[0]);
  gcc_assert (stmt);
  /* We have found a constant value for ARG.  For GIMPLE_SWITCH
     and GIMPLE_GOTO, we use it as-is.  However, for a GIMPLE_COND
     we need to substitute, fold and simplify so we can determine
     the edge taken out of the last block.  */
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      enum tree_code cond_code = gimple_cond_code (stmt);

      /* We know the underlying format of the condition.  */
      arg = fold_binary (cond_code, boolean_type_node,
                         arg, gimple_cond_rhs (stmt));
    }
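  /* For example (hypothetical values): for a condition x_1 != 0 with
     ARG known to be 5 on this path, the fold above reduces 5 != 0 to
     boolean true, and find_taken_edge below maps that constant to the
     true outgoing edge of the block.  */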
  /* If this path threaded through the loop latch back into the same
     loop and the destination does not dominate the loop latch, then
     this thread would create an irreducible loop.

     We have to know the outgoing edge to figure this out.  */
  edge taken_edge = find_taken_edge ((*path)[0], arg);

  /* There are cases where we may not be able to extract the taken
     edge.  For example, a computed goto to an absolute address.
     Handle those cases gracefully.  */
  if (taken_edge == NULL)
    {
      path->pop ();
      return NULL;
    }
  bool creates_irreducible_loop = false;
  if (threaded_through_latch
      && loop == taken_edge->dest->loop_father
      && (determine_bb_domination_status (loop, taken_edge->dest)
          == DOMST_NONDOMINATING))
    creates_irreducible_loop = true;
  if (path_crosses_loops)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the path crosses loops.\n");
      path->pop ();
      return NULL;
    }

  if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the number of instructions on the path "
                 "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
      path->pop ();
      return NULL;
    }
  /* We avoid creating irreducible inner loops unless we thread through
     a multiway branch, in which case we have deemed it worth losing
     other loop optimizations later.

     We also consider it worth creating an irreducible inner loop if
     the number of copied statements is low relative to the length of
     the path -- in that case there's little the traditional loop
     optimizer would have done anyway, so an irreducible loop is not
     so bad.  */
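  /* A worked example, assuming the documented defaults of 2 for
     PARAM_FSM_SCALE_PATH_STMTS and 3 for PARAM_FSM_SCALE_PATH_BLOCKS:
     a 5-block path that creates an irreducible loop is rejected once
     it copies 8 or more statements, since 8 * 2 > 5 * 3.  */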
  if (!threaded_multiway_branch && creates_irreducible_loop
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
          > path_length * PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FSM would create irreducible loop without threading "
                 "multiway branch.\n");
      path->pop ();
      return NULL;
    }
  /* If this path does not thread through the loop latch, then we are
     using the FSM threader to find old style jump threads.  This
     is good, except the FSM threader does not re-use an existing
     threading path to reduce code duplication.

     So for that case, drastically reduce the number of statements
     we are allowed to copy.  */
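  /* For instance, assuming a duplication limit of 15 statements and a
     statement scale factor of 2, such a path may copy at most 7
     statements, since 8 * 2 >= 15.  */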
  if (!(threaded_through_latch && threaded_multiway_branch)
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
          >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FSM did not thread around loop and would copy too "
                 "many statements.\n");
      path->pop ();
      return NULL;
    }
  /* When there is a multi-way branch on the path, then threading can
     explode the CFG due to duplicating the edges for that multi-way
     branch.  So like above, only allow a multi-way branch on the path
     if we actually thread a multi-way branch.  */
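  /* A small illustration: copying a block that ends in a switch with
     N case labels duplicates all N outgoing edges, so even a short
     path through such a block can multiply the number of edges in
     the CFG.  */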
  if (!threaded_multiway_branch && multiway_branch_in_path)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FSM Thread through multiway branch without threading "
                 "a multiway branch.\n");
      path->pop ();
      return NULL;
    }
  return taken_edge;
}
/* PATH is a vector of blocks forming a jump threading path in reverse
   order.  TAKEN_EDGE is the edge taken from path[0].

   Convert that path into the form used by register_jump_thread and
   register the path.  */
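/* As an example (hypothetical blocks), for PATH = [C, B, A] and
   TAKEN_EDGE an edge out of C, the registered path consists of the
   edges A->B and B->C, each tagged EDGE_FSM_THREAD, followed by
   TAKEN_EDGE tagged EDGE_NO_COPY_SRC_BLOCK.  */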
static void
convert_and_register_jump_thread_path (vec<basic_block, va_gc> *&path,
                                       edge taken_edge)
{
  vec<jump_thread_edge *> *jump_thread_path = new vec<jump_thread_edge *> ();

  /* Record the edges between the blocks in PATH.  */
  for (unsigned int j = 0; j < path->length () - 1; j++)
    {
      basic_block bb1 = (*path)[path->length () - j - 1];
      basic_block bb2 = (*path)[path->length () - j - 2];
      if (bb1 == bb2)
        continue;

      edge e = find_edge (bb1, bb2);
      gcc_assert (e);
      jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
      jump_thread_path->safe_push (x);
    }

  /* Add the edge taken when the control variable has value ARG.  */
  jump_thread_edge *x
    = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
  jump_thread_path->safe_push (x);

  register_jump_thread (jump_thread_path);
  --max_threaded_paths;

  /* Remove BBI from the path.  */
  path->pop ();
}
/* We trace the value of the SSA_NAME NAME back through any phi nodes
   looking for places where it gets a constant value and save the
   path.  Stop after having recorded MAX_PATHS jump threading paths.  */
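/* An illustrative (hypothetical) fragment:

     x_3 = PHI <7(bb4), y_6(bb5)>
     ...
     if (x_3 != 0)

   The constant argument 7 immediately yields a candidate path ending
   in bb4, while y_6 is followed recursively in the hope that it too
   reduces to a constant.  */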
static void
fsm_find_control_statement_thread_paths (tree name,
                                         hash_set<basic_block> *visited_bbs,
                                         vec<basic_block, va_gc> *&path,
                                         bool seen_loop_phi)
{
  /* If NAME appears in an abnormal PHI, then don't try to trace its
     value back through PHI nodes.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return;

  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  basic_block var_bb = gimple_bb (def_stmt);

  if (var_bb == NULL)
    return;

  /* We allow the SSA chain to contain PHIs and simple copies and
     constant initializations.  */
  if (gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_code (def_stmt) != GIMPLE_ASSIGN)
    return;
  if (gimple_code (def_stmt) == GIMPLE_PHI
      && (gimple_phi_num_args (def_stmt)
          >= (unsigned) PARAM_VALUE (PARAM_FSM_MAXIMUM_PHI_ARGUMENTS)))
    return;

  if (gimple_code (def_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (def_stmt) != INTEGER_CST
      && gimple_assign_rhs_code (def_stmt) != SSA_NAME)
    return;
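  /* A hypothetical chain this accepts: following b_2 = a_1 and then
     a_1 = 7 reaches a constant.  A definition such as a_1 = c_4 + 1
     stops the walk, since only copies and constant initializations
     are followed.  */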
  /* Avoid infinite recursion.  */
  if (visited_bbs->add (var_bb))
    return;

  int next_path_length = 0;
  basic_block last_bb_in_path = path->last ();

  if (loop_containing_stmt (def_stmt)->header == gimple_bb (def_stmt))
    {
      /* Do not walk through more than one loop PHI node.  */
      if (seen_loop_phi)
        return;
      seen_loop_phi = true;
    }
  /* Following the chain of SSA_NAME definitions, we jumped from a
     definition in LAST_BB_IN_PATH to a definition in VAR_BB.  When
     these basic blocks are different, append to PATH the blocks from
     LAST_BB_IN_PATH to VAR_BB.  */
  if (var_bb != last_bb_in_path)
    {
      edge e;
      int e_count = 0;
      edge_iterator ei;
      vec<basic_block, va_gc> *next_path;
      vec_alloc (next_path, 10);

      /* When VAR_BB == LAST_BB_IN_PATH, then the first block in the
         path will already be in VISITED_BBS.  When they are not equal,
         then we must ensure that first block is accounted for to
         ensure we do not create bogus jump threading paths.  */
      visited_bbs->add ((*path)[0]);
      FOR_EACH_EDGE (e, ei, last_bb_in_path->preds)
        {
          hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

          if (fsm_find_thread_path (var_bb, e->src, next_path, visited_bbs,
                                    e->src->loop_father))
            ++e_count;

          delete visited_bbs;

          /* If there is more than one path, stop.  */
          if (e_count > 1)
            {
              vec_free (next_path);
              return;
            }
        }

      /* Stop if we have not found a path: this could occur when the
         recursion is stopped by one of the bounds.  */
      if (e_count == 0)
        {
          vec_free (next_path);
          return;
        }
      /* Make sure we haven't already visited any of the nodes in
         NEXT_PATH.  Don't add them here to avoid pollution.  */
      for (unsigned int i = 0; i < next_path->length () - 1; i++)
        {
          if (visited_bbs->contains ((*next_path)[i]))
            {
              vec_free (next_path);
              return;
            }
        }

      /* Now add the nodes to VISITED_BBS.  */
      for (unsigned int i = 0; i < next_path->length () - 1; i++)
        visited_bbs->add ((*next_path)[i]);

      /* Append all the nodes from NEXT_PATH to PATH.  */
      vec_safe_splice (path, next_path);
      next_path_length = next_path->length ();
      vec_free (next_path);
    }
  gcc_assert (path->last () == var_bb);

  /* Iterate over the arguments of PHI.  */
  unsigned int i;
  if (gimple_code (def_stmt) == GIMPLE_PHI)
    {
      gphi *phi = as_a <gphi *> (def_stmt);
      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree arg = gimple_phi_arg_def (phi, i);
          basic_block bbi = gimple_phi_arg_edge (phi, i)->src;

          /* Skip edges pointing outside the current loop.  */
          if (!arg || var_bb->loop_father != bbi->loop_father)
            continue;

          if (TREE_CODE (arg) == SSA_NAME)
            {
              vec_safe_push (path, bbi);
              /* Recursively follow SSA_NAMEs looking for a constant
                 definition.  */
              fsm_find_control_statement_thread_paths (arg, visited_bbs,
                                                       path, seen_loop_phi);

              path->pop ();
              continue;
            }

          if (TREE_CODE (arg) != INTEGER_CST)
            continue;

          /* If this is a profitable jump thread path, then convert it
             into the canonical form and register it.  */
          edge taken_edge = profitable_jump_thread_path (path, bbi, name,
                                                         arg);
          if (taken_edge)
            convert_and_register_jump_thread_path (path, taken_edge);
        }
    }
  else if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
    {
      tree arg = gimple_assign_rhs1 (def_stmt);

      if (TREE_CODE (arg) == SSA_NAME)
        fsm_find_control_statement_thread_paths (arg, visited_bbs,
                                                 path, seen_loop_phi);

      else
        {
          edge taken_edge = profitable_jump_thread_path (path, var_bb,
                                                         name, arg);
          if (taken_edge)
            convert_and_register_jump_thread_path (path, taken_edge);
        }
    }

  /* Remove all the nodes that we added from NEXT_PATH.  */
  if (next_path_length)
    vec_safe_truncate (path, (path->length () - next_path_length));
}
/* Search backwards from the destination of edge E looking for paths
   where NAME (an SSA_NAME) is a constant.  Record such paths for jump
   threading.

   It is assumed that E->dest ends with a control statement and that
   by finding a path where NAME is a constant, we can thread the
   path.  */
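/* For instance, for a block ending in switch (i_4), NAME below is
   i_4; for if (x_2 != 0) it is x_2, provided the other operand is an
   integer constant and the type is integral or a pointer.  */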
void
find_jump_threads_backwards (edge e)
{
  if (!flag_expensive_optimizations
      || optimize_function_for_size_p (cfun)
      || e->dest->loop_father != e->src->loop_father
      || loop_depth (e->dest->loop_father) == 0)
    return;

  gimple *stmt = get_gimple_control_stmt (e->dest);
  if (!stmt)
    return;

  enum gimple_code code = gimple_code (stmt);
  tree name = NULL;
  if (code == GIMPLE_SWITCH)
    name = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    name = gimple_goto_dest (stmt);
  else if (code == GIMPLE_COND)
    {
      if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
          && TREE_CODE (gimple_cond_rhs (stmt)) == INTEGER_CST
          && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
              || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
        name = gimple_cond_lhs (stmt);
    }

  if (!name || TREE_CODE (name) != SSA_NAME)
    return;

  vec<basic_block, va_gc> *bb_path;
  vec_alloc (bb_path, 10);
  vec_safe_push (bb_path, e->dest);
  hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

  max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
  fsm_find_control_statement_thread_paths (name, visited_bbs, bb_path, false);

  delete visited_bbs;
  vec_free (bb_path);
}