/* SSA Jump Threading
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "predict.h"
#include "tree.h"
#include "gimple.h"
#include "hard-reg-set.h"
#include "ssa.h"
#include "alias.h"
#include "fold-const.h"
#include "flags.h"
#include "tm_p.h"
#include "cfgloop.h"
#include "timevar.h"
#include "dumpfile.h"
#include "internal-fn.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-loop.h"
#include "tree-ssa-dom.h"
#include "builtins.h"
#include "cfganal.h"
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

typedef tree (pfn_simplify) (gimple *, gimple *, class avail_exprs_stack *);

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */
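
/* A hypothetical illustration (the SSA names below are invented): if a
   block dominating BB contains

     x_5 = ASSERT_EXPR <x_2, x_2 > 0>;

   then a use of x_2 in BB's condition may be replaced by x_5, which
   carries the sharper information derived from the assertion.  */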
static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences into
   CONST_AND_COPIES.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */
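
/* As a hypothetical example (the block and SSA names are made up):
   if E enters a block whose PHIs include

     x_3 = PHI <0 (E->src), x_7 (other pred)>

   then traversing E lets us temporarily record x_3 == 0, which may in
   turn allow the conditional at the end of E->dest to be resolved.  */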
static bool
record_temporary_equivalences_from_phis (edge e, const_and_copies *const_and_copies)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      const_and_copies->record_const_or_copy (dst, src);
    }
  return true;
}
/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple *stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto
   CONST_AND_COPIES.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */
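
/* As a hypothetical illustration (the GIMPLE below is invented): if
   traversing E told us a_1 == 5 and E->dest contains

     b_2 = a_1 + 1;
     if (b_2 > 10) ...

   then temporarily copy propagating a_1 and folding records b_2 == 6,
   which later lets the conditional be resolved for this path.  */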
static gimple *
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  const_and_copies *const_and_copies,
                                                  avail_exprs_stack *avail_exprs_stack,
                                                  pfn_simplify simplify,
                                                  bool backedge_seen)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        {
          /* STMT might still have DEFS and we need to invalidate any known
             equivalences for them.

             Consider if STMT is a GIMPLE_ASM with one or more outputs that
             feeds a conditional inside a loop.  We might derive an equivalence
             due to the conditional.  */
          tree op;
          ssa_op_iter iter;

          if (backedge_seen)
            FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
              const_and_copies->invalidate (op);

          continue;
        }

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            {
              if (backedge_seen)
                {
                  tree lhs = gimple_get_lhs (stmt);
                  const_and_copies->invalidate (lhs);
                }
              continue;
            }
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (as_a <gcall *> (stmt), false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.

         If we have traversed a backedge at some point during threading,
         then always enter something here.  Either a real equivalence,
         or a NULL_TREE equivalence which is effectively invalidation of
         prior equivalences.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
                                                cached_lhs);
      else if (backedge_seen)
        const_and_copies->invalidate (gimple_get_lhs (stmt));
    }
  return stmt;
}

/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple *stmt1 ATTRIBUTE_UNUSED, gimple *stmt2 ATTRIBUTE_UNUSED,
                class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */
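
/* A hypothetical example of what this achieves: if the walk of E->dest
   recorded x_4 == 0 and the block ends in

     if (x_4 != 0) goto <bb 7>; else goto <bb 8>;

   then the condition folds to a constant and the caller knows the edge
   to <bb 8> is taken on this path.  */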
static tree
simplify_control_stmt_condition (edge e,
                                 gimple *stmt,
                                 class avail_exprs_stack *avail_exprs_stack,
                                 gcond *dummy_cond,
                                 pfn_simplify simplify,
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op0) == SSA_NAME
                  && SSA_NAME_VALUE (op0))
                op0 = SSA_NAME_VALUE (op0);
              else
                break;
            }
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op1) == SSA_NAME
                  && SSA_NAME_VALUE (op1))
                op1 = SSA_NAME_VALUE (op1);
              else
                break;
            }
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          cond_code = swap_tree_comparison (cond_code);
          std::swap (op0, op1);
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt, avail_exprs_stack);

      /* If we were just testing that an integral type was != 0, and that
         failed, just return the first operand.  This gives the FSM code a
         chance to optimize the path.  */
      if (cached_lhs == NULL
          && cond_code == NE_EXPR)
        {
          /* Recover the original operands.  They may have been simplified
             using context sensitive equivalences.  Those context sensitive
             equivalences may not be valid on paths found by the FSM optimizer.  */
          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);

          if (INTEGRAL_TYPE_P (TREE_TYPE (op0))
              && TREE_CODE (op0) == SSA_NAME
              && integer_zerop (op1))
            return op0;
        }

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (cached_lhs) == SSA_NAME
                  && SSA_NAME_VALUE (cached_lhs))
                cached_lhs = SSA_NAME_VALUE (cached_lhs);
              else
                break;
            }
        }

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);

      /* We couldn't find an invariant.  But, callers of this
         function may be able to do something useful with the
         unmodified destination.  */
      if (!cached_lhs)
        cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        vars->add (var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple *stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && vars->add (var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = new hash_set<tree>;
                  for (i = 0; i < alloc_count; i++)
                    vars->add (fewvars[i]);
                  fewvars.release ();
                  vars->add (var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings? */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}
/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.  */

static bool
thread_around_empty_blocks (edge taken_edge,
                            gcond *dummy_cond,
                            class avail_exprs_stack *avail_exprs_stack,
                            bool handle_dominating_asserts,
                            pfn_simplify simplify,
                            bitmap visited,
                            vec<jump_thread_edge *> *path,
                            bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if (!bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
              if (*backedge_seen_p)
                simplify = dummy_simplify;
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 avail_exprs_stack,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path,
                                                 backedge_seen_p);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt,
                                          avail_exprs_stack, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
        simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
                                  dummy_cond,
                                  avail_exprs_stack,
                                  handle_dominating_asserts,
                                  simplify,
                                  visited,
                                  path,
                                  backedge_seen_p);
      return true;
    }

  return false;
}
/* Return true if the CFG contains at least one path from START_BB to END_BB.
   When a path is found, record in PATH the blocks from END_BB to START_BB.
   VISITED_BBS is used to make sure we don't fall into an infinite loop.  Bound
   the recursion to basic blocks belonging to LOOP.  */

static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
                      vec<basic_block, va_gc> *&path,
                      hash_set<basic_block> *visited_bbs, loop_p loop)
{
  if (loop != start_bb->loop_father)
    return false;

  if (start_bb == end_bb)
    {
      vec_safe_push (path, start_bb);
      return true;
    }

  if (!visited_bbs->add (start_bb))
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, start_bb->succs)
        if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
          {
            vec_safe_push (path, start_bb);
            return true;
          }
    }

  return false;
}
static int max_threaded_paths;

/* We trace the value of the variable EXPR back through any phi nodes looking
   for places where it gets a constant value and save the path.  Stop after
   having recorded MAX_PATHS jump threading paths.  */
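
/* A hypothetical example of the kind of path this finds: in a
   state-machine style loop such as

     <bb 4>:
     state_1 = PHI <2 (3), state_9 (7)>
     switch (state_1) ...

   tracing state_1 back through its PHI finds the constant 2 on the
   edge 3->4, so the destination of the switch is statically known on
   that path and it can be recorded as an FSM jump-threading path.  */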
static void
fsm_find_control_statement_thread_paths (tree expr,
                                         hash_set<basic_block> *visited_bbs,
                                         vec<basic_block, va_gc> *&path,
                                         bool seen_loop_phi)
{
  tree var = SSA_NAME_VAR (expr);
  gimple *def_stmt = SSA_NAME_DEF_STMT (expr);
  basic_block var_bb = gimple_bb (def_stmt);

  if (var == NULL || var_bb == NULL)
    return;

  /* For the moment we assume that an SSA chain only contains phi nodes, and
     eventually one of the phi arguments will be an integer constant.  In the
     future, this could be extended to also handle simple assignments of
     arithmetic operations.  */
  if (gimple_code (def_stmt) != GIMPLE_PHI)
    return;

  /* Avoid infinite recursion.  */
  if (visited_bbs->add (var_bb))
    return;

  gphi *phi = as_a <gphi *> (def_stmt);
  int next_path_length = 0;
  basic_block last_bb_in_path = path->last ();

  if (loop_containing_stmt (phi)->header == gimple_bb (phi))
    {
      /* Do not walk through more than one loop PHI node.  */
      if (seen_loop_phi)
        return;
      seen_loop_phi = true;
    }

  /* Following the chain of SSA_NAME definitions, we jumped from a definition in
     LAST_BB_IN_PATH to a definition in VAR_BB.  When these basic blocks are
     different, append to PATH the blocks from LAST_BB_IN_PATH to VAR_BB.  */
  if (var_bb != last_bb_in_path)
    {
      edge e;
      int e_count = 0;
      edge_iterator ei;
      vec<basic_block, va_gc> *next_path;
      vec_alloc (next_path, n_basic_blocks_for_fn (cfun));

      FOR_EACH_EDGE (e, ei, last_bb_in_path->preds)
        {
          hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

          if (fsm_find_thread_path (var_bb, e->src, next_path, visited_bbs,
                                    e->src->loop_father))
            ++e_count;

          delete visited_bbs;

          /* If there is more than one path, stop.  */
          if (e_count > 1)
            {
              vec_free (next_path);
              return;
            }
        }

      /* Stop if we have not found a path: this could occur when the recursion
         is stopped by one of the bounds.  */
      if (e_count == 0)
        {
          vec_free (next_path);
          return;
        }

      /* Make sure we haven't already visited any of the nodes in
         NEXT_PATH.  Don't add them here to avoid pollution.  */
      for (unsigned int i = 0; i < next_path->length () - 1; i++)
        {
          if (visited_bbs->contains ((*next_path)[i]))
            {
              vec_free (next_path);
              return;
            }
        }

      /* Now add the nodes to VISITED_BBS.  */
      for (unsigned int i = 0; i < next_path->length () - 1; i++)
        visited_bbs->add ((*next_path)[i]);

      /* Append all the nodes from NEXT_PATH to PATH.  */
      vec_safe_splice (path, next_path);
      next_path_length = next_path->length ();
      vec_free (next_path);
    }

  gcc_assert (path->last () == var_bb);

  /* Iterate over the arguments of PHI.  */
  unsigned int i;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);
      basic_block bbi = gimple_phi_arg_edge (phi, i)->src;

      /* Skip edges pointing outside the current loop.  */
      if (!arg || var_bb->loop_father != bbi->loop_father)
        continue;

      if (TREE_CODE (arg) == SSA_NAME)
        {
          vec_safe_push (path, bbi);
          /* Recursively follow SSA_NAMEs looking for a constant definition.  */
          fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
                                                   seen_loop_phi);

          path->pop ();
          continue;
        }

      if (TREE_CODE (arg) != INTEGER_CST)
        continue;

      int path_length = path->length ();
      /* A path with less than 2 basic blocks should not be jump-threaded.  */
      if (path_length < 2)
        continue;

      if (path_length > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of basic blocks on the path "
                     "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
          continue;
        }

      if (max_threaded_paths <= 0)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of previously recorded FSM paths to thread "
                     "exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
          continue;
        }

      /* Add BBI to the path.  */
      vec_safe_push (path, bbi);
      ++path_length;

      int n_insns = 0;
      gimple_stmt_iterator gsi;
      int j;
      loop_p loop = (*path)[0]->loop_father;
      bool path_crosses_loops = false;

      /* Count the number of instructions on the path: as these instructions
         will have to be duplicated, we will not record the path if there are
         too many instructions on the path.  Also check that all the blocks in
         the path belong to a single loop.  */
      for (j = 1; j < path_length - 1; j++)
        {
          basic_block bb = (*path)[j];

          if (bb->loop_father != loop)
            {
              path_crosses_loops = true;
              break;
            }

          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple *stmt = gsi_stmt (gsi);
              /* Do not count empty statements and labels.  */
              if (gimple_code (stmt) != GIMPLE_NOP
                  && gimple_code (stmt) != GIMPLE_LABEL
                  && !is_gimple_debug (stmt))
                ++n_insns;
            }
        }

      if (path_crosses_loops)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the path crosses loops.\n");
          path->pop ();
          continue;
        }

      if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of instructions on the path "
                     "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
          path->pop ();
          continue;
        }

      vec<jump_thread_edge *> *jump_thread_path
        = new vec<jump_thread_edge *> ();

      /* Record the edges between the blocks in PATH.  */
      for (j = 0; j < path_length - 1; j++)
        {
          edge e = find_edge ((*path)[path_length - j - 1],
                              (*path)[path_length - j - 2]);
          gcc_assert (e);
          jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
          jump_thread_path->safe_push (x);
        }

      /* Add the edge taken when the control variable has value ARG.  */
      edge taken_edge = find_taken_edge ((*path)[0], arg);
      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      jump_thread_path->safe_push (x);

      register_jump_thread (jump_thread_path);
      --max_threaded_paths;

      /* Remove BBI from the path.  */
      path->pop ();
    }

  /* Remove all the nodes that we added from NEXT_PATH.  */
  if (next_path_length)
    vec_safe_truncate (path, (path->length () - next_path_length));
}
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   A positive return value indicates success.  A zero return value is a
   failure, but the block can still be duplicated as a joiner in a jump
   thread path; a negative value indicates the block should not be
   duplicated and thus is not suitable for a joiner in a jump threading
   path.  */
static int
thread_through_normal_block (edge e,
                             gcond *dummy_cond,
                             bool handle_dominating_asserts,
                             const_and_copies *const_and_copies,
                             avail_exprs_stack *avail_exprs_stack,
                             pfn_simplify simplify,
                             vec<jump_thread_edge *> *path,
                             bitmap visited,
                             bool *backedge_seen_p)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* We want to record any equivalences created by traversing E.  */
  if (!handle_dominating_asserts)
    record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, const_and_copies))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple *stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
                                                         avail_exprs_stack,
                                                         simplify,
                                                         *backedge_seen_p);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The statement simply doesn't have any instructions, but
         does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
          && !gsi_end_p (gsi_start_phis (e->dest)))
        return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
                                              dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (!cond)
        return 0;

      if (is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL
              || dest == e->dest
              || bitmap_bit_p (visited, dest->index))
            return 0;

          /* Only push the EDGE_START_JUMP_THREAD marker if this is
             the first edge on the path.  */
          if (path->length () == 0)
            {
              jump_thread_edge *x
                = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
              path->safe_push (x);
              *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
            }

          jump_thread_edge *x
            = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);
          *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
          if (*backedge_seen_p)
            simplify = dummy_simplify;

          /* See if we can thread through DEST as well, this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.

             We don't want to thread back to a block we have already
             visited.  This may be overly conservative.  */
          bitmap_set_bit (visited, dest->index);
          bitmap_set_bit (visited, e->dest->index);
          thread_around_empty_blocks (taken_edge,
                                      dummy_cond,
                                      avail_exprs_stack,
                                      handle_dominating_asserts,
                                      simplify,
                                      visited,
                                      path,
                                      backedge_seen_p);
          return 1;
        }

      if (!flag_expensive_optimizations
          || optimize_function_for_size_p (cfun)
          || TREE_CODE (cond) != SSA_NAME
          || e->dest->loop_father != e->src->loop_father
          || loop_depth (e->dest->loop_father) == 0)
        return 0;

      /* When COND cannot be simplified, try to find paths from a control
         statement back through the PHI nodes which would affect that control
         statement.  */
      vec<basic_block, va_gc> *bb_path;
      vec_alloc (bb_path, n_basic_blocks_for_fn (cfun));
      vec_safe_push (bb_path, e->dest);
      hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

      max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
      fsm_find_control_statement_thread_paths (cond, visited_bbs, bb_path,
                                               false);

      delete visited_bbs;
      vec_free (bb_path);
    }
  return 0;
}
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   The available expression table is referenced via AVAIL_EXPRS_STACK.

   SIMPLIFY is a pass-specific function used to simplify statements.  */
void
thread_across_edge (gcond *dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    class const_and_copies *const_and_copies,
                    class avail_exprs_stack *avail_exprs_stack,
                    tree (*simplify) (gimple *, gimple *,
                                      class avail_exprs_stack *))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  int threaded = thread_through_normal_block (e, dummy_cond,
                                              handle_dominating_asserts,
                                              const_and_copies,
                                              avail_exprs_stack,
                                              simplify, path,
                                              visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
                                           e->dest);
      const_and_copies->pop_to_marker ();
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
         thus there should be no edges on the thread path and no need to walk
         through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A negative status indicates the target block was deemed too big to
         duplicate.  Just quit now rather than trying to use the block as
         a joiner in a jump threading path.

         This prevents unnecessary code growth, but more importantly if we
         do not look at all the statements in the block, then we may have
         missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
        {
          BITMAP_FREE (visited);
          const_and_copies->pop_to_marker ();
          return;
        }
    }

  /* We were unable to determine what out edge from E->dest is taken.  However,
     we might still be able to thread through successors of E->dest.  This
     often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
        {
          const_and_copies->pop_to_marker ();
          BITMAP_FREE (visited);
          return;
        }

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Push a fresh marker so we can unwind the equivalences created
           for each of E->dest's successors.  */
        const_and_copies->push_marker ();
        if (avail_exprs_stack)
          avail_exprs_stack->push_marker ();

        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, e->src->index);
        bitmap_set_bit (visited, e->dest->index);
        bitmap_set_bit (visited, taken_edge->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);
        found = false;
        backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
        backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
        if (backedge_seen)
          simplify = dummy_simplify;
        found = thread_around_empty_blocks (taken_edge,
                                            dummy_cond,
                                            avail_exprs_stack,
                                            handle_dominating_asserts,
                                            simplify,
                                            visited,
                                            path,
                                            &backedge_seen);

        if (backedge_seen)
          simplify = dummy_simplify;

        if (!found)
          found = thread_through_normal_block (path->last ()->e, dummy_cond,
                                               handle_dominating_asserts,
                                               const_and_copies,
                                               avail_exprs_stack,
                                               simplify, path,
                                               visited, &backedge_seen) > 0;

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          {
            delete_jump_thread_path (path);
          }

        /* And unwind the equivalence table.  */
        if (avail_exprs_stack)
          avail_exprs_stack->pop_to_marker ();
        const_and_copies->pop_to_marker ();
      }
    BITMAP_FREE (visited);
  }

  const_and_copies->pop_to_marker ();
}