/* SSA Jump Threading
   Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "dumpfile.h"
#include "pointer-set.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
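
/* As an illustrative sketch (the block number and SSA names below are
   hypothetical), a block such as

     <bb 4>:   ;; reached from two different predecessors
       x_3 = a_1 + b_2;
       if (x_3 > 10)
         goto <bb 5>;
       else
         goto <bb 6>;

   has multiple predecessors and multiple successors and ends in a
   GIMPLE_COND, so it passes the checks above.  A block with a single
   predecessor or a single successor never offers a threading
   opportunity here.  */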

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}
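
/* For instance (hypothetical names), with VRP-style asserts in the IL

     <bb 3>:
       x_5 = ASSERT_EXPR <x_3, x_3 > 0>;

   asking for X_3 from a block dominated by <bb 3> returns X_5, which
   carries the sharper range information implied by the assert.  */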

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  /* Y may be NULL if we are invalidating entries in the table.  */
  if (y && TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
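
/* To sketch the unwind stack layout (hypothetical names): after recording
   the equivalences a_1 -> 5 and then b_2 -> a_1 (which is recorded as
   b_2 -> 5, since we look through a_1's current value), the stack holds,
   bottom to top,

     NULL_TREE, <old value of a_1>, a_1, <old value of b_2>, b_2

   where the NULL_TREE marker was pushed by thread_across_edge before this
   path was processed.  remove_temporary_equivalences pops (name, previous
   value) pairs and restores them until it reaches the marker or empties
   the stack.  */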

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
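
/* For example (hypothetical names), when threading from basic block 2 into
   a destination block that starts with

     x_4 = PHI <0(2), x_7(3)>;

   the temporary equivalence x_4 -> 0 recorded here lets a later test such
   as `if (x_4 == 0)' be folded to a constant for paths entering over the
   edge from block 2.  */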

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}

/* A new value has been assigned to LHS.  If necessary, invalidate any
   equivalences that are no longer valid.  */
static void
invalidate_equivalences (tree lhs, vec<tree> *stack)
{
  for (unsigned int i = 1; i < num_ssa_names; i++)
    if (ssa_name (i) && SSA_NAME_VALUE (ssa_name (i)) == lhs)
      record_temporary_equivalence (ssa_name (i), NULL_TREE, stack);

  if (SSA_NAME_VALUE (lhs))
    record_temporary_equivalence (lhs, NULL_TREE, stack);
}
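
/* Note this is a linear scan over all SSA_NAMEs; invalidation is expressed
   as a NULL_TREE equivalence so the normal unwinding in
   remove_temporary_equivalences still restores the prior values.  For
   example (hypothetical names), if a_1 -> x_3 was recorded earlier and x_3
   is given a new value while traversing a backedge, a_1 is remapped to
   NULL_TREE here rather than being left pointing at a stale value.  */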

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  vec<tree> *stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple),
                                                  bool backedge_seen)
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        {
          /* STMT might still have DEFS and we need to invalidate any known
             equivalences for them.

             Consider if STMT is a GIMPLE_ASM with one or more outputs that
             feeds a conditional inside a loop.  We might derive an equivalence
             due to the conditional.  */
          tree op;
          ssa_op_iter iter;

          if (backedge_seen)
            FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
              invalidate_equivalences (op, stack);

          continue;
        }

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            {
              if (backedge_seen)
                {
                  tree lhs = gimple_get_lhs (stmt);
                  invalidate_equivalences (lhs, stack);
                }
              continue;
            }
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.

         If we have traversed a backedge at some point during threading,
         then always enter something here.  Either a real equivalence,
         or a NULL_TREE equivalence which is effectively invalidation of
         prior equivalences.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
      else if (backedge_seen)
        invalidate_equivalences (gimple_get_lhs (stmt), stack);
    }
  return stmt;
}
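
/* To illustrate the temporary copy propagation above (hypothetical names):
   given the statement

     a_8 = b_4 + 1;

   with a recorded equivalence b_4 -> 7, the use of b_4 is temporarily
   replaced by 7, fold_assignment_stmt folds the RHS to 8, the context
   sensitive equivalence a_8 -> 8 is recorded, and the statement's original
   operands are then restored.  */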

/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple stmt1 ATTRIBUTE_UNUSED, gimple stmt2 ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
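
/* A sketch of the GIMPLE_COND path above (hypothetical names): for

     if (x_2 != 0)

   reached over an edge where x_2 has the temporary value 1, the operands
   become 1 != 0, fold_binary reduces the dummy condition to a constant,
   and the caller can pick the outgoing edge without needing the
   pass-specific SIMPLIFY callback.  */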

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  pointer_set_t *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     pointer_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = pointer_set_create ();

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        pointer_set_insert (vars, var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && pointer_set_insert (vars, var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = pointer_set_create ();
                  for (i = 0; i < alloc_count; i++)
                    pointer_set_insert (vars, fewvars[i]);
                  fewvars.release ();
                  pointer_set_insert (vars, var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    pointer_set_destroy (vars);
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
                            gimple dummy_cond,
                            bool handle_dominating_asserts,
                            tree (*simplify) (gimple, gimple),
                            bitmap visited,
                            vec<jump_thread_edge *> *path,
                            bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if (!bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
              if (*backedge_seen_p)
                simplify = dummy_simplify;
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path,
                                                 backedge_seen_p);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
        simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
                                  dummy_cond,
                                  handle_dominating_asserts,
                                  simplify,
                                  visited,
                                  path,
                                  backedge_seen_p);
      return true;
    }

  return false;
}
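
/* A typical case handled here (hypothetical block numbers): after resolving
   a conditional, the taken edge may lead to an empty forwarding block
   <bb 7> whose single successor is <bb 9>.  Such a block contains no PHIs
   and no real statements, so it is appended to the path as
   EDGE_NO_COPY_SRC_BLOCK (it need not be duplicated) and the walk simply
   continues into <bb 9>, possibly resolving another conditional there.  */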

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   Positive return value is success.  Zero return value is failure, but
   the block can still be duplicated as a joiner in a jump thread path,
   negative indicates the block should not be duplicated and thus is not
   suitable for a joiner in a jump threading path.  */

static int
thread_through_normal_block (edge e,
                             gimple dummy_cond,
                             bool handle_dominating_asserts,
                             vec<tree> *stack,
                             tree (*simplify) (gimple, gimple),
                             vec<jump_thread_edge *> *path,
                             bitmap visited,
                             bool *backedge_seen_p)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify,
                                                         *backedge_seen_p);

  /* If we didn't look at all the statements, the most likely reason is
     there were too many and thus duplicating this block is not profitable.

     Also note if we do not look at all the statements, then we may not
     have invalidated equivalences that are no longer valid if we threaded
     around a loop.  Thus we must signal to our caller that this block
     is not suitable for use as a joiner in a threading path.  */
  if (!stmt)
    return -1;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL
              || dest == e->dest
              || bitmap_bit_p (visited, dest->index))
            return 0;

          /* Only push the EDGE_START_JUMP_THREAD marker if this is
             the first edge on the path.  */
          if (path->length () == 0)
            {
              jump_thread_edge *x
                = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
              path->safe_push (x);
              *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
            }

          jump_thread_edge *x
            = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);
          *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
          if (*backedge_seen_p)
            simplify = dummy_simplify;

          /* See if we can thread through DEST as well; this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.

             We don't want to thread back to a block we have already
             visited.  This may be overly conservative.  */
          bitmap_set_bit (visited, dest->index);
          bitmap_set_bit (visited, e->dest->index);
          thread_around_empty_blocks (taken_edge,
                                      dummy_cond,
                                      handle_dominating_asserts,
                                      simplify,
                                      visited,
                                      path,
                                      backedge_seen_p);
          return 1;
        }
    }
  return 0;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    vec<tree> *stack,
                    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  int threaded = thread_through_normal_block (e, dummy_cond,
                                              handle_dominating_asserts,
                                              stack, simplify, path,
                                              visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
                                           e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
         thus there should be no edges on the thread path and no need to walk
         through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();

      /* A negative status indicates the target block was deemed too big to
         duplicate.  Just quit now rather than trying to use the block as
         a joiner in a jump threading path.

         This prevents unnecessary code growth, but more importantly if we
         do not look at all the statements in the block, then we may have
         missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
        {
          BITMAP_FREE (visited);
          remove_temporary_equivalences (stack);
          return;
        }
    }

 /* We were unable to determine which outgoing edge from E->dest is taken.
    However, we might still be able to thread through successors of E->dest.
    This often occurs when E->dest is a joiner block which then fans back out
    based on redundant tests.

    If so, we'll copy E->dest and redirect the appropriate predecessor to
    the copy.  Within the copy of E->dest, we'll thread one or more edges
    to points deeper in the CFG.

    This is a stopgap until we have a more structured approach to path
    isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
        {
          remove_temporary_equivalences (stack);
          BITMAP_FREE (visited);
          return;
        }

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Push a fresh marker so we can unwind the equivalences created
           for each of E->dest's successors.  */
        stack->safe_push (NULL_TREE);

        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, e->src->index);
        bitmap_set_bit (visited, e->dest->index);
        bitmap_set_bit (visited, taken_edge->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);
        found = false;
        backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
        backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
        if (backedge_seen)
          simplify = dummy_simplify;
        found = thread_around_empty_blocks (taken_edge,
                                            dummy_cond,
                                            handle_dominating_asserts,
                                            simplify,
                                            visited,
                                            path,
                                            &backedge_seen);

        if (backedge_seen)
          simplify = dummy_simplify;

        if (!found)
          found = thread_through_normal_block (path->last ()->e, dummy_cond,
                                               handle_dominating_asserts,
                                               stack, simplify, path, visited,
                                               &backedge_seen) > 0;

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          {
            delete_jump_thread_path (path);
          }

        /* And unwind the equivalence table.  */
        remove_temporary_equivalences (stack);
      }
    BITMAP_FREE (visited);
  }

  remove_temporary_equivalences (stack);
}
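
/* As a closing illustration, a jump thread path built by the joiner-block
   code above might have the shape (edge types come from
   tree-ssa-threadupdate.h, included above)

     EDGE_START_JUMP_THREAD       E              incoming edge we entered on
     EDGE_COPY_SRC_JOINER_BLOCK   E->dest succ   joiner block, duplicated
     EDGE_NO_COPY_SRC_BLOCK       ...            empty forwarding blocks
     EDGE_COPY_SRC_BLOCK          ...            block whose condition resolved

   and register_jump_thread hands the completed path to the CFG/SSA
   updating code.  */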