[official-gcc.git] / gcc / tree-ssa-threadedge.c
1 /* SSA Jump Threading
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Jeff Law <law@redhat.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "ssa.h"
29 #include "fold-const.h"
30 #include "cfgloop.h"
31 #include "gimple-iterator.h"
32 #include "tree-cfg.h"
33 #include "tree-ssa-threadupdate.h"
34 #include "params.h"
35 #include "tree-ssa-scopedtables.h"
36 #include "tree-ssa-threadedge.h"
37 #include "tree-ssa-threadbackward.h"
38 #include "tree-ssa-dom.h"
39 #include "builtins.h"
41 /* To avoid code explosion due to jump threading, we limit the
42 number of statements we are going to copy. This variable
43 holds the number of statements currently seen that we'll have
44 to copy as part of the jump threading process. */
45 static int stmt_count;
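/* Added illustration (not part of the original file): a schematic example
   of the transformation this counter guards.  Given GIMPLE along the lines
   of

	<bb 5>:
	x_2 = PHI <1(3), 0(4)>
	if (x_2 != 0)
	  goto <bb 6>;
	else
	  goto <bb 7>;

   the conditional is known to be true when <bb 5> is entered from <bb 3>
   and false when entered from <bb 4>.  Threading duplicates <bb 5> onto
   each incoming path so the runtime test disappears; the statements copied
   that way are what stmt_count accounts for, and the limit keeps the
   resulting code growth bounded.  */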
47 /* Array to record value-handles per SSA_NAME. */
48 vec<tree> ssa_name_values;
50 typedef tree (pfn_simplify) (gimple *, gimple *, class avail_exprs_stack *);
52 /* Set the value for the SSA name NAME to VALUE. */
54 void
55 set_ssa_name_value (tree name, tree value)
57 if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
58 ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
59 if (value && TREE_OVERFLOW_P (value))
60 value = drop_tree_overflow (value);
61 ssa_name_values[SSA_NAME_VERSION (name)] = value;
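/* Added usage note: clients record context sensitive values with calls such
   as set_ssa_name_value (x_1, integer_zero_node) (hypothetical example) and
   read them back through the SSA_NAME_VALUE macro while simplifying
   statements along the edge being threaded.  */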
64 /* Initialize the per SSA_NAME value-handles array.  */
65 void
66 threadedge_initialize_values (void)
68 gcc_assert (!ssa_name_values.exists ());
69 ssa_name_values.create (num_ssa_names);
72 /* Free the per SSA_NAME value-handle array. */
73 void
74 threadedge_finalize_values (void)
76 ssa_name_values.release ();
79 /* Return TRUE if we may be able to thread an incoming edge into
80 BB to an outgoing edge from BB. Return FALSE otherwise. */
82 bool
83 potentially_threadable_block (basic_block bb)
85 gimple_stmt_iterator gsi;
87 /* Special case. We can get blocks that are forwarders, but are
88 not optimized away because they forward from outside a loop
89 to the loop header. We want to thread through them as we can
90 sometimes thread to the loop exit, which is obviously profitable.
91 The interesting case here is when the block has PHIs. */
92 if (gsi_end_p (gsi_start_nondebug_bb (bb))
93 && !gsi_end_p (gsi_start_phis (bb)))
94 return true;
96 /* If BB has a single successor or a single predecessor, then
97 there is no threading opportunity. */
98 if (single_succ_p (bb) || single_pred_p (bb))
99 return false;
101 /* If BB does not end with a conditional, switch or computed goto,
102 then there is no threading opportunity. */
103 gsi = gsi_last_bb (bb);
104 if (gsi_end_p (gsi)
105 || ! gsi_stmt (gsi)
106 || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
107 && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
108 && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
109 return false;
111 return true;
114 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
115 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
116 BB. If no such ASSERT_EXPR is found, return OP. */
118 static tree
119 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
121 imm_use_iterator imm_iter;
122 gimple *use_stmt;
123 use_operand_p use_p;
125 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
127 use_stmt = USE_STMT (use_p);
128 if (use_stmt != stmt
129 && gimple_assign_single_p (use_stmt)
130 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
131 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
132 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
134 return gimple_assign_lhs (use_stmt);
137 return op;
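/* Added note for illustration: VRP inserts statements of the form

	x_5 = ASSERT_EXPR <x_1, x_1 > 0>

   so that uses dominated by the assertion carry the recorded range.  The
   helper above lets the threader replace x_1 with x_5 in a condition
   reached from BB, exposing that range information to the simplifiers.  */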
140 /* Record temporary equivalences created by PHIs at the target of the
141 edge E.  Record unwind information for the equivalences onto CONST_AND_COPIES.
143 If a PHI which prevents threading is encountered, then return FALSE
144 indicating we should not thread this edge, else return TRUE. */
150 static bool
151 record_temporary_equivalences_from_phis (edge e, const_and_copies *const_and_copies)
153 gphi_iterator gsi;
155 /* Each PHI creates a temporary equivalence, record them.
156 These are context sensitive equivalences and will be removed
157 later. */
158 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
160 gphi *phi = gsi.phi ();
161 tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
162 tree dst = gimple_phi_result (phi);
164 /* If the desired argument is not the same as this PHI's result
165 and it is set by a PHI in E->dest, then we can not thread
166 through E->dest. */
167 if (src != dst
168 && TREE_CODE (src) == SSA_NAME
169 && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
170 && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
171 return false;
173 /* We consider any non-virtual PHI as a statement since it
174 could result in a constant assignment or copy operation. */
175 if (!virtual_operand_p (dst))
176 stmt_count++;
178 const_and_copies->record_const_or_copy (dst, src);
180 return true;
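/* Added example: if E->dest begins with

	x_1 = PHI <0(E), y_2(other edge)>

   then traversing E lets us temporarily record the equivalence x_1 == 0.
   The equivalence is only valid along E and is unwound by the caller once
   the edge has been processed.  */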
183 /* Fold the RHS of an assignment statement and return it as a tree.
184 May return NULL_TREE if no simplification is possible. */
186 static tree
187 fold_assignment_stmt (gimple *stmt)
189 enum tree_code subcode = gimple_assign_rhs_code (stmt);
191 switch (get_gimple_rhs_class (subcode))
193 case GIMPLE_SINGLE_RHS:
194 return fold (gimple_assign_rhs1 (stmt));
196 case GIMPLE_UNARY_RHS:
198 tree lhs = gimple_assign_lhs (stmt);
199 tree op0 = gimple_assign_rhs1 (stmt);
200 return fold_unary (subcode, TREE_TYPE (lhs), op0);
203 case GIMPLE_BINARY_RHS:
205 tree lhs = gimple_assign_lhs (stmt);
206 tree op0 = gimple_assign_rhs1 (stmt);
207 tree op1 = gimple_assign_rhs2 (stmt);
208 return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
211 case GIMPLE_TERNARY_RHS:
213 tree lhs = gimple_assign_lhs (stmt);
214 tree op0 = gimple_assign_rhs1 (stmt);
215 tree op1 = gimple_assign_rhs2 (stmt);
216 tree op2 = gimple_assign_rhs3 (stmt);
218 /* Sadly, we have to handle conditional assignments specially
219 here, because fold expects all the operands of an expression
220 to be folded before the expression itself is folded, but we
221 can't just substitute the folded condition here. */
222 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
223 op0 = fold (op0);
225 return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
228 default:
229 gcc_unreachable ();
233 /* Try to simplify each statement in E->dest, ultimately leading to
234 a simplification of the COND_EXPR at the end of E->dest.
236 Record unwind information for temporary equivalences onto CONST_AND_COPIES.
238 Use SIMPLIFY (a pointer to a callback function) to further simplify
239 statements using pass specific information.
241 We might consider marking just those statements which ultimately
242 feed the COND_EXPR. It's not clear if the overhead of bookkeeping
243 would be recovered by trying to simplify fewer statements.
245 If we are able to simplify a statement into the form
246 SSA_NAME = (SSA_NAME | gimple invariant), then we can record
247 a context sensitive equivalence which may help us simplify
248 later statements in E->dest. */
250 static gimple *
251 record_temporary_equivalences_from_stmts_at_dest (edge e,
252 const_and_copies *const_and_copies,
253 avail_exprs_stack *avail_exprs_stack,
254 pfn_simplify simplify,
255 bool backedge_seen)
257 gimple *stmt = NULL;
258 gimple_stmt_iterator gsi;
259 int max_stmt_count;
261 max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
263 /* Walk through each statement in the block recording equivalences
264 we discover. Note any equivalences we discover are context
265 sensitive (ie, are dependent on traversing E) and must be unwound
266 when we're finished processing E. */
267 for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
269 tree cached_lhs = NULL;
271 stmt = gsi_stmt (gsi);
273 /* Ignore empty statements and labels. */
274 if (gimple_code (stmt) == GIMPLE_NOP
275 || gimple_code (stmt) == GIMPLE_LABEL
276 || is_gimple_debug (stmt))
277 continue;
279 /* If the statement has volatile operands, then we assume we
280 can not thread through this block. This is overly
281 conservative in some ways. */
282 if (gimple_code (stmt) == GIMPLE_ASM
283 && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
284 return NULL;
286 /* If duplicating this block is going to cause too much code
287 expansion, then do not thread through this block. */
288 stmt_count++;
289 if (stmt_count > max_stmt_count)
290 return NULL;
292 /* If this is not a statement that sets an SSA_NAME to a new
293 value, then do not try to simplify this statement as it will
294 not simplify in any way that is helpful for jump threading. */
295 if ((gimple_code (stmt) != GIMPLE_ASSIGN
296 || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
297 && (gimple_code (stmt) != GIMPLE_CALL
298 || gimple_call_lhs (stmt) == NULL_TREE
299 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
301 /* STMT might still have DEFS and we need to invalidate any known
302 equivalences for them.
304 Consider if STMT is a GIMPLE_ASM with one or more outputs that
305 feeds a conditional inside a loop. We might derive an equivalence
306 due to the conditional. */
307 tree op;
308 ssa_op_iter iter;
310 if (backedge_seen)
311 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
312 const_and_copies->invalidate (op);
314 continue;
317 /* The result of __builtin_object_size depends on all the arguments
318 of a phi node. Temporarily using only one edge produces invalid
319 results. For example
321 if (x < 6)
322 goto l;
323 else
324 goto l;
327 r = PHI <&w[2].a[1](2), &a.a[6](3)>
328 __builtin_object_size (r, 0)
330 The result of __builtin_object_size is defined to be the maximum of
331 remaining bytes. If we use only one edge on the phi, the result will
332 change to be the remaining bytes for the corresponding phi argument.
334 Similarly for __builtin_constant_p:
336 r = PHI <1(2), 2(3)>
337 __builtin_constant_p (r)
339 Both PHI arguments are constant, but x ? 1 : 2 is still not
340 constant. */
342 if (is_gimple_call (stmt))
344 tree fndecl = gimple_call_fndecl (stmt);
345 if (fndecl
346 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
347 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
349 if (backedge_seen)
351 tree lhs = gimple_get_lhs (stmt);
352 const_and_copies->invalidate (lhs);
354 continue;
358 /* At this point we have a statement which assigns an RHS to an
359 SSA_VAR on the LHS. We want to try and simplify this statement
360 to expose more context sensitive equivalences which in turn may
361 allow us to simplify the condition at the end of the loop.
363 Handle simple copy operations as well as implied copies from
364 ASSERT_EXPRs. */
365 if (gimple_assign_single_p (stmt)
366 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
367 cached_lhs = gimple_assign_rhs1 (stmt);
368 else if (gimple_assign_single_p (stmt)
369 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
370 cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
371 else
373 /* A statement that is not a trivial copy or ASSERT_EXPR.
374 We're going to temporarily copy propagate the operands
375 and see if that allows us to simplify this statement. */
376 tree *copy;
377 ssa_op_iter iter;
378 use_operand_p use_p;
379 unsigned int num, i = 0;
381 num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
382 copy = XCNEWVEC (tree, num);
384 /* Make a copy of the uses & vuses into COPY, then cprop into
385 the operands. */
386 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
388 tree tmp = NULL;
389 tree use = USE_FROM_PTR (use_p);
391 copy[i++] = use;
392 if (TREE_CODE (use) == SSA_NAME)
393 tmp = SSA_NAME_VALUE (use);
394 if (tmp)
395 SET_USE (use_p, tmp);
398 /* Try to fold/lookup the new expression. Inserting the
399 expression into the hash table is unlikely to help. */
400 if (is_gimple_call (stmt))
401 cached_lhs = fold_call_stmt (as_a <gcall *> (stmt), false);
402 else
403 cached_lhs = fold_assignment_stmt (stmt);
405 if (!cached_lhs
406 || (TREE_CODE (cached_lhs) != SSA_NAME
407 && !is_gimple_min_invariant (cached_lhs)))
408 cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
410 /* Restore the statement's original uses/defs. */
411 i = 0;
412 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
413 SET_USE (use_p, copy[i++]);
415 free (copy);
418 /* Record the context sensitive equivalence if we were able
419 to simplify this statement.
421 If we have traversed a backedge at some point during threading,
422 then always enter something here. Either a real equivalence,
423 or a NULL_TREE equivalence which is effectively invalidation of
424 prior equivalences. */
425 if (cached_lhs
426 && (TREE_CODE (cached_lhs) == SSA_NAME
427 || is_gimple_min_invariant (cached_lhs)))
428 const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
429 cached_lhs);
430 else if (backedge_seen)
431 const_and_copies->invalidate (gimple_get_lhs (stmt));
433 return stmt;
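/* Added example of the mechanism above: suppose traversing E recorded
   a_1 == 5 and E->dest contains

	b_2 = a_1 + 3;

   The temporary copy propagation rewrites the use to 5 + 3, folding yields
   8, and b_2 == 8 is recorded as another context sensitive equivalence
   that may later simplify the control statement ending the block.  */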
436 /* Once we have passed a backedge in the CFG when threading, we do not want to
437 utilize edge equivalences for simplification purposes.  They are no longer
438 necessarily valid. We use this callback rather than the ones provided by
439 DOM/VRP to achieve that effect. */
440 static tree
441 dummy_simplify (gimple *stmt1 ATTRIBUTE_UNUSED, gimple *stmt2 ATTRIBUTE_UNUSED,
442 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
444 return NULL_TREE;
447 /* Simplify the control statement at the end of the block E->dest.
449 To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
450 is available to use/clobber in DUMMY_COND.
452 Use SIMPLIFY (a pointer to a callback function) to further simplify
453 a condition using pass specific information.
455 Return the simplified condition or NULL if simplification could
456 not be performed.
458 The available expression table is referenced via AVAIL_EXPRS_STACK. */
460 static tree
461 simplify_control_stmt_condition (edge e,
462 gimple *stmt,
463 class avail_exprs_stack *avail_exprs_stack,
464 gcond *dummy_cond,
465 pfn_simplify simplify,
466 bool handle_dominating_asserts)
468 tree cond, cached_lhs;
469 enum gimple_code code = gimple_code (stmt);
471 /* For comparisons, we have to update both operands, then try
472 to simplify the comparison. */
473 if (code == GIMPLE_COND)
475 tree op0, op1;
476 enum tree_code cond_code;
478 op0 = gimple_cond_lhs (stmt);
479 op1 = gimple_cond_rhs (stmt);
480 cond_code = gimple_cond_code (stmt);
482 /* Get the current value of both operands. */
483 if (TREE_CODE (op0) == SSA_NAME)
485 for (int i = 0; i < 2; i++)
487 if (TREE_CODE (op0) == SSA_NAME
488 && SSA_NAME_VALUE (op0))
489 op0 = SSA_NAME_VALUE (op0);
490 else
491 break;
495 if (TREE_CODE (op1) == SSA_NAME)
497 for (int i = 0; i < 2; i++)
499 if (TREE_CODE (op1) == SSA_NAME
500 && SSA_NAME_VALUE (op1))
501 op1 = SSA_NAME_VALUE (op1);
502 else
503 break;
507 if (handle_dominating_asserts)
509 /* Now see if the operand was consumed by an ASSERT_EXPR
510 which dominates E->src. If so, we want to replace the
511 operand with the LHS of the ASSERT_EXPR. */
512 if (TREE_CODE (op0) == SSA_NAME)
513 op0 = lhs_of_dominating_assert (op0, e->src, stmt);
515 if (TREE_CODE (op1) == SSA_NAME)
516 op1 = lhs_of_dominating_assert (op1, e->src, stmt);
519 /* We may need to canonicalize the comparison. For
520 example, op0 might be a constant while op1 is an
521 SSA_NAME. Failure to canonicalize will cause us to
522 miss threading opportunities. */
523 if (tree_swap_operands_p (op0, op1, false))
525 cond_code = swap_tree_comparison (cond_code);
526 std::swap (op0, op1);
529 /* Stuff the operator and operands into our dummy conditional
530 expression. */
531 gimple_cond_set_code (dummy_cond, cond_code);
532 gimple_cond_set_lhs (dummy_cond, op0);
533 gimple_cond_set_rhs (dummy_cond, op1);
535 /* We absolutely do not care about any type conversions;
536 we only care about a zero/nonzero value. */
537 fold_defer_overflow_warnings ();
539 cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
540 if (cached_lhs)
541 while (CONVERT_EXPR_P (cached_lhs))
542 cached_lhs = TREE_OPERAND (cached_lhs, 0);
544 fold_undefer_overflow_warnings ((cached_lhs
545 && is_gimple_min_invariant (cached_lhs)),
546 stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
548 /* If we have not simplified the condition down to an invariant,
549 then use the pass specific callback to simplify the condition. */
550 if (!cached_lhs
551 || !is_gimple_min_invariant (cached_lhs))
552 cached_lhs = (*simplify) (dummy_cond, stmt, avail_exprs_stack);
554 /* If we were testing an integer/pointer against a constant, then
555 we can use the FSM code to trace the value of the SSA_NAME. If
556 a value is found, then the condition will collapse to a constant.
558 Return the SSA_NAME we want to trace back rather than the full
559 expression and give the FSM threader a chance to find its value. */
560 if (cached_lhs == NULL)
562 /* Recover the original operands. They may have been simplified
563 using context sensitive equivalences. Those context sensitive
564 equivalences may not be valid on paths found by the FSM optimizer. */
565 tree op0 = gimple_cond_lhs (stmt);
566 tree op1 = gimple_cond_rhs (stmt);
568 if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
569 || POINTER_TYPE_P (TREE_TYPE (op0)))
570 && TREE_CODE (op0) == SSA_NAME
571 && TREE_CODE (op1) == INTEGER_CST)
572 return op0;
575 return cached_lhs;
578 if (code == GIMPLE_SWITCH)
579 cond = gimple_switch_index (as_a <gswitch *> (stmt));
580 else if (code == GIMPLE_GOTO)
581 cond = gimple_goto_dest (stmt);
582 else
583 gcc_unreachable ();
585 /* We can have conditionals which just test the state of a variable
586 rather than use a relational operator. These are simpler to handle. */
587 if (TREE_CODE (cond) == SSA_NAME)
589 tree original_lhs = cond;
590 cached_lhs = cond;
592 /* Get the variable's current value from the equivalence chains.
594 It is possible to get loops in the SSA_NAME_VALUE chains
595 (consider threading the backedge of a loop where we have
596 a loop invariant SSA_NAME used in the condition).  */
597 if (cached_lhs)
599 for (int i = 0; i < 2; i++)
601 if (TREE_CODE (cached_lhs) == SSA_NAME
602 && SSA_NAME_VALUE (cached_lhs))
603 cached_lhs = SSA_NAME_VALUE (cached_lhs);
604 else
605 break;
609 /* If we're dominated by a suitable ASSERT_EXPR, then
610 update CACHED_LHS appropriately. */
611 if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
612 cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);
614 /* If we haven't simplified to an invariant yet, then use the
615 pass specific callback to try and simplify it further. */
616 if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
617 cached_lhs = (*simplify) (stmt, stmt, avail_exprs_stack);
619 /* We couldn't find an invariant. But, callers of this
620 function may be able to do something useful with the
621 unmodified destination. */
622 if (!cached_lhs)
623 cached_lhs = original_lhs;
625 else
626 cached_lhs = NULL;
628 return cached_lhs;
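/* Added example: with x_1 == 7 recorded while walking E->dest, a block
   ending in

	if (x_1 > 5)
	  goto <bb A>;
	else
	  goto <bb B>;

   has its operands replaced from the SSA_NAME_VALUE chains, fold_binary
   collapses the comparison to a true constant, and the caller can extend
   the jump thread path along the edge to <bb A>.  */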
631 /* Copy debug stmts from DEST's chain of single predecessors up to
632 SRC, so that we don't lose the bindings as PHI nodes are introduced
633 when DEST gains new predecessors. */
634 void
635 propagate_threaded_block_debug_into (basic_block dest, basic_block src)
637 if (!MAY_HAVE_DEBUG_STMTS)
638 return;
640 if (!single_pred_p (dest))
641 return;
643 gcc_checking_assert (dest != src);
645 gimple_stmt_iterator gsi = gsi_after_labels (dest);
646 int i = 0;
647 const int alloc_count = 16; // ?? Should this be a PARAM?
649 /* Estimate the number of debug vars overridden in the beginning of
650 DEST, to tell how many we're going to need to begin with. */
651 for (gimple_stmt_iterator si = gsi;
652 i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
654 gimple *stmt = gsi_stmt (si);
655 if (!is_gimple_debug (stmt))
656 break;
657 i++;
660 auto_vec<tree, alloc_count> fewvars;
661 hash_set<tree> *vars = NULL;
663 /* If we're already starting with 3/4 of alloc_count, go for a
664 hash_set, otherwise start with an unordered stack-allocated
665 VEC. */
666 if (i * 4 > alloc_count * 3)
667 vars = new hash_set<tree>;
669 /* Now go through the initial debug stmts in DEST again, this time
670 actually inserting in VARS or FEWVARS. Don't bother checking for
671 duplicates in FEWVARS. */
672 for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
674 gimple *stmt = gsi_stmt (si);
675 if (!is_gimple_debug (stmt))
676 break;
678 tree var;
680 if (gimple_debug_bind_p (stmt))
681 var = gimple_debug_bind_get_var (stmt);
682 else if (gimple_debug_source_bind_p (stmt))
683 var = gimple_debug_source_bind_get_var (stmt);
684 else
685 gcc_unreachable ();
687 if (vars)
688 vars->add (var);
689 else
690 fewvars.quick_push (var);
693 basic_block bb = dest;
697 bb = single_pred (bb);
698 for (gimple_stmt_iterator si = gsi_last_bb (bb);
699 !gsi_end_p (si); gsi_prev (&si))
701 gimple *stmt = gsi_stmt (si);
702 if (!is_gimple_debug (stmt))
703 continue;
705 tree var;
707 if (gimple_debug_bind_p (stmt))
708 var = gimple_debug_bind_get_var (stmt);
709 else if (gimple_debug_source_bind_p (stmt))
710 var = gimple_debug_source_bind_get_var (stmt);
711 else
712 gcc_unreachable ();
714 /* Discard debug bind overlaps. ??? Unlike stmts from src,
715 copied into a new block that will precede BB, debug bind
716 stmts in bypassed BBs may actually be discarded if
717 they're overwritten by subsequent debug bind stmts, which
718 might be a problem once we introduce stmt frontier notes
719 or somesuch. Adding `&& bb == src' to the condition
720 below will preserve all potentially relevant debug
721 notes. */
722 if (vars && vars->add (var))
723 continue;
724 else if (!vars)
726 int i = fewvars.length ();
727 while (i--)
728 if (fewvars[i] == var)
729 break;
730 if (i >= 0)
731 continue;
733 if (fewvars.length () < (unsigned) alloc_count)
734 fewvars.quick_push (var);
735 else
737 vars = new hash_set<tree>;
738 for (i = 0; i < alloc_count; i++)
739 vars->add (fewvars[i]);
740 fewvars.release ();
741 vars->add (var);
745 stmt = gimple_copy (stmt);
746 /* ??? Should we drop the location of the copy to denote
747 they're artificial bindings? */
748 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
751 while (bb != src && single_pred_p (bb));
753 if (vars)
754 delete vars;
755 else if (fewvars.exists ())
756 fewvars.release ();
759 /* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
760 need not be duplicated as part of the CFG/SSA updating process).
762 If it is threadable, add it to PATH and VISITED and recurse, ultimately
763 returning TRUE from the toplevel call. Otherwise do nothing and
764 return false.
766 DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
767 try and simplify the condition at the end of TAKEN_EDGE->dest.
769 The available expression table is referenced via AVAIL_EXPRS_STACK. */
771 static bool
772 thread_around_empty_blocks (edge taken_edge,
773 gcond *dummy_cond,
774 class avail_exprs_stack *avail_exprs_stack,
775 bool handle_dominating_asserts,
776 pfn_simplify simplify,
777 bitmap visited,
778 vec<jump_thread_edge *> *path,
779 bool *backedge_seen_p)
781 basic_block bb = taken_edge->dest;
782 gimple_stmt_iterator gsi;
783 gimple *stmt;
784 tree cond;
786 /* The key property of these blocks is that they need not be duplicated
787 when threading. Thus they can not have visible side effects such
788 as PHI nodes. */
789 if (!gsi_end_p (gsi_start_phis (bb)))
790 return false;
792 /* Skip over DEBUG statements at the start of the block. */
793 gsi = gsi_start_nondebug_bb (bb);
795 /* If the block has no statements, but does have a single successor, then
796 it's just a forwarding block and we can thread through it trivially.
798 However, note that just threading through empty blocks with single
799 successors is not inherently profitable. For the jump thread to
800 be profitable, we must avoid a runtime conditional.
802 By taking the return value from the recursive call, we get the
803 desired effect of returning TRUE when we found a profitable jump
804 threading opportunity and FALSE otherwise.
806 This is particularly important when this routine is called after
807 processing a joiner block. Returning TRUE too aggressively in
808 that case results in pointless duplication of the joiner block. */
809 if (gsi_end_p (gsi))
811 if (single_succ_p (bb))
813 taken_edge = single_succ_edge (bb);
814 if (!bitmap_bit_p (visited, taken_edge->dest->index))
816 jump_thread_edge *x
817 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
818 path->safe_push (x);
819 bitmap_set_bit (visited, taken_edge->dest->index);
820 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
821 if (*backedge_seen_p)
822 simplify = dummy_simplify;
823 return thread_around_empty_blocks (taken_edge,
824 dummy_cond,
825 avail_exprs_stack,
826 handle_dominating_asserts,
827 simplify,
828 visited,
829 path,
830 backedge_seen_p);
834 /* We have a block with no statements, but multiple successors? */
835 return false;
838 /* The only real statement this block can have is a control
839 flow altering statement.  Anything else stops the thread. */
840 stmt = gsi_stmt (gsi);
841 if (gimple_code (stmt) != GIMPLE_COND
842 && gimple_code (stmt) != GIMPLE_GOTO
843 && gimple_code (stmt) != GIMPLE_SWITCH)
844 return false;
846 /* If we have traversed a backedge, then we do not want to look
847 at certain expressions in the table that can not be relied upon.
848 Luckily the only code that looked at those expressions is the
849 SIMPLIFY callback, which we replace if we can no longer use it. */
850 if (*backedge_seen_p)
851 simplify = dummy_simplify;
853 /* Extract and simplify the condition. */
854 cond = simplify_control_stmt_condition (taken_edge, stmt,
855 avail_exprs_stack, dummy_cond,
856 simplify, handle_dominating_asserts);
858 /* If the condition can be statically computed and we have not already
859 visited the destination edge, then add the taken edge to our thread
860 path. */
861 if (cond && is_gimple_min_invariant (cond))
863 taken_edge = find_taken_edge (bb, cond);
865 if (bitmap_bit_p (visited, taken_edge->dest->index))
866 return false;
867 bitmap_set_bit (visited, taken_edge->dest->index);
869 jump_thread_edge *x
870 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
871 path->safe_push (x);
872 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
873 if (*backedge_seen_p)
874 simplify = dummy_simplify;
876 thread_around_empty_blocks (taken_edge,
877 dummy_cond,
878 avail_exprs_stack,
879 handle_dominating_asserts,
880 simplify,
881 visited,
882 path,
883 backedge_seen_p);
884 return true;
887 return false;
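/* Added note: a typical block handled above is a bare forwarder such as

	<bb 9>:
	goto <bb 12>;

   or an empty block ending in a condition already determined by the
   equivalences gathered so far.  Such blocks are appended to the path with
   EDGE_NO_COPY_SRC_BLOCK since they need not be duplicated, which lets the
   threader pick up secondary opportunities cheaply.  */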
890 /* We are exiting E->src, see if E->dest ends with a conditional
891 jump which has a known value when reached via E.
893 E->dest can have arbitrary side effects which, if threading is
894 successful, will be maintained.
896 Special care is necessary if E is a back edge in the CFG as we
897 may have already recorded equivalences for E->dest into our
898 various tables, including the result of the conditional at
899 the end of E->dest. Threading opportunities are severely
900 limited in that case to avoid short-circuiting the loop
901 incorrectly.
903 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
904 to avoid allocating memory.
906 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
907 the simplified condition with left-hand sides of ASSERT_EXPRs they are
908 used in.
910 CONST_AND_COPIES is used to undo temporary equivalences created during the walk of
911 E->dest.
913 SIMPLIFY is a pass-specific function used to simplify statements.
915 Our caller is responsible for restoring the state of the expression
916 and const_and_copies stacks.
918 A positive return value indicates success.  A zero return value indicates
919 failure, but the block can still be duplicated as a joiner in a jump thread
920 path.  A negative value indicates the block should not be duplicated and
921 thus is not suitable for a joiner in a jump threading path. */
923 static int
924 thread_through_normal_block (edge e,
925 gcond *dummy_cond,
926 bool handle_dominating_asserts,
927 const_and_copies *const_and_copies,
928 avail_exprs_stack *avail_exprs_stack,
929 pfn_simplify simplify,
930 vec<jump_thread_edge *> *path,
931 bitmap visited,
932 bool *backedge_seen_p)
934 /* If we have traversed a backedge, then we do not want to look
935 at certain expressions in the table that can not be relied upon.
936 Luckily the only code that looked at those expressions is the
937 SIMPLIFY callback, which we replace if we can no longer use it. */
938 if (*backedge_seen_p)
939 simplify = dummy_simplify;
941 /* We want to record any equivalences created by traversing E. */
942 if (!handle_dominating_asserts)
943 record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
945 /* PHIs create temporary equivalences.
946 Note that if we found a PHI that made the block non-threadable, then
947 we need to bubble that up to our caller in the same manner we do
948 when we prematurely stop processing statements below. */
949 if (!record_temporary_equivalences_from_phis (e, const_and_copies))
950 return -1;
952 /* Now walk each statement recording any context sensitive
953 temporary equivalences we can detect. */
954 gimple *stmt
955 = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
956 avail_exprs_stack,
957 simplify,
958 *backedge_seen_p);
960 /* There are two reasons STMT might be null, and distinguishing
961 between them is important.
963 First, the block may not have had any statements.  For example, it
964 might have some PHIs and unconditionally transfer control elsewhere.
965 Such blocks are suitable for jump threading, particularly as a
966 joiner block.
968 The second reason would be if we did not process all the statements
969 in the block (because there were too many to make duplicating the
970 block profitable).  If we did not look at all the statements, then
971 we may not have invalidated everything needing invalidation. Thus
972 we must signal to our caller that this block is not suitable for
973 use as a joiner in a threading path. */
974 if (!stmt)
976 /* First case.  The block simply doesn't have any statements, but
977 does have PHIs. */
978 if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
979 && !gsi_end_p (gsi_start_phis (e->dest)))
980 return 0;
982 /* Second case. */
983 return -1;
986 /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
987 will be taken. */
988 if (gimple_code (stmt) == GIMPLE_COND
989 || gimple_code (stmt) == GIMPLE_GOTO
990 || gimple_code (stmt) == GIMPLE_SWITCH)
992 tree cond;
994 /* Extract and simplify the condition. */
995 cond = simplify_control_stmt_condition (e, stmt, avail_exprs_stack,
996 dummy_cond, simplify,
997 handle_dominating_asserts);
999 if (!cond)
1000 return 0;
1002 if (is_gimple_min_invariant (cond))
1004 edge taken_edge = find_taken_edge (e->dest, cond);
1005 basic_block dest = (taken_edge ? taken_edge->dest : NULL);
1007 /* DEST could be NULL for a computed jump to an absolute
1008 address. */
1009 if (dest == NULL
1010 || dest == e->dest
1011 || bitmap_bit_p (visited, dest->index))
1012 return 0;
1014 /* Only push the EDGE_START_JUMP_THREAD marker if this is
1015 the first edge on the path. */
1016 if (path->length () == 0)
1018 jump_thread_edge *x
1019 = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1020 path->safe_push (x);
1021 *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
1024 jump_thread_edge *x
1025 = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
1026 path->safe_push (x);
1027 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
1028 if (*backedge_seen_p)
1029 simplify = dummy_simplify;
1031 /* See if we can thread through DEST as well, this helps capture
1032 secondary effects of threading without having to re-run DOM or
1033 VRP.
1035 We don't want to thread back to a block we have already
1036 visited. This may be overly conservative. */
1037 bitmap_set_bit (visited, dest->index);
1038 bitmap_set_bit (visited, e->dest->index);
1039 thread_around_empty_blocks (taken_edge,
1040 dummy_cond,
1041 avail_exprs_stack,
1042 handle_dominating_asserts,
1043 simplify,
1044 visited,
1045 path,
1046 backedge_seen_p);
1047 return 1;
1050 if (!flag_expensive_optimizations
1051 || optimize_function_for_size_p (cfun)
1052 || !(TREE_CODE (cond) == SSA_NAME
1053 || (TREE_CODE_CLASS (TREE_CODE (cond)) == tcc_comparison
1054 && TREE_CODE (TREE_OPERAND (cond, 0)) == SSA_NAME
1055 && TREE_CODE (TREE_OPERAND (cond, 1)) == INTEGER_CST))
1056 || e->dest->loop_father != e->src->loop_father
1057 || loop_depth (e->dest->loop_father) == 0)
1058 return 0;
1060 /* Extract the SSA_NAME we want to trace backwards if COND is not
1061 already a bare SSA_NAME. */
1062 if (TREE_CODE (cond) != SSA_NAME)
1063 cond = TREE_OPERAND (cond, 0);
1065 /* When COND cannot be simplified, try to find paths from a control
1066 statement back through the PHI nodes which would affect that control
1067 statement. */
1068 find_jump_threads_backwards (cond, e->dest);
1070 return 0;
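/* Added note: the find_jump_threads_backwards call above hands the
   unresolved condition to the backwards (FSM) threader.  For example, if
   the condition tests x_1 and x_1 is fed by a PHI of constants around a
   loop, tracing x_1 backwards through the PHI arguments can discover paths
   on which the condition is statically known even though no local
   equivalence proved it here.  */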
1073 /* We are exiting E->src, see if E->dest ends with a conditional
1074 jump which has a known value when reached via E.
1076 Special care is necessary if E is a back edge in the CFG as we
1077 may have already recorded equivalences for E->dest into our
1078 various tables, including the result of the conditional at
1079 the end of E->dest. Threading opportunities are severely
1080 limited in that case to avoid short-circuiting the loop
1081 incorrectly.
1083 Note it is quite common for the first block inside a loop to
1084 end with a conditional which is either always true or always
1085 false when reached via the loop backedge. Thus we do not want
1086 to blindly disable threading across a loop backedge.
1088 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
1089 to avoid allocating memory.
1091 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
1092 the simplified condition with left-hand sides of ASSERT_EXPRs they are
1093 used in.
1095 CONST_AND_COPIES is used to undo temporary equivalences created during the
1096 walk of E->dest.
1098 The available expression table is referenced via AVAIL_EXPRS_STACK.
1100 SIMPLIFY is a pass-specific function used to simplify statements. */
1102 void
1103 thread_across_edge (gcond *dummy_cond,
1104 edge e,
1105 bool handle_dominating_asserts,
1106 class const_and_copies *const_and_copies,
1107 class avail_exprs_stack *avail_exprs_stack,
1108 tree (*simplify) (gimple *, gimple *,
1109 class avail_exprs_stack *))
1111 bitmap visited = BITMAP_ALLOC (NULL);
1112 bool backedge_seen;
1114 stmt_count = 0;
1116 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1117 bitmap_clear (visited);
1118 bitmap_set_bit (visited, e->src->index);
1119 bitmap_set_bit (visited, e->dest->index);
1120 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1121 if (backedge_seen)
1122 simplify = dummy_simplify;
1124 int threaded = thread_through_normal_block (e, dummy_cond,
1125 handle_dominating_asserts,
1126 const_and_copies,
1127 avail_exprs_stack,
1128 simplify, path,
1129 visited, &backedge_seen);
1130 if (threaded > 0)
1132 propagate_threaded_block_debug_into (path->last ()->e->dest,
1133 e->dest);
1134 const_and_copies->pop_to_marker ();
1135 BITMAP_FREE (visited);
1136 register_jump_thread (path);
1137 return;
1139 else
1141 /* Negative and zero return values indicate no threading was possible,
1142 thus there should be no edges on the thread path and no need to walk
1143 through the vector entries. */
1144 gcc_assert (path->length () == 0);
1145 path->release ();
1146 delete path;
1148 /* A negative status indicates the target block was deemed too big to
1149 duplicate. Just quit now rather than trying to use the block as
1150 a joiner in a jump threading path.
1152 This prevents unnecessary code growth, but more importantly if we
1153 do not look at all the statements in the block, then we may have
1154 missed some invalidations if we had traversed a backedge! */
1155 if (threaded < 0)
1157 BITMAP_FREE (visited);
1158 const_and_copies->pop_to_marker ();
1159 return;
1163 /* We were unable to determine what out edge from E->dest is taken. However,
1164 we might still be able to thread through successors of E->dest. This
1165 often occurs when E->dest is a joiner block which then fans back out
1166 based on redundant tests.
1168 If so, we'll copy E->dest and redirect the appropriate predecessor to
1169 the copy. Within the copy of E->dest, we'll thread one or more edges
1170 to points deeper in the CFG.
1172 This is a stopgap until we have a more structured approach to path
1173 isolation. */
1175 edge taken_edge;
1176 edge_iterator ei;
1177 bool found;
1179 /* If E->dest has abnormal outgoing edges, then there's no guarantee
1180 we can safely redirect any of the edges. Just punt those cases. */
1181 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1182 if (taken_edge->flags & EDGE_ABNORMAL)
1184 const_and_copies->pop_to_marker ();
1185 BITMAP_FREE (visited);
1186 return;
1189 /* Look at each successor of E->dest to see if we can thread through it. */
1190 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1192 /* Push a fresh marker so we can unwind the equivalences created
1193 for each of E->dest's successors. */
1194 const_and_copies->push_marker ();
1195 if (avail_exprs_stack)
1196 avail_exprs_stack->push_marker ();
1198 /* Avoid threading to any block we have already visited. */
1199 bitmap_clear (visited);
1200 bitmap_set_bit (visited, e->src->index);
1201 bitmap_set_bit (visited, e->dest->index);
1202 bitmap_set_bit (visited, taken_edge->dest->index);
1203 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1205 /* Record whether or not we were able to thread through a successor
1206 of E->dest. */
1207 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1208 path->safe_push (x);
1210 x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
1211 path->safe_push (x);
1212 found = false;
1213 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1214 backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
1215 if (backedge_seen)
1216 simplify = dummy_simplify;
1217 found = thread_around_empty_blocks (taken_edge,
1218 dummy_cond,
1219 avail_exprs_stack,
1220 handle_dominating_asserts,
1221 simplify,
1222 visited,
1223 path,
1224 &backedge_seen);
1226 if (backedge_seen)
1227 simplify = dummy_simplify;
1229 if (!found)
1230 found = thread_through_normal_block (path->last ()->e, dummy_cond,
1231 handle_dominating_asserts,
1232 const_and_copies,
1233 avail_exprs_stack,
1234 simplify, path,
1235 visited, &backedge_seen) > 0;
1237 /* If we were able to thread through a successor of E->dest, then
1238 record the jump threading opportunity. */
1239 if (found)
1241 propagate_threaded_block_debug_into (path->last ()->e->dest,
1242 taken_edge->dest);
1243 register_jump_thread (path);
1245 else
1247 delete_jump_thread_path (path);
1250 /* And unwind the equivalence table. */
1251 if (avail_exprs_stack)
1252 avail_exprs_stack->pop_to_marker ();
1253 const_and_copies->pop_to_marker ();
1255 BITMAP_FREE (visited);
1258 const_and_copies->pop_to_marker ();