gcc/tree-ssa-threadedge.c

/* SSA Jump Threading
   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "dumpfile.h"
#include "tree-ssa.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
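
/* A small worked example of the transformation this file supports
   (hypothetical GIMPLE, not taken from any testcase):

       if (a_1 > 5)                       <-- block B1
         x_2 = 1;                         <-- block B2
       else
         x_3 = 0;                         <-- block B3
       x_4 = PHI <x_2(B2), x_3(B3)>
       if (x_4 != 0)                      <-- block B4, a "joiner" block
         ...

   When B4 is entered from B2 we know x_4 is 1 and the second conditional
   must take its true arm; entered from B3 it must take its false arm.
   Jump threading duplicates B4 (or threads straight through it when it
   has no side effects) so that each predecessor branches directly to the
   arm that will be taken, avoiding a runtime conditional.  */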

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
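
/* As an illustration (with hypothetical SSA names), recording the temporary
   equivalences x_1 == 0 and then y_2 == z_3 for one edge leaves STACK looking
   like

       ... | NULL | old value of x_1 | x_1 | old value of y_2 | y_2

   with the top of the stack on the right.  remove_temporary_equivalences
   pops each name together with its previous value, restores that value,
   and stops at the NULL marker delimiting the entries for the current
   edge.  */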

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
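
/* For example (hypothetical GIMPLE), if E->dest starts with

       x_3 = PHI <5(2), y_7(4)>

   and E comes from basic block 2, the loop above records the context
   sensitive equivalence x_3 == 5, which later statements and the final
   conditional in E->dest can be simplified against.  */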

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  vec<tree> *stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        continue;

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            continue;
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
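
/* As a worked example (with hypothetical SSA names and constants), suppose
   E->dest ends in "if (i_5 > 3)" and the walk over E recorded the temporary
   equivalence i_5 == 7.  simplify_control_stmt_condition substitutes 7 for
   i_5, folds "7 > 3" down to boolean_true_node, and the callers below can
   then determine which outgoing edge of E->dest is taken whenever the block
   is reached via E.  */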

/* Return TRUE if the statement at the end of e->dest depends on
   the output of any statement in BB.  Otherwise return FALSE.

   This is used when we are threading a backedge and need to ensure
   that temporary equivalences from BB do not affect the condition
   in e->dest.  */

static bool
cond_arg_set_in_bb (edge e, basic_block bb)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  gimple last = last_stmt (e->dest);

  /* E->dest does not have to end with a control transferring
     instruction.  This can occur when we try to extend a jump
     threading opportunity deeper into the CFG.  In that case
     it is safe for this check to return false.  */
  if (!last)
    return false;

  if (gimple_code (last) != GIMPLE_COND
      && gimple_code (last) != GIMPLE_GOTO
      && gimple_code (last) != GIMPLE_SWITCH)
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (TREE_CODE (use) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (use)) == bb)
        return true;
    }
  return false;
}

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  vec<tree, va_stack> fewvars = vNULL;
  pointer_set_t *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     pointer_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = pointer_set_create ();
  else if (alloc_count)
    vec_stack_alloc (tree, fewvars, alloc_count);

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        pointer_set_insert (vars, var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && pointer_set_insert (vars, var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = pointer_set_create ();
                  for (i = 0; i < alloc_count; i++)
                    pointer_set_insert (vars, fewvars[i]);
                  fewvars.release ();
                  pointer_set_insert (vars, var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    pointer_set_destroy (vars);
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
                            gimple dummy_cond,
                            bool handle_dominating_asserts,
                            tree (*simplify) (gimple, gimple),
                            bitmap visited,
                            vec<jump_thread_edge *> *path)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if ((taken_edge->flags & EDGE_DFS_BACK) == 0
              && !bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);

      thread_around_empty_blocks (taken_edge,
                                  dummy_cond,
                                  handle_dominating_asserts,
                                  simplify,
                                  visited,
                                  path);
      return true;
    }

  return false;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.  */

static bool
thread_through_normal_block (edge e,
                             gimple dummy_cond,
                             bool handle_dominating_asserts,
                             vec<tree> *stack,
                             tree (*simplify) (gimple, gimple),
                             vec<jump_thread_edge *> *path,
                             bitmap visited)
{
  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      if (cond_arg_set_in_bb (e, e->dest))
        return false;
    }

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    return false;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    return false;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL || dest == e->dest || bitmap_bit_p (visited, dest->index))
            return false;

          jump_thread_edge *x
            = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
          path->safe_push (x);

          x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);

          /* See if we can thread through DEST as well, this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.  */
          if ((e->flags & EDGE_DFS_BACK) == 0
              || ! cond_arg_set_in_bb (taken_edge, e->dest))
            {
              /* We don't want to thread back to a block we have already
                 visited.  This may be overly conservative.  */
              bitmap_set_bit (visited, dest->index);
              bitmap_set_bit (visited, e->dest->index);
              thread_around_empty_blocks (taken_edge,
                                          dummy_cond,
                                          handle_dominating_asserts,
                                          simplify,
                                          visited,
                                          path);
            }
          return true;
        }
    }
  return false;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    vec<tree> *stack,
                    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  if (thread_through_normal_block (e, dummy_cond, handle_dominating_asserts,
                                   stack, simplify, path, visited))
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
                                           e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* There should be no edges on the path, so no need to walk through
         the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
    }

  /* We were unable to determine what out edge from E->dest is taken.  However,
     we might still be able to thread through successors of E->dest.  This
     often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, taken_edge->dest->index);
        bitmap_set_bit (visited, e->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);
        found = false;
        if ((e->flags & EDGE_DFS_BACK) == 0
            || ! cond_arg_set_in_bb (path->last ()->e, e->dest))
          found = thread_around_empty_blocks (taken_edge,
                                              dummy_cond,
                                              handle_dominating_asserts,
                                              simplify,
                                              visited,
                                              path);

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          {
            for (unsigned int i = 0; i < path->length (); i++)
              delete (*path)[i];
            path->release ();
          }
      }
    BITMAP_FREE (visited);
  }

  remove_temporary_equivalences (stack);
}