/* SSA Jump Threading
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "dumpfile.h"
#include "tree-flow.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;
/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;
/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}
/* Initialize the per SSA_NAME value-handles array.  */

void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}
/* Free the per SSA_NAME value-handle array.  */

void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}
/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
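
/* Illustrative note, not part of the original sources: a classic
   threadable block looks like

       <bb 4>:
       x_4 = PHI <1(2), 0(3)>
       if (x_4 == 1)
         goto <bb 5>;
       else
         goto <bb 6>;

   It has multiple predecessors, multiple successors and ends in a
   GIMPLE_COND, so potentially_threadable_block returns true; whether
   the condition is actually known on a given incoming edge is decided
   later by thread_across_edge.  */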
/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        return gimple_assign_lhs (use_stmt);
    }
  return op;
}
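
/* Illustrative example, not from the original sources: after VRP has
   inserted assertions, a block dominated by "if (x_1 > 0)" may contain

       x_2 = ASSERT_EXPR <x_1, x_1 > 0>;

   and lhs_of_dominating_assert (x_1, bb, stmt) would then return x_2,
   letting later simplification use the range information attached to
   x_2 rather than the unconstrained x_1.  */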
/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
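
/* Illustrative note, not part of the original sources: after recording
   the equivalence x_5 == 0 the unwind stack holds the pair

       [..., <previous SSA_NAME_VALUE of x_5>, x_5]

   remove_temporary_equivalences later pops entries two at a time,
   restoring each name's previous value, and stops if it encounters a
   NULL entry (see the comment in that routine).  */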
/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
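
/* Illustrative example, not from the original sources: when threading
   the edge 2->4 through

       <bb 4>:
       x_4 = PHI <1(2), x_3(3)>
       if (x_4 == 1)
         ...

   we temporarily record x_4 == 1, which lets the condition at the end
   of the block fold to a constant for that incoming edge.  */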
/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
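
/* Illustrative example, not from the original sources: if temporary
   copy propagation has turned

       x_5 = a_1 + b_2;

   into "x_5 = 3 + 4", fold_assignment_stmt returns the constant 7,
   which record_temporary_equivalences_from_stmts_at_dest then records
   as the context sensitive value of x_5.  */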
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */
static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  vec<tree> *stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        continue;

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */
      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            continue;
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}
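
/* Illustrative example, not from the original sources: suppose E->dest
   contains

       x_5 = a_1 + 1;
       if (x_5 > 10)
         ...

   and the PHI walk already recorded a_1 == 3 for edge E.  The loop
   above temporarily replaces a_1 with 3, folds the RHS to 4, records
   x_5 == 4 as a context sensitive equivalence, and then restores the
   original operands of the statement.  */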
/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
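
/* Illustrative example, not from the original sources: with the
   temporary equivalence x_4 == 1 recorded from a PHI, the condition
   "if (x_4 == 1)" is rewritten into the scratch GIMPLE_COND as
   "1 == 1", folds to boolean true, and the caller can then pick the
   outgoing edge statically.  */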

/* Return TRUE if the statement at the end of e->dest depends on
   the output of any statement in BB.  Otherwise return FALSE.

   This is used when we are threading a backedge and need to ensure
   that temporary equivalences from BB do not affect the condition
   in e->dest.  */

static bool
cond_arg_set_in_bb (edge e, basic_block bb)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  gimple last = last_stmt (e->dest);

  /* E->dest does not have to end with a control transferring
     instruction.  This can occur when we try to extend a jump
     threading opportunity deeper into the CFG.  In that case
     it is safe for this check to return false.  */
  if (!last)
    return false;

  if (gimple_code (last) != GIMPLE_COND
      && gimple_code (last) != GIMPLE_GOTO
      && gimple_code (last) != GIMPLE_SWITCH)
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (TREE_CODE (use) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (use)) == bb)
        return true;
    }
  return false;
}

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  vec<tree, va_stack> fewvars = vec<tree, va_stack>();
  pointer_set_t *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     pointer_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = pointer_set_create ();
  else if (alloc_count)
    vec_stack_alloc (tree, fewvars, alloc_count);

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        pointer_set_insert (vars, var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && pointer_set_insert (vars, var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = pointer_set_create ();
                  for (i = 0; i < alloc_count; i++)
                    pointer_set_insert (vars, fewvars[i]);
                  fewvars.release ();
                  pointer_set_insert (vars, var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    pointer_set_destroy (vars);
  else if (fewvars.exists ())
    fewvars.release ();
}
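
/* Illustrative note, not part of the original sources: the statements
   copied above are GIMPLE debug binds such as

       # DEBUG x => x_3

   Re-emitting them at the start of DEST keeps the debug info for "x"
   meaningful after DEST is duplicated and gains new predecessors.  */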

/* TAKEN_EDGE represents an edge taken as a result of jump threading.
   See if we can thread around TAKEN_EDGE->dest as well.  If so, return
   the edge out of TAKEN_EDGE->dest that we can statically compute will be
   taken.

   We are much more restrictive as to the contents of TAKEN_EDGE->dest
   as the path isolation code in tree-ssa-threadupdate.c isn't prepared
   to handle copying intermediate blocks on a threaded path.

   Long term a more consistent and structured approach to path isolation
   would be a huge help.  */
static edge
thread_around_empty_block (edge taken_edge,
                           gimple dummy_cond,
                           bool handle_dominating_asserts,
                           tree (*simplify) (gimple, gimple),
                           bitmap visited)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* This block must have a single predecessor (E->dest).  */
  if (!single_pred_p (bb))
    return NULL;

  /* This block must have more than one successor.  */
  if (single_succ_p (bb))
    return NULL;

  /* This block can have no PHI nodes.  This is overly conservative.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return NULL;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  if (gsi_end_p (gsi))
    return NULL;

  /* This block can have no statements other than its control altering
     statement.  This is overly conservative.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return NULL;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      edge taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return NULL;
      bitmap_set_bit (visited, taken_edge->dest->index);
      return taken_edge;
    }

  return NULL;
}
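
/* Illustrative example, not from the original sources: after threading
   into a block whose condition folds to true, TAKEN_EDGE may lead to a
   block containing nothing but

       if (flag_2 != 0)
         goto <bb 7>;
       else
         goto <bb 8>;

   If flag_2 has a known value on the threaded path, the routine above
   returns the corresponding outgoing edge so the thread can be extended
   one block further.  */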

/* E1 and E2 are edges into the same basic block.  Return TRUE if the
   PHI arguments associated with those edges are equal or there are no
   PHI arguments, otherwise return FALSE.  */

static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
  gimple_stmt_iterator gsi;
  int indx1 = e1->dest_idx;
  int indx2 = e2->dest_idx;

  for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
                            gimple_phi_arg_def (phi, indx2), 0))
        return false;
    }
  return true;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    vec<tree> *stack,
                    tree (*simplify) (gimple, gimple))
{
  gimple stmt;

  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      if (cond_arg_set_in_bb (e, e->dest))
        goto fail;
    }

  stmt_count = 0;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    goto fail;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    goto fail;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);
          bitmap visited;
          edge e2;

          if (dest == e->dest)
            goto fail;

          /* DEST could be null for a computed jump to an absolute
             address.  If DEST is not null, then see if we can thread
             through it as well, this helps capture secondary effects
             of threading without having to re-run DOM or VRP.  */
          if (dest
              && ((e->flags & EDGE_DFS_BACK) == 0
                  || ! cond_arg_set_in_bb (taken_edge, e->dest)))
            {
              /* We don't want to thread back to a block we have already
                 visited.  This may be overly conservative.  */
              visited = BITMAP_ALLOC (NULL);
              bitmap_set_bit (visited, dest->index);
              bitmap_set_bit (visited, e->dest->index);
              do
                {
                  e2 = thread_around_empty_block (taken_edge,
                                                  dummy_cond,
                                                  handle_dominating_asserts,
                                                  simplify,
                                                  visited);
                  if (e2)
                    taken_edge = e2;
                }
              while (e2);
              BITMAP_FREE (visited);
            }

          remove_temporary_equivalences (stack);
          if (taken_edge)
            {
              propagate_threaded_block_debug_into (taken_edge->dest, e->dest);
              register_jump_thread (e, taken_edge, NULL);
            }
          return;
        }
    }

 /* We were unable to determine what out edge from E->dest is taken.  However,
    we might still be able to thread through successors of E->dest.  This
    often occurs when E->dest is a joiner block which then fans back out
    based on redundant tests.

    If so, we'll copy E->dest and redirect the appropriate predecessor to
    the copy.  Within the copy of E->dest, we'll thread one or more edges
    to points deeper in the CFG.

    This is a stopgap until we have a more structured approach to path
    isolation.  */
  {
    edge e2, e3, taken_edge;
    edge_iterator ei;
    bool found;
    bitmap visited = BITMAP_ALLOC (NULL);

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, taken_edge->dest->index);
        bitmap_set_bit (visited, e->dest->index);

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        found = false;
        e3 = taken_edge;
        do
          {
            if ((e->flags & EDGE_DFS_BACK) == 0
                || ! cond_arg_set_in_bb (e3, e->dest))
              e2 = thread_around_empty_block (e3,
                                              dummy_cond,
                                              handle_dominating_asserts,
                                              simplify,
                                              visited);
            else
              e2 = NULL;

            if (e2)
              {
                e3 = e2;
                found = true;
              }
          }
        while (e2);

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            edge tmp;
            /* If there is already an edge from the block to be duplicated
               (E2->src) to the final target (E3->dest), then make sure that
               the PHI args associated with the edges E2 and E3 are the
               same.  */
            tmp = find_edge (taken_edge->src, e3->dest);
            if (!tmp || phi_args_equal_on_edges (tmp, e3))
              {
                propagate_threaded_block_debug_into (e3->dest,
                                                     taken_edge->dest);
                register_jump_thread (e, taken_edge, e3);
              }
          }
      }
    BITMAP_FREE (visited);
  }

 fail:
  remove_temporary_equivalences (stack);
}
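
/* Usage sketch, not part of the original sources: a client pass (in the
   style of DOM or VRP) is expected to drive this file roughly as
   follows.  The exact setup below is illustrative only, and
   my_simplify stands for the pass's own "tree fn (gimple, gimple)"
   callback.

       gimple dummy_cond
         = gimple_build_cond (NE_EXPR, integer_zero_node, integer_zero_node,
                              NULL, NULL);
       vec<tree> stack = vNULL;

       threadedge_initialize_values ();
       mark_dfs_back_edges ();
       FOR_EACH_BB (bb)
         if (potentially_threadable_block (bb))
           {
             edge e;
             edge_iterator ei;
             FOR_EACH_EDGE (e, ei, bb->preds)
               thread_across_edge (dummy_cond, e, true, &stack, my_simplify);
           }
       thread_through_all_blocks (false);
       threadedge_finalize_values ();
       stack.release ();  */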