/* SSA Jump Threading
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "tree-flow.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
VEC(tree,heap) *ssa_name_values;
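
/* The vector is indexed by SSA_NAME_VERSION and grown lazily by
   set_ssa_name_value below; SSA_NAME_VALUE is the read side used
   throughout this file to look a recorded value back up.  */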

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= VEC_length (tree, ssa_name_values))
    VEC_safe_grow_cleared (tree, heap, ssa_name_values,
                           SSA_NAME_VERSION (name) + 1);
  VEC_replace (tree, ssa_name_values, SSA_NAME_VERSION (name), value);
}

/* Initialize the per SSA_NAME value-handles array.  */

void
threadedge_initialize_values (void)
{
  gcc_assert (ssa_name_values == NULL);
  ssa_name_values = VEC_alloc (tree, heap, num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */

void
threadedge_finalize_values (void)
{
  VEC_free (tree, heap, ssa_name_values);
}
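
/* Passes that drive jump threading (for example DOM and VRP) are
   expected to call threadedge_initialize_values before threading and
   threadedge_finalize_values when they are done, so the vector only
   lives for the duration of a single pass.  */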

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || !gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
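
/* For example, a join block that ends in

       if (x_1 > 10) goto <bb 5>; else goto <bb 6>;

   and has two or more predecessors is potentially threadable, while a
   block that unconditionally falls through to a single successor is
   not.  */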

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}
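
/* ASSERT_EXPRs are created by VRP; e.g. in the arm guarded by
   "if (x_1 > 0)" it may insert "x_5 = ASSERT_EXPR <x_1, x_1 > 0>".
   Substituting x_5 for x_1 lets later simplifications see the range
   information VRP attached to x_5.  */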

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (VEC(tree, heap) **stack)
{
  while (VEC_length (tree, *stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, *stack);

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = VEC_pop (tree, *stack);
      set_ssa_name_value (dest, prev_value);
    }
}
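
/* Entries are pushed in (previous value, SSA name) pairs by
   record_temporary_equivalence below, so popping two entries at a time
   restores exactly the state that existed before this edge was
   processed.  */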

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, VEC(tree, heap) **stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  VEC_reserve (tree, heap, *stack, 2);
  VEC_quick_push (tree, *stack, prev_x);
  VEC_quick_push (tree, *stack, x);
}
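
/* Note that when Y itself already has a recorded value we record X as
   equivalent to that value instead, which keeps SSA_NAME_VALUE chains
   short.  */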

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, VEC(tree, heap) **stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
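
/* For example, given

       x_2 = PHI <0 (via E), x_7 (other pred)>

   traversing E lets us treat x_2 as the constant 0 while analyzing
   E->dest, which may be enough to resolve the conditional at the end
   of the block.  */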

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
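
/* For example, once temporary copy propagation has replaced y_1 with
   the constant 4 in "x_4 = y_1 + 3", the GIMPLE_BINARY_RHS case folds
   the right-hand side down to 7, which the caller can then record as a
   context sensitive value for x_4.  */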

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  VEC(tree, heap) **stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        continue;

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */
      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            continue;
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }

  return stmt;
}
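
/* The value returned is the last statement walked, normally the
   control statement that ends E->dest; a NULL return tells the caller
   to abandon threading through this block.  */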

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
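
/* For instance, if the block ends in "if (a_3 != 0)" and a_3 is
   temporarily known to be the constant 5, fold_binary reduces the
   comparison to boolean true, so the caller knows statically which
   outgoing edge will be taken.  */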

/* Return TRUE if the statement at the end of e->dest depends on
   the output of any statement in BB.  Otherwise return FALSE.

   This is used when we are threading a backedge and need to ensure
   that temporary equivalences from BB do not affect the condition
   in e->dest.  */

static bool
cond_arg_set_in_bb (edge e, basic_block bb)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  gimple last = last_stmt (e->dest);

  /* E->dest does not have to end with a control transferring
     instruction.  This can occur when we try to extend a jump
     threading opportunity deeper into the CFG.  In that case
     it is safe for this check to return false.  */
  if (!last)
    return false;

  if (gimple_code (last) != GIMPLE_COND
      && gimple_code (last) != GIMPLE_GOTO
      && gimple_code (last) != GIMPLE_SWITCH)
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (TREE_CODE (use) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (use)) == bb)
        return true;
    }

  return false;
}
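
/* Both callers below only use this check when E is a loop backedge
   (EDGE_DFS_BACK), where equivalences recorded for BB could otherwise
   short-circuit the loop condition incorrectly.  */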

/* TAKEN_EDGE represents an edge taken as a result of jump threading.
   See if we can thread around TAKEN_EDGE->dest as well.  If so, return
   the edge out of TAKEN_EDGE->dest that we can statically compute will be
   traversed.

   We are much more restrictive as to the contents of TAKEN_EDGE->dest
   as the path isolation code in tree-ssa-threadupdate.c isn't prepared
   to handle copying intermediate blocks on a threaded path.

   Long term a more consistent and structured approach to path isolation
   would be a huge help.  */
static edge
thread_around_empty_block (edge taken_edge,
                           gimple dummy_cond,
                           bool handle_dominating_asserts,
                           tree (*simplify) (gimple, gimple),
                           bitmap visited)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* This block must have a single predecessor (E->dest).  */
  if (!single_pred_p (bb))
    return NULL;

  /* Before threading, copy DEBUG stmts from the predecessor, so that
     we don't lose the bindings as we redirect the edges.  */
  if (MAY_HAVE_DEBUG_STMTS)
    {
      gsi = gsi_after_labels (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (taken_edge->src);
           !gsi_end_p (si); gsi_prev (&si))
        {
          stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }

  /* This block must have more than one successor.  */
  if (single_succ_p (bb))
    return NULL;

  /* This block can have no PHI nodes.  This is overly conservative.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return NULL;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  if (gsi_end_p (gsi))
    return NULL;

  /* This block can have no statements other than its control altering
     statement.  This is overly conservative.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return NULL;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      edge taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return NULL;
      bitmap_set_bit (visited, taken_edge->dest->index);
      return taken_edge;
    }

  return NULL;
}
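
/* Callers iterate this function until it returns NULL, so a chain of
   such single-statement blocks whose conditions all fold can be
   threaded through as one jump threading opportunity.  */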

/* E1 and E2 are edges into the same basic block.  Return TRUE if the
   PHI arguments associated with those edges are equal or there are no
   PHI arguments, otherwise return FALSE.  */

static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
  gimple_stmt_iterator gsi;
  int indx1 = e1->dest_idx;
  int indx2 = e2->dest_idx;

  for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
                            gimple_phi_arg_def (phi, indx2), 0))
        return false;
    }

  return true;
}
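
/* This check is used below when threading through a joiner block: if
   an edge to the final target already exists, redirecting the threaded
   edge there must not change the values any PHI in that target
   selects.  */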

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    VEC(tree, heap) **stack,
                    tree (*simplify) (gimple, gimple))
{
  gimple stmt;

  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      if (cond_arg_set_in_bb (e, e->dest))
        goto fail;
    }

  stmt_count = 0;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    goto fail;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    goto fail;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);
          bitmap visited;
          edge e2;

          if (dest == e->dest)
            goto fail;

          /* DEST could be null for a computed jump to an absolute
             address.  If DEST is not null, then see if we can thread
             through it as well, this helps capture secondary effects
             of threading without having to re-run DOM or VRP.  */
          if (dest
              && ((e->flags & EDGE_DFS_BACK) == 0
                  || ! cond_arg_set_in_bb (taken_edge, e->dest)))
            {
              /* We don't want to thread back to a block we have already
                 visited.  This may be overly conservative.  */
              visited = BITMAP_ALLOC (NULL);
              bitmap_set_bit (visited, dest->index);
              bitmap_set_bit (visited, e->dest->index);
              do
                {
                  e2 = thread_around_empty_block (taken_edge,
                                                  dummy_cond,
                                                  handle_dominating_asserts,
                                                  simplify,
                                                  visited);
                  if (e2)
                    taken_edge = e2;
                }
              while (e2);
              BITMAP_FREE (visited);
            }

          remove_temporary_equivalences (stack);
          register_jump_thread (e, taken_edge, NULL);
          return;
        }
    }

 /* We were unable to determine what out edge from E->dest is taken.  However,
    we might still be able to thread through successors of E->dest.  This
    often occurs when E->dest is a joiner block which then fans back out
    based on redundant tests.

    If so, we'll copy E->dest and redirect the appropriate predecessor to
    the copy.  Within the copy of E->dest, we'll thread one or more edges
    to points deeper in the CFG.

    This is a stopgap until we have a more structured approach to path
    isolation.  */
  {
    edge e2, e3, taken_edge;
    edge_iterator ei;
    bool found;
    bitmap visited = BITMAP_ALLOC (NULL);

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, taken_edge->dest->index);
        bitmap_set_bit (visited, e->dest->index);

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        found = false;
        e3 = taken_edge;
        do
          {
            if ((e->flags & EDGE_DFS_BACK) == 0
                || ! cond_arg_set_in_bb (e3, e->dest))
              e2 = thread_around_empty_block (e3,
                                              dummy_cond,
                                              handle_dominating_asserts,
                                              simplify,
                                              visited);
            else
              e2 = NULL;

            if (e2)
              {
                e3 = e2;
                found = true;
              }
          }
        while (e2);

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            edge tmp;
            /* If there is already an edge from the block to be duplicated
               (E2->src) to the final target (E3->dest), then make sure that
               the PHI args associated with the edges E2 and E3 are the
               same.  */
            tmp = find_edge (taken_edge->src, e3->dest);
            if (!tmp || phi_args_equal_on_edges (tmp, e3))
              register_jump_thread (e, taken_edge, e3);
          }
      }
    BITMAP_FREE (visited);
  }

 fail:
  remove_temporary_equivalences (stack);
}