/* SSA Jump Threading
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "output.h"
#include "function.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"
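
/* Overview example (illustrative only, not part of the original
   sources): jump threading looks for an edge E into a block whose
   control statement is statically known when the block is entered
   via E.  Given

       bb1:  if (a_1 > 0) goto bb2;  else goto bb3;
       bb2:  x_4 = 1;
             goto bb4;
       bb3:  x_5 = 0;
             goto bb4;
       bb4:  x_6 = PHI <x_4 (bb2), x_5 (bb3)>
             if (x_6 != 0) goto bb5;  else goto bb6;

   entering bb4 from bb2 makes the final conditional always true, so
   the edge bb2->bb4 can be threaded directly to bb5 (duplicating bb4
   if it has other predecessors).  The routines below detect such
   opportunities; the actual CFG surgery lives in
   tree-ssa-threadupdate.c.  */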

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
VEC(tree,heap) *ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= VEC_length (tree, ssa_name_values))
    VEC_safe_grow_cleared (tree, heap, ssa_name_values,
                           SSA_NAME_VERSION (name) + 1);
  VEC_replace (tree, ssa_name_values, SSA_NAME_VERSION (name), value);
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (ssa_name_values == NULL);
  ssa_name_values = VEC_alloc (tree, heap, num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  VEC_free (tree, heap, ssa_name_values);
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (VEC(tree, heap) **stack)
{
  while (VEC_length (tree, *stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, *stack);

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = VEC_pop (tree, *stack);
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, VEC(tree, heap) **stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  VEC_reserve (tree, heap, *stack, 2);
  VEC_quick_push (tree, *stack, prev_x);
  VEC_quick_push (tree, *stack, x);
}
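
/* Illustration (hypothetical SSA names, not from the original
   sources): recording x_1 == 5 and then y_2 == z_3 leaves the unwind
   stack as

       ... | prev-value-of-x_1 | x_1 | prev-value-of-y_2 | y_2   <- top

   remove_temporary_equivalences above pops <name, previous value>
   pairs and restores each name, stopping at a NULL marker the caller
   is expected to have pushed before the walk began.  */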

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, VEC(tree, heap) **stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we cannot thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (is_gimple_reg (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
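
/* Example (illustrative): when traversing edge E1 into a block
   containing

       x_5 = PHI <7 (E1), y_4 (E2)>

   we temporarily record x_5 == 7, which may allow a later test such
   as "if (x_5 < 10)" in the block to be statically computed for paths
   through E1.  */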

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      {
        tree rhs = gimple_assign_rhs1 (stmt);

        if (TREE_CODE (rhs) == COND_EXPR)
          {
            /* Sadly, we have to handle conditional assignments specially
               here, because fold expects all the operands of an expression
               to be folded before the expression itself is folded, but we
               can't just substitute the folded condition here.  */
            tree cond = fold (COND_EXPR_COND (rhs));
            if (cond == boolean_true_node)
              rhs = COND_EXPR_THEN (rhs);
            else if (cond == boolean_false_node)
              rhs = COND_EXPR_ELSE (rhs);
          }

        return fold (rhs);
      }

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);
        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
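
/* For instance (hypothetical statement): once the caller has
   temporarily copy-propagated a_1 == 3 and b_2 == 4 into
   "c_3 = a_1 + b_2", the statement presents as a GIMPLE_BINARY_RHS
   and fold_binary reduces PLUS_EXPR <3, 4> to the invariant 7.  */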

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  VEC(tree, heap) **stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         cannot thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        continue;

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            continue;
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the block.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}
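
/* Net effect, sketched on a hypothetical E->dest (not original text):

       a_3 = b_1 + 1;   // with b_1 == 9 recorded from a PHI on E
       c_4 = a_3;       // trivial copy

   the walk records the context sensitive equivalences a_3 == 10 and
   then c_4 == 10 onto STACK; both are unwound by the caller once E
   has been fully processed.  */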

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
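
/* Canonicalization example (illustrative): if substitution turns
   "if (x_2 < y_3)" into "if (7 < y_3)", tree_swap_operands_p fires
   and the comparison is rewritten as "if (y_3 > 7)" so the constant
   becomes the second operand, the form fold_binary and the pass
   specific simplifiers expect.  */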

/* TAKEN_EDGE represents an edge taken as a result of jump threading.
   See if we can thread around TAKEN_EDGE->dest as well.  If so, return
   the edge out of TAKEN_EDGE->dest that we can statically compute will be
   traversed.

   We are much more restrictive as to the contents of TAKEN_EDGE->dest
   as the path isolation code in tree-ssa-threadupdate.c isn't prepared
   to handle copying intermediate blocks on a threaded path.

   Long term a more consistent and structured approach to path isolation
   would be a huge help.  */
static edge
thread_around_empty_block (edge taken_edge,
                           gimple dummy_cond,
                           bool handle_dominating_asserts,
                           tree (*simplify) (gimple, gimple),
                           bitmap visited)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* This block must have a single predecessor (E->dest).  */
  if (!single_pred_p (bb))
    return NULL;

  /* This block must have more than one successor.  */
  if (single_succ_p (bb))
    return NULL;

  /* This block can have no PHI nodes.  This is overly conservative.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return NULL;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  if (gsi_end_p (gsi))
    return NULL;

  /* This block can have no statements other than its control altering
     statement.  This is overly conservative.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return NULL;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      edge taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return NULL;
      bitmap_set_bit (visited, taken_edge->dest->index);
      return taken_edge;
    }

  return NULL;
}
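
/* Example (illustrative): suppose a thread reaching TAKEN_EDGE
   establishes x_2 == 10 and TAKEN_EDGE->dest is simply

       bb5:  if (x_2 > 4) goto bb7;  else goto bb8;

   with no PHIs or other statements.  The routine above statically
   computes the condition as true and extends the thread path through
   bb5 to bb7, capturing a secondary opportunity without re-running
   DOM or VRP.  */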

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    VEC(tree, heap) **stack,
                    tree (*simplify) (gimple, gimple))
{
  gimple stmt;

  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      gimple last = gsi_stmt (gsi_last_bb (e->dest));

      FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
        {
          tree use = USE_FROM_PTR (use_p);

          if (TREE_CODE (use) == SSA_NAME
              && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
              && gimple_bb (SSA_NAME_DEF_STMT (use)) == e->dest)
            goto fail;
        }
    }

  stmt_count = 0;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    goto fail;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    goto fail;

  /* If we stopped at a GIMPLE_COND, GIMPLE_SWITCH or GIMPLE_GOTO, see
     if we know which arm will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);
          bitmap visited;
          edge e2;

          if (dest == e->dest)
            goto fail;

          /* DEST could be null for a computed jump to an absolute
             address.  If DEST is not null, then see if we can thread
             through it as well, this helps capture secondary effects
             of threading without having to re-run DOM or VRP.  */
          if (dest)
            {
              /* We don't want to thread back to a block we have already
                 visited.  This may be overly conservative.  */
              visited = BITMAP_ALLOC (NULL);
              bitmap_set_bit (visited, dest->index);
              bitmap_set_bit (visited, e->dest->index);
              do
                {
                  e2 = thread_around_empty_block (taken_edge,
                                                  dummy_cond,
                                                  handle_dominating_asserts,
                                                  simplify,
                                                  visited);
                  if (e2)
                    taken_edge = e2;
                }
              while (e2);
              BITMAP_FREE (visited);
            }

          remove_temporary_equivalences (stack);
          register_jump_thread (e, taken_edge);
          return;
        }
    }

 fail:
  remove_temporary_equivalences (stack);
}