/* SSA Jump Threading
   Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "hard-reg-set.h"
#include "dominance.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

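/* For example (hypothetical names), set_ssa_name_value (x_5, y_2) grows the
   value array to at least SSA_NAME_VERSION (x_5) + 1 entries if needed and
   stores y_2 there, so a later SSA_NAME_VALUE (x_5) lookup yields y_2 until
   the entry is overwritten or unwound.  */
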
/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || !gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        return gimple_assign_lhs (use_stmt);
    }
  return op;
}

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

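/* Illustrative sketch of the unwind stack (hypothetical names): after the
   caller pushes a NULL_TREE marker and we record x_1 == 0 (previous value
   NULL_TREE) and then y_2 == 42 (previous value 7), the stack reads, from
   bottom to top:

       NULL_TREE, NULL_TREE, x_1, 7, y_2

   Unwinding pops y_2 and restores its value to 7, pops x_1 and restores
   its value to NULL_TREE, then stops upon popping the marker.  */
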
/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  /* Y may be NULL if we are invalidating entries in the table.  */
  if (y && TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}

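/* For example (hypothetical GIMPLE), if E->dest begins with

       x_3 = PHI <0 (4), y_7 (5)>

   then traversing E from block 4 records the temporary equivalence
   x_3 == 0, which may later allow a condition such as "if (x_3 != 0)"
   at the end of E->dest to be folded.  */
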
/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}

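/* For instance (hypothetical statement), once temporary copy propagation
   has turned "c_4 = a_1 + b_2" into "c_4 = 3 + 4", the GIMPLE_BINARY_RHS
   case above returns the folded constant 7, a gimple minimal invariant
   suitable for recording as a context sensitive equivalence.  */
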
/* A new value has been assigned to LHS.  If necessary, invalidate any
   equivalences that are no longer valid.  */
static void
invalidate_equivalences (tree lhs, vec<tree> *stack)
{
  for (unsigned int i = 1; i < num_ssa_names; i++)
    if (ssa_name (i) && SSA_NAME_VALUE (ssa_name (i)) == lhs)
      record_temporary_equivalence (ssa_name (i), NULL_TREE, stack);

  if (SSA_NAME_VALUE (lhs))
    record_temporary_equivalence (lhs, NULL_TREE, stack);
}

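/* Sketch (hypothetical names): if y_2 and z_3 both have SSA_NAME_VALUE
   equal to x_1 when x_1 receives a new value, the loop above records
   y_2 == NULL_TREE and z_3 == NULL_TREE, and any stale value recorded
   for x_1 itself is cleared too.  All of this unwinds with the stack.  */
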
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  vec<tree> *stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple),
                                                  bool backedge_seen)
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        {
          /* STMT might still have DEFS and we need to invalidate any known
             equivalences for them.

             Consider if STMT is a GIMPLE_ASM with one or more outputs that
             feeds a conditional inside a loop.  We might derive an equivalence
             due to the conditional.  */
          tree op;
          ssa_op_iter iter;

          if (backedge_seen)
            FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
              invalidate_equivalences (op, stack);

          continue;
        }

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            {
              if (backedge_seen)
                {
                  tree lhs = gimple_get_lhs (stmt);
                  invalidate_equivalences (lhs, stack);
                }
              continue;
            }
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.

         If we have traversed a backedge at some point during threading,
         then always enter something here.  Either a real equivalence,
         or a NULL_TREE equivalence which is effectively invalidation of
         prior equivalences.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
      else if (backedge_seen)
        invalidate_equivalences (gimple_get_lhs (stmt), stack);
    }
  return stmt;
}

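/* Illustrative walk (hypothetical GIMPLE).  Threading edge E into a block
   containing

       a_5 = b_1 + 1;
       if (a_5 > 10) ...

   with b_1 known to be 3 on E: the uses of the first statement are
   temporarily replaced to give "a_5 = 3 + 1", which folds to 4, so we
   record a_5 == 4 and can later fold the condition to false.  */
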
/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purpose.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple stmt1 ATTRIBUTE_UNUSED, gimple stmt2 ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op0) == SSA_NAME
                  && SSA_NAME_VALUE (op0))
                op0 = SSA_NAME_VALUE (op0);
              else
                break;
            }
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op1) == SSA_NAME
                  && SSA_NAME_VALUE (op1))
                op1 = SSA_NAME_VALUE (op1);
              else
                break;
            }
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      for (int i = 0; i < 2; i++)
        {
          if (TREE_CODE (cached_lhs) == SSA_NAME
              && SSA_NAME_VALUE (cached_lhs))
            cached_lhs = SSA_NAME_VALUE (cached_lhs);
          else
            break;
        }

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}

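/* Continuing the earlier sketch: for "if (a_5 > 10)" with a recorded
   equivalence a_5 == 4, op0 becomes 4 and fold_binary (GT_EXPR,
   boolean_type_node, 4, 10) yields boolean_false_node, so the taken
   edge is known without executing the conditional at runtime.  */
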
/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        vars->add (var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && vars->add (var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = new hash_set<tree>;
                  for (i = 0; i < alloc_count; i++)
                    vars->add (fewvars[i]);
                  fewvars.release ();
                  vars->add (var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

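/* Rough example (hypothetical blocks): if DEST is reached only through
   BB2 -> BB1 -> DEST and BB1 contains a binding "# DEBUG d => x_1",
   threading may bypass BB1, so the binding is copied to the start of
   DEST unless DEST's leading debug stmts already re-bind D.  */
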
/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
                            gimple dummy_cond,
                            bool handle_dominating_asserts,
                            tree (*simplify) (gimple, gimple),
                            bitmap visited,
                            vec<jump_thread_edge *> *path,
                            bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if (!bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
              if (*backedge_seen_p)
                simplify = dummy_simplify;
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path,
                                                 backedge_seen_p);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can contain is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
        simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
                                  dummy_cond,
                                  handle_dominating_asserts,
                                  simplify,
                                  visited,
                                  path,
                                  backedge_seen_p);
      return true;
    }

  return false;
}

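/* Example shape this handles (hypothetical CFG): a forwarding chain

       B1 (cond known via E) -> B2 (empty) -> B3 (empty) -> B4

   B2 and B3 contain no PHIs and no statements other than, possibly, a
   control statement whose condition we can still evaluate, so they are
   added to PATH as EDGE_NO_COPY_SRC_BLOCK entries without duplication.  */
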
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   A positive return value indicates success.  A zero return value is
   failure, but the block can still be duplicated as a joiner in a jump
   thread path; a negative value indicates the block should not be
   duplicated and thus is not suitable for a joiner in a jump threading
   path.  */

static int
thread_through_normal_block (edge e,
                             gimple dummy_cond,
                             bool handle_dominating_asserts,
                             vec<tree> *stack,
                             tree (*simplify) (gimple, gimple),
                             vec<jump_thread_edge *> *path,
                             bitmap visited,
                             bool *backedge_seen_p)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify,
                                                        *backedge_seen_p);

  /* If we didn't look at all the statements, the most likely reason is
     there were too many and thus duplicating this block is not profitable.

     Also note if we do not look at all the statements, then we may not
     have invalidated equivalences that are no longer valid if we threaded
     around a loop.  Thus we must signal to our caller that this block
     is not suitable for use as a joiner in a threading path.  */
  if (!stmt)
    return -1;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL
              || dest == e->dest
              || bitmap_bit_p (visited, dest->index))
            return 0;

          /* Only push the EDGE_START_JUMP_THREAD marker if this is
             the first edge on the path.  */
          if (path->length () == 0)
            {
              jump_thread_edge *x
                = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
              path->safe_push (x);
              *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
            }

          jump_thread_edge *x
            = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);
          *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
          if (*backedge_seen_p)
            simplify = dummy_simplify;

          /* See if we can thread through DEST as well, this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.

             We don't want to thread back to a block we have already
             visited.  This may be overly conservative.  */
          bitmap_set_bit (visited, dest->index);
          bitmap_set_bit (visited, e->dest->index);
          thread_around_empty_blocks (taken_edge,
                                      dummy_cond,
                                      handle_dominating_asserts,
                                      simplify,
                                      visited,
                                      path,
                                      backedge_seen_p);
          return 1;
        }
    }
  return 0;
}

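/* Putting it together (hypothetical CFG): if E->dest ends in
   "if (x_3 != 0)" and the PHI equivalences recorded for E pin x_3 to a
   constant, the taken edge is known statically; the path then grows with
   an EDGE_COPY_SRC_BLOCK entry for the taken edge and we continue through
   any empty blocks beyond it.  */
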
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    vec<tree> *stack,
                    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  int threaded = thread_through_normal_block (e, dummy_cond,
                                              handle_dominating_asserts,
                                              stack, simplify, path,
                                              visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
                                           e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
         thus there should be no edges on the thread path and no need to walk
         through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A negative status indicates the target block was deemed too big to
         duplicate.  Just quit now rather than trying to use the block as
         a joiner in a jump threading path.

         This prevents unnecessary code growth, but more importantly if we
         do not look at all the statements in the block, then we may have
         missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
        {
          BITMAP_FREE (visited);
          remove_temporary_equivalences (stack);
          return;
        }
    }

 /* We were unable to determine what out edge from E->dest is taken.  However,
    we might still be able to thread through successors of E->dest.  This
    often occurs when E->dest is a joiner block which then fans back out
    based on redundant tests.

    If so, we'll copy E->dest and redirect the appropriate predecessor to
    the copy.  Within the copy of E->dest, we'll thread one or more edges
    to points deeper in the CFG.

    This is a stopgap until we have a more structured approach to path
    isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
        {
          remove_temporary_equivalences (stack);
          BITMAP_FREE (visited);
          return;
        }

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Push a fresh marker so we can unwind the equivalences created
           for each of E->dest's successors.  */
        stack->safe_push (NULL_TREE);

        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, e->src->index);
        bitmap_set_bit (visited, e->dest->index);
        bitmap_set_bit (visited, taken_edge->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);
        backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
        backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
        if (backedge_seen)
          simplify = dummy_simplify;
        found = thread_around_empty_blocks (taken_edge,
                                            dummy_cond,
                                            handle_dominating_asserts,
                                            simplify,
                                            visited,
                                            path,
                                            &backedge_seen);

        if (backedge_seen)
          simplify = dummy_simplify;

        if (!found)
          found = thread_through_normal_block (path->last ()->e, dummy_cond,
                                               handle_dominating_asserts,
                                               stack, simplify, path, visited,
                                               &backedge_seen) > 0;

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          delete_jump_thread_path (path);

        /* And unwind the equivalence table.  */
        remove_temporary_equivalences (stack);
      }
    BITMAP_FREE (visited);
  }

  remove_temporary_equivalences (stack);
}