/* SSA Jump Threading
   Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "dumpfile.h"
#include "pointer-set.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;
/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}
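/* Illustrative sketch (names and constant made up, not from the
   sources): after

     set_ssa_name_value (x_5, build_int_cst (integer_type_node, 7));

   SSA_NAME_VALUE (x_5) yields the constant 7 until the entry is
   overwritten or ssa_name_values is released.  */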
/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}
/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}
/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
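/* For example (illustrative): a block with two predecessors that
   ends in

     if (x_1 != 0) goto <then>; else goto <else>;

   is potentially threadable, while a block with a single successor
   is merely a forwarding block and is instead handled by
   thread_around_empty_blocks below.  */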
/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
	  && gimple_assign_single_p (use_stmt)
	  && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	  && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	  && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	{
	  return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}
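/* Illustrative GIMPLE (hand-written, not from a testcase): VRP may
   insert

     x_2 = ASSERT_EXPR <x_1, x_1 > 5>

   after a test of x_1.  If that statement dominates BB, replacing a
   use of x_1 with x_2 lets later simplification exploit the range
   information attached to x_2.  */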
/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
	 pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
	break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
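/* Sketch of the unwind stack (illustrative): entries are pushed in
   (previous value, name) pairs, with NULL_TREE markers separating
   the groups created for each edge:

     bottom ... NULL_TREE, prev_a, a_1, prev_b, b_2 ... top

   Unwinding pops b_2 and restores prev_b, then pops a_1 and restores
   prev_a, and stops when it pops the NULL_TREE marker.  */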
/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  /* Y may be NULL if we are invalidating entries in the table.  */
  if (y && TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.

   If SRC_MAP/DST_MAP exist, then mark the source and destination SSA_NAMEs
   of any equivalences recorded.  We use this to make invalidation after
   traversing back edges less painful.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack,
					 bool backedge_seen,
					 bitmap src_map, bitmap dst_map)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we can not thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      record_temporary_equivalence (dst, src, stack);

      /* If we have crossed a backedge, then start recording equivalences
	 we might need to invalidate.  */
      if (backedge_seen && TREE_CODE (src) == SSA_NAME)
	{
	  bitmap_set_bit (src_map, SSA_NAME_VERSION (src));
	  bitmap_set_bit (dst_map, SSA_NAME_VERSION (dst));
	}
    }
  return true;
}
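/* For example (illustrative): when traversing the edge from block 2
   into a block containing

     x_3 = PHI <5(2), x_2(3)>

   we record the context sensitive equivalence x_3 == 5, which may
   let us fold a later test of x_3 within this block.  */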
/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree op0 = gimple_assign_rhs1 (stmt);
	return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree op0 = gimple_assign_rhs1 (stmt);
	tree op1 = gimple_assign_rhs2 (stmt);
	return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree op0 = gimple_assign_rhs1 (stmt);
	tree op1 = gimple_assign_rhs2 (stmt);
	tree op2 = gimple_assign_rhs3 (stmt);

	/* Sadly, we have to handle conditional assignments specially
	   here, because fold expects all the operands of an expression
	   to be folded before the expression itself is folded, but we
	   can't just substitute the folded condition here.  */
	if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	  op0 = fold (op0);

	return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
/* A new value has been assigned to LHS.  If necessary, invalidate any
   equivalences that are no longer valid.  */
static void
invalidate_equivalences (tree lhs, vec<tree> *stack,
			 bitmap src_map, bitmap dst_map)
{
  /* SRC_MAP contains the source SSA_NAMEs for equivalences created by PHI
     nodes.  If an entry in SRC_MAP changes, there's some destination that
     has been recorded as equivalent to the source and that equivalency
     needs to be eliminated.  */
  if (bitmap_bit_p (src_map, SSA_NAME_VERSION (lhs)))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* We know that the LHS of STMT was used as the RHS in an equivalency
	 created by a PHI.  All the LHSs of such PHIs were recorded into
	 DST_MAP.  So we can iterate over them to see if any have the LHS
	 of STMT as an equivalence, and if so, remove the equivalence as
	 it is no longer valid.  */
      EXECUTE_IF_SET_IN_BITMAP (dst_map, 0, i, bi)
	{
	  if (SSA_NAME_VALUE (ssa_name (i)) == lhs)
	    record_temporary_equivalence (ssa_name (i), NULL_TREE, stack);
	}
    }
}
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
						  vec<tree> *stack,
						  tree (*simplify) (gimple,
								    gimple),
						  bool backedge_seen,
						  bitmap src_map,
						  bitmap dst_map)
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 can not thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */
      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    {
	      if (backedge_seen)
		{
		  tree lhs = gimple_get_lhs (stmt);
		  record_temporary_equivalence (lhs, NULL_TREE, stack);
		  invalidate_equivalences (lhs, stack, src_map, dst_map);
		}
	      continue;
	    }
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the loop.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
	       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     We're going to temporarily copy propagate the operands
	     and see if that allows us to simplify this statement.  */
	  tree *copy;
	  ssa_op_iter iter;
	  use_operand_p use_p;
	  unsigned int num, i = 0;

	  num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
	  copy = XCNEWVEC (tree, num);

	  /* Make a copy of the uses & vuses into USES_COPY, then cprop into
	     the operands.  */
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    {
	      tree tmp = NULL;
	      tree use = USE_FROM_PTR (use_p);

	      copy[i++] = use;
	      if (TREE_CODE (use) == SSA_NAME)
		tmp = SSA_NAME_VALUE (use);
	      if (tmp)
		SET_USE (use_p, tmp);
	    }

	  /* Try to fold/lookup the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  if (is_gimple_call (stmt))
	    cached_lhs = fold_call_stmt (stmt, false);
	  else
	    cached_lhs = fold_assignment_stmt (stmt);

	  if (!cached_lhs
	      || (TREE_CODE (cached_lhs) != SSA_NAME
		  && !is_gimple_min_invariant (cached_lhs)))
	    cached_lhs = (*simplify) (stmt, stmt);

	  /* Restore the statement's original uses/defs.  */
	  i = 0;
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    SET_USE (use_p, copy[i++]);

	  free (copy);
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.

	 If we have traversed a backedge at some point during threading,
	 then always enter something here.  Either a real equivalence,
	 or a NULL_TREE equivalence which is effectively invalidation of
	 prior equivalences.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
      else if (backedge_seen)
	record_temporary_equivalence (gimple_get_lhs (stmt), NULL_TREE, stack);

      if (backedge_seen)
	invalidate_equivalences (gimple_get_lhs (stmt), stack,
				 src_map, dst_map);
    }
  return stmt;
}
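/* Worked example (illustrative): with the equivalence a_1 == 7
   already recorded, the statement

     b_2 = a_1 + 3;

   is temporarily rewritten to b_2 = 7 + 3 by the copy propagation
   above, folds to 10, and the context sensitive equivalence
   b_2 == 10 is recorded for use by later statements and by the
   block-ending condition.  */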
/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple stmt1 ATTRIBUTE_UNUSED, gimple stmt2 ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple stmt,
				 gimple dummy_cond,
				 tree (*simplify) (gimple, gimple),
				 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  tree tmp = SSA_NAME_VALUE (op0);
	  if (tmp)
	    op0 = tmp;
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  tree tmp = SSA_NAME_VALUE (op1);
	  if (tmp)
	    op1 = tmp;
	}

      if (handle_dominating_asserts)
	{
	  /* Now see if the operand was consumed by an ASSERT_EXPR
	     which dominates E->src.  If so, we want to replace the
	     operand with the LHS of the ASSERT_EXPR.  */
	  if (TREE_CODE (op0) == SSA_NAME)
	    op0 = lhs_of_dominating_assert (op0, e->src, stmt);

	  if (TREE_CODE (op1) == SSA_NAME)
	    op1 = lhs_of_dominating_assert (op1, e->src, stmt);
	}

      /* We may need to canonicalize the comparison.  For
	 example, op0 might be a constant while op1 is an
	 SSA_NAME.  Failure to canonicalize will cause us to
	 miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
	{
	  tree tmp;
	  cond_code = swap_tree_comparison (cond_code);
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      /* Stuff the operator and operands into our dummy conditional
	 expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions; we only
	 care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
	while (CONVERT_EXPR_P (cached_lhs))
	  cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
				       && is_gimple_min_invariant (cached_lhs)),
				      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
	 then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
	  || !is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
	  && TREE_CODE (cached_lhs) == SSA_NAME
	  && SSA_NAME_VALUE (cached_lhs))
	cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
	 update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
	cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
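/* For example (illustrative): if x_1 == 5 was recorded while walking
   edge E, the block-ending condition

     if (x_1 > 4)

   becomes 5 > 4, which fold_binary reduces to an invariant true, so
   the taken outgoing edge is known and the jump can be threaded.  */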
/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
static void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  pointer_set_t *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     pointer_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = pointer_set_create ();

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else
	gcc_unreachable ();

      if (vars)
	pointer_set_insert (vars, var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  ??? Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts, which
	     might be a problem once we introduce stmt frontier notes
	     or somesuch.  Adding `&& bb == src' to the condition
	     below will preserve all potentially relevant debug
	     notes.  */
	  if (vars && pointer_set_insert (vars, var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;

	      if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = pointer_set_create ();
		  for (i = 0; i < alloc_count; i++)
		    pointer_set_insert (vars, fewvars[i]);
		  fewvars.release ();
		  pointer_set_insert (vars, var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    pointer_set_destroy (vars);
  else if (fewvars.exists ())
    fewvars.release ();
}
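/* Illustrative example: a bypassed block may contain a bind such as

     # DEBUG x => x_3

   Copying that bind into DEST ahead of its existing statements keeps
   the debugger's view of `x' correct once DEST gains new predecessors
   and the bypassed bind would otherwise be lost.  */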
/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */

static bool
thread_around_empty_blocks (edge taken_edge,
			    gimple dummy_cond,
			    bool handle_dominating_asserts,
			    tree (*simplify) (gimple, gimple),
			    bitmap visited,
			    vec<jump_thread_edge *> *path,
			    bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);
	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	      if (*backedge_seen_p)
		simplify = dummy_simplify;
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 handle_dominating_asserts,
						 simplify,
						 visited,
						 path,
						 backedge_seen_p);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
					  simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
	simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  handle_dominating_asserts,
				  simplify,
				  visited,
				  path,
				  backedge_seen_p);
      return true;
    }

  return false;
}
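/* For example (illustrative): once the condition ending B1 is known
   to take edge E, a chain

     E -> B2 (empty) -> B3 (empty) -> B4

   extends the jump thread path through B2 and B3 with
   EDGE_NO_COPY_SRC_BLOCK entries, since neither block needs to be
   duplicated when the thread is realized.  */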
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.  */

static bool
thread_through_normal_block (edge e,
			     gimple dummy_cond,
			     bool handle_dominating_asserts,
			     vec<tree> *stack,
			     tree (*simplify) (gimple, gimple),
			     vec<jump_thread_edge *> *path,
			     bitmap visited,
			     bool *backedge_seen_p,
			     bitmap src_map,
			     bitmap dst_map)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack, *backedge_seen_p,
						src_map, dst_map))
    return false;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify,
							*backedge_seen_p,
							src_map, dst_map);
  if (!stmt)
    return false;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
					      handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
	{
	  edge taken_edge = find_taken_edge (e->dest, cond);
	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || bitmap_bit_p (visited, dest->index))
	    return false;

	  /* Only push the EDGE_START_JUMP_THREAD marker if this is the
	     first edge on the path.  */
	  if (path->length () == 0)
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	      path->safe_push (x);
	      *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
	    }

	  jump_thread_edge *x
	    = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);
	  *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	  if (*backedge_seen_p)
	    simplify = dummy_simplify;

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (taken_edge,
				      dummy_cond,
				      handle_dominating_asserts,
				      simplify,
				      visited,
				      path,
				      backedge_seen_p);
	  return true;
	}
    }
  return false;
}
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
		    edge e,
		    bool handle_dominating_asserts,
		    vec<tree> *stack,
		    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bitmap src_map = BITMAP_ALLOC (NULL);
  bitmap dst_map = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  if (thread_through_normal_block (e, dummy_cond, handle_dominating_asserts,
				   stack, simplify, path, visited,
				   &backedge_seen, src_map, dst_map))
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      BITMAP_FREE (src_map);
      BITMAP_FREE (dst_map);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* There should be no edges on the path, so no need to walk through
	 the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
    }
  /* We were unable to determine what out edge from E->dest is taken.  However,
     we might still be able to thread through successors of E->dest.  This
     often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
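  /* Joiner example (illustrative): E->src tests a_1 > 5 and E->dest is
     a join block ending in an unrelated test of b_2.  Neither arm of
     b_2's test is known via E alone, but a successor of E->dest may
     re-test a_1 > 5, which is known along E; copying E->dest (the
     EDGE_COPY_SRC_JOINER_BLOCK entry below) lets us thread that deeper
     edge.  */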
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
	{
	  remove_temporary_equivalences (stack);
	  BITMAP_FREE (visited);
	  BITMAP_FREE (src_map);
	  BITMAP_FREE (dst_map);
	  return;
	}

    /* We need to restore the state of the maps to this point each loop
       iteration.  */
    bitmap src_map_copy = BITMAP_ALLOC (NULL);
    bitmap dst_map_copy = BITMAP_ALLOC (NULL);
    bitmap_copy (src_map_copy, src_map);
    bitmap_copy (dst_map_copy, dst_map);

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	/* Push a fresh marker so we can unwind the equivalences created
	   for each of E->dest's successors.  */
	stack->safe_push (NULL_TREE);
	bitmap_copy (src_map, src_map_copy);
	bitmap_copy (dst_map, dst_map_copy);

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);
	vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
	jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

	x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);
	found = false;
	backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
	backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
	if (backedge_seen)
	  simplify = dummy_simplify;
	found = thread_around_empty_blocks (taken_edge,
					    dummy_cond,
					    handle_dominating_asserts,
					    simplify,
					    visited,
					    path,
					    &backedge_seen);

	if (backedge_seen)
	  simplify = dummy_simplify;

	if (!found)
	  found = thread_through_normal_block (path->last ()->e, dummy_cond,
					       handle_dominating_asserts,
					       stack, simplify, path, visited,
					       &backedge_seen,
					       src_map, dst_map);

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found)
	  {
	    propagate_threaded_block_debug_into (path->last ()->e->dest,
						 taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  delete_jump_thread_path (path);

	/* And unwind the equivalence table.  */
	remove_temporary_equivalences (stack);
      }

    BITMAP_FREE (visited);
    BITMAP_FREE (src_map);
    BITMAP_FREE (dst_map);
    BITMAP_FREE (src_map_copy);
    BITMAP_FREE (dst_map_copy);
  }

  remove_temporary_equivalences (stack);
}