/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "basic-block.h"
#include "function.h"
#include "tree-flow.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "hash-table.h"

/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
	the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
	except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
	argument associated with edge B->C.  Associate the new PHI arguments
	with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
	PHI_RESULT.  Add an argument to the PHI in B' which has the same
	value as the PHI in B associated with the edge A->B.  Associate
	the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

	5a. This automatically deletes any PHI arguments associated with the
	    edge A->B in B.

	5b. This automatically associates each new argument added in step 4
	    with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   Block duplication can be further minimized by using B instead of
   creating B' for one destination if all edges into B are going to be
   threaded to a successor of B.  We had code to do this at one time, but
   I'm not convinced it is correct with the changes to avoid mucking up
   the loop structure (which may cancel threading requests, thus a block
   which we thought was going to become unreachable may still be reachable).
   This code was also going to get ugly with the introduction of the ability
   for a single jump thread request to bypass multiple blocks.

   We further reduce the number of edges and statements we create by
   not copying all the outgoing edges and the control statement in
   step #1.  We instead create a template block without the outgoing
   edges and duplicate the template.  */

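/* As an illustration of the transformation above (a sketch, not code
   from this file): given the CFG

       A --> B --> C
             |
             +---> D

   where traversing A->B is known to always continue to C, threading
   produces

       A --> B' --> C
       X --> B --> C or D

   with B' a copy of B whose control statement has been removed and whose
   single successor is C.  X stands for any other predecessor of B, which
   still reaches the original B.  */
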
87 /* Steps #5 and #6 of the above algorithm are best implemented by walking
88 all the incoming edges which thread to the same destination edge at
89 the same time. That avoids lots of table lookups to get information
90 for the destination edge.
92 To realize that implementation we create a list of incoming edges
93 which thread to the same outgoing edge. Thus to implement steps
94 #5 and #6 we traverse our hash table of outgoing edge information.
95 For each entry we walk the list of incoming edges which thread to
96 the current outgoing edge. */
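/* A linked list of incoming edges which thread to the same outgoing
   edge, used as described above.  */
struct el
{
  edge e;
  struct el *next;
};
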
/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data : typed_free_remove<redirection_data>
{
  /* A duplicate of B with the trailing control statement removed and which
     targets a single successor of B.  */
  basic_block dup_block;

  /* An outgoing edge from B.  DUP_BLOCK will have OUTGOING_EDGE->dest as
     its single successor.  */
  edge outgoing_edge;

  /* If the thread request passes through a joiner block, the edge from B
     into the joiner block; NULL otherwise.  */
  edge intermediate_edge;

  /* A list of incoming edges which we want to thread to
     OUTGOING_EDGE->dest.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  typedef redirection_data value_type;
  typedef redirection_data compare_type;
  static inline hashval_t hash (const value_type *);
  static inline int equal (const value_type *, const compare_type *);
};

inline hashval_t
redirection_data::hash (const value_type *p)
{
  edge e = p->outgoing_edge;
  return e->dest->index;
}

inline int
redirection_data::equal (const value_type *p1, const compare_type *p2)
{
  edge e1 = p1->outgoing_edge;
  edge e2 = p2->outgoing_edge;
  edge e3 = p1->intermediate_edge;
  edge e4 = p2->intermediate_edge;
  return e1 == e2 && e3 == e4;
}

/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* A template copy of BB with no outgoing edges or control statement that
     we use for creating copies.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;
};

/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge triples
   (incoming_edge, target_edge, second_target_edge), where the third
   element is NULL when the request does not pass through a joiner
   block; see register_jump_thread and mark_threaded_blocks below.  */
static vec<edge> threaded_edges;

/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_TARGET(E) ((edge *)(E)->aux)[0]
#define THREAD_TARGET2(E) ((edge *)(E)->aux)[1]

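/* For example, mark_threaded_blocks below attaches a thread request to
   an incoming edge E roughly as follows (FIRST_TARGET and SECOND_TARGET
   are placeholder names):

     edge *x = XNEWVEC (edge, 2);
     e->aux = x;
     THREAD_TARGET (e) = first_target;
     THREAD_TARGET2 (e) = second_target;

   THREAD_TARGET2 is non-NULL only when the request threads through a
   joiner block.  */
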
/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;

/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
	remove_edge (e);
      else
	ei_next (&ei);
    }
}

/* Create a duplicate of BB.  Record the duplicate block in RD.  */

static void
create_block_for_threading (basic_block bb, struct redirection_data *rd)
{
  edge e;
  edge_iterator ei;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_block = duplicate_block (bb, NULL, NULL);

  /* The duplicate's outgoing edges carry no thread requests.  */
  FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
    e->aux = NULL;

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_block->frequency = 0;
  rd->dup_block->count = 0;
}

/* Main data structure to hold information for duplicates of BB.  */

static hash_table <redirection_data> redirection_data;

/* Given an incoming edge E, look up and return the entry for its target
   outgoing edge in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  E is added to the list of incoming
   edges associated with the entry.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->intermediate_edge = THREAD_TARGET2 (e) ? THREAD_TARGET (e) : NULL;
  elt->outgoing_edge = THREAD_TARGET2 (e) ? THREAD_TARGET2 (e)
					  : THREAD_TARGET (e);
  elt->dup_block = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data.find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
	 relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add INCOMING_EDGE
	 to the list of incoming edges associated with E.  */
      if (insert)
	{
	  struct el *el = XNEW (struct el);
	  el->next = elt->incoming_edges;
	  el->e = e;
	  elt->incoming_edges = el;
	}

      return elt;
    }
}

/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
{
  gimple_stmt_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      source_location locus = gimple_phi_arg_location (phi, src_indx);
      add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
    }
}

/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with the edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2);
    }
}

/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
					 basic_block bb)
{
  edge e = make_edge (bb, rd->outgoing_edge->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);
  e->probability = REG_BR_PROB_BASE;
  e->count = bb->count;

  /* If the outgoing edge carried a thread request, copy that request
     to the new edge.  */
  if (rd->outgoing_edge->aux)
    {
      e->aux = XNEWVEC (edge, 2);
      THREAD_TARGET (e) = THREAD_TARGET (rd->outgoing_edge);
      THREAD_TARGET2 (e) = THREAD_TARGET2 (rd->outgoing_edge);
    }
  else
    e->aux = NULL;

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->outgoing_edge, e);
}

/* Wire up the outgoing edges from the duplicate block and
   update any PHIs as needed.  */
static void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
			       ssa_local_info_t *local_info)
{
  /* If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (THREAD_TARGET2 (rd->incoming_edges->e))
    {
      edge victim, e2;
      edge e = rd->incoming_edges->e;

      /* This updates the PHIs at the destination of the duplicate
	 block.  */
      update_destination_phis (local_info->bb, rd->dup_block);

      /* Find the edge from the duplicate block to the block we're
	 threading through.  That's the edge we want to redirect.  */
      victim = find_edge (rd->dup_block, THREAD_TARGET (e)->dest);
      e2 = redirect_edge_and_branch (victim, THREAD_TARGET2 (e)->dest);

      /* If we redirected the edge, then we need to copy PHI arguments
	 at the target.  If the edge already existed (e2 != victim case),
	 then the PHIs in the target already have the correct arguments.  */
      if (e2 == victim)
	copy_phi_args (e2->dest, THREAD_TARGET2 (e), e2);
    }
  else
    {
      remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
      create_edge_and_update_destination_phis (rd, rd->dup_block);
    }
}

/* Hash table traversal callback routine to create duplicate blocks.  */

int
ssa_create_duplicates (struct redirection_data **slot,
		       ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading (local_info->bb, rd);
      local_info->template_block = rd->dup_block;

      /* We do not create any outgoing edges for the template.  We will
	 take care of that in a later traversal.  That way we do not
	 create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
	 block.  */
      ssa_fix_duplicate_block_edges (rd, local_info);
    }

  /* Keep walking the hash table.  */
  return 1;
}

/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

int
ssa_fixup_template_block (struct redirection_data **slot,
			  ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* If this is the template block, halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (rd->dup_block && rd->dup_block == local_info->template_block)
    {
      ssa_fix_duplicate_block_edges (rd, local_info);
      return 0;
    }

  return 1;
}

/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

int
ssa_redirect_edges (struct redirection_data **slot,
		    ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this hash table
     entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;

      /* Go ahead and free this element from the list.  Doing this now
	 avoids the need for another list walk when we destroy the hash
	 table.  */
      next = el->next;
      free (el);

      thread_stats.num_threaded_edges++;

      /* If we are threading through a joiner block, then we have to
	 find the edge we want to redirect and update some PHI nodes.  */
      if (THREAD_TARGET2 (e))
	{
	  edge e2;

	  /* We want to redirect the incoming edge to the joiner block (E)
	     to instead reach the duplicate of the joiner block.  */
	  e2 = redirect_edge_and_branch (e, rd->dup_block);
	  flush_pending_stmts (e2);
	}
      else if (rd->dup_block)
	{
	  edge e2;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
		     e->src->index, e->dest->index, rd->dup_block->index);

	  rd->dup_block->count += e->count;

	  /* Excessive jump threading may make frequencies large enough so
	     the computation overflows.  */
	  if (rd->dup_block->frequency < BB_FREQ_MAX * 2)
	    rd->dup_block->frequency += EDGE_FREQUENCY (e);
	  EDGE_SUCC (rd->dup_block, 0)->count += e->count;

	  /* Redirect the incoming edge to the appropriate duplicate
	     block.  */
	  e2 = redirect_edge_and_branch (e, rd->dup_block);
	  gcc_assert (e == e2);
	  flush_pending_stmts (e2);
	}

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
	 to clear it will cause all kinds of unpleasant problems later.  */
      free (e->aux);
      e->aux = NULL;
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}

/* Return true if this block has no executable statements other than
   a simple ctrl flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Advance to the first executable statement.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     || is_gimple_debug (gsi_stmt (gsi))
	     || gimple_nop_p (gsi_stmt (gsi))))
    gsi_next (&gsi);

  /* Check if this is an empty block.  */
  if (gsi_end_p (gsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return gsi_stmt (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}

/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update the dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   If NOLOOP_ONLY is true, we only perform the threading as long as it
   does not affect the structure of the loops in a nontrivial way.  */

static bool
thread_block (basic_block bb, bool noloop_only)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e, e2;
  edge_iterator ei;
  ssa_local_info_t local_info;
  struct loop *loop = bb->loop_father;

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data.create (EDGE_COUNT (bb->succs));

  /* If we thread the latch of the loop to its exit, the loop ceases to
     exist.  Make sure we do not restrict ourselves in order to preserve
     this loop.  */
  if (loop->header == bb)
    {
      e = loop_latch_edge (loop);

      if (e->aux)
	e2 = THREAD_TARGET (e);
      else
	e2 = NULL;

      if (e2 && loop_exit_edge_p (loop, e2))
	{
	  loop->header = NULL;
	  loop->latch = NULL;
	  loops_state_set (LOOPS_NEED_FIXUP);
	}
    }

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->aux == NULL)
	continue;

      if (THREAD_TARGET2 (e))
	e2 = THREAD_TARGET2 (e);
      else
	e2 = THREAD_TARGET (e);

      if (!e2 || noloop_only)
	{
	  /* If NOLOOP_ONLY is true, we only allow threading through the
	     header of a loop to exit edges.

	     There are two cases to consider.  The first is when BB is the
	     loop header.  We will attempt to thread this elsewhere, so
	     we can just continue here.  */
	  if (bb == bb->loop_father->header
	      && (!loop_exit_edge_p (bb->loop_father, e2)
		  || THREAD_TARGET2 (e)))
	    continue;

	  /* The second occurs when there was a loop header buried in a jump
	     threading path.  We do not try to thread this elsewhere, so
	     just cancel the jump threading request by clearing the AUX
	     field now.  */
	  if (bb->loop_father != e2->src->loop_father
	      && !loop_exit_edge_p (e2->src->loop_father, e2))
	    {
	      /* Since this case is not handled by our special code
		 to thread through a loop header, we must explicitly
		 cancel the threading request here.  */
	      free (e->aux);
	      e->aux = NULL;
	      continue;
	    }
	}

      if (e->dest == e2->src)
	update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
					 e->count, THREAD_TARGET (e));

      /* Insert the outgoing edge into the hash table if it is not
	 already in the hash table.  */
      lookup_redirection_data (e, INSERT);
    }

  /* We do not update dominance info.  */
  free_dominance_info (CDI_DOMINATORS);

  /* We know we only thread through the loop header to loop exits.
     Let the basic block duplication hook know we are not creating
     a multiple entry loop.  */
  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
		    (&local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes at the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
		    (&local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
		    (&local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  redirection_data.dispose ();

  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, NULL);

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}

/* Threads edge E through E->dest to the edge THREAD_TARGET (E).  Returns the
   copy of E->dest created during threading, or E->dest if it was not necessary
   to copy it (E is its single predecessor).  */

static basic_block
thread_single_edge (edge e)
{
  basic_block bb = e->dest;
  edge eto = THREAD_TARGET (e);
  struct redirection_data rd;

  free (e->aux);
  e->aux = NULL;

  thread_stats.num_threaded_edges++;

  if (single_pred_p (bb))
    {
      /* If BB has just a single predecessor, we should only remove the
	 control statements at its end, and successors except for ETO.  */
      remove_ctrl_stmt_and_useless_edges (bb, eto->dest);

      /* And fixup the flags on the single remaining edge.  */
      eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
      eto->flags |= EDGE_FALLTHRU;

      return bb;
    }

  /* Otherwise, we need to create a copy.  */
  if (e->dest == eto->src)
    update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);

  rd.outgoing_edge = eto;

  create_block_for_threading (bb, &rd);
  remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
  create_edge_and_update_destination_phis (&rd, rd.dup_block);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
	     e->src->index, e->dest->index, rd.dup_block->index);

  rd.dup_block->count = e->count;
  rd.dup_block->frequency = EDGE_FREQUENCY (e);
  single_succ_edge (rd.dup_block)->count = e->count;
  redirect_edge_and_branch (e, rd.dup_block);
  flush_pending_stmts (e);

  return rd.dup_block;
}

/* Callback for dfs_enumerate_from.  Returns true if BB is different
   from STOP and DBDS_CE_STOP.  */

static basic_block dbds_ce_stop;

static bool
dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
{
  return (bb != (const_basic_block) stop
	  && bb != dbds_ce_stop);
}

/* Evaluates the dominance relationship between the latch of LOOP and BB,
   and returns the resulting state.  */

enum bb_dom_status
{
  /* BB does not dominate the latch of the LOOP.  */
  DOMST_NONDOMINATING,

  /* The LOOP is broken (there is no path from the header to its latch).  */
  DOMST_LOOP_BROKEN,

  /* BB dominates the latch of the LOOP.  */
  DOMST_DOMINATING
};

static enum bb_dom_status
determine_bb_domination_status (struct loop *loop, basic_block bb)
{
  basic_block *bblocks;
  unsigned nblocks, i;
  bool bb_reachable = false;
  edge_iterator ei;
  edge e;

  /* This function assumes BB is a successor of LOOP->header.
     If that is not the case return DOMST_NONDOMINATING which
     is always safe.  */
  {
    bool ok = false;

    FOR_EACH_EDGE (e, ei, bb->preds)
      {
	if (e->src == loop->header)
	  {
	    ok = true;
	    break;
	  }
      }

    if (!ok)
      return DOMST_NONDOMINATING;
  }

  if (bb == loop->latch)
    return DOMST_DOMINATING;

  /* Check that BB dominates LOOP->latch, and that it is back-reachable
     from it.  */
  bblocks = XCNEWVEC (basic_block, loop->num_nodes);
  dbds_ce_stop = loop->header;
  nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
				bblocks, loop->num_nodes, bb);
  for (i = 0; i < nblocks; i++)
    FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
      {
	if (e->src == loop->header)
	  {
	    free (bblocks);
	    return DOMST_NONDOMINATING;
	  }
	if (e->src == bb)
	  bb_reachable = true;
      }

  free (bblocks);
  return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
}

/* Return true if BB is part of the new pre-header that is created
   when threading the latch to DATA.  */

static bool
def_split_header_continue_p (const_basic_block bb, const void *data)
{
  const_basic_block new_header = (const_basic_block) data;
  const struct loop *l;

  if (bb == new_header
      || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
    return false;
  for (l = bb->loop_father; l; l = loop_outer (l))
    if (l == new_header->loop_father)
      return true;
  return false;
}

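/* Edge used by the make_forwarder_block callback below.  */
static edge mfb_kj_edge;

/* Callback for make_forwarder_block.  Returns true for every edge
   except MFB_KJ_EDGE.  */
static bool
mfb_keep_just (edge e)
{
  return e != mfb_kj_edge;
}
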
/* Thread jumps through the header of LOOP.  Returns true if cfg changes.
   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
   to the inside of the loop.  */

static bool
thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
{
  basic_block header = loop->header;
  edge e, tgt_edge, latch = loop_latch_edge (loop);
  edge_iterator ei;
  basic_block tgt_bb, atgt_bb;
  enum bb_dom_status domst;

  /* We have already threaded through headers to exits, so all the threading
     requests now are to the inside of the loop.  We need to avoid creating
     irreducible regions (i.e., loops with more than one entry block), and
     also loops with several latch edges, or new subloops of the loop (although
     there are cases where it might be appropriate, it is difficult to decide,
     and doing it wrongly may confuse other optimizers).

     We could handle more general cases here.  However, the intention is to
     preserve some information about the loop, which is impossible if its
     structure changes significantly, in a way that is not well understood.
     Thus we only handle a few important special cases, in which also updating
     of the loop-carried information should be feasible:

     1) Propagation of the latch edge to a block that dominates the latch
	block of the loop.  This aims to handle the following idiom:

	first = 1;
	while (1)
	  {
	    if (first)
	      initialize;
	    first = 0;
	    body;
	  }

	After threading the latch edge, this becomes

	first = 1;
	if (first)
	  initialize;
	while (1)
	  {
	    first = 0;
	    body;
	  }

	The original header of the loop is moved out of it, and we may thread
	the remaining edges through it without further constraints.

     2) All entry edges are propagated to a single basic block that dominates
	the latch block of the loop.  This aims to handle the following idiom
	(normally created for "for" loops):

	i = 0;
	while (1)
	  {
	    if (i >= 100)
	      break;
	    body;
	    i++;
	  }

	This becomes

	i = 0;
	while (1)
	  {
	    body;
	    i++;
	    if (i >= 100)
	      break;
	  }
     */

  /* Threading through the header won't improve the code if the header has
     just one successor.  */
  if (single_succ_p (header))
    goto fail;

  if (latch->aux)
    {
      if (THREAD_TARGET2 (latch))
	goto fail;
      tgt_edge = THREAD_TARGET (latch);
      tgt_bb = tgt_edge->dest;
    }
  else if (!may_peel_loop_headers
	   && !redirection_block_p (loop->header))
    goto fail;
  else
    {
      tgt_bb = NULL;
      tgt_edge = NULL;
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  if (!e->aux)
	    {
	      if (e == latch)
		continue;

	      /* If the latch is not threaded, and there is a header
		 edge that is not threaded, we would create a loop
		 with multiple entries.  */
	      goto fail;
	    }

	  if (THREAD_TARGET2 (e))
	    goto fail;
	  tgt_edge = THREAD_TARGET (e);
	  atgt_bb = tgt_edge->dest;
	  if (!tgt_bb)
	    tgt_bb = atgt_bb;
	  /* Two targets of threading would make us create a loop
	     with multiple entries.  */
	  else if (tgt_bb != atgt_bb)
	    goto fail;
	}

      if (!tgt_bb)
	{
	  /* There are no threading requests.  */
	  return false;
	}

      /* Redirecting to the empty loop latch is useless.  */
      if (tgt_bb == loop->latch
	  && empty_block_p (loop->latch))
	goto fail;
    }

  /* The target block must dominate the loop latch, otherwise we would be
     creating a subloop.  */
  domst = determine_bb_domination_status (loop, tgt_bb);
  if (domst == DOMST_NONDOMINATING)
    goto fail;
  if (domst == DOMST_LOOP_BROKEN)
    {
      /* If the loop ceased to exist, mark it as such, and thread through its
	 original header.  */
      loop->header = NULL;
      loop->latch = NULL;
      loops_state_set (LOOPS_NEED_FIXUP);
      return thread_block (header, false);
    }

  if (tgt_bb->loop_father->header == tgt_bb)
    {
      /* If the target of the threading is a header of a subloop, we need
	 to create a preheader for it, so that the headers of the two loops
	 do not merge.  */
      if (EDGE_COUNT (tgt_bb->preds) > 2)
	{
	  tgt_bb = create_preheader (tgt_bb->loop_father, 0);
	  gcc_assert (tgt_bb != NULL);
	}
      else
	tgt_bb = split_edge (tgt_edge);
    }

  if (latch->aux)
    {
      basic_block *bblocks;
      unsigned nblocks, i;

      /* First handle the case where the latch edge is redirected.  We are
	 copying the loop header but not creating a multiple entry loop.
	 Make the cfg manipulation code aware of that fact.  */
      set_loop_copy (loop, loop);
      loop->latch = thread_single_edge (latch);
      set_loop_copy (loop, NULL);
      gcc_assert (single_succ (loop->latch) == tgt_bb);
      loop->header = tgt_bb;

      /* Remove the new pre-header blocks from our loop.  */
      bblocks = XCNEWVEC (basic_block, loop->num_nodes);
      nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
				    bblocks, loop->num_nodes, tgt_bb);
      for (i = 0; i < nblocks; i++)
	if (bblocks[i]->loop_father == loop)
	  {
	    remove_bb_from_loops (bblocks[i]);
	    add_bb_to_loop (bblocks[i], loop_outer (loop));
	  }
      free (bblocks);

      /* If the new header has multiple latches mark it so.  */
      FOR_EACH_EDGE (e, ei, loop->header->preds)
	if (e->src->loop_father == loop
	    && e->src != loop->latch)
	  {
	    loop->latch = NULL;
	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
	  }

      /* Cancel remaining threading requests that would make the
	 loop a multiple entry loop.  */
      FOR_EACH_EDGE (e, ei, header->preds)
	{
	  edge e2;

	  if (e->aux == NULL)
	    continue;

	  if (THREAD_TARGET2 (e))
	    e2 = THREAD_TARGET2 (e);
	  else
	    e2 = THREAD_TARGET (e);

	  if (e->src->loop_father != e2->dest->loop_father
	      && e2->dest != loop->header)
	    {
	      free (e->aux);
	      e->aux = NULL;
	    }
	}

      /* Thread the remaining edges through the former header.  */
      thread_block (header, false);
    }
  else
    {
      basic_block new_preheader;

      /* Now consider the case where the entry edges are redirected to the
	 new entry block.  Remember one entry edge, so that we can find the
	 new preheader (its destination after threading).  */
      FOR_EACH_EDGE (e, ei, header->preds)
	if (e->aux)
	  break;

      /* The duplicate of the header is the new preheader of the loop.  Ensure
	 that it is placed correctly in the loop hierarchy.  */
      set_loop_copy (loop, loop_outer (loop));

      thread_block (header, false);
      set_loop_copy (loop, NULL);
      new_preheader = e->dest;

      /* Create the new latch block.  This is always necessary, as the latch
	 must have only a single successor, but the original header had at
	 least two successors.  */
      loop->latch = NULL;
      mfb_kj_edge = single_succ_edge (new_preheader);
      loop->header = mfb_kj_edge->dest;
      latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  return true;

fail:
  /* We failed to thread anything.  Cancel the requests.  */
  FOR_EACH_EDGE (e, ei, header->preds)
    {
      free (e->aux);
      e->aux = NULL;
    }
  return false;
}

/* Walk through the registered jump threads and convert them into a
   form convenient for this pass.

   Any block which has incoming edges threaded to outgoing edges
   will have its entry in THREADED_BLOCKS set.

   Any threaded edge will have its new outgoing edge stored in the
   original edge's AUX field.

   This form avoids the need to walk all the edges in the CFG to
   discover blocks which need processing and avoids unnecessary
   hash table lookups to map from threaded edge to new target.  */

static void
mark_threaded_blocks (bitmap threaded_blocks)
{
  unsigned int i;
  bitmap_iterator bi;
  bitmap tmp = BITMAP_ALLOC (NULL);
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* It is possible to have jump threads in which one is a subpath
     of the other.  i.e., (A, B), (B, C), (C, D) where B is a joiner
     block and (B, C), (C, D) where no joiner block exists.

     When this occurs ignore the jump thread request with the joiner
     block.  It's totally subsumed by the simpler jump thread request.

     This results in less block copying and simpler CFGs.  More importantly,
     when we duplicate the joiner block, B, in this case we will create
     a new threading opportunity that we wouldn't be able to optimize
     until the next jump threading iteration.

     So first convert the jump thread requests which do not require a
     joiner block.  */
  for (i = 0; i < threaded_edges.length (); i += 3)
    {
      edge e = threaded_edges[i];

      if (threaded_edges[i + 2] == NULL)
	{
	  edge *x = XNEWVEC (edge, 2);

	  e->aux = x;
	  THREAD_TARGET (e) = threaded_edges[i + 1];
	  THREAD_TARGET2 (e) = NULL;
	  bitmap_set_bit (tmp, e->dest->index);
	}
    }

  /* Now iterate again, converting cases where we threaded through
     a joiner block, but ignoring those where we have already
     threaded through the joiner block.  */
  for (i = 0; i < threaded_edges.length (); i += 3)
    {
      edge e = threaded_edges[i];

      if (threaded_edges[i + 2] != NULL
	  && threaded_edges[i + 1]->aux == NULL)
	{
	  edge *x = XNEWVEC (edge, 2);

	  e->aux = x;
	  THREAD_TARGET (e) = threaded_edges[i + 1];
	  THREAD_TARGET2 (e) = threaded_edges[i + 2];
	  bitmap_set_bit (tmp, e->dest->index);
	}
    }

  /* If optimizing for size, only thread through a block if we don't have
     to duplicate it or it's an otherwise empty redirection block.  */
  if (optimize_function_for_size_p (cfun))
    {
      EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
	{
	  bb = BASIC_BLOCK (i);
	  if (EDGE_COUNT (bb->preds) > 1
	      && !redirection_block_p (bb))
	    {
	      /* Cancel the requests into this block.  */
	      FOR_EACH_EDGE (e, ei, bb->preds)
		{
		  free (e->aux);
		  e->aux = NULL;
		}
	    }
	  else
	    bitmap_set_bit (threaded_blocks, i);
	}
    }
  else
    bitmap_copy (threaded_blocks, tmp);

  BITMAP_FREE (tmp);
}

/* Walk through all blocks and thread incoming edges to the appropriate
   outgoing edge for each edge pair recorded in THREADED_EDGES.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
   loop headers if it does not simplify the loop.

   Returns true if one or more edges were threaded, false otherwise.  */

bool
thread_through_all_blocks (bool may_peel_loop_headers)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;
  bitmap threaded_blocks;
  struct loop *loop;
  loop_iterator li;

  /* We must know about loops in order to preserve them.  */
  gcc_assert (current_loops != NULL);

  if (!threaded_edges.exists ())
    return false;

  threaded_blocks = BITMAP_ALLOC (NULL);
  memset (&thread_stats, 0, sizeof (thread_stats));

  mark_threaded_blocks (threaded_blocks);

  initialize_original_copy_tables ();

  /* First perform the threading requests that do not affect
     loop structure.  */
  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK (i);

      if (EDGE_COUNT (bb->preds) > 0)
	retval |= thread_block (bb, true);
    }

  /* Then perform the threading through loop headers.  We start with the
     innermost loop, so that the changes in cfg we perform won't affect
     further threading.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (!loop->header
	  || !bitmap_bit_p (threaded_blocks, loop->header->index))
	continue;

      retval |= thread_through_loop_header (loop, may_peel_loop_headers);
    }

  statistics_counter_event (cfun, "Jumps threaded",
			    thread_stats.num_threaded_edges);

  free_original_copy_tables ();

  BITMAP_FREE (threaded_blocks);
  threaded_blocks = NULL;
  threaded_edges.release ();

  if (retval)
    loops_state_set (LOOPS_NEED_FIXUP);

  return retval;
}

/* Register a jump threading opportunity.  We queue up all the jump
   threading opportunities discovered by a pass and update the CFG
   and SSA form all at once.

   E is the edge we can thread, E2 is the new target edge, i.e., we
   are effectively recording that E->dest can be changed to E2->dest
   after fixing the SSA graph.  */

void
register_jump_thread (vec<edge> path, bool through_joiner)
{
  unsigned int i;
  edge e, e2, e3;

  /* Convert PATH into the 3-edge representation we have been using.  This
     is temporary until we convert this file to use a path representation
     throughout.  */
  e = path[0];
  e2 = path[1];
  if (!through_joiner)
    e3 = NULL;
  else
    e3 = path[path.length () - 1];

  /* This can occur if we're jumping to a constant address or
     something similar.  Just get out now.  */
  if (e2 == NULL)
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
	       "  Registering jump thread %s:",
	       through_joiner ? "(through joiner block)" : "");

      for (i = 0; i < path.length (); i++)
	fprintf (dump_file, " (%d, %d); ",
		 path[i]->src->index, path[i]->dest->index);
      fputc ('\n', dump_file);
    }

  if (!threaded_edges.exists ())
    threaded_edges.create (15);

  threaded_edges.safe_push (e);
  threaded_edges.safe_push (e2);
  threaded_edges.safe_push (e3);
}
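
/* As a usage sketch (illustrative only): a client pass that has proven
   that traversing INCOMING_EDGE always results in TAKEN_EDGE being taken
   out of INCOMING_EDGE->dest could record the opportunity as

     vec<edge> path = vNULL;
     path.safe_push (incoming_edge);
     path.safe_push (taken_edge);
     register_jump_thread (path, false);

   where INCOMING_EDGE and TAKEN_EDGE are placeholder names.  The CFG and
   SSA form are then updated in one batch by a later call to
   thread_through_all_blocks.  */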