/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "function.h"
30 #include "tree-flow.h"
31 #include "dumpfile.h"
32 #include "cfgloop.h"
33 #include "hash-table.h"
/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
        the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
        except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
        argument associated with edge B->C.  Associate the new PHI arguments
        with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
        PHI_RESULT.  Add an argument to the PHI in B' which has the same
        value as the PHI in B associated with the edge A->B.  Associate
        the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

        5a. This automatically deletes any PHI arguments associated with the
            edge A->B in B.

        5b. This automatically associates each new argument added in step 4
            with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   Block duplication can be further minimized by using B instead of
   creating B' for one destination if all edges into B are going to be
   threaded to a successor of B.  We had code to do this at one time, but
   I'm not convinced it is correct with the changes to avoid mucking up
   the loop structure (which may cancel threading requests, thus a block
   which we thought was going to become unreachable may still be reachable).
   This code was also going to get ugly with the introduction of the ability
   for a single jump thread request to bypass multiple blocks.

   We further reduce the number of edges and statements we create by
   not copying all the outgoing edges and the control statement in
   step #1.  We instead create a template block without the outgoing
   edges and duplicate the template.  */

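/* As a small illustrative sketch of the above (the block names are made
   up for this comment and do not refer to any real test case), suppose
   the incoming CFG is

        A1   A2
          \ /
           B          where B ends in "if (cond) goto C; else goto D;"
          / \
         C   D

   and we know that whenever B is entered via A1->B the edge B->C will be
   taken.  Threading that path creates a duplicate B' of B whose control
   statement and edge to D have been removed, and redirects A1 to it:

        A1   A2
        |     |
        B'    B
         \   / \
          \ /   \
           C     D

   Any PHI nodes in C get a new argument for the edge B'->C (step 3), and
   the statements copied into B' are rewritten back into SSA form
   (step 7).  */
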
/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */

struct el
{
  edge e;
  struct el *next;
};

/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data : typed_free_remove<redirection_data>
{
  /* A duplicate of B with the trailing control statement removed and which
     targets a single successor of B.  */
  basic_block dup_block;

  /* An outgoing edge from B.  DUP_BLOCK will have OUTGOING_EDGE->dest as
     its single successor.  */
  edge outgoing_edge;

  /* An intermediate edge on the threaded path; NULL unless this request
     threads through a joiner block (see THREAD_TARGET2 below).  */
  edge intermediate_edge;

  /* A list of incoming edges which we want to thread to
     OUTGOING_EDGE->dest.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  typedef redirection_data value_type;
  typedef redirection_data compare_type;
  static inline hashval_t hash (const value_type *);
  static inline int equal (const value_type *, const compare_type *);
};

inline hashval_t
redirection_data::hash (const value_type *p)
{
  edge e = p->outgoing_edge;
  return e->dest->index;
}

inline int
redirection_data::equal (const value_type *p1, const compare_type *p2)
{
  edge e1 = p1->outgoing_edge;
  edge e2 = p2->outgoing_edge;
  edge e3 = p1->intermediate_edge;
  edge e4 = p2->intermediate_edge;
  return e1 == e2 && e3 == e4;
}

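/* Note that hash () above keys only on the index of the outgoing edge's
   destination, so two entries with the same final destination but
   different intermediate edges fall into the same bucket; equal () then
   distinguishes them by also comparing the intermediate edges.  */
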
/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* A template copy of BB with no outgoing edges or control statement that
     we use for creating copies.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;
};

/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge triples
   (incoming_edge, first target edge, second target edge); see
   register_jump_thread.  */
static VEC(edge,heap) *threaded_edges;

/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_TARGET(E) ((edge *)(E)->aux)[0]
#define THREAD_TARGET2(E) ((edge *)(E)->aux)[1]

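/* For a simple jump thread, THREAD_TARGET is the outgoing edge of E->dest
   that the thread should reach and THREAD_TARGET2 is NULL.  For a thread
   through a joiner block, E->dest is the joiner: THREAD_TARGET is then the
   edge out of the joiner onto the threaded path and THREAD_TARGET2 is the
   edge reaching the thread's final destination (see register_jump_thread
   and mark_threaded_blocks below).  */
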
/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;

/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
          || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
          || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}

/* Create a duplicate of BB.  Record the duplicate block in RD.  */

static void
create_block_for_threading (basic_block bb, struct redirection_data *rd)
{
  edge_iterator ei;
  edge e;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_block = duplicate_block (bb, NULL, NULL);

  FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
    e->aux = NULL;

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_block->frequency = 0;
  rd->dup_block->count = 0;
}

/* Main data structure to hold information for duplicates of BB.  */

static hash_table <redirection_data> redirection_data;

/* Given an edge E whose AUX field holds its jump threading information,
   lookup and return the entry for E's target in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  E is added to the list of incoming
   edges associated with the entry in the hash table.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->intermediate_edge = THREAD_TARGET2 (e) ? THREAD_TARGET (e) : NULL;
  elt->outgoing_edge = THREAD_TARGET2 (e) ? THREAD_TARGET2 (e)
                                          : THREAD_TARGET (e);
  elt->dup_block = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data.find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
         relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add E
         to the list of incoming edges associated with the entry.  */
      if (insert)
        {
          struct el *el = XNEW (struct el);
          el->next = elt->incoming_edges;
          el->e = e;
          elt->incoming_edges = el;
        }

      return elt;
    }
}

/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
{
  gimple_stmt_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      source_location locus = gimple_phi_arg_location (phi, src_indx);
      add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
    }
}

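/* As an illustrative sketch (the SSA names below are made up), if BB
   contains

     x_3 = PHI <x_1 (A), x_2 (B)>

   and SRC_E is the edge from B while TGT_E is a newly created edge from a
   duplicate B' of B, then after copy_phi_args the node reads

     x_3 = PHI <x_1 (A), x_2 (B), x_2 (B')>

   i.e., the value flowing in along the duplicate edge is the same value
   that flowed in along the original edge.  */
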
/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with the edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2);
    }
}

/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
                                         basic_block bb)
{
  edge e = make_edge (bb, rd->outgoing_edge->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);
  e->probability = REG_BR_PROB_BASE;
  e->count = bb->count;

  if (rd->outgoing_edge->aux)
    {
      e->aux = XNEWVEC (edge, 2);
      THREAD_TARGET (e) = THREAD_TARGET (rd->outgoing_edge);
      THREAD_TARGET2 (e) = THREAD_TARGET2 (rd->outgoing_edge);
    }
  else
    {
      e->aux = NULL;
    }

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->outgoing_edge, e);
}

/* Wire up the outgoing edges from the duplicate block and
   update any PHIs as needed.  */
void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
                               ssa_local_info_t *local_info)
{
  /* If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (THREAD_TARGET2 (rd->incoming_edges->e))
    {
      edge victim;
      edge e2;
      edge e = rd->incoming_edges->e;

      /* This updates the PHIs at the destination of the duplicate
         block.  */
      update_destination_phis (local_info->bb, rd->dup_block);

      /* Find the edge from the duplicate block to the block we're
         threading through.  That's the edge we want to redirect.  */
      victim = find_edge (rd->dup_block, THREAD_TARGET (e)->dest);
      e2 = redirect_edge_and_branch (victim, THREAD_TARGET2 (e)->dest);

      /* If we redirected the edge, then we need to copy PHI arguments
         at the target.  If the edge already existed (e2 != victim case),
         then the PHIs in the target already have the correct arguments.  */
      if (e2 == victim)
        copy_phi_args (e2->dest, THREAD_TARGET2 (e), e2);
    }
  else
    {
      remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
      create_edge_and_update_destination_phis (rd, rd->dup_block);
    }
}

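/* To illustrate the joiner case above (the block names are hypothetical),
   suppose J is a joiner block with successors X and Y, a thread was
   registered from an edge into J, through X, to a block Z, and
   RD->dup_block is the duplicate J' of J.  J' keeps J's control statement
   and both outgoing edges; the code above then redirects J'->X to Z, so
   the incoming edges later redirected to J' (see ssa_redirect_edges)
   reach Z without executing X, while the path through Y is preserved.  */
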
/* Hash table traversal callback routine to create duplicate blocks.  */

int
ssa_create_duplicates (struct redirection_data **slot,
                       ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading (local_info->bb, rd);
      local_info->template_block = rd->dup_block;

      /* We do not create any outgoing edges for the template.  We will
         take care of that in a later traversal.  That way we do not
         create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
         block.  */
      ssa_fix_duplicate_block_edges (rd, local_info);
    }

  /* Keep walking the hash table.  */
  return 1;
}

/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

inline int
ssa_fixup_template_block (struct redirection_data **slot,
                          ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* If this is the template block halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (rd->dup_block && rd->dup_block == local_info->template_block)
    {
      ssa_fix_duplicate_block_edges (rd, local_info);
      return 0;
    }

  return 1;
}

/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

int
ssa_redirect_edges (struct redirection_data **slot,
                    ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this
     hash table entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;

      /* Go ahead and free this element from the list.  Doing this now
         avoids the need for another list walk when we destroy the hash
         table.  */
      next = el->next;
      free (el);

      thread_stats.num_threaded_edges++;
      /* If we are threading through a joiner block, then we have to
         find the edge we want to redirect and update some PHI nodes.  */
      if (THREAD_TARGET2 (e))
        {
          edge e2;

          /* We want to redirect the incoming edge to the joiner block (E)
             to instead reach the duplicate of the joiner block.  */
          e2 = redirect_edge_and_branch (e, rd->dup_block);
          flush_pending_stmts (e2);
        }
      else if (rd->dup_block)
        {
          edge e2;

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                     e->src->index, e->dest->index, rd->dup_block->index);

          rd->dup_block->count += e->count;

          /* Excessive jump threading may make frequencies large enough so
             the computation overflows.  */
          if (rd->dup_block->frequency < BB_FREQ_MAX * 2)
            rd->dup_block->frequency += EDGE_FREQUENCY (e);
          EDGE_SUCC (rd->dup_block, 0)->count += e->count;
          /* Redirect the incoming edge to the appropriate duplicate
             block.  */
          e2 = redirect_edge_and_branch (e, rd->dup_block);
          gcc_assert (e == e2);
          flush_pending_stmts (e2);
        }

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
         to clear it will cause all kinds of unpleasant problems later.  */
      free (e->aux);
      e->aux = NULL;
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}

/* Return true if this block has no executable statements other than
   a simple ctrl flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Advance to the first executable statement.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi)
         && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
             || is_gimple_debug (gsi_stmt (gsi))
             || gimple_nop_p (gsi_stmt (gsi))))
    gsi_next (&gsi);

  /* Check if this is an empty block.  */
  if (gsi_end_p (gsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return gsi_stmt (gsi)
         && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
             || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
             || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}

/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   If NOLOOP_ONLY is true, we only perform the threading as long as it
   does not affect the structure of the loops in a nontrivial way.  */

static bool
thread_block (basic_block bb, bool noloop_only)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e, e2;
  edge_iterator ei;
  ssa_local_info_t local_info;
  struct loop *loop = bb->loop_father;

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data.create (EDGE_COUNT (bb->succs));

  /* If we thread the latch of the loop to its exit, the loop ceases to
     exist.  Make sure we do not restrict ourselves in order to preserve
     this loop.  */
  if (loop->header == bb)
    {
      e = loop_latch_edge (loop);

      if (e->aux)
        e2 = THREAD_TARGET (e);
      else
        e2 = NULL;

      if (e2 && loop_exit_edge_p (loop, e2))
        {
          loop->header = NULL;
          loop->latch = NULL;
          loops_state_set (LOOPS_NEED_FIXUP);
        }
    }

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->aux == NULL)
        continue;

      if (THREAD_TARGET2 (e))
        e2 = THREAD_TARGET2 (e);
      else
        e2 = THREAD_TARGET (e);

      if (!e2
          /* If NOLOOP_ONLY is true, we only allow threading through the
             header of a loop to exit edges.  */
          || (noloop_only
              && bb == bb->loop_father->header
              && (!loop_exit_edge_p (bb->loop_father, e2)
                  || THREAD_TARGET2 (e))))
        continue;

      if (e->dest == e2->src)
        update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
                                         e->count, THREAD_TARGET (e));

      /* Insert the outgoing edge into the hash table if it is not
         already in the hash table.  */
      lookup_redirection_data (e, INSERT);
    }

  /* We do not update dominance info.  */
  free_dominance_info (CDI_DOMINATORS);

  /* We know we only thread through the loop header to loop exits.
     Let the basic block duplication hook know we are not creating
     a multiple entry loop.  */
  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
                            (&local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes at the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
                            (&local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
                            (&local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  redirection_data.dispose ();

  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, NULL);

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}

/* Threads edge E through E->dest to the edge THREAD_TARGET (E).  Returns the
   copy of E->dest created during threading, or E->dest if it was not necessary
   to copy it (E is its single predecessor).  */

static basic_block
thread_single_edge (edge e)
{
  basic_block bb = e->dest;
  edge eto = THREAD_TARGET (e);
  struct redirection_data rd;

  free (e->aux);
  e->aux = NULL;

  thread_stats.num_threaded_edges++;

  if (single_pred_p (bb))
    {
      /* If BB has just a single predecessor, we should only remove the
         control statements at its end, and successors except for ETO.  */
      remove_ctrl_stmt_and_useless_edges (bb, eto->dest);

      /* And fixup the flags on the single remaining edge.  */
      eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
      eto->flags |= EDGE_FALLTHRU;

      return bb;
    }

  /* Otherwise, we need to create a copy.  */
  if (e->dest == eto->src)
    update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);

  rd.outgoing_edge = eto;

  create_block_for_threading (bb, &rd);
  remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
  create_edge_and_update_destination_phis (&rd, rd.dup_block);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
             e->src->index, e->dest->index, rd.dup_block->index);

  rd.dup_block->count = e->count;
  rd.dup_block->frequency = EDGE_FREQUENCY (e);
  single_succ_edge (rd.dup_block)->count = e->count;
  redirect_edge_and_branch (e, rd.dup_block);
  flush_pending_stmts (e);

  return rd.dup_block;
}

/* Callback for dfs_enumerate_from.  Returns true if BB is different
   from STOP and DBDS_CE_STOP.  */

static basic_block dbds_ce_stop;
static bool
dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
{
  return (bb != (const_basic_block) stop
          && bb != dbds_ce_stop);
}

/* Evaluates the dominance relationship of latch of the LOOP and BB, and
   returns the state.  */

enum bb_dom_status
{
  /* BB does not dominate latch of the LOOP.  */
  DOMST_NONDOMINATING,
  /* The LOOP is broken (there is no path from the header to its latch).  */
  DOMST_LOOP_BROKEN,
  /* BB dominates the latch of the LOOP.  */
  DOMST_DOMINATING
};

static enum bb_dom_status
determine_bb_domination_status (struct loop *loop, basic_block bb)
{
  basic_block *bblocks;
  unsigned nblocks, i;
  bool bb_reachable = false;
  edge_iterator ei;
  edge e;

  /* This function assumes BB is a successor of LOOP->header.
     If that is not the case return DOMST_NONDOMINATING which
     is always safe.  */
    {
      bool ok = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->src == loop->header)
            {
              ok = true;
              break;
            }
        }

      if (!ok)
        return DOMST_NONDOMINATING;
    }

  if (bb == loop->latch)
    return DOMST_DOMINATING;

  /* Check that BB dominates LOOP->latch, and that it is back-reachable
     from it.  */

  bblocks = XCNEWVEC (basic_block, loop->num_nodes);
  dbds_ce_stop = loop->header;
  nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
                                bblocks, loop->num_nodes, bb);
  for (i = 0; i < nblocks; i++)
    FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
      {
        if (e->src == loop->header)
          {
            free (bblocks);
            return DOMST_NONDOMINATING;
          }
        if (e->src == bb)
          bb_reachable = true;
      }

  free (bblocks);
  return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
}

/* Return true if BB is part of the new pre-header that is created
   when threading the latch to DATA.  */

static bool
def_split_header_continue_p (const_basic_block bb, const void *data)
{
  const_basic_block new_header = (const_basic_block) data;
  const struct loop *l;

  if (bb == new_header
      || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
    return false;
  for (l = bb->loop_father; l; l = loop_outer (l))
    if (l == new_header->loop_father)
      return true;
  return false;
}

/* Thread jumps through the header of LOOP.  Returns true if cfg changes.
   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
   to the inside of the loop.  */

static bool
thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
{
  basic_block header = loop->header;
  edge e, tgt_edge, latch = loop_latch_edge (loop);
  edge_iterator ei;
  basic_block tgt_bb, atgt_bb;
  enum bb_dom_status domst;

  /* We have already threaded through headers to exits, so all the threading
     requests now are to the inside of the loop.  We need to avoid creating
     irreducible regions (i.e., loops with more than one entry block), and
     also loops with several latch edges, or new subloops of the loop
     (although there are cases where it might be appropriate, it is difficult
     to decide, and doing it wrongly may confuse other optimizers).

     We could handle more general cases here.  However, the intention is to
     preserve some information about the loop, which is impossible if its
     structure changes significantly, in a way that is not well understood.
     Thus we only handle a few important special cases, in which updating of
     the loop-carried information should also be feasible:

     1) Propagation of latch edge to a block that dominates the latch block
        of a loop.  This aims to handle the following idiom:

        first = 1;
        while (1)
          {
            if (first)
              initialize;
            first = 0;
            body;
          }

        After threading the latch edge, this becomes

        first = 1;
        if (first)
          initialize;
        while (1)
          {
            first = 0;
            body;
          }

        The original header of the loop is moved out of it, and we may thread
        the remaining edges through it without further constraints.

     2) All entry edges are propagated to a single basic block that dominates
        the latch block of the loop.  This aims to handle the following idiom
        (normally created for "for" loops):

        i = 0;
        while (1)
          {
            if (i >= 100)
              break;
            body;
            i++;
          }

        This becomes

        i = 0;
        while (1)
          {
            body;
            i++;
            if (i >= 100)
              break;
          }
     */

  /* Threading through the header won't improve the code if the header has just
     one successor.  */
  if (single_succ_p (header))
    goto fail;

  if (latch->aux)
    {
      if (THREAD_TARGET2 (latch))
        goto fail;
      tgt_edge = THREAD_TARGET (latch);
      tgt_bb = tgt_edge->dest;
    }
  else if (!may_peel_loop_headers
           && !redirection_block_p (loop->header))
    goto fail;
  else
    {
      tgt_bb = NULL;
      tgt_edge = NULL;
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (!e->aux)
            {
              if (e == latch)
                continue;

              /* If latch is not threaded, and there is a header
                 edge that is not threaded, we would create a loop
                 with multiple entries.  */
              goto fail;
            }

          if (THREAD_TARGET2 (e))
            goto fail;
          tgt_edge = THREAD_TARGET (e);
          atgt_bb = tgt_edge->dest;
          if (!tgt_bb)
            tgt_bb = atgt_bb;
          /* Two targets of threading would make us create a loop
             with multiple entries.  */
          else if (tgt_bb != atgt_bb)
            goto fail;
        }

      if (!tgt_bb)
        {
          /* There are no threading requests.  */
          return false;
        }

      /* Redirecting to empty loop latch is useless.  */
      if (tgt_bb == loop->latch
          && empty_block_p (loop->latch))
        goto fail;
    }

  /* The target block must dominate the loop latch, otherwise we would be
     creating a subloop.  */
  domst = determine_bb_domination_status (loop, tgt_bb);
  if (domst == DOMST_NONDOMINATING)
    goto fail;
  if (domst == DOMST_LOOP_BROKEN)
    {
      /* If the loop ceased to exist, mark it as such, and thread through its
         original header.  */
      loop->header = NULL;
      loop->latch = NULL;
      loops_state_set (LOOPS_NEED_FIXUP);
      return thread_block (header, false);
    }

  if (tgt_bb->loop_father->header == tgt_bb)
    {
      /* If the target of the threading is a header of a subloop, we need
         to create a preheader for it, so that the headers of the two loops
         do not merge.  */
      if (EDGE_COUNT (tgt_bb->preds) > 2)
        {
          tgt_bb = create_preheader (tgt_bb->loop_father, 0);
          gcc_assert (tgt_bb != NULL);
        }
      else
        tgt_bb = split_edge (tgt_edge);
    }

  if (latch->aux)
    {
      basic_block *bblocks;
      unsigned nblocks, i;

      /* First handle the case where the latch edge is redirected.  We are
         copying the loop header but not creating a multiple entry loop.
         Make the cfg manipulation code aware of that fact.  */
      set_loop_copy (loop, loop);
      loop->latch = thread_single_edge (latch);
      set_loop_copy (loop, NULL);
      gcc_assert (single_succ (loop->latch) == tgt_bb);
      loop->header = tgt_bb;

      /* Remove the new pre-header blocks from our loop.  */
      bblocks = XCNEWVEC (basic_block, loop->num_nodes);
      nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
                                    bblocks, loop->num_nodes, tgt_bb);
      for (i = 0; i < nblocks; i++)
        if (bblocks[i]->loop_father == loop)
          {
            remove_bb_from_loops (bblocks[i]);
            add_bb_to_loop (bblocks[i], loop_outer (loop));
          }
      free (bblocks);

      /* If the new header has multiple latches mark it so.  */
      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src->loop_father == loop
            && e->src != loop->latch)
          {
            loop->latch = NULL;
            loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
          }

      /* Cancel remaining threading requests that would make the
         loop a multiple entry loop.  */
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          edge e2;

          if (e->aux == NULL)
            continue;

          if (THREAD_TARGET2 (e))
            e2 = THREAD_TARGET2 (e);
          else
            e2 = THREAD_TARGET (e);

          if (e->src->loop_father != e2->dest->loop_father
              && e2->dest != loop->header)
            {
              free (e->aux);
              e->aux = NULL;
            }
        }

      /* Thread the remaining edges through the former header.  */
      thread_block (header, false);
    }
  else
    {
      basic_block new_preheader;

      /* Now consider the case where the entry edges are redirected to the
         new entry block.  Remember one entry edge, so that we can find the
         new preheader (its destination after threading).  */
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (e->aux)
            break;
        }

      /* The duplicate of the header is the new preheader of the loop.  Ensure
         that it is placed correctly in the loop hierarchy.  */
      set_loop_copy (loop, loop_outer (loop));

      thread_block (header, false);
      set_loop_copy (loop, NULL);
      new_preheader = e->dest;

      /* Create the new latch block.  This is always necessary, as the latch
         must have only a single successor, but the original header had at
         least two successors.  */
      loop->latch = NULL;
      mfb_kj_edge = single_succ_edge (new_preheader);
      loop->header = mfb_kj_edge->dest;
      latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  return true;

fail:
  /* We failed to thread anything.  Cancel the requests.  */
  FOR_EACH_EDGE (e, ei, header->preds)
    {
      free (e->aux);
      e->aux = NULL;
    }
  return false;
}

/* Walk through the registered jump threads and convert them into a
   form convenient for this pass.

   Any block which has incoming edges threaded to outgoing edges
   will have its entry in THREADED_BLOCKS set.

   Any threaded edge will have its new outgoing edge stored in the
   original edge's AUX field.

   This form avoids the need to walk all the edges in the CFG to
   discover blocks which need processing and avoids unnecessary
   hash table lookups to map from threaded edge to new target.  */

static void
mark_threaded_blocks (bitmap threaded_blocks)
{
  unsigned int i;
  bitmap_iterator bi;
  bitmap tmp = BITMAP_ALLOC (NULL);
  basic_block bb;
  edge e;
  edge_iterator ei;

  for (i = 0; i < VEC_length (edge, threaded_edges); i += 3)
    {
      edge e = VEC_index (edge, threaded_edges, i);
      edge *x = XNEWVEC (edge, 2);

      e->aux = x;
      THREAD_TARGET (e) = VEC_index (edge, threaded_edges, i + 1);
      THREAD_TARGET2 (e) = VEC_index (edge, threaded_edges, i + 2);
      bitmap_set_bit (tmp, e->dest->index);
    }

  /* If optimizing for size, only thread through a block if we don't have
     to duplicate it or it's an otherwise empty redirection block.  */
  if (optimize_function_for_size_p (cfun))
    {
      EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
        {
          bb = BASIC_BLOCK (i);
          if (EDGE_COUNT (bb->preds) > 1
              && !redirection_block_p (bb))
            {
              FOR_EACH_EDGE (e, ei, bb->preds)
                {
                  free (e->aux);
                  e->aux = NULL;
                }
            }
          else
            bitmap_set_bit (threaded_blocks, i);
        }
    }
  else
    bitmap_copy (threaded_blocks, tmp);

  BITMAP_FREE (tmp);
}

/* Walk through all blocks and thread incoming edges to the appropriate
   outgoing edge for each jump thread recorded in THREADED_EDGES.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
   loop headers if it does not simplify the loop.

   Returns true if one or more edges were threaded, false otherwise.  */

bool
thread_through_all_blocks (bool may_peel_loop_headers)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;
  bitmap threaded_blocks;
  struct loop *loop;
  loop_iterator li;

  /* We must know about loops in order to preserve them.  */
  gcc_assert (current_loops != NULL);

  if (threaded_edges == NULL)
    return false;

  threaded_blocks = BITMAP_ALLOC (NULL);
  memset (&thread_stats, 0, sizeof (thread_stats));

  mark_threaded_blocks (threaded_blocks);

  initialize_original_copy_tables ();

  /* First perform the threading requests that do not affect
     loop structure.  */
  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK (i);

      if (EDGE_COUNT (bb->preds) > 0)
        retval |= thread_block (bb, true);
    }

  /* Then perform the threading through loop headers.  We start with the
     innermost loop, so that the changes in cfg we perform won't affect
     further threading.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (!loop->header
          || !bitmap_bit_p (threaded_blocks, loop->header->index))
        continue;

      retval |= thread_through_loop_header (loop, may_peel_loop_headers);
    }

  statistics_counter_event (cfun, "Jumps threaded",
                            thread_stats.num_threaded_edges);

  free_original_copy_tables ();

  BITMAP_FREE (threaded_blocks);
  threaded_blocks = NULL;
  VEC_free (edge, heap, threaded_edges);
  threaded_edges = NULL;

  if (retval)
    loops_state_set (LOOPS_NEED_FIXUP);

  return retval;
}

/* Register a jump threading opportunity.  We queue up all the jump
   threading opportunities discovered by a pass and update the CFG
   and SSA form all at once.

   E is the edge we can thread, E2 is the new target edge, i.e., we
   are effectively recording that E->dest can be changed to E2->dest
   after fixing the SSA graph.  When the jump thread passes through a
   joiner block (E->dest), E2 is the edge out of the joiner onto the
   threaded path and E3 is the edge reaching the thread's final
   destination; otherwise E3 is NULL.  */

void
register_jump_thread (edge e, edge e2, edge e3)
{
  /* This can occur if we're jumping to a constant address or
     something similar.  Just get out now.  */
  if (e2 == NULL)
    return;

  if (threaded_edges == NULL)
    threaded_edges = VEC_alloc (edge, heap, 15);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && e->dest != e2->src)
    fprintf (dump_file,
             "  Registering jump thread around one or more intermediate blocks\n");

  VEC_safe_push (edge, heap, threaded_edges, e);
  VEC_safe_push (edge, heap, threaded_edges, e2);
  VEC_safe_push (edge, heap, threaded_edges, e3);
}

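/* A rough sketch of how a client pass is expected to use this file (the
   details live in the callers, e.g. the DOM and VRP passes, not here):
   while analyzing the function the pass calls register_jump_thread for
   each jump thread it discovers, and when its analysis is complete it
   calls thread_through_all_blocks; afterwards the caller is responsible
   for recomputing dominance information and rewriting the SSA names
   copied into the duplicate blocks back into SSA form.  */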