gcc/tree-ssa-threadupdate.c
/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "function.h"
#include "tree-ssa.h"
#include "tree-ssa-threadupdate.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "hash-table.h"
/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
        the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
        except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
        argument associated with edge B->C.  Associate the new PHI arguments
        with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
        PHI_RESULT.  Add an argument to the PHI in B' which has the same
        value as the PHI in B associated with the edge A->B.  Associate
        the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

        5a. This automatically deletes any PHI arguments associated with the
            edge A->B in B.

        5b. This automatically associates each new argument added in step 4
            with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   Block duplication can be further minimized by using B instead of
   creating B' for one destination if all edges into B are going to be
   threaded to a successor of B.  We had code to do this at one time, but
   I'm not convinced it is correct with the changes to avoid mucking up
   the loop structure (which may cancel threading requests, thus a block
   which we thought was going to become unreachable may still be reachable).
   This code was also going to get ugly with the introduction of the ability
   for a single jump thread request to bypass multiple blocks.

   We further reduce the number of edges and statements we create by
   not copying all the outgoing edges and the control statement in
   step #1.  We instead create a template block without the outgoing
   edges and duplicate the template.  */
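/* A minimal illustration (not from the sources) of the transformation
   described above.  Suppose B ends in a conditional whose outcome is
   known to be true along the edge A->B:

       Before:                       After threading A->B to B->C:

         A     D                       A        D
          \   /                        |        |
            B   (if (cond) C else E)   B'       B   (if (cond) C else E)
           / \                         |       / \
          C   E                        C      C   E

   B' is the duplicate built in step 1: it preserves B's side effects
   but falls through unconditionally to C (steps 2 and 5), so the
   conditional branch is no longer executed on the path from A.  */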
/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */
struct el
{
  edge e;
  struct el *next;
};
/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */
struct redirection_data : typed_free_remove<redirection_data>
{
  /* A duplicate of B with the trailing control statement removed and which
     targets a single successor of B.  */
  basic_block dup_block;

  /* An outgoing edge from B.  DUP_BLOCK will have OUTGOING_EDGE->dest as
     its single successor.  */
  edge outgoing_edge;

  edge intermediate_edge;

  /* A list of incoming edges which we want to thread to
     OUTGOING_EDGE->dest.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  typedef redirection_data value_type;
  typedef redirection_data compare_type;
  static inline hashval_t hash (const value_type *);
  static inline int equal (const value_type *, const compare_type *);
};
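/* A usage sketch (illustrative only): implementing steps #5 and #6
   amounts to traversing the hash table and, for each entry RD, walking
   the incoming edge list built up by lookup_redirection_data:

     struct el *el;
     for (el = rd->incoming_edges; el; el = el->next)
       redirect_incoming_edge (el->e, rd);   // hypothetical helper

   ssa_redirect_edges below is the real implementation of this walk.  */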
inline hashval_t
redirection_data::hash (const value_type *p)
{
  edge e = p->outgoing_edge;
  return e->dest->index;
}
inline int
redirection_data::equal (const value_type *p1, const compare_type *p2)
{
  edge e1 = p1->outgoing_edge;
  edge e2 = p2->outgoing_edge;
  edge e3 = p1->intermediate_edge;
  edge e4 = p2->intermediate_edge;
  return e1 == e2 && e3 == e4;
}
/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* A template copy of BB with no outgoing edges or control statement that
     we use for creating copies.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;
};
/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge triples
   (incoming_edge, target_edge, final_target_edge), where the third
   element is NULL when no joiner block is involved; see
   register_jump_thread.  */
static vec<edge> threaded_edges;
/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_TARGET(E) ((edge *)(E)->aux)[0]
#define THREAD_TARGET2(E) ((edge *)(E)->aux)[1]
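/* For illustration only: given an edge E whose AUX field has been
   filled in by mark_threaded_blocks, the two cases can be
   distinguished like so:

     edge final;
     if (THREAD_TARGET2 (e))    // threading through a joiner block
       final = THREAD_TARGET2 (e);
     else                       // simple thread: one target edge
       final = THREAD_TARGET (e);

   This mirrors the checks done in thread_block and
   thread_through_loop_header below.  */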
/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;
/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
          || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
          || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}
/* Create a duplicate of BB.  Record the duplicate block in RD.  */

static void
create_block_for_threading (basic_block bb, struct redirection_data *rd)
{
  edge_iterator ei;
  edge e;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_block = duplicate_block (bb, NULL, NULL);

  FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
    e->aux = NULL;

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_block->frequency = 0;
  rd->dup_block->count = 0;
}
/* Main data structure to hold information for duplicates of BB.  */

static hash_table <redirection_data> redirection_data;

/* Given an incoming edge E whose AUX field records its thread targets,
   look up and return the hash table entry for its outgoing (target)
   edge.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present, and add E to the list of incoming edges
   associated with the entry.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->intermediate_edge = THREAD_TARGET2 (e) ? THREAD_TARGET (e) : NULL;
  elt->outgoing_edge = THREAD_TARGET2 (e) ? THREAD_TARGET2 (e)
                                          : THREAD_TARGET (e);
  elt->dup_block = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data.find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
         relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add E
         to the list of incoming edges associated with the entry.  */
      if (insert)
        {
          struct el *el = XNEW (struct el);
          el->next = elt->incoming_edges;
          el->e = e;
          elt->incoming_edges = el;
        }

      return elt;
    }
}
/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
{
  gimple_stmt_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      source_location locus = gimple_phi_arg_location (phi, src_indx);
      add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
    }
}
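/* Illustrative only: if BB contains

     x_3 = PHI <x_1(src_e), x_2(e_other)>

   then copy_phi_args (bb, src_e, tgt_e) extends it to

     x_3 = PHI <x_1(src_e), x_2(e_other), x_1(tgt_e)>

   i.e. TGT_E inherits both SRC_E's argument and its source location.  */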
/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with the edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2);
    }
}
/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
                                         basic_block bb)
{
  edge e = make_edge (bb, rd->outgoing_edge->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);
  e->probability = REG_BR_PROB_BASE;
  e->count = bb->count;

  if (rd->outgoing_edge->aux)
    {
      e->aux = XNEWVEC (edge, 2);
      THREAD_TARGET (e) = THREAD_TARGET (rd->outgoing_edge);
      THREAD_TARGET2 (e) = THREAD_TARGET2 (rd->outgoing_edge);
    }
  else
    {
      e->aux = NULL;
    }

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->outgoing_edge, e);
}
/* Wire up the outgoing edges from the duplicate block and
   update any PHIs as needed.  */
void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
                               ssa_local_info_t *local_info)
{
  /* If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (THREAD_TARGET2 (rd->incoming_edges->e))
    {
      edge victim;
      edge e2;
      edge e = rd->incoming_edges->e;

      /* This updates the PHIs at the destination of the duplicate
         block.  */
      update_destination_phis (local_info->bb, rd->dup_block);

      /* Find the edge from the duplicate block to the block we're
         threading through.  That's the edge we want to redirect.  */
      victim = find_edge (rd->dup_block, THREAD_TARGET (e)->dest);
      e2 = redirect_edge_and_branch (victim, THREAD_TARGET2 (e)->dest);

      /* If we redirected the edge, then we need to copy PHI arguments
         at the target.  If the edge already existed (e2 != victim case),
         then the PHIs in the target already have the correct arguments.  */
      if (e2 == victim)
        copy_phi_args (e2->dest, THREAD_TARGET2 (e), e2);
    }
  else
    {
      remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
      create_edge_and_update_destination_phis (rd, rd->dup_block);
    }
}
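/* Illustrative only: in the joiner case handled above, the duplicate
   B' keeps B's control statement; only its edge into the block we
   thread through, C (= THREAD_TARGET (E)->dest), is redirected to the
   final target F (= THREAD_TARGET2 (E)->dest):

        B'                   B'
       /  \      becomes    /  \
      C    D               F    D

   In the non-joiner case B' loses its control statement and is given a
   single unconditional edge to the one desired successor instead.  */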
/* Hash table traversal callback routine to create duplicate blocks.  */

static int
ssa_create_duplicates (struct redirection_data **slot,
                       ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading (local_info->bb, rd);
      local_info->template_block = rd->dup_block;

      /* We do not create any outgoing edges for the template.  We will
         take care of that in a later traversal.  That way we do not
         create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
         block.  */
      ssa_fix_duplicate_block_edges (rd, local_info);
    }

  /* Keep walking the hash table.  */
  return 1;
}
/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

inline int
ssa_fixup_template_block (struct redirection_data **slot,
                          ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* If this is the template block halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (rd->dup_block && rd->dup_block == local_info->template_block)
    {
      ssa_fix_duplicate_block_edges (rd, local_info);
      return 0;
    }

  return 1;
}
/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

static int
ssa_redirect_edges (struct redirection_data **slot,
                    ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this
     hash table entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;

      /* Go ahead and free this element from the list.  Doing this now
         avoids the need for another list walk when we destroy the hash
         table.  */
      next = el->next;
      free (el);

      thread_stats.num_threaded_edges++;

      /* If we are threading through a joiner block, then we have to
         find the edge we want to redirect and update some PHI nodes.  */
      if (THREAD_TARGET2 (e))
        {
          edge e2;

          /* We want to redirect the incoming edge to the joiner block (E)
             to instead reach the duplicate of the joiner block.  */
          e2 = redirect_edge_and_branch (e, rd->dup_block);
          flush_pending_stmts (e2);
        }
      else if (rd->dup_block)
        {
          edge e2;

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                     e->src->index, e->dest->index, rd->dup_block->index);

          rd->dup_block->count += e->count;

          /* Excessive jump threading may make frequencies large enough so
             the computation overflows.  */
          if (rd->dup_block->frequency < BB_FREQ_MAX * 2)
            rd->dup_block->frequency += EDGE_FREQUENCY (e);
          EDGE_SUCC (rd->dup_block, 0)->count += e->count;

          /* Redirect the incoming edge to the appropriate duplicate
             block.  */
          e2 = redirect_edge_and_branch (e, rd->dup_block);
          gcc_assert (e == e2);
          flush_pending_stmts (e2);
        }

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
         to clear it will cause all kinds of unpleasant problems later.  */
      free (e->aux);
      e->aux = NULL;
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}
/* Return true if this block has no executable statements other than
   a simple ctrl flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Advance to the first executable statement.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi)
         && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
             || is_gimple_debug (gsi_stmt (gsi))
             || gimple_nop_p (gsi_stmt (gsi))))
    gsi_next (&gsi);

  /* Check if this is an empty block.  */
  if (gsi_end_p (gsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return gsi_stmt (gsi)
         && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
             || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
             || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}
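/* Illustrative only: redirection_block_p accepts a block such as

     <bb 5>:
     L1:
       # DEBUG x => x_2
       if (x_2 > 10) goto <bb 6>; else goto <bb 7>;

   because labels, debug statements and nops are skipped before testing
   for the terminating control statement.  */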
/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update the dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   If NOLOOP_ONLY is true, we only perform the threading as long as it
   does not affect the structure of the loops in a nontrivial way.  */
static bool
thread_block (basic_block bb, bool noloop_only)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e, e2;
  edge_iterator ei;
  ssa_local_info_t local_info;
  struct loop *loop = bb->loop_father;

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data.create (EDGE_COUNT (bb->succs));

  /* If we thread the latch of the loop to its exit, the loop ceases to
     exist.  Make sure we do not restrict ourselves in order to preserve
     this loop.  */
  if (loop->header == bb)
    {
      e = loop_latch_edge (loop);

      if (e->aux)
        e2 = THREAD_TARGET (e);
      else
        e2 = NULL;

      if (e2 && loop_exit_edge_p (loop, e2))
        {
          loop->header = NULL;
          loop->latch = NULL;
          loops_state_set (LOOPS_NEED_FIXUP);
        }
    }

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->aux == NULL)
        continue;

      if (THREAD_TARGET2 (e))
        e2 = THREAD_TARGET2 (e);
      else
        e2 = THREAD_TARGET (e);

      if (!e2 || noloop_only)
        {
          /* If NOLOOP_ONLY is true, we only allow threading through the
             header of a loop to exit edges.

             There are two cases to consider.  The first is when BB is the
             loop header.  We will attempt to thread this elsewhere, so
             we can just continue here.  */

          if (bb == bb->loop_father->header
              && (!loop_exit_edge_p (bb->loop_father, e2)
                  || THREAD_TARGET2 (e)))
            continue;

          /* The second occurs when there was a loop header buried in a jump
             threading path.  We do not try and thread this elsewhere, so
             just cancel the jump threading request by clearing the AUX
             field now.  */
          if ((bb->loop_father != e2->src->loop_father
               && !loop_exit_edge_p (e2->src->loop_father, e2))
              || (e2->src->loop_father != e2->dest->loop_father
                  && !loop_exit_edge_p (e2->src->loop_father, e2)))
            {
              /* Since this case is not handled by our special code
                 to thread through a loop header, we must explicitly
                 cancel the threading request here.  */
              free (e->aux);
              e->aux = NULL;
              continue;
            }
        }

      if (e->dest == e2->src)
        update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
                                         e->count, THREAD_TARGET (e));

      /* Insert the outgoing edge into the hash table if it is not
         already in the hash table.  */
      lookup_redirection_data (e, INSERT);
    }

  /* We do not update dominance info.  */
  free_dominance_info (CDI_DOMINATORS);

  /* We know we only thread through the loop header to loop exits.
     Let the basic block duplication hook know we are not creating
     a multiple entry loop.  */
  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
                            (&local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes at the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
                            (&local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
                            (&local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  redirection_data.dispose ();

  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, NULL);

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}
/* Threads edge E through E->dest to the edge THREAD_TARGET (E).  Returns the
   copy of E->dest created during threading, or E->dest if it was not necessary
   to copy it (E is its single predecessor).  */

static basic_block
thread_single_edge (edge e)
{
  basic_block bb = e->dest;
  edge eto = THREAD_TARGET (e);
  struct redirection_data rd;

  free (e->aux);
  e->aux = NULL;

  thread_stats.num_threaded_edges++;

  if (single_pred_p (bb))
    {
      /* If BB has just a single predecessor, we should only remove the
         control statements at its end, and the successors except for ETO.  */
      remove_ctrl_stmt_and_useless_edges (bb, eto->dest);

      /* And fixup the flags on the single remaining edge.  */
      eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
      eto->flags |= EDGE_FALLTHRU;

      return bb;
    }

  /* Otherwise, we need to create a copy.  */
  if (e->dest == eto->src)
    update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);

  rd.outgoing_edge = eto;

  create_block_for_threading (bb, &rd);
  remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
  create_edge_and_update_destination_phis (&rd, rd.dup_block);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
             e->src->index, e->dest->index, rd.dup_block->index);

  rd.dup_block->count = e->count;
  rd.dup_block->frequency = EDGE_FREQUENCY (e);
  single_succ_edge (rd.dup_block)->count = e->count;
  redirect_edge_and_branch (e, rd.dup_block);
  flush_pending_stmts (e);

  return rd.dup_block;
}
/* Callback for dfs_enumerate_from.  Returns true if BB is different
   from STOP and DBDS_CE_STOP.  */

static basic_block dbds_ce_stop;
static bool
dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
{
  return (bb != (const_basic_block) stop
          && bb != dbds_ce_stop);
}
/* Evaluates the dominance relationship of the latch of LOOP and BB, and
   returns the state.  */

enum bb_dom_status
{
  /* BB does not dominate latch of the LOOP.  */
  DOMST_NONDOMINATING,
  /* The LOOP is broken (there is no path from the header to its latch).  */
  DOMST_LOOP_BROKEN,
  /* BB dominates the latch of the LOOP.  */
  DOMST_DOMINATING
};
static enum bb_dom_status
determine_bb_domination_status (struct loop *loop, basic_block bb)
{
  basic_block *bblocks;
  unsigned nblocks, i;
  bool bb_reachable = false;
  edge_iterator ei;
  edge e;

  /* This function assumes BB is a successor of LOOP->header.
     If that is not the case return DOMST_NONDOMINATING which
     is always safe.  */
    {
      bool ok = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->src == loop->header)
            {
              ok = true;
              break;
            }
        }

      if (!ok)
        return DOMST_NONDOMINATING;
    }

  if (bb == loop->latch)
    return DOMST_DOMINATING;

  /* Check that BB dominates LOOP->latch, and that it is back-reachable
     from it.  */

  bblocks = XCNEWVEC (basic_block, loop->num_nodes);
  dbds_ce_stop = loop->header;
  nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
                                bblocks, loop->num_nodes, bb);
  for (i = 0; i < nblocks; i++)
    FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
      {
        if (e->src == loop->header)
          {
            free (bblocks);
            return DOMST_NONDOMINATING;
          }
        if (e->src == bb)
          bb_reachable = true;
      }

  free (bblocks);
  return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
}
/* Return true if BB is part of the new pre-header that is created
   when threading the latch to DATA.  */

static bool
def_split_header_continue_p (const_basic_block bb, const void *data)
{
  const_basic_block new_header = (const_basic_block) data;
  const struct loop *l;

  if (bb == new_header
      || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
    return false;
  for (l = bb->loop_father; l; l = loop_outer (l))
    if (l == new_header->loop_father)
      return true;
  return false;
}
/* Thread jumps through the header of LOOP.  Returns true if cfg changes.
   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
   to the inside of the loop.  */

static bool
thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
{
  basic_block header = loop->header;
  edge e, tgt_edge, latch = loop_latch_edge (loop);
  edge_iterator ei;
  basic_block tgt_bb, atgt_bb;
  enum bb_dom_status domst;

  /* We have already threaded through headers to exits, so all the threading
     requests now are to the inside of the loop.  We need to avoid creating
     irreducible regions (i.e., loops with more than one entry block), and
     also loops with several latch edges, or new subloops of the loop (although
     there are cases where it might be appropriate, it is difficult to decide,
     and doing it wrongly may confuse other optimizers).

     We could handle more general cases here.  However, the intention is to
     preserve some information about the loop, which is impossible if its
     structure changes significantly, in a way that is not well understood.
     Thus we only handle a few important special cases, in which updating
     the loop-carried information should also be feasible:

     1) Propagation of latch edge to a block that dominates the latch block
        of a loop.  This aims to handle the following idiom:

          first = 1;
          while (1)
            {
              if (first)
                initialize;
              first = 0;
              body;
            }

        After threading the latch edge, this becomes

          first = 1;
          if (first)
            initialize;
          while (1)
            {
              first = 0;
              body;
            }

        The original header of the loop is moved out of it, and we may thread
        the remaining edges through it without further constraints.

     2) All entry edges are propagated to a single basic block that dominates
        the latch block of the loop.  This aims to handle the following idiom
        (normally created for "for" loops):

          i = 0;
          while (1)
            {
              if (i >= 100)
                break;
              body;
              i++;
            }

        This becomes

          i = 0;
          while (1)
            {
              body;
              i++;
              if (i >= 100)
                break;
            }
  */
  /* Threading through the header won't improve the code if the header has just
     one successor.  */
  if (single_succ_p (header))
    goto fail;

  if (latch->aux)
    {
      if (THREAD_TARGET2 (latch))
        goto fail;
      tgt_edge = THREAD_TARGET (latch);
      tgt_bb = tgt_edge->dest;
    }
  else if (!may_peel_loop_headers
           && !redirection_block_p (loop->header))
    goto fail;
  else
    {
      tgt_bb = NULL;
      tgt_edge = NULL;
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (!e->aux)
            {
              if (e == latch)
                continue;

              /* If the latch is not threaded, and there is a header
                 edge that is not threaded, we would create a loop
                 with multiple entries.  */
              goto fail;
            }

          if (THREAD_TARGET2 (e))
            goto fail;
          tgt_edge = THREAD_TARGET (e);
          atgt_bb = tgt_edge->dest;
          if (!tgt_bb)
            tgt_bb = atgt_bb;
          /* Two targets of threading would make us create a loop
             with multiple entries.  */
          else if (tgt_bb != atgt_bb)
            goto fail;
        }

      if (!tgt_bb)
        {
          /* There are no threading requests.  */
          return false;
        }

      /* Redirecting to the empty loop latch is useless.  */
      if (tgt_bb == loop->latch
          && empty_block_p (loop->latch))
        goto fail;
    }

  /* The target block must dominate the loop latch, otherwise we would be
     creating a subloop.  */
  domst = determine_bb_domination_status (loop, tgt_bb);
  if (domst == DOMST_NONDOMINATING)
    goto fail;
  if (domst == DOMST_LOOP_BROKEN)
    {
      /* If the loop ceased to exist, mark it as such, and thread through its
         original header.  */
      loop->header = NULL;
      loop->latch = NULL;
      loops_state_set (LOOPS_NEED_FIXUP);
      return thread_block (header, false);
    }

  if (tgt_bb->loop_father->header == tgt_bb)
    {
      /* If the target of the threading is a header of a subloop, we need
         to create a preheader for it, so that the headers of the two loops
         do not merge.  */
      if (EDGE_COUNT (tgt_bb->preds) > 2)
        {
          tgt_bb = create_preheader (tgt_bb->loop_father, 0);
          gcc_assert (tgt_bb != NULL);
        }
      else
        tgt_bb = split_edge (tgt_edge);
    }

  if (latch->aux)
    {
      basic_block *bblocks;
      unsigned nblocks, i;

      /* First handle the case where the latch edge is redirected.  We are
         copying the loop header but not creating a multiple entry loop.
         Make the cfg manipulation code aware of that fact.  */
      set_loop_copy (loop, loop);
      loop->latch = thread_single_edge (latch);
      set_loop_copy (loop, NULL);
      gcc_assert (single_succ (loop->latch) == tgt_bb);
      loop->header = tgt_bb;

      /* Remove the new pre-header blocks from our loop.  */
      bblocks = XCNEWVEC (basic_block, loop->num_nodes);
      nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
                                    bblocks, loop->num_nodes, tgt_bb);
      for (i = 0; i < nblocks; i++)
        if (bblocks[i]->loop_father == loop)
          {
            remove_bb_from_loops (bblocks[i]);
            add_bb_to_loop (bblocks[i], loop_outer (loop));
          }
      free (bblocks);

      /* If the new header has multiple latches mark it so.  */
      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src->loop_father == loop
            && e->src != loop->latch)
          {
            loop->latch = NULL;
            loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
          }

      /* Cancel remaining threading requests that would make the
         loop a multiple entry loop.  */
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          edge e2;

          if (e->aux == NULL)
            continue;

          if (THREAD_TARGET2 (e))
            e2 = THREAD_TARGET2 (e);
          else
            e2 = THREAD_TARGET (e);

          if (e->src->loop_father != e2->dest->loop_father
              && e2->dest != loop->header)
            {
              free (e->aux);
              e->aux = NULL;
            }
        }

      /* Thread the remaining edges through the former header.  */
      thread_block (header, false);
    }
  else
    {
      basic_block new_preheader;

      /* Now consider the case where entry edges are redirected to the new
         entry block.  Remember one entry edge, so that we can find the new
         preheader (its destination after threading).  */
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (e->aux)
            break;
        }

      /* The duplicate of the header is the new preheader of the loop.  Ensure
         that it is placed correctly in the loop hierarchy.  */
      set_loop_copy (loop, loop_outer (loop));

      thread_block (header, false);
      set_loop_copy (loop, NULL);
      new_preheader = e->dest;

      /* Create the new latch block.  This is always necessary, as the latch
         must have only a single successor, but the original header had at
         least two successors.  */
      loop->latch = NULL;
      mfb_kj_edge = single_succ_edge (new_preheader);
      loop->header = mfb_kj_edge->dest;
      latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  return true;

fail:
  /* We failed to thread anything.  Cancel the requests.  */
  FOR_EACH_EDGE (e, ei, header->preds)
    {
      free (e->aux);
      e->aux = NULL;
    }
  return false;
}
/* E1 and E2 are edges into the same basic block.  Return TRUE if the
   PHI arguments associated with those edges are equal or there are no
   PHI arguments, otherwise return FALSE.  */

static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
  gimple_stmt_iterator gsi;
  int indx1 = e1->dest_idx;
  int indx2 = e2->dest_idx;

  for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
                            gimple_phi_arg_def (phi, indx2), 0))
        return false;
    }

  return true;
}
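/* Illustrative only: for two edges E1 and E2 into the same block, a PHI

     x_5 = PHI <a_1(E1), a_1(E2), b_3(E3)>

   satisfies phi_args_equal_on_edges (E1, E2), whereas

     x_5 = PHI <a_1(E1), b_3(E2), b_3(E3)>

   does not, since the arguments at E1's and E2's indexes differ.  */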
/* Walk through the registered jump threads and convert them into a
   form convenient for this pass.

   Any block which has incoming edges threaded to outgoing edges
   will have its entry in THREADED_BLOCK set.

   Any threaded edge will have its new outgoing edge stored in the
   original edge's AUX field.

   This form avoids the need to walk all the edges in the CFG to
   discover blocks which need processing and avoids unnecessary
   hash table lookups to map from threaded edge to new target.  */
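/* Illustrative only: after the conversion below, a simple request
   (A->B, B->C, NULL) leaves A->B with THREAD_TARGET == B->C and
   THREAD_TARGET2 == NULL, while a joiner request (A->B, B->C, C->D)
   leaves THREAD_TARGET == B->C and THREAD_TARGET2 == C->D.  In both
   cases B's index is set in the working bitmap.  */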
static void
mark_threaded_blocks (bitmap threaded_blocks)
{
  unsigned int i;
  bitmap_iterator bi;
  bitmap tmp = BITMAP_ALLOC (NULL);
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* It is possible to have jump threads in which one is a subpath
     of the other.  i.e., (A, B), (B, C), (C, D) where B is a joiner
     block and (B, C), (C, D) where no joiner block exists.

     When this occurs ignore the jump thread request with the joiner
     block.  It's totally subsumed by the simpler jump thread request.

     This results in less block copying and simpler CFGs.  More importantly,
     when we duplicate the joiner block, B, in this case we will create
     a new threading opportunity that we wouldn't be able to optimize
     until the next jump threading iteration.

     So first convert the jump thread requests which do not require a
     joiner block.  */
  for (i = 0; i < threaded_edges.length (); i += 3)
    {
      edge e = threaded_edges[i];

      if (threaded_edges[i + 2] == NULL)
        {
          edge *x = XNEWVEC (edge, 2);

          e->aux = x;
          THREAD_TARGET (e) = threaded_edges[i + 1];
          THREAD_TARGET2 (e) = NULL;
          bitmap_set_bit (tmp, e->dest->index);
        }
    }

  /* Now iterate again, converting cases where we threaded through
     a joiner block, but ignoring those where we have already
     threaded through the joiner block.  */
  for (i = 0; i < threaded_edges.length (); i += 3)
    {
      edge e = threaded_edges[i];

      if (threaded_edges[i + 2] != NULL
          && threaded_edges[i + 1]->aux == NULL)
        {
          edge *x = XNEWVEC (edge, 2);

          e->aux = x;
          THREAD_TARGET (e) = threaded_edges[i + 1];
          THREAD_TARGET2 (e) = threaded_edges[i + 2];
          bitmap_set_bit (tmp, e->dest->index);
        }
    }

  /* If we have a joiner block (J) which has two successors S1 and S2 and
     we are threading through S1 and the final destination of the thread
     is S2, then we must verify that any PHI nodes in S2 have the same
     PHI arguments for the edge J->S2 and J->S1->...->S2.

     We used to detect this prior to registering the jump thread, but
     that prohibits propagation of edge equivalences into non-dominated
     PHI nodes as the equivalency test might occur before propagation.

     This works for now, but will need improvement as part of the FSA
     optimization.

     Note since we've moved the thread request data to the edges,
     we have to iterate on those rather than the threaded_edges vector.  */
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      bb = BASIC_BLOCK (i);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->aux)
            {
              bool have_joiner = THREAD_TARGET2 (e) != NULL;

              if (have_joiner)
                {
                  basic_block joiner = e->dest;
                  edge final_edge = THREAD_TARGET2 (e);
                  basic_block final_dest = final_edge->dest;
                  edge e2 = find_edge (joiner, final_dest);

                  if (e2 && !phi_args_equal_on_edges (e2, final_edge))
                    {
                      free (e->aux);
                      e->aux = NULL;
                    }
                }
            }
        }
    }

  /* If optimizing for size, only thread through a block if we don't have
     to duplicate it or it's an otherwise empty redirection block.  */
  if (optimize_function_for_size_p (cfun))
    {
      EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
        {
          bb = BASIC_BLOCK (i);
          if (EDGE_COUNT (bb->preds) > 1
              && !redirection_block_p (bb))
            {
              FOR_EACH_EDGE (e, ei, bb->preds)
                {
                  free (e->aux);
                  e->aux = NULL;
                }
            }
          else
            bitmap_set_bit (threaded_blocks, i);
        }
    }
  else
    bitmap_copy (threaded_blocks, tmp);

  BITMAP_FREE (tmp);
}
/* Walk through all blocks and thread incoming edges to the appropriate
   outgoing edge for each edge pair recorded in THREADED_EDGES.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
   loop headers if it does not simplify the loop.

   Returns true if one or more edges were threaded, false otherwise.  */
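/* A sketch of the expected calling sequence (illustrative only; the
   real drivers are client passes such as DOM and VRP):

     // ... the pass calls register_jump_thread () as it discovers
     // opportunities, then once per pass invocation:
     if (thread_through_all_blocks (false))
       {
         // Caller must fix dominance info and rewrite duplicated
         // SSA_NAMEs back into SSA form, e.g. via update_ssa.
         update_ssa (TODO_update_ssa);
       }
*/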
bool
thread_through_all_blocks (bool may_peel_loop_headers)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;
  bitmap threaded_blocks;
  struct loop *loop;
  loop_iterator li;

  /* We must know about loops in order to preserve them.  */
  gcc_assert (current_loops != NULL);

  if (!threaded_edges.exists ())
    return false;

  threaded_blocks = BITMAP_ALLOC (NULL);
  memset (&thread_stats, 0, sizeof (thread_stats));

  mark_threaded_blocks (threaded_blocks);

  initialize_original_copy_tables ();

  /* First perform the threading requests that do not affect
     loop structure.  */
  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK (i);

      if (EDGE_COUNT (bb->preds) > 0)
        retval |= thread_block (bb, true);
    }

  /* Then perform the threading through loop headers.  We start with the
     innermost loop, so that the changes in cfg we perform won't affect
     further threading.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (!loop->header
          || !bitmap_bit_p (threaded_blocks, loop->header->index))
        continue;

      retval |= thread_through_loop_header (loop, may_peel_loop_headers);
    }

  statistics_counter_event (cfun, "Jumps threaded",
                            thread_stats.num_threaded_edges);

  free_original_copy_tables ();

  BITMAP_FREE (threaded_blocks);
  threaded_blocks = NULL;
  threaded_edges.release ();

  if (retval)
    loops_state_set (LOOPS_NEED_FIXUP);

  return retval;
}
/* Dump a jump threading path, including annotations about each
   edge in the path.  */

static void
dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path)
{
  fprintf (dump_file,
           "  Registering jump thread: (%d, %d) incoming edge; ",
           path[0]->e->src->index, path[0]->e->dest->index);

  for (unsigned int i = 1; i < path.length (); i++)
    {
      /* We can get paths with a NULL edge when the final destination
         of a jump thread turns out to be a constant address.  We dump
         those paths when debugging, so we have to be prepared for that
         possibility here.  */
      if (path[i]->e == NULL)
        continue;

      if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
        fprintf (dump_file, " (%d, %d) joiner; ",
                 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_COPY_SRC_BLOCK)
        fprintf (dump_file, " (%d, %d) normal;",
                 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
        fprintf (dump_file, " (%d, %d) nocopy;",
                 path[i]->e->src->index, path[i]->e->dest->index);
    }
  fputc ('\n', dump_file);
}
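/* Illustrative only: for a three-edge path the routine above emits a
   dump line of the form

     Registering jump thread: (2, 4) incoming edge;  (4, 7) joiner;  (7, 9) normal;

   (block numbers invented for the example).  */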
/* Register a jump threading opportunity.  We queue up all the jump
   threading opportunities discovered by a pass and update the CFG
   and SSA form all at once.

   PATH describes the sequence of edges we thread through: the first
   entry is the incoming edge we can thread, and the remaining entries
   record the blocks the jump passes through on the way to its final
   destination.  */

void
register_jump_thread (vec<jump_thread_edge *> path)
{
  /* First make sure there are no NULL outgoing edges on the jump threading
     path.  That can happen for jumping to a constant address.  */
  for (unsigned int i = 0; i < path.length (); i++)
    if (path[i]->e == NULL)
      {
        if (dump_file && (dump_flags & TDF_DETAILS))
          {
            fprintf (dump_file,
                     "Found NULL edge in jump threading path.  Cancelling jump thread:\n");
            dump_jump_thread_path (dump_file, path);
          }
        return;
      }

  if (!threaded_edges.exists ())
    threaded_edges.create (15);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_jump_thread_path (dump_file, path);

  /* The first entry in the vector is always the start of the
     jump threading path.  */
  threaded_edges.safe_push (path[0]->e);

  /* In our 3-edge representation, the joiner, if it exists, is always the
     2nd edge and the final block on the path is the 3rd edge.  If no
     joiner exists, then the final block on the path is the 2nd edge
     and the 3rd edge is NULL.

     With upcoming improvements, we're going to be holding onto the entire
     path, so we'll be able to clean this wart up shortly.  */
  if (path[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
    {
      threaded_edges.safe_push (path[1]->e);
      threaded_edges.safe_push (path.last ()->e);
    }
  else
    {
      threaded_edges.safe_push (path.last ()->e);
      threaded_edges.safe_push (NULL);
    }
}
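/* Illustrative only: a joiner path A->B, B->C, ..., C->D is recorded in
   THREADED_EDGES as the triple [A->B, B->C, C->D], while a plain path
   A->B, ..., C->D is recorded as [A->B, C->D, NULL].  */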