1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tree.h"
24 #include "flags.h"
25 #include "basic-block.h"
26 #include "function.h"
27 #include "hash-table.h"
28 #include "tree-ssa-alias.h"
29 #include "internal-fn.h"
30 #include "gimple-expr.h"
31 #include "is-a.h"
32 #include "gimple.h"
33 #include "gimple-iterator.h"
34 #include "gimple-ssa.h"
35 #include "tree-phinodes.h"
36 #include "tree-ssa.h"
37 #include "tree-ssa-threadupdate.h"
38 #include "ssa-iterators.h"
39 #include "dumpfile.h"
40 #include "cfgloop.h"
41 #include "dbgcnt.h"
42 #include "tree-cfg.h"
43 #include "tree-pass.h"
45 /* Given a block B, update the CFG and SSA graph to reflect redirecting
46 one or more in-edges to B to instead reach the destination of an
47 out-edge from B while preserving any side effects in B.
49 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
50 side effects of executing B.
52 1. Make a copy of B (including its outgoing edges and statements). Call
53 the copy B'. Note B' has no incoming edges or PHIs at this time.
55 2. Remove the control statement at the end of B' and all outgoing edges
56 except B'->C.
58 3. Add a new argument to each PHI in C with the same value as the existing
59 argument associated with edge B->C. Associate the new PHI arguments
60 with the edge B'->C.
62 4. For each PHI in B, find or create a PHI in B' with an identical
63 PHI_RESULT. Add an argument to the PHI in B' which has the same
64 value as the PHI in B associated with the edge A->B. Associate
65 the new argument in the PHI in B' with the edge A->B.
67 5. Change the edge A->B to A->B'.
69 5a. This automatically deletes any PHI arguments associated with the
70 edge A->B in B.
72 5b. This automatically associates each new argument added in step 4
73 with the edge A->B'.
75 6. Repeat for other incoming edges into B.
77 7. Put the duplicated resources in B and all the B' blocks into SSA form.
79 Note that block duplication can be minimized by first collecting the
80 set of unique destination blocks that the incoming edges should
81 be threaded to.
83 We reduce the number of edges and statements we create by not copying all
84 the outgoing edges and the control statement in step #1. We instead create
85 a template block without the outgoing edges and duplicate the template.
87 Another case this code handles is threading through a "joiner" block. In
88 this case, we do not know the destination of the joiner block, but one
89 of the outgoing edges from the joiner block leads to a threadable path. This
90 case largely works as outlined above, except the duplicate of the joiner
91 block still contains a full set of outgoing edges and its control statement.
92 We just redirect one of its outgoing edges to our jump threading path. */
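/* The steps above are easiest to see on a tiny example.  The sketch
   below is for illustration only and is not part of this pass: blocks
   are plain ints, each with a single recorded successor, and the names
   (succ, copy_block, thread_edge) are hypothetical -- none of them
   exist in GCC.  It shows only the core of steps #1, #2, #5 and #6:
   make a copy B' of B whose lone successor is C, then retarget A->B
   to A->B'.  */
#if 0
#include <stdio.h>

#define MAX_BLOCKS 16

static int succ[MAX_BLOCKS];    /* succ[b] is the single successor of b.  */
static int nblocks = 3;         /* A = 0, B = 1, C = 2.  */

/* Steps #1/#2: create B', a copy of B whose only outgoing edge is B'->C.  */

static int
copy_block (int b)
{
  int copy = nblocks++;
  succ[copy] = succ[b];
  return copy;
}

/* Steps #5/#6: retarget the incoming edge so that A->B becomes A->B'.  */

static void
thread_edge (int a, int b)
{
  succ[a] = copy_block (b);
}

int
main (void)
{
  succ[0] = 1;                  /* A->B */
  succ[1] = 2;                  /* B->C */
  thread_edge (0, 1);
  printf ("A now reaches block %d, which falls through to block %d\n",
          succ[0], succ[succ[0]]);
  return 0;
}
#endif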
95 /* Steps #5 and #6 of the above algorithm are best implemented by walking
96 all the incoming edges which thread to the same destination edge at
97 the same time. That avoids lots of table lookups to get information
98 for the destination edge.
100 To realize that implementation we create a list of incoming edges
101 which thread to the same outgoing edge. Thus to implement steps
102 #5 and #6 we traverse our hash table of outgoing edge information.
103 For each entry we walk the list of incoming edges which thread to
104 the current outgoing edge. */
106 struct el
108 edge e;
109 struct el *next;
112 /* Main data structure recording information regarding B's duplicate
113 blocks. */
115 /* We need to efficiently record the unique thread destinations of this
116 block and specific information associated with those destinations. We
117 may have many incoming edges threaded to the same outgoing edge. This
118 can be naturally implemented with a hash table. */
120 struct redirection_data : typed_free_remove<redirection_data>
122 /* We support wiring up two block duplicates in a jump threading path.
124 One is a normal block copy where we remove the control statement
125 and wire up its single remaining outgoing edge to the thread path.
127 The other is a joiner block where we leave the control statement
128 in place, but wire one of the outgoing edges to a thread path.
130 In theory we could have multiple block duplicates in a jump
131 threading path, but I haven't tried that.
133 The duplicate blocks appear in this array in the same order in
134 which they appear in the jump thread path. */
135 basic_block dup_blocks[2];
137 /* The jump threading path. */
138 vec<jump_thread_edge *> *path;
140 /* A list of incoming edges which we want to thread to the
141 same path. */
142 struct el *incoming_edges;
144 /* hash_table support. */
145 typedef redirection_data value_type;
146 typedef redirection_data compare_type;
147 static inline hashval_t hash (const value_type *);
148 static inline int equal (const value_type *, const compare_type *);
151 /* Simple hashing function. For any given incoming edge E, we're going
152 to be most concerned with the final destination of its jump thread
153 path. So hash on the block index of the final edge in the path. */
155 inline hashval_t
156 redirection_data::hash (const value_type *p)
158 vec<jump_thread_edge *> *path = p->path;
159 return path->last ()->e->dest->index;
162 /* Given two hash table entries, return true if they have the same
163 jump threading path. */
164 inline int
165 redirection_data::equal (const value_type *p1, const compare_type *p2)
167 vec<jump_thread_edge *> *path1 = p1->path;
168 vec<jump_thread_edge *> *path2 = p2->path;
170 if (path1->length () != path2->length ())
171 return false;
173 for (unsigned int i = 1; i < path1->length (); i++)
175 if ((*path1)[i]->type != (*path2)[i]->type
176 || (*path1)[i]->e != (*path2)[i]->e)
177 return false;
180 return true;
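/* Illustration only: a stand-alone sketch of the lookup scheme the hash
   and equality functions above support -- entries are keyed by the block
   index of the jump thread path's final destination, and each entry
   carries a list of the incoming edges that thread to it.  Every name
   and type below is hypothetical (edges are just ints here); the real
   code uses GCC's hash_table with redirection_data directly.  */
#if 0
#include <stdlib.h>

struct incoming
{
  int edge_id;
  struct incoming *next;
};

struct entry
{
  int final_dest_index;         /* The hash key, as in redirection_data::hash.  */
  struct incoming *incoming_edges;
  struct entry *next;           /* Bucket chaining.  */
};

#define NBUCKETS 64
static struct entry *buckets[NBUCKETS];

/* Find the entry for FINAL_DEST_INDEX, creating it if needed, and record
   EDGE_ID as one more incoming edge threading to that destination.  */

static struct entry *
lookup_or_insert (int final_dest_index, int edge_id)
{
  unsigned slot = (unsigned) final_dest_index % NBUCKETS;
  struct entry *e;
  struct incoming *in;

  for (e = buckets[slot]; e; e = e->next)
    if (e->final_dest_index == final_dest_index)
      break;

  if (!e)
    {
      e = (struct entry *) calloc (1, sizeof (*e));
      e->final_dest_index = final_dest_index;
      e->next = buckets[slot];
      buckets[slot] = e;
    }

  in = (struct incoming *) calloc (1, sizeof (*in));
  in->edge_id = edge_id;
  in->next = e->incoming_edges;
  e->incoming_edges = in;
  return e;
}
#endif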
183 /* Data structure of information to pass to hash table traversal routines. */
184 struct ssa_local_info_t
186 /* The current block we are working on. */
187 basic_block bb;
189 /* We only create a template block for the first duplicated block in a
190 jump threading path as we may need many duplicates of that block.
192 The second duplicate block in a path is specific to that path. Creating
193 and sharing a template for that block is considerably more difficult. */
194 basic_block template_block;
196 /* TRUE if we thread one or more jumps, FALSE otherwise. */
197 bool jumps_threaded;
200 /* Passes which use the jump threading code register jump threading
201 opportunities as they are discovered. We keep the registered
202 jump threading opportunities in this vector, where each entry is a
203 jump thread path (a vector of jump_thread_edge records). */
204 static vec<vec<jump_thread_edge *> *> paths;
206 /* When we start updating the CFG for threading, data necessary for jump
207 threading is attached to the AUX field for the incoming edge. Use these
208 macros to access the underlying structure attached to the AUX field. */
209 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
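/* Usage sketch, for illustration only: how a registered path ends up on
   an edge's AUX field and is read back with THREAD_PATH.  E and PATH
   stand for an edge and an already-built jump thread path; both lines
   mirror code that appears later in this file rather than adding any
   new interface.  */
#if 0
  e->aux = (void *) path;                        /* as in mark_threaded_blocks */
  vec<jump_thread_edge *> *p = THREAD_PATH (e);  /* as in the updating code */
#endif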
211 /* Jump threading statistics. */
213 struct thread_stats_d
215 unsigned long num_threaded_edges;
218 struct thread_stats_d thread_stats;
221 /* Remove the last statement in block BB if it is a control statement.
222 Also remove all outgoing edges except the edge which reaches DEST_BB.
223 If DEST_BB is NULL, then remove all outgoing edges. */
225 static void
226 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
228 gimple_stmt_iterator gsi;
229 edge e;
230 edge_iterator ei;
232 gsi = gsi_last_bb (bb);
234 /* If the duplicate ends with a control statement, then remove it.
236 Note that if we are duplicating the template block rather than the
237 original basic block, then the duplicate might not have any real
238 statements in it. */
239 if (!gsi_end_p (gsi)
240 && gsi_stmt (gsi)
241 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
242 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
243 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
244 gsi_remove (&gsi, true);
246 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
248 if (e->dest != dest_bb)
249 remove_edge (e);
250 else
251 ei_next (&ei);
255 /* Create a duplicate of BB. Record the duplicate block in an array
256 indexed by COUNT stored in RD. */
258 static void
259 create_block_for_threading (basic_block bb,
260 struct redirection_data *rd,
261 unsigned int count)
263 edge_iterator ei;
264 edge e;
266 /* We can use the generic block duplication code and simply remove
267 the stuff we do not need. */
268 rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
270 FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
271 e->aux = NULL;
273 /* Zero out the profile, since the block is unreachable for now. */
274 rd->dup_blocks[count]->frequency = 0;
275 rd->dup_blocks[count]->count = 0;
278 /* Main data structure to hold information for duplicates of BB. */
280 static hash_table <redirection_data> redirection_data;
282 /* Given an outgoing edge E, look up and return its entry in our hash table.
284 If INSERT is true, then we insert the entry into the hash table if
285 it is not already present. INCOMING_EDGE is added to the list of incoming
286 edges associated with E in the hash table. */
288 static struct redirection_data *
289 lookup_redirection_data (edge e, enum insert_option insert)
291 struct redirection_data **slot;
292 struct redirection_data *elt;
293 vec<jump_thread_edge *> *path = THREAD_PATH (e);
295 /* Build a hash table element so we can see if E is already
296 in the table. */
297 elt = XNEW (struct redirection_data);
298 elt->path = path;
299 elt->dup_blocks[0] = NULL;
300 elt->dup_blocks[1] = NULL;
301 elt->incoming_edges = NULL;
303 slot = redirection_data.find_slot (elt, insert);
305 /* This will only happen if INSERT is false and the entry is not
306 in the hash table. */
307 if (slot == NULL)
309 free (elt);
310 return NULL;
313 /* This will only happen if E was not in the hash table and
314 INSERT is true. */
315 if (*slot == NULL)
317 *slot = elt;
318 elt->incoming_edges = XNEW (struct el);
319 elt->incoming_edges->e = e;
320 elt->incoming_edges->next = NULL;
321 return elt;
323 /* E was in the hash table. */
324 else
326 /* Free ELT as we do not need it anymore, we will extract the
327 relevant entry from the hash table itself. */
328 free (elt);
330 /* Get the entry stored in the hash table. */
331 elt = *slot;
333 /* If insertion was requested, then we need to add INCOMING_EDGE
334 to the list of incoming edges associated with E. */
335 if (insert)
337 struct el *el = XNEW (struct el);
338 el->next = elt->incoming_edges;
339 el->e = e;
340 elt->incoming_edges = el;
343 return elt;
347 /* Similar to copy_phi_args, except that the PHI arg already exists;
348 it just does not have a value associated with it. */
350 static void
351 copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
353 int src_idx = src_e->dest_idx;
354 int tgt_idx = tgt_e->dest_idx;
356 /* Iterate over each PHI in e->dest. */
357 for (gimple_stmt_iterator gsi = gsi_start_phis (src_e->dest),
358 gsi2 = gsi_start_phis (tgt_e->dest);
359 !gsi_end_p (gsi);
360 gsi_next (&gsi), gsi_next (&gsi2))
362 gimple src_phi = gsi_stmt (gsi);
363 gimple dest_phi = gsi_stmt (gsi2);
364 tree val = gimple_phi_arg_def (src_phi, src_idx);
365 source_location locus = gimple_phi_arg_location (src_phi, src_idx);
367 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
368 gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
372 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E. */
374 static void
375 copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
377 gimple_stmt_iterator gsi;
378 int src_indx = src_e->dest_idx;
380 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
382 gimple phi = gsi_stmt (gsi);
383 source_location locus = gimple_phi_arg_location (phi, src_indx);
384 add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
388 /* We have recently made a copy of ORIG_BB, including its outgoing
389 edges. The copy is NEW_BB. Every PHI node in every direct successor of
390 ORIG_BB has a new argument associated with edge from NEW_BB to the
391 successor. Initialize the PHI argument so that it is equal to the PHI
392 argument associated with the edge from ORIG_BB to the successor. */
394 static void
395 update_destination_phis (basic_block orig_bb, basic_block new_bb)
397 edge_iterator ei;
398 edge e;
400 FOR_EACH_EDGE (e, ei, orig_bb->succs)
402 edge e2 = find_edge (new_bb, e->dest);
403 copy_phi_args (e->dest, e, e2);
407 /* Given a duplicate block and its single destination (both stored
408 in RD), create an edge between the duplicate and its single
409 destination.
411 Add an additional argument to any PHI nodes at the single
412 destination. */
414 static void
415 create_edge_and_update_destination_phis (struct redirection_data *rd,
416 basic_block bb)
418 edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
420 rescan_loop_exit (e, true, false);
421 e->probability = REG_BR_PROB_BASE;
422 e->count = bb->count;
424 /* We have to copy path -- which means creating a new vector as well
425 as all the jump_thread_edge entries. */
426 if (rd->path->last ()->e->aux)
428 vec<jump_thread_edge *> *path = THREAD_PATH (rd->path->last ()->e);
429 vec<jump_thread_edge *> *copy = new vec<jump_thread_edge *> ();
431 /* Sadly, the elements of the vector are pointers and need to
432 be copied as well. */
433 for (unsigned int i = 0; i < path->length (); i++)
435 jump_thread_edge *x
436 = new jump_thread_edge ((*path)[i]->e, (*path)[i]->type);
437 copy->safe_push (x);
439 e->aux = (void *)copy;
441 else
443 e->aux = NULL;
446 /* If there are any PHI nodes at the destination of the outgoing edge
447 from the duplicate block, then we will need to add a new argument
448 to them. The argument should have the same value as the argument
449 associated with the outgoing edge stored in RD. */
450 copy_phi_args (e->dest, rd->path->last ()->e, e);
453 /* Look through PATH beginning at START and return TRUE if there are
454 any additional blocks that need to be duplicated. Otherwise,
455 return FALSE. */
456 static bool
457 any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
458 unsigned int start)
460 for (unsigned int i = start + 1; i < path->length (); i++)
462 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
463 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
464 return true;
466 return false;
469 /* Wire up the outgoing edges from the duplicate blocks and
470 update any PHIs as needed. */
471 void
472 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
473 ssa_local_info_t *local_info)
475 edge e = rd->incoming_edges->e;
476 vec<jump_thread_edge *> *path = THREAD_PATH (e);
478 for (unsigned int count = 0, i = 1; i < path->length (); i++)
480 /* If we were threading through a joiner block, then we want
481 to keep its control statement and redirect an outgoing edge.
482 Else we want to remove the control statement & edges, then create
483 a new outgoing edge. In both cases we may need to update PHIs. */
484 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
486 edge victim;
487 edge e2;
489 /* This updates the PHIs at the destination of the duplicate
490 block. */
491 update_destination_phis (local_info->bb, rd->dup_blocks[count]);
493 /* Find the edge from the duplicate block to the block we're
494 threading through. That's the edge we want to redirect. */
495 victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
497 /* If there are no remaining blocks on the path to duplicate,
498 then redirect VICTIM to the final destination of the jump
499 threading path. */
500 if (!any_remaining_duplicated_blocks (path, i))
502 e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
503 e2->count = path->last ()->e->count;
504 /* If we redirected the edge, then we need to copy PHI arguments
505 at the target. If the edge already existed (e2 != victim
506 case), then the PHIs in the target already have the correct
507 arguments. */
508 if (e2 == victim)
509 copy_phi_args (e2->dest, path->last ()->e, e2);
511 else
513 /* Redirect VICTIM to the next duplicated block in the path. */
514 e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
516 /* We need to update the PHIs in the next duplicated block. We
517 want the new PHI args to have the same value as they had
518 in the source of the next duplicate block.
520 Thus, we need to know which edge we traversed into the
521 source of the duplicate. Furthermore, we may have
522 traversed many edges to reach the source of the duplicate.
524 Walk through the path starting at element I until we
525 hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
526 the edge from the prior element. */
527 for (unsigned int j = i + 1; j < path->length (); j++)
529 if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
531 copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
532 break;
536 count++;
538 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
540 remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
541 create_edge_and_update_destination_phis (rd, rd->dup_blocks[count]);
542 if (count == 1)
543 single_succ_edge (rd->dup_blocks[1])->aux = NULL;
544 count++;
549 /* Hash table traversal callback routine to create duplicate blocks. */
552 ssa_create_duplicates (struct redirection_data **slot,
553 ssa_local_info_t *local_info)
555 struct redirection_data *rd = *slot;
557 /* The second duplicated block in a jump threading path is specific
558 to the path. So it gets stored in RD rather than in LOCAL_DATA.
560 Each time we're called, we have to look through the path and see
561 if a second block needs to be duplicated.
563 Note the search starts with the third edge on the path. The first
564 edge is the incoming edge, the second edge always has its source
565 duplicated. Thus we start our search with the third edge. */
566 vec<jump_thread_edge *> *path = rd->path;
567 for (unsigned int i = 2; i < path->length (); i++)
569 if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
570 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
572 create_block_for_threading ((*path)[i]->e->src, rd, 1);
573 break;
577 /* Create a template block if we have not done so already. Otherwise
578 use the template to create a new block. */
579 if (local_info->template_block == NULL)
581 create_block_for_threading ((*path)[1]->e->src, rd, 0);
582 local_info->template_block = rd->dup_blocks[0];
584 /* We do not create any outgoing edges for the template. We will
585 take care of that in a later traversal. That way we do not
586 create edges that are going to just be deleted. */
588 else
590 create_block_for_threading (local_info->template_block, rd, 0);
592 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
593 block. */
594 ssa_fix_duplicate_block_edges (rd, local_info);
597 /* Keep walking the hash table. */
598 return 1;
601 /* We did not create any outgoing edges for the template block during
602 block creation. This hash table traversal callback creates the
603 outgoing edge for the template block. */
605 inline int
606 ssa_fixup_template_block (struct redirection_data **slot,
607 ssa_local_info_t *local_info)
609 struct redirection_data *rd = *slot;
611 /* If this is the template block halt the traversal after updating
612 it appropriately.
614 If we were threading through a joiner block, then we want
615 to keep its control statement and redirect an outgoing edge.
616 Else we want to remove the control statement & edges, then create
617 a new outgoing edge. In both cases we may need to update PHIs. */
618 if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
620 ssa_fix_duplicate_block_edges (rd, local_info);
621 return 0;
624 return 1;
627 /* Hash table traversal callback to redirect each incoming edge
628 associated with this hash table element to its new destination. */
631 ssa_redirect_edges (struct redirection_data **slot,
632 ssa_local_info_t *local_info)
634 struct redirection_data *rd = *slot;
635 struct el *next, *el;
637 /* Walk over all the incoming edges associated with this
638 hash table entry. */
639 for (el = rd->incoming_edges; el; el = next)
641 edge e = el->e;
642 vec<jump_thread_edge *> *path = THREAD_PATH (e);
644 /* Go ahead and free this element from the list. Doing this now
645 avoids the need for another list walk when we destroy the hash
646 table. */
647 next = el->next;
648 free (el);
650 thread_stats.num_threaded_edges++;
652 if (rd->dup_blocks[0])
654 edge e2;
656 if (dump_file && (dump_flags & TDF_DETAILS))
657 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
658 e->src->index, e->dest->index, rd->dup_blocks[0]->index);
660 rd->dup_blocks[0]->count += e->count;
662 /* Excessive jump threading may make frequencies large enough so
663 the computation overflows. */
664 if (rd->dup_blocks[0]->frequency < BB_FREQ_MAX * 2)
665 rd->dup_blocks[0]->frequency += EDGE_FREQUENCY (e);
667 /* In the case of threading through a joiner block, the outgoing
668 edges from the duplicate block were updated when they were
669 redirected during ssa_fix_duplicate_block_edges. */
670 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
671 EDGE_SUCC (rd->dup_blocks[0], 0)->count += e->count;
673 /* Redirect the incoming edge (possibly to the joiner block) to the
674 appropriate duplicate block. */
675 e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
676 gcc_assert (e == e2);
677 flush_pending_stmts (e2);
680 /* Go ahead and clear E->aux. It's not needed anymore and failure
681 to clear it will cause all kinds of unpleasant problems later. */
682 delete_jump_thread_path (path);
683 e->aux = NULL;
687 /* Indicate that we actually threaded one or more jumps. */
688 if (rd->incoming_edges)
689 local_info->jumps_threaded = true;
691 return 1;
694 /* Return true if this block has no executable statements other than
695 a simple ctrl flow instruction. When the number of outgoing edges
696 is one, this is equivalent to a "forwarder" block. */
698 static bool
699 redirection_block_p (basic_block bb)
701 gimple_stmt_iterator gsi;
703 /* Advance to the first executable statement. */
704 gsi = gsi_start_bb (bb);
705 while (!gsi_end_p (gsi)
706 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
707 || is_gimple_debug (gsi_stmt (gsi))
708 || gimple_nop_p (gsi_stmt (gsi))))
709 gsi_next (&gsi);
711 /* Check if this is an empty block. */
712 if (gsi_end_p (gsi))
713 return true;
715 /* Test that we've reached the terminating control statement. */
716 return gsi_stmt (gsi)
717 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
718 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
719 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
722 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
723 is reached via one or more specific incoming edges, we know which
724 outgoing edge from BB will be traversed.
726 We want to redirect those incoming edges to the target of the
727 appropriate outgoing edge. Doing so avoids a conditional branch
728 and may expose new optimization opportunities. Note that we have
729 to update dominator tree and SSA graph after such changes.
731 The key to keeping the SSA graph update manageable is to duplicate
732 the side effects occurring in BB so that those side effects still
733 occur on the paths which bypass BB after redirecting edges.
735 We accomplish this by creating duplicates of BB and arranging for
736 the duplicates to unconditionally pass control to one specific
737 successor of BB. We then revector the incoming edges into BB to
738 the appropriate duplicate of BB.
740 If NOLOOP_ONLY is true, we only perform the threading as long as it
741 does not affect the structure of the loops in a nontrivial way.
743 If JOINERS is true, then thread through joiner blocks as well. */
745 static bool
746 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
748 /* E is an incoming edge into BB that we may or may not want to
749 redirect to a duplicate of BB. */
750 edge e, e2;
751 edge_iterator ei;
752 ssa_local_info_t local_info;
753 struct loop *loop = bb->loop_father;
755 /* To avoid scanning a linear array for the element we need, we instead
756 use a hash table. For normal code there should be no noticeable
757 difference. However, if we have a block with a large number of
758 incoming and outgoing edges such linear searches can get expensive. */
759 redirection_data.create (EDGE_COUNT (bb->succs));
761 /* If we thread the latch of the loop to its exit, the loop ceases to
762 exist. Make sure we do not restrict ourselves in order to preserve
763 this loop. */
764 if (loop->header == bb)
766 e = loop_latch_edge (loop);
767 vec<jump_thread_edge *> *path = THREAD_PATH (e);
769 if (path
770 && (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && joiners)
771 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && !joiners)))
773 for (unsigned int i = 1; i < path->length (); i++)
775 edge e2 = (*path)[i]->e;
777 if (loop_exit_edge_p (loop, e2))
779 loop->header = NULL;
780 loop->latch = NULL;
781 loops_state_set (LOOPS_NEED_FIXUP);
787 /* Record each unique threaded destination into a hash table for
788 efficient lookups. */
789 FOR_EACH_EDGE (e, ei, bb->preds)
791 if (e->aux == NULL)
792 continue;
794 vec<jump_thread_edge *> *path = THREAD_PATH (e);
796 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
797 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
798 continue;
800 e2 = path->last ()->e;
801 if (!e2 || noloop_only)
803 /* If NOLOOP_ONLY is true, we only allow threading through the
804 header of a loop to exit edges. */
806 /* One case occurs when there was a loop header buried in a jump
807 threading path that crosses loop boundaries. We do not try
808 and thread this elsewhere, so just cancel the jump threading
809 request by clearing the AUX field now. */
810 if ((bb->loop_father != e2->src->loop_father
811 && !loop_exit_edge_p (e2->src->loop_father, e2))
812 || (e2->src->loop_father != e2->dest->loop_father
813 && !loop_exit_edge_p (e2->src->loop_father, e2)))
815 /* Since this case is not handled by our special code
816 to thread through a loop header, we must explicitly
817 cancel the threading request here. */
818 delete_jump_thread_path (path);
819 e->aux = NULL;
820 continue;
823 /* Another case occurs when trying to thread through our
824 own loop header, possibly from inside the loop. We will
825 thread these later. */
826 unsigned int i;
827 for (i = 1; i < path->length (); i++)
829 if ((*path)[i]->e->src == bb->loop_father->header
830 && (!loop_exit_edge_p (bb->loop_father, e2)
831 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
832 break;
835 if (i != path->length ())
836 continue;
839 if (e->dest == e2->src)
840 update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
841 e->count, (*THREAD_PATH (e))[1]->e);
843 /* Insert the outgoing edge into the hash table if it is not
844 already in the hash table. */
845 lookup_redirection_data (e, INSERT);
848 /* We do not update dominance info. */
849 free_dominance_info (CDI_DOMINATORS);
851 /* We know we only thread through the loop header to loop exits.
852 Let the basic block duplication hook know we are not creating
853 a multiple entry loop. */
854 if (noloop_only
855 && bb == bb->loop_father->header)
856 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
858 /* Now create duplicates of BB.
860 Note that for a block with a high outgoing degree we can waste
861 a lot of time and memory creating and destroying useless edges.
863 So we first duplicate BB and remove the control structure at the
864 tail of the duplicate as well as all outgoing edges from the
865 duplicate. We then use that duplicate block as a template for
866 the rest of the duplicates. */
867 local_info.template_block = NULL;
868 local_info.bb = bb;
869 local_info.jumps_threaded = false;
870 redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
871 (&local_info);
873 /* The template does not have an outgoing edge. Create that outgoing
874 edge and update PHI nodes at the edge's target as necessary.
876 We do this after creating all the duplicates to avoid creating
877 unnecessary edges. */
878 redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
879 (&local_info);
881 /* The hash table traversals above created the duplicate blocks (and the
882 statements within the duplicate blocks). This loop creates PHI nodes for
883 the duplicated blocks and redirects the incoming edges into BB to reach
884 the duplicates of BB. */
885 redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
886 (&local_info);
888 /* Done with this block. Clear REDIRECTION_DATA. */
889 redirection_data.dispose ();
891 if (noloop_only
892 && bb == bb->loop_father->header)
893 set_loop_copy (bb->loop_father, NULL);
895 /* Indicate to our caller whether or not any jumps were threaded. */
896 return local_info.jumps_threaded;
899 /* Wrapper for thread_block_1 so that we can first handle jump
900 thread paths which do not involve copying joiner blocks, then
901 handle jump thread paths which have joiner blocks.
903 By doing things this way we can be as aggressive as possible and
904 not worry that copying a joiner block will create a jump threading
905 opportunity. */
907 static bool
908 thread_block (basic_block bb, bool noloop_only)
910 bool retval;
911 retval = thread_block_1 (bb, noloop_only, false);
912 retval |= thread_block_1 (bb, noloop_only, true);
913 return retval;
917 /* Threads edge E through E->dest to the next edge on E's jump thread path. Returns the
918 copy of E->dest created during threading, or E->dest if it was not necessary
919 to copy it (E is its single predecessor). */
921 static basic_block
922 thread_single_edge (edge e)
924 basic_block bb = e->dest;
925 struct redirection_data rd;
926 vec<jump_thread_edge *> *path = THREAD_PATH (e);
927 edge eto = (*path)[1]->e;
929 for (unsigned int i = 0; i < path->length (); i++)
930 delete (*path)[i];
931 delete path;
932 e->aux = NULL;
934 thread_stats.num_threaded_edges++;
936 if (single_pred_p (bb))
938 /* If BB has just a single predecessor, we should only remove the
939 control statement at its end, and all successors except ETO. */
940 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
942 /* And fixup the flags on the single remaining edge. */
943 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
944 eto->flags |= EDGE_FALLTHRU;
946 return bb;
949 /* Otherwise, we need to create a copy. */
950 if (e->dest == eto->src)
951 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
953 vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
954 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
955 npath->safe_push (x);
957 x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
958 npath->safe_push (x);
959 rd.path = npath;
961 create_block_for_threading (bb, &rd, 0);
962 remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
963 create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0]);
965 if (dump_file && (dump_flags & TDF_DETAILS))
966 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
967 e->src->index, e->dest->index, rd.dup_blocks[0]->index);
969 rd.dup_blocks[0]->count = e->count;
970 rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
971 single_succ_edge (rd.dup_blocks[0])->count = e->count;
972 redirect_edge_and_branch (e, rd.dup_blocks[0]);
973 flush_pending_stmts (e);
975 return rd.dup_blocks[0];
978 /* Callback for dfs_enumerate_from. Returns true if BB is different
979 from STOP and DBDS_CE_STOP. */
981 static basic_block dbds_ce_stop;
982 static bool
983 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
985 return (bb != (const_basic_block) stop
986 && bb != dbds_ce_stop);
989 /* Evaluates the dominance relationship of latch of the LOOP and BB, and
990 returns the state. */
992 enum bb_dom_status
994 /* BB does not dominate latch of the LOOP. */
995 DOMST_NONDOMINATING,
996 /* The LOOP is broken (there is no path from the header to its latch). */
997 DOMST_LOOP_BROKEN,
998 /* BB dominates the latch of the LOOP. */
999 DOMST_DOMINATING
1002 static enum bb_dom_status
1003 determine_bb_domination_status (struct loop *loop, basic_block bb)
1005 basic_block *bblocks;
1006 unsigned nblocks, i;
1007 bool bb_reachable = false;
1008 edge_iterator ei;
1009 edge e;
1011 /* This function assumes BB is a successor of LOOP->header.
1012 If that is not the case return DOMST_NONDOMINATING which
1013 is always safe. */
1015 bool ok = false;
1017 FOR_EACH_EDGE (e, ei, bb->preds)
1019 if (e->src == loop->header)
1021 ok = true;
1022 break;
1026 if (!ok)
1027 return DOMST_NONDOMINATING;
1030 if (bb == loop->latch)
1031 return DOMST_DOMINATING;
1033 /* Check that BB dominates LOOP->latch, and that it is back-reachable
1034 from it. */
1036 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1037 dbds_ce_stop = loop->header;
1038 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1039 bblocks, loop->num_nodes, bb);
1040 for (i = 0; i < nblocks; i++)
1041 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1043 if (e->src == loop->header)
1045 free (bblocks);
1046 return DOMST_NONDOMINATING;
1048 if (e->src == bb)
1049 bb_reachable = true;
1052 free (bblocks);
1053 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1056 /* Return true if BB is part of the new pre-header that is created
1057 when threading the latch to DATA. */
1059 static bool
1060 def_split_header_continue_p (const_basic_block bb, const void *data)
1062 const_basic_block new_header = (const_basic_block) data;
1063 const struct loop *l;
1065 if (bb == new_header
1066 || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
1067 return false;
1068 for (l = bb->loop_father; l; l = loop_outer (l))
1069 if (l == new_header->loop_father)
1070 return true;
1071 return false;
1074 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
1075 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1076 to the inside of the loop. */
1078 static bool
1079 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1081 basic_block header = loop->header;
1082 edge e, tgt_edge, latch = loop_latch_edge (loop);
1083 edge_iterator ei;
1084 basic_block tgt_bb, atgt_bb;
1085 enum bb_dom_status domst;
1087 /* We have already threaded through headers to exits, so all the threading
1088 requests now are to the inside of the loop. We need to avoid creating
1089 irreducible regions (i.e., loops with more than one entry block), and
1090 also loops with several latch edges, or new subloops of the loop (although
1091 there are cases where it might be appropriate, it is difficult to decide,
1092 and doing it wrongly may confuse other optimizers).
1094 We could handle more general cases here. However, the intention is to
1095 preserve some information about the loop, which is impossible if its
1096 structure changes significantly, in a way that is not well understood.
1097 Thus we only handle a few important special cases, in which updating
1098 the loop-carried information should also be feasible:
1100 1) Propagation of latch edge to a block that dominates the latch block
1101 of a loop. This aims to handle the following idiom:
1103 first = 1;
1104 while (1)
1106 if (first)
1107 initialize;
1108 first = 0;
1109 body;
1112 After threading the latch edge, this becomes
1114 first = 1;
1115 if (first)
1116 initialize;
1117 while (1)
1119 first = 0;
1120 body;
1123 The original header of the loop is moved out of it, and we may thread
1124 the remaining edges through it without further constraints.
1126 2) All entry edges are propagated to a single basic block that dominates
1127 the latch block of the loop. This aims to handle the following idiom
1128 (normally created for "for" loops):
1130 i = 0;
1131 while (1)
1133 if (i >= 100)
1134 break;
1135 body;
1136 i++;
1139 This becomes
1141 i = 0;
1142 while (1)
1144 body;
1145 i++;
1146 if (i >= 100)
1147 break;
1151 /* Threading through the header won't improve the code if the header has just
1152 one successor. */
1153 if (single_succ_p (header))
1154 goto fail;
1156 /* If we threaded the latch using a joiner block, we cancel the
1157 threading opportunity out of an abundance of caution. However,
1158 still allow threading from outside to inside the loop. */
1159 if (latch->aux)
1161 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1162 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1164 delete_jump_thread_path (path);
1165 latch->aux = NULL;
1169 if (latch->aux)
1171 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1172 tgt_edge = (*path)[1]->e;
1173 tgt_bb = tgt_edge->dest;
1175 else if (!may_peel_loop_headers
1176 && !redirection_block_p (loop->header))
1177 goto fail;
1178 else
1180 tgt_bb = NULL;
1181 tgt_edge = NULL;
1182 FOR_EACH_EDGE (e, ei, header->preds)
1184 if (!e->aux)
1186 if (e == latch)
1187 continue;
1189 /* If latch is not threaded, and there is a header
1190 edge that is not threaded, we would create loop
1191 with multiple entries. */
1192 goto fail;
1195 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1197 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1198 goto fail;
1199 tgt_edge = (*path)[1]->e;
1200 atgt_bb = tgt_edge->dest;
1201 if (!tgt_bb)
1202 tgt_bb = atgt_bb;
1203 /* Two targets of threading would make us create loop
1204 with multiple entries. */
1205 else if (tgt_bb != atgt_bb)
1206 goto fail;
1209 if (!tgt_bb)
1211 /* There are no threading requests. */
1212 return false;
1215 /* Redirecting to empty loop latch is useless. */
1216 if (tgt_bb == loop->latch
1217 && empty_block_p (loop->latch))
1218 goto fail;
1221 /* The target block must dominate the loop latch, otherwise we would be
1222 creating a subloop. */
1223 domst = determine_bb_domination_status (loop, tgt_bb);
1224 if (domst == DOMST_NONDOMINATING)
1225 goto fail;
1226 if (domst == DOMST_LOOP_BROKEN)
1228 /* If the loop ceased to exist, mark it as such, and thread through its
1229 original header. */
1230 loop->header = NULL;
1231 loop->latch = NULL;
1232 loops_state_set (LOOPS_NEED_FIXUP);
1233 return thread_block (header, false);
1236 if (tgt_bb->loop_father->header == tgt_bb)
1238 /* If the target of the threading is a header of a subloop, we need
1239 to create a preheader for it, so that the headers of the two loops
1240 do not merge. */
1241 if (EDGE_COUNT (tgt_bb->preds) > 2)
1243 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1244 gcc_assert (tgt_bb != NULL);
1246 else
1247 tgt_bb = split_edge (tgt_edge);
1250 if (latch->aux)
1252 basic_block *bblocks;
1253 unsigned nblocks, i;
1255 /* First handle the case where the latch edge is redirected. We are copying
1256 the loop header but not creating a multiple entry loop. Make the
1257 cfg manipulation code aware of that fact. */
1258 set_loop_copy (loop, loop);
1259 loop->latch = thread_single_edge (latch);
1260 set_loop_copy (loop, NULL);
1261 gcc_assert (single_succ (loop->latch) == tgt_bb);
1262 loop->header = tgt_bb;
1264 /* Remove the new pre-header blocks from our loop. */
1265 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1266 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1267 bblocks, loop->num_nodes, tgt_bb);
1268 for (i = 0; i < nblocks; i++)
1269 if (bblocks[i]->loop_father == loop)
1271 remove_bb_from_loops (bblocks[i]);
1272 add_bb_to_loop (bblocks[i], loop_outer (loop));
1274 free (bblocks);
1276 /* If the new header has multiple latches mark it so. */
1277 FOR_EACH_EDGE (e, ei, loop->header->preds)
1278 if (e->src->loop_father == loop
1279 && e->src != loop->latch)
1281 loop->latch = NULL;
1282 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1285 /* Cancel remaining threading requests that would make the
1286 loop a multiple entry loop. */
1287 FOR_EACH_EDGE (e, ei, header->preds)
1289 edge e2;
1291 if (e->aux == NULL)
1292 continue;
1294 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1295 e2 = path->last ()->e;
1297 if (e->src->loop_father != e2->dest->loop_father
1298 && e2->dest != loop->header)
1300 delete_jump_thread_path (path);
1301 e->aux = NULL;
1305 /* Thread the remaining edges through the former header. */
1306 thread_block (header, false);
1308 else
1310 basic_block new_preheader;
1312 /* Now consider the case where entry edges are redirected to the new entry
1313 block. Remember one entry edge, so that we can find the new
1314 preheader (its destination after threading). */
1315 FOR_EACH_EDGE (e, ei, header->preds)
1317 if (e->aux)
1318 break;
1321 /* The duplicate of the header is the new preheader of the loop. Ensure
1322 that it is placed correctly in the loop hierarchy. */
1323 set_loop_copy (loop, loop_outer (loop));
1325 thread_block (header, false);
1326 set_loop_copy (loop, NULL);
1327 new_preheader = e->dest;
1329 /* Create the new latch block. This is always necessary, as the latch
1330 must have only a single successor, but the original header had at
1331 least two successors. */
1332 loop->latch = NULL;
1333 mfb_kj_edge = single_succ_edge (new_preheader);
1334 loop->header = mfb_kj_edge->dest;
1335 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1336 loop->header = latch->dest;
1337 loop->latch = latch->src;
1340 return true;
1342 fail:
1343 /* We failed to thread anything. Cancel the requests. */
1344 FOR_EACH_EDGE (e, ei, header->preds)
1346 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1348 if (path)
1350 delete_jump_thread_path (path);
1351 e->aux = NULL;
1354 return false;
1357 /* E1 and E2 are edges into the same basic block. Return TRUE if the
1358 PHI arguments associated with those edges are equal or there are no
1359 PHI arguments, otherwise return FALSE. */
1361 static bool
1362 phi_args_equal_on_edges (edge e1, edge e2)
1364 gimple_stmt_iterator gsi;
1365 int indx1 = e1->dest_idx;
1366 int indx2 = e2->dest_idx;
1368 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1370 gimple phi = gsi_stmt (gsi);
1372 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1373 gimple_phi_arg_def (phi, indx2), 0))
1374 return false;
1376 return true;
1379 /* Walk through the registered jump threads and convert them into a
1380 form convenient for this pass.
1382 Any block which has incoming edges threaded to outgoing edges
1383 will have its entry in THREADED_BLOCK set.
1385 Any threaded edge will have its new outgoing edge stored in the
1386 original edge's AUX field.
1388 This form avoids the need to walk all the edges in the CFG to
1389 discover blocks which need processing and avoids unnecessary
1390 hash table lookups to map from threaded edge to new target. */
1392 static void
1393 mark_threaded_blocks (bitmap threaded_blocks)
1395 unsigned int i;
1396 bitmap_iterator bi;
1397 bitmap tmp = BITMAP_ALLOC (NULL);
1398 basic_block bb;
1399 edge e;
1400 edge_iterator ei;
1402 /* Move the jump threading requests from PATHS to each edge
1403 which starts a jump thread path. */
1404 for (i = 0; i < paths.length (); i++)
1406 vec<jump_thread_edge *> *path = paths[i];
1407 edge e = (*path)[0]->e;
1408 e->aux = (void *)path;
1409 bitmap_set_bit (tmp, e->dest->index);
1414 /* If optimizing for size, only thread through a block if we don't have
1415 to duplicate it or it's an otherwise empty redirection block. */
1416 if (optimize_function_for_size_p (cfun))
1418 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1420 bb = BASIC_BLOCK (i);
1421 if (EDGE_COUNT (bb->preds) > 1
1422 && !redirection_block_p (bb))
1424 FOR_EACH_EDGE (e, ei, bb->preds)
1426 if (e->aux)
1428 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1429 delete_jump_thread_path (path);
1430 e->aux = NULL;
1434 else
1435 bitmap_set_bit (threaded_blocks, i);
1438 else
1439 bitmap_copy (threaded_blocks, tmp);
1441 /* Look for jump threading paths which cross multiple loop headers.
1443 The code to thread through loop headers will change the CFG in ways
1444 that break assumptions made by the loop optimization code.
1446 We don't want to blindly cancel the requests. We can instead do better
1447 by trimming off the end of the jump thread path. */
1448 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1450 basic_block bb = BASIC_BLOCK (i);
1451 FOR_EACH_EDGE (e, ei, bb->preds)
1453 if (e->aux)
1455 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1457 /* Basically we're looking for a situation where we can see
1458 3 or more loop structures on a jump threading path. */
1460 struct loop *first_father = (*path)[0]->e->src->loop_father;
1461 struct loop *second_father = NULL;
1462 for (unsigned int i = 0; i < path->length (); i++)
1464 /* See if this is a loop father we have not seen before. */
1465 if ((*path)[i]->e->dest->loop_father != first_father
1466 && (*path)[i]->e->dest->loop_father != second_father)
1468 /* We've already seen two loop fathers, so we
1469 need to trim this jump threading path. */
1470 if (second_father != NULL)
1472 /* Trim from entry I onwards. */
1473 for (unsigned int j = i; j < path->length (); j++)
1474 delete (*path)[j];
1475 path->truncate (i);
1477 /* Now that we've truncated the path, make sure
1478 what's left is still valid. We need at least
1479 two edges on the path and the last edge cannot
1480 be a joiner. This should never happen, but let's
1481 be safe. */
1482 if (path->length () < 2
1483 || (path->last ()->type
1484 == EDGE_COPY_SRC_JOINER_BLOCK))
1486 delete_jump_thread_path (path);
1487 e->aux = NULL;
1489 break;
1491 else
1493 second_father = (*path)[i]->e->dest->loop_father;
1501 /* If we have a joiner block (J) which has two successors S1 and S2 and
1502 we are threading through S1 and the final destination of the thread
1503 is S2, then we must verify that any PHI nodes in S2 have the same
1504 PHI arguments for the edge J->S2 and J->S1->...->S2.
1506 We used to detect this prior to registering the jump thread, but
1507 that prohibits propagation of edge equivalences into non-dominated
1508 PHI nodes as the equivalency test might occur before propagation.
1510 This must also occur after we truncate any jump threading paths
1511 as this scenario may only show up after truncation.
1513 This works for now, but will need improvement as part of the FSA
1514 optimization.
1516 Note since we've moved the thread request data to the edges,
1517 we have to iterate on those rather than the threaded_edges vector. */
1518 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1520 bb = BASIC_BLOCK (i);
1521 FOR_EACH_EDGE (e, ei, bb->preds)
1523 if (e->aux)
1525 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1526 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
1528 if (have_joiner)
1530 basic_block joiner = e->dest;
1531 edge final_edge = path->last ()->e;
1532 basic_block final_dest = final_edge->dest;
1533 edge e2 = find_edge (joiner, final_dest);
1535 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
1537 delete_jump_thread_path (path);
1538 e->aux = NULL;
1545 BITMAP_FREE (tmp);
1549 /* Return TRUE if BB ends with a switch statement or a computed goto.
1550 Otherwise return false. */
1551 static bool
1552 bb_ends_with_multiway_branch (basic_block bb ATTRIBUTE_UNUSED)
1554 gimple stmt = last_stmt (bb);
1555 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
1556 return true;
1557 if (stmt && gimple_code (stmt) == GIMPLE_GOTO
1558 && TREE_CODE (gimple_goto_dest (stmt)) == SSA_NAME)
1559 return true;
1560 return false;
1563 /* Walk through all blocks and thread incoming edges to the appropriate
1564 outgoing edge for each edge pair recorded in THREADED_EDGES.
1566 It is the caller's responsibility to fix the dominance information
1567 and rewrite duplicated SSA_NAMEs back into SSA form.
1569 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
1570 loop headers if it does not simplify the loop.
1572 Returns true if one or more edges were threaded, false otherwise. */
1574 bool
1575 thread_through_all_blocks (bool may_peel_loop_headers)
1577 bool retval = false;
1578 unsigned int i;
1579 bitmap_iterator bi;
1580 bitmap threaded_blocks;
1581 struct loop *loop;
1582 bool totally_clobbered_loops = false;
1584 /* We must know about loops in order to preserve them. */
1585 gcc_assert (current_loops != NULL);
1587 if (!paths.exists ())
1588 return false;
1590 threaded_blocks = BITMAP_ALLOC (NULL);
1591 memset (&thread_stats, 0, sizeof (thread_stats));
1593 mark_threaded_blocks (threaded_blocks);
1595 initialize_original_copy_tables ();
1597 /* First perform the threading requests that do not affect
1598 loop structure. */
1599 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
1601 basic_block bb = BASIC_BLOCK (i);
1603 if (EDGE_COUNT (bb->preds) > 0)
1604 retval |= thread_block (bb, true);
1607 /* Then perform the threading through loop headers. We start with the
1608 innermost loop, so that the changes in cfg we perform won't affect
1609 further threading. */
1610 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1612 if (!loop->header
1613 || !bitmap_bit_p (threaded_blocks, loop->header->index))
1614 continue;
1616 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
1619 /* Any jump threading paths that are still attached to edges at this
1620 point must be one of two cases.
1622 First, we could have a jump threading path which went from outside
1623 a loop to inside a loop that was ignored because a prior jump thread
1624 across a backedge was realized (which indirectly causes the loop
1625 above to ignore the latter thread). We can detect these because the
1626 loop structures will be different and we do not currently try to
1627 optimize this case.
1629 Second, we could be threading across a backedge to a point within the
1630 same loop. This occurs for the FSA/FSM optimization and we would
1631 like to optimize it. However, we have to be very careful as this
1632 may completely scramble the loop structures, with the result being
1633 irreducible loops causing us to throw away our loop structure.
1635 As a compromise for the latter case, if the thread path ends in
1636 a block where the last statement is a multiway branch, then go
1637 ahead and thread it, else ignore it. */
1638 basic_block bb;
1639 edge e;
1640 FOR_EACH_BB (bb)
1642 /* If we do end up threading here, we can remove elements from
1643 BB->preds. Thus we cannot use the FOR_EACH_EDGE iterator. */
1644 for (edge_iterator ei = ei_start (bb->preds);
1645 (e = ei_safe_edge (ei));)
1646 if (e->aux)
1648 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1650 /* Case 1, threading from outside to inside the loop
1651 after we'd already threaded through the header. */
1652 if ((*path)[0]->e->dest->loop_father
1653 != path->last ()->e->src->loop_father)
1655 delete_jump_thread_path (path);
1656 e->aux = NULL;
1657 ei_next (&ei);
1659 else if (bb_ends_with_multiway_branch (path->last ()->e->src))
1661 /* The code to thread through loop headers may have
1662 split a block with jump threads attached to it.
1664 We can identify this with a disjoint jump threading
1665 path. If found, just remove it. */
1666 for (unsigned int i = 0; i < path->length () - 1; i++)
1667 if ((*path)[i]->e->dest != (*path)[i + 1]->e->src)
1669 delete_jump_thread_path (path);
1670 e->aux = NULL;
1671 ei_next (&ei);
1672 break;
1675 /* Our path is still valid, thread it. */
1676 if (e->aux)
1678 totally_clobbered_loops
1679 |= thread_block ((*path)[0]->e->dest, false);
1680 e->aux = NULL;
1683 else
1685 delete_jump_thread_path (path);
1686 e->aux = NULL;
1687 ei_next (&ei);
1690 else
1691 ei_next (&ei);
1694 statistics_counter_event (cfun, "Jumps threaded",
1695 thread_stats.num_threaded_edges);
1697 free_original_copy_tables ();
1699 BITMAP_FREE (threaded_blocks);
1700 threaded_blocks = NULL;
1701 paths.release ();
1703 /* If we made changes to the CFG that might have totally messed
1704 up the loop structure, then drop the old loop structure and
1705 rebuild. */
1706 if (totally_clobbered_loops)
1708 /* Release the current loop structures, they are totally
1709 clobbered at this point. */
1710 loop_optimizer_finalize ();
1711 current_loops = NULL;
1713 /* Similarly for dominance information. */
1714 free_dominance_info (CDI_DOMINATORS);
1715 free_dominance_info (CDI_POST_DOMINATORS);
1717 /* Before we can rebuild the loop structures, we need dominators,
1718 which requires no unreachable code. So remove unreachable code. */
1719 delete_unreachable_blocks ();
1721 /* Now rebuild the loop structures. */
1722 cfun->curr_properties &= ~PROP_loops;
1723 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
1724 cfun->curr_properties |= PROP_loops;
1725 retval = 1;
1728 if (retval && current_loops)
1729 loops_state_set (LOOPS_NEED_FIXUP);
1731 return retval;
1734 /* Delete the jump threading path PATH. We have to explicitly delete
1735 each entry in the vector, then the container. */
1737 void
1738 delete_jump_thread_path (vec<jump_thread_edge *> *path)
1740 for (unsigned int i = 0; i < path->length (); i++)
1741 delete (*path)[i];
1742 path->release ();
1745 /* Dump a jump threading path, including annotations about each
1746 edge in the path. */
1748 static void
1749 dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path)
1751 fprintf (dump_file,
1752 " Registering jump thread: (%d, %d) incoming edge; ",
1753 path[0]->e->src->index, path[0]->e->dest->index);
1755 for (unsigned int i = 1; i < path.length (); i++)
1757 /* We can get paths with a NULL edge when the final destination
1758 of a jump thread turns out to be a constant address. We dump
1759 those paths when debugging, so we have to be prepared for that
1760 possibility here. */
1761 if (path[i]->e == NULL)
1762 continue;
1764 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1765 fprintf (dump_file, " (%d, %d) joiner; ",
1766 path[i]->e->src->index, path[i]->e->dest->index);
1767 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
1768 fprintf (dump_file, " (%d, %d) normal;",
1769 path[i]->e->src->index, path[i]->e->dest->index);
1770 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
1771 fprintf (dump_file, " (%d, %d) nocopy;",
1772 path[i]->e->src->index, path[i]->e->dest->index);
1774 fputc ('\n', dump_file);
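/* For example, with TDF_DETAILS enabled, a path entering a joiner block
   and then copying one more block might be dumped roughly as

     Registering jump thread: (3, 5) incoming edge;  (5, 7) joiner; (7, 9) normal;

   where the block numbers are made up purely for illustration.  */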
1777 /* Register a jump threading opportunity. We queue up all the jump
1778 threading opportunities discovered by a pass and update the CFG
1779 and SSA form all at once.
1781 PATH describes the jump thread: its first entry gives the incoming
1782 edge we can thread and its last entry gives the edge whose destination
1783 control will effectively reach after fixing the SSA graph. */
1785 void
1786 register_jump_thread (vec<jump_thread_edge *> *path)
1788 if (!dbg_cnt (registered_jump_thread))
1790 delete_jump_thread_path (path);
1791 return;
1794 /* First make sure there are no NULL outgoing edges on the jump threading
1795 path. That can happen for jumping to a constant address. */
1796 for (unsigned int i = 0; i < path->length (); i++)
1797 if ((*path)[i]->e == NULL)
1799 if (dump_file && (dump_flags & TDF_DETAILS))
1801 fprintf (dump_file,
1802 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
1803 dump_jump_thread_path (dump_file, *path);
1806 delete_jump_thread_path (path);
1807 return;
1810 if (dump_file && (dump_flags & TDF_DETAILS))
1811 dump_jump_thread_path (dump_file, *path);
1813 if (!paths.exists ())
1814 paths.create (5);
1816 paths.safe_push (path);