[official-gcc.git] / gcc / tree-ssa-threadupdate.c
1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tree.h"
24 #include "flags.h"
25 #include "basic-block.h"
26 #include "function.h"
27 #include "hash-table.h"
28 #include "tree-ssa-alias.h"
29 #include "internal-fn.h"
30 #include "gimple-expr.h"
31 #include "is-a.h"
32 #include "gimple.h"
33 #include "gimple-iterator.h"
34 #include "gimple-ssa.h"
35 #include "tree-phinodes.h"
36 #include "tree-ssa.h"
37 #include "tree-ssa-threadupdate.h"
38 #include "ssa-iterators.h"
39 #include "dumpfile.h"
40 #include "cfgloop.h"
41 #include "dbgcnt.h"
42 #include "tree-cfg.h"
43 #include "tree-pass.h"
45 /* Given a block B, update the CFG and SSA graph to reflect redirecting
46 one or more in-edges to B to instead reach the destination of an
47 out-edge from B while preserving any side effects in B.
49 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
50 side effects of executing B.
52 1. Make a copy of B (including its outgoing edges and statements). Call
53 the copy B'. Note B' has no incoming edges or PHIs at this time.
55 2. Remove the control statement at the end of B' and all outgoing edges
56 except B'->C.
58 3. Add a new argument to each PHI in C with the same value as the existing
59 argument associated with edge B->C. Associate the new PHI arguments
60 with the edge B'->C.
62 4. For each PHI in B, find or create a PHI in B' with an identical
63 PHI_RESULT. Add an argument to the PHI in B' which has the same
64 value as the PHI in B associated with the edge A->B. Associate
65 the new argument in the PHI in B' with the edge A->B.
67 5. Change the edge A->B to A->B'.
69 5a. This automatically deletes any PHI arguments associated with the
70 edge A->B in B.
72 5b. This automatically associates each new argument added in step 4
73 with the edge A->B'.
75 6. Repeat for other incoming edges into B.
77 7. Put the duplicated resources in B and all the B' blocks into SSA form.
79 Note that block duplication can be minimized by first collecting the
80 set of unique destination blocks that the incoming edges should
81 be threaded to.
83 We reduce the number of edges and statements we create by not copying all
84 the outgoing edges and the control statement in step #1. We instead create
85 a template block without the outgoing edges and duplicate the template.
87 Another case this code handles is threading through a "joiner" block. In
88 this case, we do not know the destination of the joiner block, but one
89 of the outgoing edges from the joiner block leads to a threadable path. This
90 case largely works as outlined above, except the duplicate of the joiner
91 block still contains a full set of outgoing edges and its control statement.
92 We just redirect one of its outgoing edges to our jump threading path. */
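/* A small worked example of the algorithm above, using hypothetical
   block names:

         A1   A2
           \  /
            B        B ends in "if (x_1 != 0) goto C; else goto D;"
           / \
          C   D

   Suppose analysis shows x_1 is nonzero on the edge A1->B.  We copy B
   to B' (steps #1 and #2), keeping only the outgoing edge B'->C; PHIs
   in C get a new argument for B'->C copied from the one for B->C
   (step #3); B's PHIs are materialized in B' (step #4); and A1->B is
   redirected to A1->B' (step #5).  A2 still reaches the original B,
   so B's side effects are preserved on every path.  */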
95 /* Steps #5 and #6 of the above algorithm are best implemented by walking
96 all the incoming edges which thread to the same destination edge at
97 the same time. That avoids lots of table lookups to get information
98 for the destination edge.
100 To realize that implementation we create a list of incoming edges
101 which thread to the same outgoing edge. Thus to implement steps
102 #5 and #6 we traverse our hash table of outgoing edge information.
103 For each entry we walk the list of incoming edges which thread to
104 the current outgoing edge. */
106 struct el
107 {
108 edge e;
109 struct el *next;
110 };
112 /* Main data structure recording information regarding B's duplicate
113 blocks. */
115 /* We need to efficiently record the unique thread destinations of this
116 block and specific information associated with those destinations. We
117 may have many incoming edges threaded to the same outgoing edge. This
118 can be naturally implemented with a hash table. */
120 struct redirection_data : typed_free_remove<redirection_data>
121 {
122 /* We support wiring up two block duplicates in a jump threading path.
124 One is a normal block copy where we remove the control statement
125 and wire up its single remaining outgoing edge to the thread path.
127 The other is a joiner block where we leave the control statement
128 in place, but wire one of the outgoing edges to a thread path.
130 In theory we could have multiple block duplicates in a jump
131 threading path, but I haven't tried that.
133 The duplicate blocks appear in this array in the same order in
134 which they appear in the jump thread path. */
135 basic_block dup_blocks[2];
137 /* The jump threading path. */
138 vec<jump_thread_edge *> *path;
140 /* A list of incoming edges which we want to thread to the
141 same path. */
142 struct el *incoming_edges;
144 /* hash_table support. */
145 typedef redirection_data value_type;
146 typedef redirection_data compare_type;
147 static inline hashval_t hash (const value_type *);
148 static inline int equal (const value_type *, const compare_type *);
149 };
151 /* Simple hashing function. For any given incoming edge E, we're going
152 to be most concerned with the final destination of its jump thread
153 path. So hash on the block index of the final edge in the path. */
155 inline hashval_t
156 redirection_data::hash (const value_type *p)
158 vec<jump_thread_edge *> *path = p->path;
159 return path->last ()->e->dest->index;
162 /* Given two hash table entries, return true if they have the same
163 jump threading path. */
164 inline int
165 redirection_data::equal (const value_type *p1, const compare_type *p2)
167 vec<jump_thread_edge *> *path1 = p1->path;
168 vec<jump_thread_edge *> *path2 = p2->path;
170 if (path1->length () != path2->length ())
171 return false;
173 for (unsigned int i = 1; i < path1->length (); i++)
175 if ((*path1)[i]->type != (*path2)[i]->type
176 || (*path1)[i]->e != (*path2)[i]->e)
177 return false;
180 return true;
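/* To illustrate hash/equal above with hypothetical paths: any path
   whose final edge enters block 7 hashes to bucket 7, so

     path1 = [A1->B, B->C, C->(bb 7)]
     path2 = [A2->B, B->C, C->(bb 7)]

   collide.  equal () then compares the two paths starting at element
   1 (element 0 is the incoming edge, which legitimately differs), so
   path1 and path2 share one hash table entry, and both incoming edges
   A1->B and A2->B end up on that entry's incoming_edges list.  */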
183 /* Data structure of information to pass to hash table traversal routines. */
184 struct ssa_local_info_t
185 {
186 /* The current block we are working on. */
187 basic_block bb;
189 /* We only create a template block for the first duplicated block in a
190 jump threading path as we may need many duplicates of that block.
192 The second duplicate block in a path is specific to that path. Creating
193 and sharing a template for that block is considerably more difficult. */
194 basic_block template_block;
196 /* TRUE if we thread one or more jumps, FALSE otherwise. */
197 bool jumps_threaded;
198 };
200 /* Passes which use the jump threading code register jump threading
201 opportunities as they are discovered. We keep the registered
202 jump threading opportunities in this vector as jump threading
203 paths (vectors of jump_thread_edge).
204 static vec<vec<jump_thread_edge *> *> paths;
206 /* When we start updating the CFG for threading, data necessary for jump
207 threading is attached to the AUX field for the incoming edge. Use these
208 macros to access the underlying structure attached to the AUX field. */
209 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
211 /* Jump threading statistics. */
213 struct thread_stats_d
214 {
215 unsigned long num_threaded_edges;
216 };
218 struct thread_stats_d thread_stats;
221 /* Remove the last statement in block BB if it is a control statement.
222 Also remove all outgoing edges except the edge which reaches DEST_BB.
223 If DEST_BB is NULL, then remove all outgoing edges. */
225 static void
226 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
228 gimple_stmt_iterator gsi;
229 edge e;
230 edge_iterator ei;
232 gsi = gsi_last_bb (bb);
234 /* If the duplicate ends with a control statement, then remove it.
236 Note that if we are duplicating the template block rather than the
237 original basic block, then the duplicate might not have any real
238 statements in it. */
239 if (!gsi_end_p (gsi)
240 && gsi_stmt (gsi)
241 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
242 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
243 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
244 gsi_remove (&gsi, true);
246 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
248 if (e->dest != dest_bb)
249 remove_edge (e);
250 else
251 ei_next (&ei);
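/* Note the loop above only advances the iterator when the current edge
   is kept; remove_edge replaces the current element, so advancing after
   a removal would skip an edge.  A standalone sketch of the same idiom
   on a toy array (not GCC's types; illustration only):

     static int vals[] = { 1, 2, 3, 4, 5 };
     static int n = 5;

     static void
     remove_at (int i)
     {
       for (int j = i; j + 1 < n; j++)
         vals[j] = vals[j + 1];
       n--;
     }

     static void
     keep_even_only (void)
     {
       for (int i = 0; i < n; )
         if (vals[i] & 1)
           remove_at (i);   // a new value now sits at index i
         else
           i++;             // advance only when the value was kept
     }
   */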
255 /* Create a duplicate of BB. Record the duplicate block in an array
256 indexed by COUNT stored in RD. */
258 static void
259 create_block_for_threading (basic_block bb,
260 struct redirection_data *rd,
261 unsigned int count)
263 edge_iterator ei;
264 edge e;
266 /* We can use the generic block duplication code and simply remove
267 the stuff we do not need. */
268 rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
270 FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
271 e->aux = NULL;
273 /* Zero out the profile, since the block is unreachable for now. */
274 rd->dup_blocks[count]->frequency = 0;
275 rd->dup_blocks[count]->count = 0;
278 /* Main data structure to hold information for duplicates of BB. */
280 static hash_table <redirection_data> redirection_data;
282 /* Given an outgoing edge E, look up and return its entry in our hash table.
284 If INSERT is true, then we insert the entry into the hash table if
285 it is not already present. INCOMING_EDGE is added to the list of incoming
286 edges associated with E in the hash table. */
288 static struct redirection_data *
289 lookup_redirection_data (edge e, enum insert_option insert)
291 struct redirection_data **slot;
292 struct redirection_data *elt;
293 vec<jump_thread_edge *> *path = THREAD_PATH (e);
295 /* Build a hash table element so we can see if E is already
296 in the table. */
297 elt = XNEW (struct redirection_data);
298 elt->path = path;
299 elt->dup_blocks[0] = NULL;
300 elt->dup_blocks[1] = NULL;
301 elt->incoming_edges = NULL;
303 slot = redirection_data.find_slot (elt, insert);
305 /* This will only happen if INSERT is false and the entry is not
306 in the hash table. */
307 if (slot == NULL)
309 free (elt);
310 return NULL;
313 /* This will only happen if E was not in the hash table and
314 INSERT is true. */
315 if (*slot == NULL)
317 *slot = elt;
318 elt->incoming_edges = XNEW (struct el);
319 elt->incoming_edges->e = e;
320 elt->incoming_edges->next = NULL;
321 return elt;
323 /* E was in the hash table. */
324 else
326 /* Free ELT as we do not need it anymore, we will extract the
327 relevant entry from the hash table itself. */
328 free (elt);
330 /* Get the entry stored in the hash table. */
331 elt = *slot;
333 /* If insertion was requested, then we need to add INCOMING_EDGE
334 to the list of incoming edges associated with E. */
335 if (insert)
337 struct el *el = XNEW (struct el);
338 el->next = elt->incoming_edges;
339 el->e = e;
340 elt->incoming_edges = el;
343 return elt;
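/* The allocate-probe-adopt pattern above is worth calling out: we build
   a temporary element purely to probe the table, then either store it
   (new entry) or free it (an equivalent entry already exists).  A
   standalone sketch with a toy linear-probing table (hypothetical
   types, not GCC's hash_table):

     struct entry { int key; };
     #define N_SLOTS 16
     static struct entry *slots[N_SLOTS];

     static struct entry **
     find_slot (int key)
     {
       unsigned i = (unsigned) key % N_SLOTS;
       while (slots[i] && slots[i]->key != key)
         i = (i + 1) % N_SLOTS;
       return &slots[i];
     }

     static struct entry *
     lookup_or_insert (int key)
     {
       struct entry *elt = XNEW (struct entry);
       elt->key = key;
       struct entry **slot = find_slot (key);
       if (*slot == NULL)
         *slot = elt;    // not present: adopt the temporary
       else
         free (elt);     // present: discard it, use the stored entry
       return *slot;
     }
   */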
347 /* Similar to copy_phi_args, except that the PHI argument slot already
348 exists; it just does not have a value associated with it. */
350 static void
351 copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
353 int src_idx = src_e->dest_idx;
354 int tgt_idx = tgt_e->dest_idx;
356 /* Walk the PHIs in SRC_E->dest and TGT_E->dest in parallel. */
357 for (gimple_stmt_iterator gsi = gsi_start_phis (src_e->dest),
358 gsi2 = gsi_start_phis (tgt_e->dest);
359 !gsi_end_p (gsi);
360 gsi_next (&gsi), gsi_next (&gsi2))
362 gimple src_phi = gsi_stmt (gsi);
363 gimple dest_phi = gsi_stmt (gsi2);
364 tree val = gimple_phi_arg_def (src_phi, src_idx);
365 source_location locus = gimple_phi_arg_location (src_phi, src_idx);
367 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
368 gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
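/* A worked example with hypothetical SSA names: if SRC_E is the second
   edge into a block containing

       x_5 = PHI <a_1 (E0), b_2 (SRC_E)>

   and TGT_E is the first edge into a block whose corresponding PHI is
   missing that argument,

       x_7 = PHI <??? (TGT_E), c_3 (E3)>

   then after the copy the slot for TGT_E holds b_2 along with b_2's
   source location:

       x_7 = PHI <b_2 (TGT_E), c_3 (E3)>  */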
372 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E. */
374 static void
375 copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
377 gimple_stmt_iterator gsi;
378 int src_indx = src_e->dest_idx;
380 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
382 gimple phi = gsi_stmt (gsi);
383 source_location locus = gimple_phi_arg_location (phi, src_indx);
384 add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
388 /* We have recently made a copy of ORIG_BB, including its outgoing
389 edges. The copy is NEW_BB. Every PHI node in every direct successor of
390 ORIG_BB has a new argument associated with edge from NEW_BB to the
391 successor. Initialize the PHI argument so that it is equal to the PHI
392 argument associated with the edge from ORIG_BB to the successor. */
394 static void
395 update_destination_phis (basic_block orig_bb, basic_block new_bb)
397 edge_iterator ei;
398 edge e;
400 FOR_EACH_EDGE (e, ei, orig_bb->succs)
402 edge e2 = find_edge (new_bb, e->dest);
403 copy_phi_args (e->dest, e, e2);
407 /* Given a duplicate block and its single destination (both stored
408 in RD), create an edge between the duplicate and its single
409 destination.
411 Add an additional argument to any PHI nodes at the single
412 destination. */
414 static void
415 create_edge_and_update_destination_phis (struct redirection_data *rd,
416 basic_block bb)
418 edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
420 rescan_loop_exit (e, true, false);
421 e->probability = REG_BR_PROB_BASE;
422 e->count = bb->count;
424 /* We used to copy the thread path here. That was added in 2007
425 and dutifully updated through the representation changes in 2013.
427 In 2013 we added code to thread from an interior node through
428 the backedge to another interior node. That runs after the code
429 to thread through loop headers from outside the loop.
431 The latter may delete edges in the CFG, including those
432 which appeared in the jump threading path we copied here. Thus
433 we'd end up using a dangling pointer.
435 After reviewing the 2007/2011 code, I can't see how anything
436 depended on copying the AUX field and clearly copying the jump
437 threading path is problematical due to embedded edge pointers.
438 It has been removed. */
439 e->aux = NULL;
441 /* If there are any PHI nodes at the destination of the outgoing edge
442 from the duplicate block, then we will need to add a new argument
443 to them. The argument should have the same value as the argument
444 associated with the outgoing edge stored in RD. */
445 copy_phi_args (e->dest, rd->path->last ()->e, e);
448 /* Look through PATH beginning at START and return TRUE if there are
449 any additional blocks that need to be duplicated. Otherwise,
450 return FALSE. */
451 static bool
452 any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
453 unsigned int start)
455 for (unsigned int i = start + 1; i < path->length (); i++)
457 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
458 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
459 return true;
461 return false;
464 /* Wire up the outgoing edges from the duplicate blocks and
465 update any PHIs as needed. */
466 void
467 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
468 ssa_local_info_t *local_info)
470 edge e = rd->incoming_edges->e;
471 vec<jump_thread_edge *> *path = THREAD_PATH (e);
473 for (unsigned int count = 0, i = 1; i < path->length (); i++)
475 /* If we were threading through a joiner block, then we want
476 to keep its control statement and redirect an outgoing edge.
477 Else we want to remove the control statement & edges, then create
478 a new outgoing edge. In both cases we may need to update PHIs. */
479 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
481 edge victim;
482 edge e2;
484 /* This updates the PHIs at the destination of the duplicate
485 block. */
486 update_destination_phis (local_info->bb, rd->dup_blocks[count]);
488 /* Find the edge from the duplicate block to the block we're
489 threading through. That's the edge we want to redirect. */
490 victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
492 /* If there are no remaining blocks on the path to duplicate,
493 then redirect VICTIM to the final destination of the jump
494 threading path. */
495 if (!any_remaining_duplicated_blocks (path, i))
497 e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
498 e2->count = path->last ()->e->count;
499 /* If we redirected the edge, then we need to copy PHI arguments
500 at the target. If the edge already existed (e2 != victim
501 case), then the PHIs in the target already have the correct
502 arguments. */
503 if (e2 == victim)
504 copy_phi_args (e2->dest, path->last ()->e, e2);
506 else
508 /* Redirect VICTIM to the next duplicated block in the path. */
509 e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
511 /* We need to update the PHIs in the next duplicated block. We
512 want the new PHI args to have the same value as they had
513 in the source of the next duplicate block.
515 Thus, we need to know which edge we traversed into the
516 source of the duplicate. Furthermore, we may have
517 traversed many edges to reach the source of the duplicate.
519 Walk through the path starting at element I until we
520 hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
521 the edge from the prior element. */
522 for (unsigned int j = i + 1; j < path->length (); j++)
524 if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
526 copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
527 break;
531 count++;
533 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
535 remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
536 create_edge_and_update_destination_phis (rd, rd->dup_blocks[count]);
537 if (count == 1)
538 single_succ_edge (rd->dup_blocks[1])->aux = NULL;
539 count++;
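/* A worked example for the joiner case above, using a hypothetical
   path

     [A->J (incoming), J->B (joiner), B->C (copy src), C->D (no copy)]:

   dup_blocks[0] is a copy of J that keeps its control statement, and
   the copy of edge J->B is the VICTIM.  Since B still needs to be
   duplicated, VICTIM is redirected to dup_blocks[1] (the copy of B),
   whose PHIs receive the argument values associated with J->B.  When
   the walk reaches the B->C element, B's copy loses its control
   statement and gains a single fallthru edge to D, the destination of
   the last edge on the path.  */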
544 /* Hash table traversal callback routine to create duplicate blocks. */
546 int
547 ssa_create_duplicates (struct redirection_data **slot,
548 ssa_local_info_t *local_info)
550 struct redirection_data *rd = *slot;
552 /* The second duplicated block in a jump threading path is specific
553 to the path. So it gets stored in RD rather than in LOCAL_DATA.
555 Each time we're called, we have to look through the path and see
556 if a second block needs to be duplicated.
558 Note the search starts with the third edge on the path. The first
559 edge is the incoming edge, the second edge always has its source
560 duplicated. Thus we start our search with the third edge. */
561 vec<jump_thread_edge *> *path = rd->path;
562 for (unsigned int i = 2; i < path->length (); i++)
564 if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
565 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
567 create_block_for_threading ((*path)[i]->e->src, rd, 1);
568 break;
572 /* Create a template block if we have not done so already. Otherwise
573 use the template to create a new block. */
574 if (local_info->template_block == NULL)
576 create_block_for_threading ((*path)[1]->e->src, rd, 0);
577 local_info->template_block = rd->dup_blocks[0];
579 /* We do not create any outgoing edges for the template. We will
580 take care of that in a later traversal. That way we do not
581 create edges that are going to just be deleted. */
583 else
585 create_block_for_threading (local_info->template_block, rd, 0);
587 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
588 block. */
589 ssa_fix_duplicate_block_edges (rd, local_info);
592 /* Keep walking the hash table. */
593 return 1;
596 /* We did not create any outgoing edges for the template block during
597 block creation. This hash table traversal callback creates the
598 outgoing edge for the template block. */
600 inline int
601 ssa_fixup_template_block (struct redirection_data **slot,
602 ssa_local_info_t *local_info)
604 struct redirection_data *rd = *slot;
606 /* If this is the template block halt the traversal after updating
607 it appropriately.
609 If we were threading through a joiner block, then we want
610 to keep its control statement and redirect an outgoing edge.
611 Else we want to remove the control statement & edges, then create
612 a new outgoing edge. In both cases we may need to update PHIs. */
613 if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
615 ssa_fix_duplicate_block_edges (rd, local_info);
616 return 0;
619 return 1;
622 /* Hash table traversal callback to redirect each incoming edge
623 associated with this hash table element to its new destination. */
625 int
626 ssa_redirect_edges (struct redirection_data **slot,
627 ssa_local_info_t *local_info)
629 struct redirection_data *rd = *slot;
630 struct el *next, *el;
632 /* Walk over all the incoming edges associated with this
633 hash table entry. */
634 for (el = rd->incoming_edges; el; el = next)
636 edge e = el->e;
637 vec<jump_thread_edge *> *path = THREAD_PATH (e);
639 /* Go ahead and free this element from the list. Doing this now
640 avoids the need for another list walk when we destroy the hash
641 table. */
642 next = el->next;
643 free (el);
645 thread_stats.num_threaded_edges++;
647 if (rd->dup_blocks[0])
649 edge e2;
651 if (dump_file && (dump_flags & TDF_DETAILS))
652 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
653 e->src->index, e->dest->index, rd->dup_blocks[0]->index);
655 rd->dup_blocks[0]->count += e->count;
657 /* Excessive jump threading may make frequencies large enough so
658 the computation overflows. */
659 if (rd->dup_blocks[0]->frequency < BB_FREQ_MAX * 2)
660 rd->dup_blocks[0]->frequency += EDGE_FREQUENCY (e);
662 /* In the case of threading through a joiner block, the outgoing
663 edges from the duplicate block were updated when they were
664 redirected during ssa_fix_duplicate_block_edges. */
665 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
666 EDGE_SUCC (rd->dup_blocks[0], 0)->count += e->count;
668 /* Redirect the incoming edge (possibly to the joiner block) to the
669 appropriate duplicate block. */
670 e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
671 gcc_assert (e == e2);
672 flush_pending_stmts (e2);
675 /* Go ahead and clear E->aux. It's not needed anymore and failure
676 to clear it will cause all kinds of unpleasant problems later. */
677 delete_jump_thread_path (path);
678 e->aux = NULL;
682 /* Indicate that we actually threaded one or more jumps. */
683 if (rd->incoming_edges)
684 local_info->jumps_threaded = true;
686 return 1;
689 /* Return true if this block has no executable statements other than
690 a simple ctrl flow instruction. When the number of outgoing edges
691 is one, this is equivalent to a "forwarder" block. */
693 static bool
694 redirection_block_p (basic_block bb)
696 gimple_stmt_iterator gsi;
698 /* Advance to the first executable statement. */
699 gsi = gsi_start_bb (bb);
700 while (!gsi_end_p (gsi)
701 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
702 || is_gimple_debug (gsi_stmt (gsi))
703 || gimple_nop_p (gsi_stmt (gsi))))
704 gsi_next (&gsi);
706 /* Check if this is an empty block. */
707 if (gsi_end_p (gsi))
708 return true;
710 /* Test that we've reached the terminating control statement. */
711 return gsi_stmt (gsi)
712 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
713 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
714 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
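/* For example, a block whose only statements are (a hypothetical dump)

       <bb 4>:
       # DEBUG x => x_1
       if (x_1 > 5) goto <bb 5>; else goto <bb 6>;

   qualifies: labels, debug statements and nops are skipped, and the
   first real statement reached is the terminating control statement.  */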
717 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
718 is reached via one or more specific incoming edges, we know which
719 outgoing edge from BB will be traversed.
721 We want to redirect those incoming edges to the target of the
722 appropriate outgoing edge. Doing so avoids a conditional branch
723 and may expose new optimization opportunities. Note that we have
724 to update dominator tree and SSA graph after such changes.
726 The key to keeping the SSA graph update manageable is to duplicate
727 the side effects occurring in BB so that those side effects still
728 occur on the paths which bypass BB after redirecting edges.
730 We accomplish this by creating duplicates of BB and arranging for
731 the duplicates to unconditionally pass control to one specific
732 successor of BB. We then revector the incoming edges into BB to
733 the appropriate duplicate of BB.
735 If NOLOOP_ONLY is true, we only perform the threading as long as it
736 does not affect the structure of the loops in a nontrivial way.
738 If JOINERS is true, then thread through joiner blocks as well. */
740 static bool
741 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
743 /* E is an incoming edge into BB that we may or may not want to
744 redirect to a duplicate of BB. */
745 edge e, e2;
746 edge_iterator ei;
747 ssa_local_info_t local_info;
748 struct loop *loop = bb->loop_father;
750 /* To avoid scanning a linear array for the element we need, we instead
751 use a hash table. For normal code there should be no noticeable
752 difference. However, if we have a block with a large number of
753 incoming and outgoing edges such linear searches can get expensive. */
754 redirection_data.create (EDGE_COUNT (bb->succs));
756 /* If we thread the latch of the loop to its exit, the loop ceases to
757 exist. Make sure we do not restrict ourselves in order to preserve
758 this loop. */
759 if (loop->header == bb)
761 e = loop_latch_edge (loop);
762 vec<jump_thread_edge *> *path = THREAD_PATH (e);
764 if (path
765 && (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && joiners)
766 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && !joiners)))
768 for (unsigned int i = 1; i < path->length (); i++)
770 edge e2 = (*path)[i]->e;
772 if (loop_exit_edge_p (loop, e2))
774 loop->header = NULL;
775 loop->latch = NULL;
776 loops_state_set (LOOPS_NEED_FIXUP);
782 /* Record each unique threaded destination into a hash table for
783 efficient lookups. */
784 FOR_EACH_EDGE (e, ei, bb->preds)
786 if (e->aux == NULL)
787 continue;
789 vec<jump_thread_edge *> *path = THREAD_PATH (e);
791 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
792 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
793 continue;
795 e2 = path->last ()->e;
796 if (!e2 || noloop_only)
798 /* If NOLOOP_ONLY is true, we only allow threading through the
799 header of a loop to exit edges. */
801 /* One case occurs when there was a loop header buried in a jump
802 threading path that crosses loop boundaries. We do not try
803 to thread this elsewhere, so just cancel the jump threading
804 request by clearing the AUX field now. */
805 if ((bb->loop_father != e2->src->loop_father
806 && !loop_exit_edge_p (e2->src->loop_father, e2))
807 || (e2->src->loop_father != e2->dest->loop_father
808 && !loop_exit_edge_p (e2->src->loop_father, e2)))
810 /* Since this case is not handled by our special code
811 to thread through a loop header, we must explicitly
812 cancel the threading request here. */
813 delete_jump_thread_path (path);
814 e->aux = NULL;
815 continue;
818 /* Another case occurs when trying to thread through our
819 own loop header, possibly from inside the loop. We will
820 thread these later. */
821 unsigned int i;
822 for (i = 1; i < path->length (); i++)
824 if ((*path)[i]->e->src == bb->loop_father->header
825 && (!loop_exit_edge_p (bb->loop_father, e2)
826 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
827 break;
830 if (i != path->length ())
831 continue;
834 if (e->dest == e2->src)
835 update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
836 e->count, (*THREAD_PATH (e))[1]->e);
838 /* Insert the outgoing edge into the hash table if it is not
839 already in the hash table. */
840 lookup_redirection_data (e, INSERT);
843 /* We do not update dominance info. */
844 free_dominance_info (CDI_DOMINATORS);
846 /* We know we only thread through the loop header to loop exits.
847 Let the basic block duplication hook know we are not creating
848 a multiple entry loop. */
849 if (noloop_only
850 && bb == bb->loop_father->header)
851 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
853 /* Now create duplicates of BB.
855 Note that for a block with a high outgoing degree we can waste
856 a lot of time and memory creating and destroying useless edges.
858 So we first duplicate BB and remove the control structure at the
859 tail of the duplicate as well as all outgoing edges from the
860 duplicate. We then use that duplicate block as a template for
861 the rest of the duplicates. */
862 local_info.template_block = NULL;
863 local_info.bb = bb;
864 local_info.jumps_threaded = false;
865 redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
866 (&local_info);
868 /* The template does not have an outgoing edge. Create that outgoing
869 edge and update PHI nodes at the edge's target as necessary.
871 We do this after creating all the duplicates to avoid creating
872 unnecessary edges. */
873 redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
874 (&local_info);
876 /* The hash table traversals above created the duplicate blocks (and the
877 statements within the duplicate blocks). This loop creates PHI nodes for
878 the duplicated blocks and redirects the incoming edges into BB to reach
879 the duplicates of BB. */
880 redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
881 (&local_info);
883 /* Done with this block. Clear REDIRECTION_DATA. */
884 redirection_data.dispose ();
886 if (noloop_only
887 && bb == bb->loop_father->header)
888 set_loop_copy (bb->loop_father, NULL);
890 /* Indicate to our caller whether or not any jumps were threaded. */
891 return local_info.jumps_threaded;
894 /* Wrapper for thread_block_1 so that we can first handle jump
895 thread paths which do not involve copying joiner blocks, then
896 handle jump thread paths which have joiner blocks.
898 By doing things this way we can be as aggressive as possible and
899 not worry that copying a joiner block will create a jump threading
900 opportunity. */
902 static bool
903 thread_block (basic_block bb, bool noloop_only)
905 bool retval;
906 retval = thread_block_1 (bb, noloop_only, false);
907 retval |= thread_block_1 (bb, noloop_only, true);
908 return retval;
912 /* Threads edge E through E->dest along the jump thread path stored in E->aux. Returns the
913 copy of E->dest created during threading, or E->dest if it was not necessary
914 to copy it (E is its single predecessor). */
916 static basic_block
917 thread_single_edge (edge e)
919 basic_block bb = e->dest;
920 struct redirection_data rd;
921 vec<jump_thread_edge *> *path = THREAD_PATH (e);
922 edge eto = (*path)[1]->e;
924 for (unsigned int i = 0; i < path->length (); i++)
925 delete (*path)[i];
926 delete path;
927 e->aux = NULL;
929 thread_stats.num_threaded_edges++;
931 if (single_pred_p (bb))
933 /* If BB has just a single predecessor, we should only remove the
934 control statements at its end, and successors except for ETO. */
935 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
937 /* And fixup the flags on the single remaining edge. */
938 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
939 eto->flags |= EDGE_FALLTHRU;
941 return bb;
944 /* Otherwise, we need to create a copy. */
945 if (e->dest == eto->src)
946 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
948 vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
949 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
950 npath->safe_push (x);
952 x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
953 npath->safe_push (x);
954 rd.path = npath;
956 create_block_for_threading (bb, &rd, 0);
957 remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
958 create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0]);
960 if (dump_file && (dump_flags & TDF_DETAILS))
961 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
962 e->src->index, e->dest->index, rd.dup_blocks[0]->index);
964 rd.dup_blocks[0]->count = e->count;
965 rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
966 single_succ_edge (rd.dup_blocks[0])->count = e->count;
967 redirect_edge_and_branch (e, rd.dup_blocks[0]);
968 flush_pending_stmts (e);
970 return rd.dup_blocks[0];
973 /* Callback for dfs_enumerate_from. Returns true if BB is different
974 from STOP and DBDS_CE_STOP. */
976 static basic_block dbds_ce_stop;
977 static bool
978 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
980 return (bb != (const_basic_block) stop
981 && bb != dbds_ce_stop);
984 /* Evaluates the dominance relationship of latch of the LOOP and BB, and
985 returns the state. */
987 enum bb_dom_status
988 {
989 /* BB does not dominate latch of the LOOP. */
990 DOMST_NONDOMINATING,
991 /* The LOOP is broken (there is no path from the header to its latch). */
992 DOMST_LOOP_BROKEN,
993 /* BB dominates the latch of the LOOP. */
994 DOMST_DOMINATING
995 };
997 static enum bb_dom_status
998 determine_bb_domination_status (struct loop *loop, basic_block bb)
1000 basic_block *bblocks;
1001 unsigned nblocks, i;
1002 bool bb_reachable = false;
1003 edge_iterator ei;
1004 edge e;
1006 /* This function assumes BB is a successor of LOOP->header.
1007 If that is not the case return DOMST_NONDOMINATING which
1008 is always safe. */
1010 bool ok = false;
1012 FOR_EACH_EDGE (e, ei, bb->preds)
1014 if (e->src == loop->header)
1016 ok = true;
1017 break;
1021 if (!ok)
1022 return DOMST_NONDOMINATING;
1025 if (bb == loop->latch)
1026 return DOMST_DOMINATING;
1028 /* Check that BB dominates LOOP->latch, and that it is back-reachable
1029 from it. */
1031 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1032 dbds_ce_stop = loop->header;
1033 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1034 bblocks, loop->num_nodes, bb);
1035 for (i = 0; i < nblocks; i++)
1036 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1038 if (e->src == loop->header)
1040 free (bblocks);
1041 return DOMST_NONDOMINATING;
1043 if (e->src == bb)
1044 bb_reachable = true;
1047 free (bblocks);
1048 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
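/* A sketch of the three outcomes: the walk starts at LOOP->latch and
   proceeds backward, stopping at LOOP->header and BB.  If it ever sees
   an edge out of the header, some header-to-latch path bypasses BB, so
   the answer is DOMST_NONDOMINATING.  If it finishes without seeing an
   edge out of BB either, the header no longer reaches the latch at all:
   DOMST_LOOP_BROKEN.  Otherwise every header-to-latch path funnels
   through BB: DOMST_DOMINATING.  */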
1051 /* Return true if BB is part of the new pre-header that is created
1052 when threading the latch to DATA. */
1054 static bool
1055 def_split_header_continue_p (const_basic_block bb, const void *data)
1057 const_basic_block new_header = (const_basic_block) data;
1058 const struct loop *l;
1060 if (bb == new_header
1061 || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
1062 return false;
1063 for (l = bb->loop_father; l; l = loop_outer (l))
1064 if (l == new_header->loop_father)
1065 return true;
1066 return false;
1069 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
1070 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1071 to the inside of the loop. */
1073 static bool
1074 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1076 basic_block header = loop->header;
1077 edge e, tgt_edge, latch = loop_latch_edge (loop);
1078 edge_iterator ei;
1079 basic_block tgt_bb, atgt_bb;
1080 enum bb_dom_status domst;
1082 /* We have already threaded through headers to exits, so all the threading
1083 requests now are to the inside of the loop. We need to avoid creating
1084 irreducible regions (i.e., loops with more than one entry block), and
1085 also loops with several latch edges, or new subloops of the loop (although
1086 there are cases where it might be appropriate, it is difficult to decide,
1087 and doing it wrongly may confuse other optimizers).
1089 We could handle more general cases here. However, the intention is to
1090 preserve some information about the loop, which is impossible if its
1091 structure changes significantly, in a way that is not well understood.
1092 Thus we only handle a few important special cases, in which also updating
1093 of the loop-carried information should be feasible:
1095 1) Propagation of latch edge to a block that dominates the latch block
1096 of a loop. This aims to handle the following idiom:
1098 first = 1;
1099 while (1)
1101 if (first)
1102 initialize;
1103 first = 0;
1104 body;
1107 After threading the latch edge, this becomes
1109 first = 1;
1110 if (first)
1111 initialize;
1112 while (1)
1114 first = 0;
1115 body;
1118 The original header of the loop is moved out of it, and we may thread
1119 the remaining edges through it without further constraints.
1121 2) All entry edges are propagated to a single basic block that dominates
1122 the latch block of the loop. This aims to handle the following idiom
1123 (normally created for "for" loops):
1125 i = 0;
1126 while (1)
1128 if (i >= 100)
1129 break;
1130 body;
1131 i++;
1134 This becomes
1136 i = 0;
1137 while (1)
1139 body;
1140 i++;
1141 if (i >= 100)
1142 break;
1146 /* Threading through the header won't improve the code if the header has just
1147 one successor. */
1148 if (single_succ_p (header))
1149 goto fail;
1151 /* If we threaded the latch using a joiner block, we cancel the
1152 threading opportunity out of an abundance of caution. However,
1153 still allow threading from outside to inside the loop. */
1154 if (latch->aux)
1156 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1157 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1159 delete_jump_thread_path (path);
1160 latch->aux = NULL;
1164 if (latch->aux)
1166 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1167 tgt_edge = (*path)[1]->e;
1168 tgt_bb = tgt_edge->dest;
1170 else if (!may_peel_loop_headers
1171 && !redirection_block_p (loop->header))
1172 goto fail;
1173 else
1175 tgt_bb = NULL;
1176 tgt_edge = NULL;
1177 FOR_EACH_EDGE (e, ei, header->preds)
1179 if (!e->aux)
1181 if (e == latch)
1182 continue;
1184 /* If latch is not threaded, and there is a header
1185 edge that is not threaded, we would create a loop
1186 with multiple entries. */
1187 goto fail;
1190 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1192 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1193 goto fail;
1194 tgt_edge = (*path)[1]->e;
1195 atgt_bb = tgt_edge->dest;
1196 if (!tgt_bb)
1197 tgt_bb = atgt_bb;
1198 /* Two targets of threading would make us create a loop
1199 with multiple entries. */
1200 else if (tgt_bb != atgt_bb)
1201 goto fail;
1204 if (!tgt_bb)
1206 /* There are no threading requests. */
1207 return false;
1210 /* Redirecting to an empty loop latch is useless. */
1211 if (tgt_bb == loop->latch
1212 && empty_block_p (loop->latch))
1213 goto fail;
1216 /* The target block must dominate the loop latch, otherwise we would be
1217 creating a subloop. */
1218 domst = determine_bb_domination_status (loop, tgt_bb);
1219 if (domst == DOMST_NONDOMINATING)
1220 goto fail;
1221 if (domst == DOMST_LOOP_BROKEN)
1223 /* If the loop ceased to exist, mark it as such, and thread through its
1224 original header. */
1225 loop->header = NULL;
1226 loop->latch = NULL;
1227 loops_state_set (LOOPS_NEED_FIXUP);
1228 return thread_block (header, false);
1231 if (tgt_bb->loop_father->header == tgt_bb)
1233 /* If the target of the threading is a header of a subloop, we need
1234 to create a preheader for it, so that the headers of the two loops
1235 do not merge. */
1236 if (EDGE_COUNT (tgt_bb->preds) > 2)
1238 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1239 gcc_assert (tgt_bb != NULL);
1241 else
1242 tgt_bb = split_edge (tgt_edge);
1245 if (latch->aux)
1247 basic_block *bblocks;
1248 unsigned nblocks, i;
1250 /* First handle the case where the latch edge is redirected. We are copying
1251 the loop header but not creating a multiple entry loop. Make the
1252 cfg manipulation code aware of that fact. */
1253 set_loop_copy (loop, loop);
1254 loop->latch = thread_single_edge (latch);
1255 set_loop_copy (loop, NULL);
1256 gcc_assert (single_succ (loop->latch) == tgt_bb);
1257 loop->header = tgt_bb;
1259 /* Remove the new pre-header blocks from our loop. */
1260 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1261 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1262 bblocks, loop->num_nodes, tgt_bb);
1263 for (i = 0; i < nblocks; i++)
1264 if (bblocks[i]->loop_father == loop)
1266 remove_bb_from_loops (bblocks[i]);
1267 add_bb_to_loop (bblocks[i], loop_outer (loop));
1269 free (bblocks);
1271 /* If the new header has multiple latches, mark it so. */
1272 FOR_EACH_EDGE (e, ei, loop->header->preds)
1273 if (e->src->loop_father == loop
1274 && e->src != loop->latch)
1276 loop->latch = NULL;
1277 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1280 /* Cancel remaining threading requests that would make the
1281 loop a multiple entry loop. */
1282 FOR_EACH_EDGE (e, ei, header->preds)
1284 edge e2;
1286 if (e->aux == NULL)
1287 continue;
1289 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1290 e2 = path->last ()->e;
1292 if (e->src->loop_father != e2->dest->loop_father
1293 && e2->dest != loop->header)
1295 delete_jump_thread_path (path);
1296 e->aux = NULL;
1300 /* Thread the remaining edges through the former header. */
1301 thread_block (header, false);
1303 else
1305 basic_block new_preheader;
1307 /* Now consider the case where entry edges are redirected to the new entry
1308 block. Remember one entry edge, so that we can find the new
1309 preheader (its destination after threading). */
1310 FOR_EACH_EDGE (e, ei, header->preds)
1312 if (e->aux)
1313 break;
1316 /* The duplicate of the header is the new preheader of the loop. Ensure
1317 that it is placed correctly in the loop hierarchy. */
1318 set_loop_copy (loop, loop_outer (loop));
1320 thread_block (header, false);
1321 set_loop_copy (loop, NULL);
1322 new_preheader = e->dest;
1324 /* Create the new latch block. This is always necessary, as the latch
1325 must have only a single successor, but the original header had at
1326 least two successors. */
1327 loop->latch = NULL;
1328 mfb_kj_edge = single_succ_edge (new_preheader);
1329 loop->header = mfb_kj_edge->dest;
1330 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1331 loop->header = latch->dest;
1332 loop->latch = latch->src;
1335 return true;
1337 fail:
1338 /* We failed to thread anything. Cancel the requests. */
1339 FOR_EACH_EDGE (e, ei, header->preds)
1341 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1343 if (path)
1345 delete_jump_thread_path (path);
1346 e->aux = NULL;
1349 return false;
1352 /* E1 and E2 are edges into the same basic block. Return TRUE if the
1353 PHI arguments associated with those edges are equal or there are no
1354 PHI arguments, otherwise return FALSE. */
1356 static bool
1357 phi_args_equal_on_edges (edge e1, edge e2)
1359 gimple_stmt_iterator gsi;
1360 int indx1 = e1->dest_idx;
1361 int indx2 = e2->dest_idx;
1363 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1365 gimple phi = gsi_stmt (gsi);
1367 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1368 gimple_phi_arg_def (phi, indx2), 0))
1369 return false;
1371 return true;
1374 /* Walk through the registered jump threads and convert them into a
1375 form convenient for this pass.
1377 Any block which has incoming edges threaded to outgoing edges
1378 will have its entry in THREADED_BLOCK set.
1380 Any threaded edge will have its jump threading path stored in its
1381 AUX field.
1383 This form avoids the need to walk all the edges in the CFG to
1384 discover blocks which need processing and avoids unnecessary
1385 hash table lookups to map from threaded edge to new target. */
1387 static void
1388 mark_threaded_blocks (bitmap threaded_blocks)
1390 unsigned int i;
1391 bitmap_iterator bi;
1392 bitmap tmp = BITMAP_ALLOC (NULL);
1393 basic_block bb;
1394 edge e;
1395 edge_iterator ei;
1397 /* Move the jump threading requests from PATHS to each edge
1398 which starts a jump thread path. */
1399 for (i = 0; i < paths.length (); i++)
1401 vec<jump_thread_edge *> *path = paths[i];
1402 edge e = (*path)[0]->e;
1403 e->aux = (void *)path;
1404 bitmap_set_bit (tmp, e->dest->index);
1409 /* If optimizing for size, only thread through a block if we don't have
1410 to duplicate it or it's an otherwise empty redirection block. */
1411 if (optimize_function_for_size_p (cfun))
1413 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1415 bb = BASIC_BLOCK_FOR_FN (cfun, i);
1416 if (EDGE_COUNT (bb->preds) > 1
1417 && !redirection_block_p (bb))
1419 FOR_EACH_EDGE (e, ei, bb->preds)
1421 if (e->aux)
1423 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1424 delete_jump_thread_path (path);
1425 e->aux = NULL;
1429 else
1430 bitmap_set_bit (threaded_blocks, i);
1433 else
1434 bitmap_copy (threaded_blocks, tmp);
1436 /* Look for jump threading paths which cross multiple loop headers.
1438 The code to thread through loop headers will change the CFG in ways
1439 that break assumptions made by the loop optimization code.
1441 We don't want to blindly cancel the requests. We can instead do better
1442 by trimming off the end of the jump thread path. */
1443 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1445 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1446 FOR_EACH_EDGE (e, ei, bb->preds)
1448 if (e->aux)
1450 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1452 for (unsigned int i = 0, crossed_headers = 0;
1453 i < path->length ();
1454 i++)
1456 basic_block dest = (*path)[i]->e->dest;
1457 crossed_headers += (dest == dest->loop_father->header);
1458 if (crossed_headers > 1)
1460 /* Trim from entry I onwards. */
1461 for (unsigned int j = i; j < path->length (); j++)
1462 delete (*path)[j];
1463 path->truncate (i);
1465 /* Now that we've truncated the path, make sure
1466 what's left is still valid. We need at least
1467 two edges on the path and the last edge can not
1468 be a joiner. This should never happen, but let's
1469 be safe. */
1470 if (path->length () < 2
1471 || (path->last ()->type
1472 == EDGE_COPY_SRC_JOINER_BLOCK))
1474 delete_jump_thread_path (path);
1475 e->aux = NULL;
1477 break;
1484 /* If we have a joiner block (J) which has two successors S1 and S2 and
1485 we are threading through S1 and the final destination of the thread
1486 is S2, then we must verify that any PHI nodes in S2 have the same
1487 PHI arguments for the edge J->S2 and J->S1->...->S2.
1489 We used to detect this prior to registering the jump thread, but
1490 that prohibits propagation of edge equivalences into non-dominated
1491 PHI nodes as the equivalency test might occur before propagation.
1493 This must also occur after we truncate any jump threading paths
1494 as this scenario may only show up after truncation.
1496 This works for now, but will need improvement as part of the FSA
1497 optimization.
1499 Note since we've moved the thread request data to the edges,
1500 we have to iterate on those rather than the threaded_edges vector. */
1501 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1503 bb = BASIC_BLOCK_FOR_FN (cfun, i);
1504 FOR_EACH_EDGE (e, ei, bb->preds)
1506 if (e->aux)
1508 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1509 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
1511 if (have_joiner)
1513 basic_block joiner = e->dest;
1514 edge final_edge = path->last ()->e;
1515 basic_block final_dest = final_edge->dest;
1516 edge e2 = find_edge (joiner, final_dest);
1518 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
1520 delete_jump_thread_path (path);
1521 e->aux = NULL;
1528 BITMAP_FREE (tmp);
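/* A truncation example for the loop-header check above, with a
   hypothetical path [e0, e1, e2, e3] where e1->dest is one loop's
   header and e3->dest is another's: the crossed-header count reaches
   two at index 3, so entries from index 3 onward are deleted and the
   path becomes [e0, e1, e2].  If truncation would leave fewer than two
   edges, or leave a joiner as the last edge, the entire request is
   cancelled instead.  */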
1532 /* Return TRUE if BB ends with a switch statement or a computed goto.
1533 Otherwise return false. */
1534 static bool
1535 bb_ends_with_multiway_branch (basic_block bb ATTRIBUTE_UNUSED)
1537 gimple stmt = last_stmt (bb);
1538 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
1539 return true;
1540 if (stmt && gimple_code (stmt) == GIMPLE_GOTO
1541 && TREE_CODE (gimple_goto_dest (stmt)) == SSA_NAME)
1542 return true;
1543 return false;
1546 /* Walk through all blocks and thread incoming edges to the appropriate
1547 outgoing edge for each registered jump threading path.
1549 It is the caller's responsibility to fix the dominance information
1550 and rewrite duplicated SSA_NAMEs back into SSA form.
1552 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
1553 loop headers if it does not simplify the loop.
1555 Returns true if one or more edges were threaded, false otherwise. */
1557 bool
1558 thread_through_all_blocks (bool may_peel_loop_headers)
1560 bool retval = false;
1561 unsigned int i;
1562 bitmap_iterator bi;
1563 bitmap threaded_blocks;
1564 struct loop *loop;
1566 /* We must know about loops in order to preserve them. */
1567 gcc_assert (current_loops != NULL);
1569 if (!paths.exists ())
1570 return false;
1572 threaded_blocks = BITMAP_ALLOC (NULL);
1573 memset (&thread_stats, 0, sizeof (thread_stats));
1575 mark_threaded_blocks (threaded_blocks);
1577 initialize_original_copy_tables ();
1579 /* First perform the threading requests that do not affect
1580 loop structure. */
1581 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
1583 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1585 if (EDGE_COUNT (bb->preds) > 0)
1586 retval |= thread_block (bb, true);
1589 /* Then perform the threading through loop headers. We start with the
1590 innermost loop, so that the changes in cfg we perform won't affect
1591 further threading. */
1592 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1594 if (!loop->header
1595 || !bitmap_bit_p (threaded_blocks, loop->header->index))
1596 continue;
1598 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
1601 /* Any jump threading paths that are still attached to edges at this
1602 point must be one of two cases.
1604 First, we could have a jump threading path which went from outside
1605 a loop to inside a loop that was ignored because a prior jump thread
1606 across a backedge was realized (which indirectly causes the loop
1607 above to ignore the latter thread). We can detect these because the
1608 loop structures will be different and we do not currently try to
1609 optimize this case.
1611 Second, we could be threading across a backedge to a point within the
1612 same loop. This occurs for the FSA/FSM optimization and we would
1613 like to optimize it. However, we have to be very careful as this
1614 may completely scramble the loop structures, with the result being
1615 irreducible loops causing us to throw away our loop structure.
1617 As a compromise for the latter case, if the thread path ends in
1618 a block where the last statement is a multiway branch, then go
1619 ahead and thread it, else ignore it. */
1620 basic_block bb;
1621 edge e;
1622 FOR_EACH_BB_FN (bb, cfun)
1624 /* If we do end up threading here, we can remove elements from
1625 BB->preds. Thus we can not use the FOR_EACH_EDGE iterator. */
1626 for (edge_iterator ei = ei_start (bb->preds);
1627 (e = ei_safe_edge (ei));)
1628 if (e->aux)
1630 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1632 /* Case 1, threading from outside to inside the loop
1633 after we'd already threaded through the header. */
1634 if ((*path)[0]->e->dest->loop_father
1635 != path->last ()->e->src->loop_father)
1637 delete_jump_thread_path (path);
1638 e->aux = NULL;
1639 ei_next (&ei);
1641 else if (bb_ends_with_multiway_branch (path->last ()->e->src))
1643 /* The code to thread through loop headers may have
1644 split a block with jump threads attached to it.
1646 We can identify this with a disjoint jump threading
1647 path. If found, just remove it. */
1648 for (unsigned int i = 0; i < path->length () - 1; i++)
1649 if ((*path)[i]->e->dest != (*path)[i + 1]->e->src)
1651 delete_jump_thread_path (path);
1652 e->aux = NULL;
1653 ei_next (&ei);
1654 break;
1657 /* Our path is still valid, thread it. */
1658 if (e->aux)
1660 struct loop *loop = (*path)[0]->e->dest->loop_father;
1662 if (thread_block ((*path)[0]->e->dest, false))
1664 /* This jump thread likely totally scrambled this loop.
1665 So arrange for it to be fixed up. */
1666 loop->header = NULL;
1667 loop->latch = NULL;
1668 e->aux = NULL;
1670 else
1672 delete_jump_thread_path (path);
1673 e->aux = NULL;
1674 ei_next (&ei);
1678 else
1680 delete_jump_thread_path (path);
1681 e->aux = NULL;
1682 ei_next (&ei);
1685 else
1686 ei_next (&ei);
1689 statistics_counter_event (cfun, "Jumps threaded",
1690 thread_stats.num_threaded_edges);
1692 free_original_copy_tables ();
1694 BITMAP_FREE (threaded_blocks);
1695 threaded_blocks = NULL;
1696 paths.release ();
1698 if (retval)
1699 loops_state_set (LOOPS_NEED_FIXUP);
1701 return retval;
1704 /* Delete the jump threading path PATH. We have to explicitly delete
1705 each entry in the vector, then the container. */
1707 void
1708 delete_jump_thread_path (vec<jump_thread_edge *> *path)
1710 for (unsigned int i = 0; i < path->length (); i++)
1711 delete (*path)[i];
1712 path->release ();
1715 /* Dump a jump threading path, including annotations about each
1716 edge in the path. */
1718 static void
1719 dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path)
1721 fprintf (dump_file,
1722 " Registering jump thread: (%d, %d) incoming edge; ",
1723 path[0]->e->src->index, path[0]->e->dest->index);
1725 for (unsigned int i = 1; i < path.length (); i++)
1727 /* We can get paths with a NULL edge when the final destination
1728 of a jump thread turns out to be a constant address. We dump
1729 those paths when debugging, so we have to be prepared for that
1730 possibility here. */
1731 if (path[i]->e == NULL)
1732 continue;
1734 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1735 fprintf (dump_file, " (%d, %d) joiner; ",
1736 path[i]->e->src->index, path[i]->e->dest->index);
1737 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
1738 fprintf (dump_file, " (%d, %d) normal;",
1739 path[i]->e->src->index, path[i]->e->dest->index);
1740 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
1741 fprintf (dump_file, " (%d, %d) nocopy;",
1742 path[i]->e->src->index, path[i]->e->dest->index);
1744 fputc ('\n', dump_file);
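/* Given the formats above, a path entering at edge (5, 7) and then
   threading through a joiner (7, 9), a copied block (9, 11) and a
   non-copied block (11, 13) dumps roughly as:

     Registering jump thread: (5, 7) incoming edge;  (7, 9) joiner;  (9, 11) normal; (11, 13) nocopy;
   */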
1747 /* Register a jump threading opportunity. We queue up all the jump
1748 threading opportunities discovered by a pass and update the CFG
1749 and SSA form all at once.
1751 PATH describes the jump thread: its first edge is the incoming edge
1752 we can thread, and the remaining edges record where control should
1753 flow after fixing the SSA graph. */
1755 void
1756 register_jump_thread (vec<jump_thread_edge *> *path)
1758 if (!dbg_cnt (registered_jump_thread))
1760 delete_jump_thread_path (path);
1761 return;
1764 /* First make sure there are no NULL outgoing edges on the jump threading
1765 path. That can happen for jumping to a constant address. */
1766 for (unsigned int i = 0; i < path->length (); i++)
1767 if ((*path)[i]->e == NULL)
1769 if (dump_file && (dump_flags & TDF_DETAILS))
1771 fprintf (dump_file,
1772 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
1773 dump_jump_thread_path (dump_file, *path);
1776 delete_jump_thread_path (path);
1777 return;
1780 if (dump_file && (dump_flags & TDF_DETAILS))
1781 dump_jump_thread_path (dump_file, *path);
1783 if (!paths.exists ())
1784 paths.create (5);
1786 paths.safe_push (path);