gcc-4_9-branch/gcc/tree-ssa-threadupdate.c
1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tree.h"
24 #include "flags.h"
25 #include "basic-block.h"
26 #include "function.h"
27 #include "hash-table.h"
28 #include "tree-ssa-alias.h"
29 #include "internal-fn.h"
30 #include "gimple-expr.h"
31 #include "is-a.h"
32 #include "gimple.h"
33 #include "gimple-iterator.h"
34 #include "gimple-ssa.h"
35 #include "tree-phinodes.h"
36 #include "tree-ssa.h"
37 #include "tree-ssa-threadupdate.h"
38 #include "ssa-iterators.h"
39 #include "dumpfile.h"
40 #include "cfgloop.h"
41 #include "dbgcnt.h"
42 #include "tree-cfg.h"
43 #include "tree-pass.h"
45 /* Given a block B, update the CFG and SSA graph to reflect redirecting
46 one or more in-edges to B to instead reach the destination of an
47 out-edge from B while preserving any side effects in B.
49 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
50 side effects of executing B.
52 1. Make a copy of B (including its outgoing edges and statements). Call
53 the copy B'. Note B' has no incoming edges or PHIs at this time.
55 2. Remove the control statement at the end of B' and all outgoing edges
56 except B'->C.
58 3. Add a new argument to each PHI in C with the same value as the existing
59 argument associated with edge B->C. Associate the new PHI arguments
60 with the edge B'->C.
62 4. For each PHI in B, find or create a PHI in B' with an identical
63 PHI_RESULT. Add an argument to the PHI in B' which has the same
64 value as the PHI in B associated with the edge A->B. Associate
65 the new argument in the PHI in B' with the edge A->B.
67 5. Change the edge A->B to A->B'.
69 5a. This automatically deletes any PHI arguments associated with the
70 edge A->B in B.
72 5b. This automatically associates each new argument added in step 4
73 with the edge A->B'.
75 6. Repeat for other incoming edges into B.
77 7. Put the duplicated resources in B and all the B' blocks into SSA form.
79 Note that block duplication can be minimized by first collecting the
80 set of unique destination blocks that the incoming edges should
81 be threaded to.
83 We reduce the number of edges and statements we create by not copying all
84 the outgoing edges and the control statement in step #1. We instead create
85 a template block without the outgoing edges and duplicate the template.
87 Another case this code handles is threading through a "joiner" block. In
88 this case, we do not know the destination of the joiner block, but one
89 of the outgoing edges from the joiner block leads to a threadable path. This
90 case largely works as outlined above, except the duplicate of the joiner
91 block still contains a full set of outgoing edges and its control statement.
92 We just redirect one of its outgoing edges to our jump threading path. */
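/* An illustrative, source-level sketch of the transformation above.  This
   is not code from this file; the function and variable names below are
   hypothetical.  Given roughly

     void
     example (int a, int *p, int *q)
     {
       if (a > 10)
         *p = 1;
       *q = *p;
       if (a > 10)
         *p += 2;
     }

   block A tests the first condition, block B contains the side effect
   "*q = *p;" followed by the second test, and block C is that test's "then"
   arm.  The outcome of B's test is already known on each edge entering B
   from A's arms, so threading duplicates B into B' holding a copy of the
   side effect (steps 1-4 above), redirects the predictable incoming edge
   to B' (steps 5-6), and B' falls through to its known successor; the
   second test is never executed on that path.  */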
95 /* Steps #5 and #6 of the above algorithm are best implemented by walking
96 all the incoming edges which thread to the same destination edge at
97 the same time. That avoids lots of table lookups to get information
98 for the destination edge.
100 To realize that implementation we create a list of incoming edges
101 which thread to the same outgoing edge. Thus to implement steps
102 #5 and #6 we traverse our hash table of outgoing edge information.
103 For each entry we walk the list of incoming edges which thread to
104 the current outgoing edge. */
106 struct el
108 edge e;
109 struct el *next;
112 /* Main data structure recording information regarding B's duplicate
113 blocks. */
115 /* We need to efficiently record the unique thread destinations of this
116 block and specific information associated with those destinations. We
117 may have many incoming edges threaded to the same outgoing edge. This
118 can be naturally implemented with a hash table. */
120 struct redirection_data : typed_free_remove<redirection_data>
122 /* We support wiring up two block duplicates in a jump threading path.
124 One is a normal block copy where we remove the control statement
125 and wire up its single remaining outgoing edge to the thread path.
127 The other is a joiner block where we leave the control statement
128 in place, but wire one of the outgoing edges to a thread path.
130 In theory we could have multiple block duplicates in a jump
131 threading path, but I haven't tried that.
133 The duplicate blocks appear in this array in the same order in
134 which they appear in the jump thread path. */
135 basic_block dup_blocks[2];
137 /* The jump threading path. */
138 vec<jump_thread_edge *> *path;
140 /* A list of incoming edges which we want to thread to the
141 same path. */
142 struct el *incoming_edges;
144 /* hash_table support. */
145 typedef redirection_data value_type;
146 typedef redirection_data compare_type;
147 static inline hashval_t hash (const value_type *);
148 static inline int equal (const value_type *, const compare_type *);
151 /* Dump a jump threading path, including annotations about each
152 edge in the path. */
154 static void
155 dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
156 bool registering)
158 fprintf (dump_file,
159 " %s%s jump thread: (%d, %d) incoming edge; ",
160 (registering ? "Registering" : "Cancelling"),
161 (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
162 path[0]->e->src->index, path[0]->e->dest->index);
164 for (unsigned int i = 1; i < path.length (); i++)
166 /* We can get paths with a NULL edge when the final destination
167 of a jump thread turns out to be a constant address. We dump
168 those paths when debugging, so we have to be prepared for that
169 possibility here. */
170 if (path[i]->e == NULL)
171 continue;
173 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
174 fprintf (dump_file, " (%d, %d) joiner; ",
175 path[i]->e->src->index, path[i]->e->dest->index);
176 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
177 fprintf (dump_file, " (%d, %d) normal;",
178 path[i]->e->src->index, path[i]->e->dest->index);
179 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
180 fprintf (dump_file, " (%d, %d) nocopy;",
181 path[i]->e->src->index, path[i]->e->dest->index);
183 fputc ('\n', dump_file);
186 /* Simple hashing function. For any given incoming edge E, we're going
187 to be most concerned with the final destination of its jump thread
188 path. So hash on the block index of the final edge in the path. */
190 inline hashval_t
191 redirection_data::hash (const value_type *p)
193 vec<jump_thread_edge *> *path = p->path;
194 return path->last ()->e->dest->index;
197 /* Given two hash table entries, return true if they have the same
198 jump threading path. */
199 inline int
200 redirection_data::equal (const value_type *p1, const compare_type *p2)
202 vec<jump_thread_edge *> *path1 = p1->path;
203 vec<jump_thread_edge *> *path2 = p2->path;
205 if (path1->length () != path2->length ())
206 return false;
208 for (unsigned int i = 1; i < path1->length (); i++)
210 if ((*path1)[i]->type != (*path2)[i]->type
211 || (*path1)[i]->e != (*path2)[i]->e)
212 return false;
215 return true;
218 /* Data structure of information to pass to hash table traversal routines. */
219 struct ssa_local_info_t
221 /* The current block we are working on. */
222 basic_block bb;
224 /* We only create a template block for the first duplicated block in a
225 jump threading path as we may need many duplicates of that block.
227 The second duplicate block in a path is specific to that path. Creating
228 and sharing a template for that block is considerably more difficult. */
229 basic_block template_block;
231 /* TRUE if we thread one or more jumps, FALSE otherwise. */
232 bool jumps_threaded;
235 /* Passes which use the jump threading code register jump threading
236 opportunities as they are discovered. We keep the registered
237 jump threading opportunities in this vector as edge pairs
238 (original_edge, target_edge). */
239 static vec<vec<jump_thread_edge *> *> paths;
241 /* When we start updating the CFG for threading, data necessary for jump
242 threading is attached to the AUX field for the incoming edge. Use these
243 macros to access the underlying structure attached to the AUX field. */
244 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
246 /* Jump threading statistics. */
248 struct thread_stats_d
250 unsigned long num_threaded_edges;
253 struct thread_stats_d thread_stats;
256 /* Remove the last statement in block BB if it is a control statement.
257 Also remove all outgoing edges except the edge which reaches DEST_BB.
258 If DEST_BB is NULL, then remove all outgoing edges. */
260 static void
261 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
263 gimple_stmt_iterator gsi;
264 edge e;
265 edge_iterator ei;
267 gsi = gsi_last_bb (bb);
269 /* If the duplicate ends with a control statement, then remove it.
271 Note that if we are duplicating the template block rather than the
272 original basic block, then the duplicate might not have any real
273 statements in it. */
274 if (!gsi_end_p (gsi)
275 && gsi_stmt (gsi)
276 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
277 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
278 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
279 gsi_remove (&gsi, true);
281 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
283 if (e->dest != dest_bb)
284 remove_edge (e);
285 else
286 ei_next (&ei);
290 /* Create a duplicate of BB. Record the duplicate block in an array
291 indexed by COUNT stored in RD. */
293 static void
294 create_block_for_threading (basic_block bb,
295 struct redirection_data *rd,
296 unsigned int count)
298 edge_iterator ei;
299 edge e;
301 /* We can use the generic block duplication code and simply remove
302 the stuff we do not need. */
303 rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
305 FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
306 e->aux = NULL;
308 /* Zero out the profile, since the block is unreachable for now. */
309 rd->dup_blocks[count]->frequency = 0;
310 rd->dup_blocks[count]->count = 0;
313 /* Main data structure to hold information for duplicates of BB. */
315 static hash_table <redirection_data> redirection_data;
317 /* Given an outgoing edge E, look up and return its entry in our hash table.
319 If INSERT is true, then we insert the entry into the hash table if
320 it is not already present. INCOMING_EDGE is added to the list of incoming
321 edges associated with E in the hash table. */
323 static struct redirection_data *
324 lookup_redirection_data (edge e, enum insert_option insert)
326 struct redirection_data **slot;
327 struct redirection_data *elt;
328 vec<jump_thread_edge *> *path = THREAD_PATH (e);
330 /* Build a hash table element so we can see if E is already
331 in the table. */
332 elt = XNEW (struct redirection_data);
333 elt->path = path;
334 elt->dup_blocks[0] = NULL;
335 elt->dup_blocks[1] = NULL;
336 elt->incoming_edges = NULL;
338 slot = redirection_data.find_slot (elt, insert);
340 /* This will only happen if INSERT is false and the entry is not
341 in the hash table. */
342 if (slot == NULL)
344 free (elt);
345 return NULL;
348 /* This will only happen if E was not in the hash table and
349 INSERT is true. */
350 if (*slot == NULL)
352 *slot = elt;
353 elt->incoming_edges = XNEW (struct el);
354 elt->incoming_edges->e = e;
355 elt->incoming_edges->next = NULL;
356 return elt;
358 /* E was in the hash table. */
359 else
361 /* Free ELT as we do not need it anymore, we will extract the
362 relevant entry from the hash table itself. */
363 free (elt);
365 /* Get the entry stored in the hash table. */
366 elt = *slot;
368 /* If insertion was requested, then we need to add INCOMING_EDGE
369 to the list of incoming edges associated with E. */
370 if (insert)
372 struct el *el = XNEW (struct el);
373 el->next = elt->incoming_edges;
374 el->e = e;
375 elt->incoming_edges = el;
378 return elt;
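/* An illustrative note, not code from this file: if two incoming edges E1
   and E2 carry threading paths that are identical from their second element
   onward (the first element is the incoming edge itself), they hash to the
   same final destination and compare equal, so the table holds a single
   redirection_data entry for both, with E2 and E1 chained on its
   INCOMING_EDGES list (newest first).  A path threading to a different
   destination gets its own entry.  */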
382 /* Similar to copy_phi_args, except that the PHI arg already exists; it just
383 does not have a value associated with it. */
385 static void
386 copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
388 int src_idx = src_e->dest_idx;
389 int tgt_idx = tgt_e->dest_idx;
391 /* Walk the PHIs in SRC_E->dest and TGT_E->dest in parallel. */
392 for (gimple_stmt_iterator gsi = gsi_start_phis (src_e->dest),
393 gsi2 = gsi_start_phis (tgt_e->dest);
394 !gsi_end_p (gsi);
395 gsi_next (&gsi), gsi_next (&gsi2))
397 gimple src_phi = gsi_stmt (gsi);
398 gimple dest_phi = gsi_stmt (gsi2);
399 tree val = gimple_phi_arg_def (src_phi, src_idx);
400 source_location locus = gimple_phi_arg_location (src_phi, src_idx);
402 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
403 gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
407 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E. */
409 static void
410 copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
412 gimple_stmt_iterator gsi;
413 int src_indx = src_e->dest_idx;
415 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
417 gimple phi = gsi_stmt (gsi);
418 source_location locus = gimple_phi_arg_location (phi, src_indx);
419 add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
423 /* We have recently made a copy of ORIG_BB, including its outgoing
424 edges. The copy is NEW_BB. Every PHI node in every direct successor of
425 ORIG_BB has a new argument associated with the edge from NEW_BB to the
426 successor. Initialize the PHI argument so that it is equal to the PHI
427 argument associated with the edge from ORIG_BB to the successor. */
429 static void
430 update_destination_phis (basic_block orig_bb, basic_block new_bb)
432 edge_iterator ei;
433 edge e;
435 FOR_EACH_EDGE (e, ei, orig_bb->succs)
437 edge e2 = find_edge (new_bb, e->dest);
438 copy_phi_args (e->dest, e, e2);
442 /* Given a duplicate block and its single destination (both stored
443 in RD), create an edge between the duplicate and its single
444 destination.
446 Add an additional argument to any PHI nodes at the single
447 destination. */
449 static void
450 create_edge_and_update_destination_phis (struct redirection_data *rd,
451 basic_block bb)
453 edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
455 rescan_loop_exit (e, true, false);
456 e->probability = REG_BR_PROB_BASE;
457 e->count = bb->count;
459 /* We used to copy the thread path here. That was added in 2007
460 and dutifully updated through the representation changes in 2013.
462 In 2013 we added code to thread from an interior node through
463 the backedge to another interior node. That runs after the code
464 to thread through loop headers from outside the loop.
466 The latter may delete edges in the CFG, including those
467 which appeared in the jump threading path we copied here. Thus
468 we'd end up using a dangling pointer.
470 After reviewing the 2007/2011 code, I can't see how anything
471 depended on copying the AUX field and clearly copying the jump
472 threading path is problematical due to embedded edge pointers.
473 It has been removed. */
474 e->aux = NULL;
476 /* If there are any PHI nodes at the destination of the outgoing edge
477 from the duplicate block, then we will need to add a new argument
478 to them. The argument should have the same value as the argument
479 associated with the outgoing edge stored in RD. */
480 copy_phi_args (e->dest, rd->path->last ()->e, e);
483 /* Look through PATH beginning at START and return TRUE if there are
484 any additional blocks that need to be duplicated. Otherwise,
485 return FALSE. */
486 static bool
487 any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
488 unsigned int start)
490 for (unsigned int i = start + 1; i < path->length (); i++)
492 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
493 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
494 return true;
496 return false;
499 /* Wire up the outgoing edges from the duplicate blocks and
500 update any PHIs as needed. */
501 void
502 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
503 ssa_local_info_t *local_info)
505 edge e = rd->incoming_edges->e;
506 vec<jump_thread_edge *> *path = THREAD_PATH (e);
508 for (unsigned int count = 0, i = 1; i < path->length (); i++)
510 /* If we were threading through a joiner block, then we want
511 to keep its control statement and redirect an outgoing edge.
512 Else we want to remove the control statement & edges, then create
513 a new outgoing edge. In both cases we may need to update PHIs. */
514 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
516 edge victim;
517 edge e2;
519 /* This updates the PHIs at the destination of the duplicate
520 block. */
521 update_destination_phis (local_info->bb, rd->dup_blocks[count]);
523 /* Find the edge from the duplicate block to the block we're
524 threading through. That's the edge we want to redirect. */
525 victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
527 /* If there are no remaining blocks on the path to duplicate,
528 then redirect VICTIM to the final destination of the jump
529 threading path. */
530 if (!any_remaining_duplicated_blocks (path, i))
532 e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
533 e2->count = path->last ()->e->count;
534 /* If we redirected the edge, then we need to copy PHI arguments
535 at the target. If the edge already existed (e2 != victim
536 case), then the PHIs in the target already have the correct
537 arguments. */
538 if (e2 == victim)
539 copy_phi_args (e2->dest, path->last ()->e, e2);
541 else
543 /* Redirect VICTIM to the next duplicated block in the path. */
544 e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
546 /* We need to update the PHIs in the next duplicated block. We
547 want the new PHI args to have the same value as they had
548 in the source of the next duplicate block.
550 Thus, we need to know which edge we traversed into the
551 source of the duplicate. Furthermore, we may have
552 traversed many edges to reach the source of the duplicate.
554 Walk through the path starting at element I until we
555 hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
556 the edge from the prior element. */
557 for (unsigned int j = i + 1; j < path->length (); j++)
559 if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
561 copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
562 break;
566 count++;
568 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
570 remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
571 create_edge_and_update_destination_phis (rd, rd->dup_blocks[count]);
572 if (count == 1)
573 single_succ_edge (rd->dup_blocks[1])->aux = NULL;
574 count++;
579 /* Hash table traversal callback routine to create duplicate blocks. */
582 ssa_create_duplicates (struct redirection_data **slot,
583 ssa_local_info_t *local_info)
585 struct redirection_data *rd = *slot;
587 /* The second duplicated block in a jump threading path is specific
588 to the path. So it gets stored in RD rather than in LOCAL_DATA.
590 Each time we're called, we have to look through the path and see
591 if a second block needs to be duplicated.
593 Note the search starts with the third edge on the path. The first
594 edge is the incoming edge, the second edge always has its source
595 duplicated. Thus we start our search with the third edge. */
596 vec<jump_thread_edge *> *path = rd->path;
597 for (unsigned int i = 2; i < path->length (); i++)
599 if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
600 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
602 create_block_for_threading ((*path)[i]->e->src, rd, 1);
603 break;
607 /* Create a template block if we have not done so already. Otherwise
608 use the template to create a new block. */
609 if (local_info->template_block == NULL)
611 create_block_for_threading ((*path)[1]->e->src, rd, 0);
612 local_info->template_block = rd->dup_blocks[0];
614 /* We do not create any outgoing edges for the template. We will
615 take care of that in a later traversal. That way we do not
616 create edges that are going to just be deleted. */
618 else
620 create_block_for_threading (local_info->template_block, rd, 0);
622 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
623 block. */
624 ssa_fix_duplicate_block_edges (rd, local_info);
627 /* Keep walking the hash table. */
628 return 1;
631 /* We did not create any outgoing edges for the template block during
632 block creation. This hash table traversal callback creates the
633 outgoing edge for the template block. */
635 inline int
636 ssa_fixup_template_block (struct redirection_data **slot,
637 ssa_local_info_t *local_info)
639 struct redirection_data *rd = *slot;
641 /* If this is the template block, halt the traversal after updating
642 it appropriately.
644 If we were threading through a joiner block, then we want
645 to keep its control statement and redirect an outgoing edge.
646 Else we want to remove the control statement & edges, then create
647 a new outgoing edge. In both cases we may need to update PHIs. */
648 if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
650 ssa_fix_duplicate_block_edges (rd, local_info);
651 return 0;
654 return 1;
657 /* Hash table traversal callback to redirect each incoming edge
658 associated with this hash table element to its new destination. */
661 ssa_redirect_edges (struct redirection_data **slot,
662 ssa_local_info_t *local_info)
664 struct redirection_data *rd = *slot;
665 struct el *next, *el;
667 /* Walk over all the incoming edges associated with this
668 hash table entry. */
669 for (el = rd->incoming_edges; el; el = next)
671 edge e = el->e;
672 vec<jump_thread_edge *> *path = THREAD_PATH (e);
674 /* Go ahead and free this element from the list. Doing this now
675 avoids the need for another list walk when we destroy the hash
676 table. */
677 next = el->next;
678 free (el);
680 thread_stats.num_threaded_edges++;
682 if (rd->dup_blocks[0])
684 edge e2;
686 if (dump_file && (dump_flags & TDF_DETAILS))
687 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
688 e->src->index, e->dest->index, rd->dup_blocks[0]->index);
690 rd->dup_blocks[0]->count += e->count;
692 /* Excessive jump threading may make frequencies large enough so
693 the computation overflows. */
694 if (rd->dup_blocks[0]->frequency < BB_FREQ_MAX * 2)
695 rd->dup_blocks[0]->frequency += EDGE_FREQUENCY (e);
697 /* In the case of threading through a joiner block, the outgoing
698 edges from the duplicate block were updated when they were
699 redirected during ssa_fix_duplicate_block_edges. */
700 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
701 EDGE_SUCC (rd->dup_blocks[0], 0)->count += e->count;
703 /* If we redirect a loop latch edge cancel its loop. */
704 if (e->src == e->src->loop_father->latch)
705 mark_loop_for_removal (e->src->loop_father);
707 /* Redirect the incoming edge (possibly to the joiner block) to the
708 appropriate duplicate block. */
709 e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
710 gcc_assert (e == e2);
711 flush_pending_stmts (e2);
714 /* Go ahead and clear E->aux. It's not needed anymore and failure
715 to clear it will cause all kinds of unpleasant problems later. */
716 delete_jump_thread_path (path);
717 e->aux = NULL;
721 /* Indicate that we actually threaded one or more jumps. */
722 if (rd->incoming_edges)
723 local_info->jumps_threaded = true;
725 return 1;
728 /* Return true if this block has no executable statements other than
729 a simple ctrl flow instruction. When the number of outgoing edges
730 is one, this is equivalent to a "forwarder" block. */
732 static bool
733 redirection_block_p (basic_block bb)
735 gimple_stmt_iterator gsi;
737 /* Advance to the first executable statement. */
738 gsi = gsi_start_bb (bb);
739 while (!gsi_end_p (gsi)
740 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
741 || is_gimple_debug (gsi_stmt (gsi))
742 || gimple_nop_p (gsi_stmt (gsi))))
743 gsi_next (&gsi);
745 /* Check if this is an empty block. */
746 if (gsi_end_p (gsi))
747 return true;
749 /* Test that we've reached the terminating control statement. */
750 return gsi_stmt (gsi)
751 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
752 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
753 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
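/* For illustration (hypothetical GIMPLE, not taken from a real dump), a
   block such as

     <bb 7>:
     # DEBUG i => i_3
     if (i_3 > 99) goto <bb 8>; else goto <bb 9>;

   satisfies redirection_block_p: labels, debug statements and nops are
   skipped and the first remaining statement is the terminating conditional.
   A block that also computed, say, j_4 = i_3 + 1 before the test would
   not.  */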
756 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
757 is reached via one or more specific incoming edges, we know which
758 outgoing edge from BB will be traversed.
760 We want to redirect those incoming edges to the target of the
761 appropriate outgoing edge. Doing so avoids a conditional branch
762 and may expose new optimization opportunities. Note that we have
763 to update dominator tree and SSA graph after such changes.
765 The key to keeping the SSA graph update manageable is to duplicate
766 the side effects occurring in BB so that those side effects still
767 occur on the paths which bypass BB after redirecting edges.
769 We accomplish this by creating duplicates of BB and arranging for
770 the duplicates to unconditionally pass control to one specific
771 successor of BB. We then revector the incoming edges into BB to
772 the appropriate duplicate of BB.
774 If NOLOOP_ONLY is true, we only perform the threading as long as it
775 does not affect the structure of the loops in a nontrivial way.
777 If JOINERS is true, then thread through joiner blocks as well. */
779 static bool
780 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
782 /* E is an incoming edge into BB that we may or may not want to
783 redirect to a duplicate of BB. */
784 edge e, e2;
785 edge_iterator ei;
786 ssa_local_info_t local_info;
788 /* To avoid scanning a linear array for the element we need we instead
789 use a hash table. For normal code there should be no noticeable
790 difference. However, if we have a block with a large number of
791 incoming and outgoing edges such linear searches can get expensive. */
792 redirection_data.create (EDGE_COUNT (bb->succs));
794 /* Record each unique threaded destination into a hash table for
795 efficient lookups. */
796 FOR_EACH_EDGE (e, ei, bb->preds)
798 if (e->aux == NULL)
799 continue;
801 vec<jump_thread_edge *> *path = THREAD_PATH (e);
803 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
804 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
805 continue;
807 e2 = path->last ()->e;
808 if (!e2 || noloop_only)
810 /* If NOLOOP_ONLY is true, we only allow threading through the
811 header of a loop to exit edges. */
813 /* One case occurs when there was a loop header buried in a jump
814 threading path that crosses loop boundaries. We do not try
815 and thread this elsewhere, so just cancel the jump threading
816 request by clearing the AUX field now. */
817 if ((bb->loop_father != e2->src->loop_father
818 && !loop_exit_edge_p (e2->src->loop_father, e2))
819 || (e2->src->loop_father != e2->dest->loop_father
820 && !loop_exit_edge_p (e2->src->loop_father, e2)))
822 /* Since this case is not handled by our special code
823 to thread through a loop header, we must explicitly
824 cancel the threading request here. */
825 delete_jump_thread_path (path);
826 e->aux = NULL;
827 continue;
830 /* Another case occurs when trying to thread through our
831 own loop header, possibly from inside the loop. We will
832 thread these later. */
833 unsigned int i;
834 for (i = 1; i < path->length (); i++)
836 if ((*path)[i]->e->src == bb->loop_father->header
837 && (!loop_exit_edge_p (bb->loop_father, e2)
838 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
839 break;
842 if (i != path->length ())
843 continue;
846 if (e->dest == e2->src)
847 update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
848 e->count, (*THREAD_PATH (e))[1]->e);
850 /* Insert the outgoing edge into the hash table if it is not
851 already in the hash table. */
852 lookup_redirection_data (e, INSERT);
855 /* We do not update dominance info. */
856 free_dominance_info (CDI_DOMINATORS);
858 /* We know we only thread through the loop header to loop exits.
859 Let the basic block duplication hook know we are not creating
860 a multiple entry loop. */
861 if (noloop_only
862 && bb == bb->loop_father->header)
863 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
865 /* Now create duplicates of BB.
867 Note that for a block with a high outgoing degree we can waste
868 a lot of time and memory creating and destroying useless edges.
870 So we first duplicate BB and remove the control structure at the
871 tail of the duplicate as well as all outgoing edges from the
872 duplicate. We then use that duplicate block as a template for
873 the rest of the duplicates. */
874 local_info.template_block = NULL;
875 local_info.bb = bb;
876 local_info.jumps_threaded = false;
877 redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
878 (&local_info);
880 /* The template does not have an outgoing edge. Create that outgoing
881 edge and update PHI nodes at the edge's target as necessary.
883 We do this after creating all the duplicates to avoid creating
884 unnecessary edges. */
885 redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
886 (&local_info);
888 /* The hash table traversals above created the duplicate blocks (and the
889 statements within the duplicate blocks). This loop creates PHI nodes for
890 the duplicated blocks and redirects the incoming edges into BB to reach
891 the duplicates of BB. */
892 redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
893 (&local_info);
895 /* Done with this block. Clear REDIRECTION_DATA. */
896 redirection_data.dispose ();
898 if (noloop_only
899 && bb == bb->loop_father->header)
900 set_loop_copy (bb->loop_father, NULL);
902 /* Indicate to our caller whether or not any jumps were threaded. */
903 return local_info.jumps_threaded;
906 /* Wrapper for thread_block_1 so that we can first handle jump
907 thread paths which do not involve copying joiner blocks, then
908 handle jump thread paths which have joiner blocks.
910 By doing things this way we can be as aggressive as possible and
911 not worry that copying a joiner block will create a jump threading
912 opportunity. */
914 static bool
915 thread_block (basic_block bb, bool noloop_only)
917 bool retval;
918 retval = thread_block_1 (bb, noloop_only, false);
919 retval |= thread_block_1 (bb, noloop_only, true);
920 return retval;
924 /* Threads edge E through E->dest to the edge THREAD_TARGET (E). Returns the
925 copy of E->dest created during threading, or E->dest if it was not necessary
926 to copy it (E is its single predecessor). */
928 static basic_block
929 thread_single_edge (edge e)
931 basic_block bb = e->dest;
932 struct redirection_data rd;
933 vec<jump_thread_edge *> *path = THREAD_PATH (e);
934 edge eto = (*path)[1]->e;
936 for (unsigned int i = 0; i < path->length (); i++)
937 delete (*path)[i];
938 delete path;
939 e->aux = NULL;
941 thread_stats.num_threaded_edges++;
943 if (single_pred_p (bb))
945 /* If BB has just a single predecessor, we should only remove the
946 control statement at its end, and the successor edges other than ETO.
947 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
949 /* And fixup the flags on the single remaining edge. */
950 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
951 eto->flags |= EDGE_FALLTHRU;
953 return bb;
956 /* Otherwise, we need to create a copy. */
957 if (e->dest == eto->src)
958 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
960 vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
961 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
962 npath->safe_push (x);
964 x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
965 npath->safe_push (x);
966 rd.path = npath;
968 create_block_for_threading (bb, &rd, 0);
969 remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
970 create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0]);
972 if (dump_file && (dump_flags & TDF_DETAILS))
973 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
974 e->src->index, e->dest->index, rd.dup_blocks[0]->index);
976 rd.dup_blocks[0]->count = e->count;
977 rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
978 single_succ_edge (rd.dup_blocks[0])->count = e->count;
979 redirect_edge_and_branch (e, rd.dup_blocks[0]);
980 flush_pending_stmts (e);
982 return rd.dup_blocks[0];
985 /* Callback for dfs_enumerate_from. Returns true if BB is different
986 from STOP and DBDS_CE_STOP. */
988 static basic_block dbds_ce_stop;
989 static bool
990 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
992 return (bb != (const_basic_block) stop
993 && bb != dbds_ce_stop);
996 /* Evaluates the dominance relationship between BB and the latch of LOOP, and
997 returns the state. */
999 enum bb_dom_status
1001 /* BB does not dominate latch of the LOOP. */
1002 DOMST_NONDOMINATING,
1003 /* The LOOP is broken (there is no path from the header to its latch). */
1004 DOMST_LOOP_BROKEN,
1005 /* BB dominates the latch of the LOOP. */
1006 DOMST_DOMINATING
1009 static enum bb_dom_status
1010 determine_bb_domination_status (struct loop *loop, basic_block bb)
1012 basic_block *bblocks;
1013 unsigned nblocks, i;
1014 bool bb_reachable = false;
1015 edge_iterator ei;
1016 edge e;
1018 /* This function assumes BB is a successor of LOOP->header.
1019 If that is not the case return DOMST_NONDOMINATING which
1020 is always safe. */
1022 bool ok = false;
1024 FOR_EACH_EDGE (e, ei, bb->preds)
1026 if (e->src == loop->header)
1028 ok = true;
1029 break;
1033 if (!ok)
1034 return DOMST_NONDOMINATING;
1037 if (bb == loop->latch)
1038 return DOMST_DOMINATING;
1040 /* Check that BB dominates LOOP->latch, and that it is back-reachable
1041 from it. */
1043 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1044 dbds_ce_stop = loop->header;
1045 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1046 bblocks, loop->num_nodes, bb);
1047 for (i = 0; i < nblocks; i++)
1048 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1050 if (e->src == loop->header)
1052 free (bblocks);
1053 return DOMST_NONDOMINATING;
1055 if (e->src == bb)
1056 bb_reachable = true;
1059 free (bblocks);
1060 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1063 /* Return true if BB is part of the new pre-header that is created
1064 when threading the latch to DATA. */
1066 static bool
1067 def_split_header_continue_p (const_basic_block bb, const void *data)
1069 const_basic_block new_header = (const_basic_block) data;
1070 const struct loop *l;
1072 if (bb == new_header
1073 || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
1074 return false;
1075 for (l = bb->loop_father; l; l = loop_outer (l))
1076 if (l == new_header->loop_father)
1077 return true;
1078 return false;
1081 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
1082 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1083 to the inside of the loop. */
1085 static bool
1086 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1088 basic_block header = loop->header;
1089 edge e, tgt_edge, latch = loop_latch_edge (loop);
1090 edge_iterator ei;
1091 basic_block tgt_bb, atgt_bb;
1092 enum bb_dom_status domst;
1094 /* We have already threaded through headers to exits, so all the threading
1095 requests now are to the inside of the loop. We need to avoid creating
1096 irreducible regions (i.e., loops with more than one entry block), and
1097 also loops with several latch edges, or new subloops of the loop (although
1098 there are cases where it might be appropriate, it is difficult to decide,
1099 and doing it wrongly may confuse other optimizers).
1101 We could handle more general cases here. However, the intention is to
1102 preserve some information about the loop, which is impossible if its
1103 structure changes significantly, in a way that is not well understood.
1104 Thus we only handle a few important special cases, in which updating
1105 the loop-carried information should also be feasible:
1107 1) Propagation of latch edge to a block that dominates the latch block
1108 of a loop. This aims to handle the following idiom:
1110 first = 1;
1111 while (1)
1113 if (first)
1114 initialize;
1115 first = 0;
1116 body;
1119 After threading the latch edge, this becomes
1121 first = 1;
1122 if (first)
1123 initialize;
1124 while (1)
1126 first = 0;
1127 body;
1130 The original header of the loop is moved out of it, and we may thread
1131 the remaining edges through it without further constraints.
1133 2) All entry edges are propagated to a single basic block that dominates
1134 the latch block of the loop. This aims to handle the following idiom
1135 (normally created for "for" loops):
1137 i = 0;
1138 while (1)
1140 if (i >= 100)
1141 break;
1142 body;
1143 i++;
1146 This becomes
1148 i = 0;
1149 while (1)
1151 body;
1152 i++;
1153 if (i >= 100)
1154 break;
1158 /* Threading through the header won't improve the code if the header has just
1159 one successor. */
1160 if (single_succ_p (header))
1161 goto fail;
1163 /* If we threaded the latch using a joiner block, we cancel the
1164 threading opportunity out of an abundance of caution. However,
1165 still allow threading from outside to inside the loop. */
1166 if (latch->aux)
1168 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1169 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1171 delete_jump_thread_path (path);
1172 latch->aux = NULL;
1176 if (latch->aux)
1178 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1179 tgt_edge = (*path)[1]->e;
1180 tgt_bb = tgt_edge->dest;
1182 else if (!may_peel_loop_headers
1183 && !redirection_block_p (loop->header))
1184 goto fail;
1185 else
1187 tgt_bb = NULL;
1188 tgt_edge = NULL;
1189 FOR_EACH_EDGE (e, ei, header->preds)
1191 if (!e->aux)
1193 if (e == latch)
1194 continue;
1196 /* If latch is not threaded, and there is a header
1197 edge that is not threaded, we would create a loop
1198 with multiple entries. */
1199 goto fail;
1202 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1204 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1205 goto fail;
1206 tgt_edge = (*path)[1]->e;
1207 atgt_bb = tgt_edge->dest;
1208 if (!tgt_bb)
1209 tgt_bb = atgt_bb;
1210 /* Two targets of threading would make us create a loop
1211 with multiple entries. */
1212 else if (tgt_bb != atgt_bb)
1213 goto fail;
1216 if (!tgt_bb)
1218 /* There are no threading requests. */
1219 return false;
1222 /* Redirecting to an empty loop latch is useless. */
1223 if (tgt_bb == loop->latch
1224 && empty_block_p (loop->latch))
1225 goto fail;
1228 /* The target block must dominate the loop latch, otherwise we would be
1229 creating a subloop. */
1230 domst = determine_bb_domination_status (loop, tgt_bb);
1231 if (domst == DOMST_NONDOMINATING)
1232 goto fail;
1233 if (domst == DOMST_LOOP_BROKEN)
1235 /* If the loop ceased to exist, mark it as such, and thread through its
1236 original header. */
1237 mark_loop_for_removal (loop);
1238 return thread_block (header, false);
1241 if (tgt_bb->loop_father->header == tgt_bb)
1243 /* If the target of the threading is a header of a subloop, we need
1244 to create a preheader for it, so that the headers of the two loops
1245 do not merge. */
1246 if (EDGE_COUNT (tgt_bb->preds) > 2)
1248 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1249 gcc_assert (tgt_bb != NULL);
1251 else
1252 tgt_bb = split_edge (tgt_edge);
1255 if (latch->aux)
1257 basic_block *bblocks;
1258 unsigned nblocks, i;
1260 /* First handle the case where the latch edge is redirected. We are copying
1261 the loop header but not creating a multiple entry loop. Make the
1262 cfg manipulation code aware of that fact. */
1263 set_loop_copy (loop, loop);
1264 loop->latch = thread_single_edge (latch);
1265 set_loop_copy (loop, NULL);
1266 gcc_assert (single_succ (loop->latch) == tgt_bb);
1267 loop->header = tgt_bb;
1269 /* Remove the new pre-header blocks from our loop. */
1270 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1271 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1272 bblocks, loop->num_nodes, tgt_bb);
1273 for (i = 0; i < nblocks; i++)
1274 if (bblocks[i]->loop_father == loop)
1276 remove_bb_from_loops (bblocks[i]);
1277 add_bb_to_loop (bblocks[i], loop_outer (loop));
1279 free (bblocks);
1281 /* If the new header has multiple latches mark it so. */
1282 FOR_EACH_EDGE (e, ei, loop->header->preds)
1283 if (e->src->loop_father == loop
1284 && e->src != loop->latch)
1286 loop->latch = NULL;
1287 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1290 /* Cancel remaining threading requests that would make the
1291 loop a multiple entry loop. */
1292 FOR_EACH_EDGE (e, ei, header->preds)
1294 edge e2;
1296 if (e->aux == NULL)
1297 continue;
1299 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1300 e2 = path->last ()->e;
1302 if (e->src->loop_father != e2->dest->loop_father
1303 && e2->dest != loop->header)
1305 delete_jump_thread_path (path);
1306 e->aux = NULL;
1310 /* Thread the remaining edges through the former header. */
1311 thread_block (header, false);
1313 else
1315 basic_block new_preheader;
1317 /* Now consider the case where entry edges are redirected to the new entry
1318 block. Remember one entry edge, so that we can find the new
1319 preheader (its destination after threading). */
1320 FOR_EACH_EDGE (e, ei, header->preds)
1322 if (e->aux)
1323 break;
1326 /* The duplicate of the header is the new preheader of the loop. Ensure
1327 that it is placed correctly in the loop hierarchy. */
1328 set_loop_copy (loop, loop_outer (loop));
1330 thread_block (header, false);
1331 set_loop_copy (loop, NULL);
1332 new_preheader = e->dest;
1334 /* Create the new latch block. This is always necessary, as the latch
1335 must have only a single successor, but the original header had at
1336 least two successors. */
1337 loop->latch = NULL;
1338 mfb_kj_edge = single_succ_edge (new_preheader);
1339 loop->header = mfb_kj_edge->dest;
1340 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1341 loop->header = latch->dest;
1342 loop->latch = latch->src;
1345 return true;
1347 fail:
1348 /* We failed to thread anything. Cancel the requests. */
1349 FOR_EACH_EDGE (e, ei, header->preds)
1351 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1353 if (path)
1355 delete_jump_thread_path (path);
1356 e->aux = NULL;
1359 return false;
1362 /* E1 and E2 are edges into the same basic block. Return TRUE if the
1363 PHI arguments associated with those edges are equal or there are no
1364 PHI arguments, otherwise return FALSE. */
1366 static bool
1367 phi_args_equal_on_edges (edge e1, edge e2)
1369 gimple_stmt_iterator gsi;
1370 int indx1 = e1->dest_idx;
1371 int indx2 = e2->dest_idx;
1373 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1375 gimple phi = gsi_stmt (gsi);
1377 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1378 gimple_phi_arg_def (phi, indx2), 0))
1379 return false;
1381 return true;
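/* An illustrative example (hypothetical names): if the final destination
   block S2 contains

     x_1 = PHI <0(J), 1(S1)>

   where J is a joiner block with successors S1 and S2, then the direct edge
   J->S2 supplies 0 while the threaded path J->S1->...->S2 supplies 1, so
   phi_args_equal_on_edges returns false for those two edges and the
   corresponding jump thread must be cancelled (see mark_threaded_blocks
   below).  */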
1384 /* Walk through the registered jump threads and convert them into a
1385 form convenient for this pass.
1387 Any block which has incoming edges threaded to outgoing edges
1388 will have its entry in THREADED_BLOCK set.
1390 Any threaded edge will have its new outgoing edge stored in the
1391 original edge's AUX field.
1393 This form avoids the need to walk all the edges in the CFG to
1394 discover blocks which need processing and avoids unnecessary
1395 hash table lookups to map from threaded edge to new target. */
1397 static void
1398 mark_threaded_blocks (bitmap threaded_blocks)
1400 unsigned int i;
1401 bitmap_iterator bi;
1402 bitmap tmp = BITMAP_ALLOC (NULL);
1403 basic_block bb;
1404 edge e;
1405 edge_iterator ei;
1407 /* It is possible to have jump threads in which one is a subpath
1408 of the other. i.e., (A, B), (B, C), (C, D) where B is a joiner
1409 block and (B, C), (C, D) where no joiner block exists.
1411 When this occurs ignore the jump thread request with the joiner
1412 block. It's totally subsumed by the simpler jump thread request.
1414 This results in less block copying and simpler CFGs. More importantly,
1415 when we duplicate the joiner block, B, in this case we will create
1416 a new threading opportunity that we wouldn't be able to optimize
1417 until the next jump threading iteration.
1419 So first convert the jump thread requests which do not require a
1420 joiner block. */
1421 for (i = 0; i < paths.length (); i++)
1423 vec<jump_thread_edge *> *path = paths[i];
1425 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
1427 edge e = (*path)[0]->e;
1428 e->aux = (void *)path;
1429 bitmap_set_bit (tmp, e->dest->index);
1433 /* Now iterate again, converting cases where we want to thread
1434 through a joiner block, but only if no other edge on the path
1435 already has a jump thread attached to it. */
1436 for (i = 0; i < paths.length (); i++)
1438 vec<jump_thread_edge *> *path = paths[i];
1440 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1442 unsigned int j;
1444 for (j = 0; j < path->length (); j++)
1445 if ((*path)[j]->e->aux != NULL)
1446 break;
1448 /* If we iterated through the entire path without exiting the loop,
1449 then we are good to go, attach the path to the starting edge. */
1450 if (j == path->length ())
1452 edge e = (*path)[0]->e;
1453 e->aux = path;
1454 bitmap_set_bit (tmp, e->dest->index);
1456 else if (dump_file && (dump_flags & TDF_DETAILS))
1458 dump_jump_thread_path (dump_file, *path, false);
1464 /* If optimizing for size, only thread through a block if we don't have
1465 to duplicate it or it's an otherwise empty redirection block. */
1466 if (optimize_function_for_size_p (cfun))
1468 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1470 bb = BASIC_BLOCK_FOR_FN (cfun, i);
1471 if (EDGE_COUNT (bb->preds) > 1
1472 && !redirection_block_p (bb))
1474 FOR_EACH_EDGE (e, ei, bb->preds)
1476 if (e->aux)
1478 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1479 delete_jump_thread_path (path);
1480 e->aux = NULL;
1484 else
1485 bitmap_set_bit (threaded_blocks, i);
1488 else
1489 bitmap_copy (threaded_blocks, tmp);
1491 /* Look for jump threading paths which cross multiple loop headers.
1493 The code to thread through loop headers will change the CFG in ways
1494 that break assumptions made by the loop optimization code.
1496 We don't want to blindly cancel the requests. We can instead do better
1497 by trimming off the end of the jump thread path. */
1498 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1500 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1501 FOR_EACH_EDGE (e, ei, bb->preds)
1503 if (e->aux)
1505 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1507 for (unsigned int i = 0, crossed_headers = 0;
1508 i < path->length ();
1509 i++)
1511 basic_block dest = (*path)[i]->e->dest;
1512 crossed_headers += (dest == dest->loop_father->header);
1513 if (crossed_headers > 1)
1515 /* Trim from entry I onwards. */
1516 for (unsigned int j = i; j < path->length (); j++)
1517 delete (*path)[j];
1518 path->truncate (i);
1520 /* Now that we've truncated the path, make sure
1521 what's left is still valid. We need at least
1522 two edges on the path and the last edge cannot
1523 be a joiner. This should never happen, but let's
1524 be safe. */
1525 if (path->length () < 2
1526 || (path->last ()->type
1527 == EDGE_COPY_SRC_JOINER_BLOCK))
1529 delete_jump_thread_path (path);
1530 e->aux = NULL;
1532 break;
1539 /* If we have a joiner block (J) which has two successors S1 and S2 and
1540 we are threading through S1 and the final destination of the thread
1541 is S2, then we must verify that any PHI nodes in S2 have the same
1542 PHI arguments for the edge J->S2 and J->S1->...->S2.
1544 We used to detect this prior to registering the jump thread, but
1545 that prohibits propagation of edge equivalences into non-dominated
1546 PHI nodes as the equivalency test might occur before propagation.
1548 This must also occur after we truncate any jump threading paths
1549 as this scenario may only show up after truncation.
1551 This works for now, but will need improvement as part of the FSA
1552 optimization.
1554 Note since we've moved the thread request data to the edges,
1555 we have to iterate on those rather than the threaded_edges vector. */
1556 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1558 bb = BASIC_BLOCK_FOR_FN (cfun, i);
1559 FOR_EACH_EDGE (e, ei, bb->preds)
1561 if (e->aux)
1563 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1564 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
1566 if (have_joiner)
1568 basic_block joiner = e->dest;
1569 edge final_edge = path->last ()->e;
1570 basic_block final_dest = final_edge->dest;
1571 edge e2 = find_edge (joiner, final_dest);
1573 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
1575 delete_jump_thread_path (path);
1576 e->aux = NULL;
1583 BITMAP_FREE (tmp);
1587 /* Return TRUE if BB ends with a switch statement or a computed goto.
1588 Otherwise return false. */
1589 static bool
1590 bb_ends_with_multiway_branch (basic_block bb ATTRIBUTE_UNUSED)
1592 gimple stmt = last_stmt (bb);
1593 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
1594 return true;
1595 if (stmt && gimple_code (stmt) == GIMPLE_GOTO
1596 && TREE_CODE (gimple_goto_dest (stmt)) == SSA_NAME)
1597 return true;
1598 return false;
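/* For illustration (hypothetical statements, not from a real dump), a block
   ending in a switch such as

     switch (x_1) <default: <L0>, case 0: <L1>, case 1: <L2>>

   or in a computed goto whose destination is an SSA_NAME, such as

     goto x_5;

   counts as a multiway branch here, while an ordinary two-way GIMPLE_COND
   does not.  */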
1601 /* Verify that the REGION is a Single Entry Multiple Exits region: make sure no
1602 edge other than ENTRY is entering the REGION. */
1604 DEBUG_FUNCTION void
1605 verify_seme (edge entry, basic_block *region, unsigned n_region)
1607 bitmap bbs = BITMAP_ALLOC (NULL);
1609 for (unsigned i = 0; i < n_region; i++)
1610 bitmap_set_bit (bbs, region[i]->index);
1612 for (unsigned i = 0; i < n_region; i++)
1614 edge e;
1615 edge_iterator ei;
1616 basic_block bb = region[i];
1618 /* All predecessors other than ENTRY->src should be in the region. */
1619 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ei_next (&ei))
1620 if (e != entry)
1621 gcc_assert (bitmap_bit_p (bbs, e->src->index));
1624 BITMAP_FREE (bbs);
1627 /* Duplicates a Single Entry Multiple Exit REGION (set of N_REGION basic
1628 blocks). The ENTRY edge is redirected to the duplicate of the region. If
1629 REGION is not a Single Entry region, ignore any incoming edges other than
1630 ENTRY: this makes the copied region a Single Entry region.
1632 Remove the last conditional statement in the last basic block in the REGION,
1633 and create a single fallthru edge pointing to the same destination as the
1634 EXIT edge.
1636 The new basic blocks are stored to REGION_COPY in the same order as they had
1637 in REGION, provided that REGION_COPY is not NULL.
1639 Returns false if it is unable to copy the region, true otherwise. */
1641 static bool
1642 duplicate_seme_region (edge entry, edge exit,
1643 basic_block *region, unsigned n_region,
1644 basic_block *region_copy)
1646 unsigned i;
1647 bool free_region_copy = false;
1648 struct loop *loop = entry->dest->loop_father;
1649 edge exit_copy;
1650 edge redirected;
1651 int total_freq = 0, entry_freq = 0;
1652 gcov_type total_count = 0, entry_count = 0;
1654 if (!can_copy_bbs_p (region, n_region))
1655 return false;
1657 /* Some sanity checking. Note that we do not check for all possible
1658 misuses of the functions. I.e. if you ask to copy something weird,
1659 it will work, but the state of structures probably will not be
1660 correct. */
1661 for (i = 0; i < n_region; i++)
1663 /* We do not handle subloops, i.e. all the blocks must belong to the
1664 same loop. */
1665 if (region[i]->loop_father != loop)
1666 return false;
1669 initialize_original_copy_tables ();
1671 set_loop_copy (loop, loop);
1673 if (!region_copy)
1675 region_copy = XNEWVEC (basic_block, n_region);
1676 free_region_copy = true;
1679 if (entry->dest->count)
1681 total_count = entry->dest->count;
1682 entry_count = entry->count;
1683 /* Fix up corner cases, to avoid division by zero or creation of negative
1684 frequencies. */
1685 if (entry_count > total_count)
1686 entry_count = total_count;
1688 else
1690 total_freq = entry->dest->frequency;
1691 entry_freq = EDGE_FREQUENCY (entry);
1692 /* Fix up corner cases, to avoid division by zero or creation of negative
1693 frequencies. */
1694 if (total_freq == 0)
1695 total_freq = 1;
1696 else if (entry_freq > total_freq)
1697 entry_freq = total_freq;
1700 copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
1701 split_edge_bb_loc (entry), 0);
1702 if (total_count)
1704 scale_bbs_frequencies_gcov_type (region, n_region,
1705 total_count - entry_count,
1706 total_count);
1707 scale_bbs_frequencies_gcov_type (region_copy, n_region, entry_count,
1708 total_count);
1710 else
1712 scale_bbs_frequencies_int (region, n_region, total_freq - entry_freq,
1713 total_freq);
1714 scale_bbs_frequencies_int (region_copy, n_region, entry_freq, total_freq);
1717 #ifdef ENABLE_CHECKING
1718 /* Make sure no edge other than ENTRY is entering the copied region. */
1719 verify_seme (entry, region_copy, n_region);
1720 #endif
1722 /* Remove the last branch in the jump thread path. */
1723 remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
1724 edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);
1726 if (e) {
1727 rescan_loop_exit (e, true, false);
1728 e->probability = REG_BR_PROB_BASE;
1729 e->count = region_copy[n_region - 1]->count;
1732 /* Redirect the entry and add the phi node arguments. */
1733 if (entry->dest == loop->header)
1734 mark_loop_for_removal (loop);
1735 redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
1736 gcc_assert (redirected != NULL);
1737 flush_pending_stmts (entry);
1739 /* Add the other PHI node arguments. */
1740 add_phi_args_after_copy (region_copy, n_region, NULL);
1742 if (free_region_copy)
1743 free (region_copy);
1745 free_original_copy_tables ();
1746 return true;
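/* An illustrative sketch of the kind of source that produces the
   EDGE_FSM_THREAD requests handled through duplicate_seme_region (the names
   below are hypothetical):

     int state = 0;
     while (more ())
       {
         switch (state)
           {
           case 0: ... state = 1; break;
           case 1: ... state = 0; break;
           }
       }

   When the value stored into `state' on a path back to the loop header is
   known, the blocks from that definition through the switch form a
   single-entry region that can be duplicated and wired directly to the
   matching case, bypassing the switch on that path.  */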
1749 /* Walk through all blocks and thread incoming edges to the appropriate
1750 outgoing edge for each edge pair recorded in THREADED_EDGES.
1752 It is the caller's responsibility to fix the dominance information
1753 and rewrite duplicated SSA_NAMEs back into SSA form.
1755 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
1756 loop headers if it does not simplify the loop.
1758 Returns true if one or more edges were threaded, false otherwise. */
1760 bool
1761 thread_through_all_blocks (bool may_peel_loop_headers)
1763 bool retval = false;
1764 unsigned int i;
1765 bitmap_iterator bi;
1766 bitmap threaded_blocks;
1767 struct loop *loop;
1769 /* We must know about loops in order to preserve them. */
1770 gcc_assert (current_loops != NULL);
1772 if (!paths.exists ())
1773 return false;
1775 threaded_blocks = BITMAP_ALLOC (NULL);
1776 memset (&thread_stats, 0, sizeof (thread_stats));
1778 /* Jump-thread all FSM threads before other jump-threads. */
1779 for (i = 0; i < paths.length ();)
1781 vec<jump_thread_edge *> *path = paths[i];
1782 edge entry = (*path)[0]->e;
1784 if ((*path)[0]->type != EDGE_FSM_THREAD
1785 /* Do not jump-thread twice from the same block. */
1786 || bitmap_bit_p (threaded_blocks, entry->src->index)) {
1787 i++;
1788 continue;
1791 unsigned len = path->length ();
1792 edge exit = (*path)[len - 1]->e;
1793 basic_block *region = XNEWVEC (basic_block, len - 1);
1795 for (unsigned int j = 0; j < len - 1; j++)
1796 region[j] = (*path)[j]->e->dest;
1798 if (duplicate_seme_region (entry, exit, region, len - 1, NULL))
1800 /* We do not update dominance info. */
1801 free_dominance_info (CDI_DOMINATORS);
1802 bitmap_set_bit (threaded_blocks, entry->src->index);
1803 retval = true;
1806 delete_jump_thread_path (path);
1807 paths.unordered_remove (i);
1810 /* Remove from PATHS all the jump-threads starting with an edge already
1811 jump-threaded. */
1812 for (i = 0; i < paths.length ();)
1814 vec<jump_thread_edge *> *path = paths[i];
1815 edge entry = (*path)[0]->e;
1817 /* Do not jump-thread twice from the same block. */
1818 if (bitmap_bit_p (threaded_blocks, entry->src->index))
1820 delete_jump_thread_path (path);
1821 paths.unordered_remove (i);
1823 else
1824 i++;
1827 bitmap_clear (threaded_blocks);
1829 mark_threaded_blocks (threaded_blocks);
1831 initialize_original_copy_tables ();
1833 /* First perform the threading requests that do not affect
1834 loop structure. */
1835 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
1837 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1839 if (EDGE_COUNT (bb->preds) > 0)
1840 retval |= thread_block (bb, true);
1843 /* Then perform the threading through loop headers. We start with the
1844 innermost loop, so that the changes in cfg we perform won't affect
1845 further threading. */
1846 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1848 if (!loop->header
1849 || !bitmap_bit_p (threaded_blocks, loop->header->index))
1850 continue;
1852 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
1855 /* Any jump threading paths that are still attached to edges at this
1856 point must be one of two cases.
1858 First, we could have a jump threading path which went from outside
1859 a loop to inside a loop that was ignored because a prior jump thread
1860 across a backedge was realized (which indirectly causes the loop
1861 above to ignore the latter thread). We can detect these because the
1862 loop structures will be different and we do not currently try to
1863 optimize this case.
1865 Second, we could be threading across a backedge to a point within the
1866 same loop. This occurs for the FSA/FSM optimization and we would
1867 like to optimize it. However, we have to be very careful as this
1868 may completely scramble the loop structures, with the result being
1869 irreducible loops causing us to throw away our loop structure.
1871 As a compromise for the latter case, if the thread path ends in
1872 a block where the last statement is a multiway branch, then go
1873 ahead and thread it, else ignore it. */
1874 basic_block bb;
1875 edge e;
1876 FOR_EACH_BB_FN (bb, cfun)
1878 /* If we do end up threading here, we can remove elements from
1879 BB->preds. Thus we cannot use the FOR_EACH_EDGE iterator. */
1880 for (edge_iterator ei = ei_start (bb->preds);
1881 (e = ei_safe_edge (ei));)
1882 if (e->aux)
1884 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1886 /* Case 1, threading from outside to inside the loop
1887 after we'd already threaded through the header. */
1888 if ((*path)[0]->e->dest->loop_father
1889 != path->last ()->e->src->loop_father)
1891 delete_jump_thread_path (path);
1892 e->aux = NULL;
1893 ei_next (&ei);
1895 else if (bb_ends_with_multiway_branch (path->last ()->e->src))
1897 /* The code to thread through loop headers may have
1898 split a block with jump threads attached to it.
1900 We can identify this with a disjoint jump threading
1901 path. If found, just remove it. */
1902 for (unsigned int i = 0; i < path->length () - 1; i++)
1903 if ((*path)[i]->e->dest != (*path)[i + 1]->e->src)
1905 delete_jump_thread_path (path);
1906 e->aux = NULL;
1907 ei_next (&ei);
1908 break;
1911 /* Our path is still valid, thread it. */
1912 if (e->aux)
1914 if (thread_block ((*path)[0]->e->dest, false))
1915 e->aux = NULL;
1916 else
1918 delete_jump_thread_path (path);
1919 e->aux = NULL;
1920 ei_next (&ei);
1924 else
1926 delete_jump_thread_path (path);
1927 e->aux = NULL;
1928 ei_next (&ei);
1931 else
1932 ei_next (&ei);
1935 statistics_counter_event (cfun, "Jumps threaded",
1936 thread_stats.num_threaded_edges);
1938 free_original_copy_tables ();
1940 BITMAP_FREE (threaded_blocks);
1941 threaded_blocks = NULL;
1942 paths.release ();
1944 if (retval)
1945 loops_state_set (LOOPS_NEED_FIXUP);
1947 return retval;
1950 /* Delete the jump threading path PATH. We have to explicitly delete
1951 each entry in the vector, then the container. */
1953 void
1954 delete_jump_thread_path (vec<jump_thread_edge *> *path)
1956 for (unsigned int i = 0; i < path->length (); i++)
1957 delete (*path)[i];
1958 path->release();
1961 /* Register a jump threading opportunity. We queue up all the jump
1962 threading opportunities discovered by a pass and update the CFG
1963 and SSA form all at once.
1965 E is the edge we can thread, E2 is the new target edge, i.e., we
1966 are effectively recording that E->dest can be changed to E2->dest
1967 after fixing the SSA graph. */
1969 void
1970 register_jump_thread (vec<jump_thread_edge *> *path)
1972 if (!dbg_cnt (registered_jump_thread))
1974 delete_jump_thread_path (path);
1975 return;
1978 /* First make sure there are no NULL outgoing edges on the jump threading
1979 path. That can happen for jumping to a constant address. */
1980 for (unsigned int i = 0; i < path->length (); i++)
1981 if ((*path)[i]->e == NULL)
1983 if (dump_file && (dump_flags & TDF_DETAILS))
1985 fprintf (dump_file,
1986 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
1987 dump_jump_thread_path (dump_file, *path, false);
1990 delete_jump_thread_path (path);
1991 return;
1994 if (dump_file && (dump_flags & TDF_DETAILS))
1995 dump_jump_thread_path (dump_file, *path, true);
1997 if (!paths.exists ())
1998 paths.create (5);
2000 paths.safe_push (path);