/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "flags.h"
26 #include "tm_p.h"
27 #include "basic-block.h"
28 #include "function.h"
29 #include "tree-ssa.h"
30 #include "tree-ssa-threadupdate.h"
31 #include "dumpfile.h"
32 #include "cfgloop.h"
33 #include "hash-table.h"
/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
        the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
        except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
        argument associated with edge B->C.  Associate the new PHI arguments
        with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
        PHI_RESULT.  Add an argument to the PHI in B' which has the same
        value as the PHI in B associated with the edge A->B.  Associate
        the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

        5a. This automatically deletes any PHI arguments associated with the
            edge A->B in B.

        5b. This automatically associates each new argument added in step 4
            with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   Block duplication can be further minimized by using B instead of
   creating B' for one destination if all edges into B are going to be
   threaded to a successor of B.  We had code to do this at one time, but
   I'm not convinced it is correct with the changes to avoid mucking up
   the loop structure (which may cancel threading requests, thus a block
   which we thought was going to become unreachable may still be reachable).
   This code was also going to get ugly with the introduction of the ability
   for a single jump thread request to bypass multiple blocks.

   We further reduce the number of edges and statements we create by
   not copying all the outgoing edges and the control statement in
   step #1.  We instead create a template block without the outgoing
   edges and duplicate the template.  */
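
/* As an illustrative sketch (hypothetical blocks, not from any test case):
   suppose A sets x_1 = 1 and falls into B, and B ends with

       if (x_1 > 0)
         goto C;
       else
         goto D;

   On the path A->B the condition is known to be true, so we build B' from
   B (step #1 above), strip its condition and the edge to D (step #2), and
   redirect A->B to A->B' (step #5).  A now reaches C through B' without
   evaluating the condition, while the other predecessors of B still use
   the original block and its conditional.  */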
/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */
struct el
{
  edge e;
  struct el *next;
};

/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data : typed_free_remove<redirection_data>
{
  /* A duplicate of B with the trailing control statement removed and which
     targets a single successor of B.  */
  basic_block dup_block;

  /* The jump threading path.  */
  vec<jump_thread_edge *> *path;

  /* A list of incoming edges which we want to thread to the
     same path.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  typedef redirection_data value_type;
  typedef redirection_data compare_type;
  static inline hashval_t hash (const value_type *);
  static inline int equal (const value_type *, const compare_type *);
};
/* Simple hashing function.  For any given incoming edge E, we're going
   to be most concerned with the final destination of its jump thread
   path.  So hash on the block index of the final edge in the path.  */

inline hashval_t
redirection_data::hash (const value_type *p)
{
  vec<jump_thread_edge *> *path = p->path;
  return path->last ()->e->dest->index;
}

/* Given two hash table entries, return true if they have the same
   jump threading path.  */
inline int
redirection_data::equal (const value_type *p1, const compare_type *p2)
{
  vec<jump_thread_edge *> *path1 = p1->path;
  vec<jump_thread_edge *> *path2 = p2->path;

  if (path1->length () != path2->length ())
    return false;

  for (unsigned int i = 1; i < path1->length (); i++)
    {
      if ((*path1)[i]->type != (*path2)[i]->type
          || (*path1)[i]->e != (*path2)[i]->e)
        return false;
    }

  return true;
}
/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* A template copy of BB with no outgoing edges or control statement that
     we use for creating copies.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;
};

/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge pairs
   (original_edge, target_edge).  */
static vec<vec<jump_thread_edge *> *> paths;

/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
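
/* For instance, once a path has been attached to an edge E, later code in
   this file recovers it and the thread's final destination with

       vec<jump_thread_edge *> *path = THREAD_PATH (e);
       basic_block final_dest = path->last ()->e->dest;

   (see e.g. lookup_redirection_data and mark_threaded_blocks).  */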
/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;


/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */
static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
          || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
          || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}
/* Create a duplicate of BB.  Record the duplicate block in RD.  */

static void
create_block_for_threading (basic_block bb, struct redirection_data *rd)
{
  edge_iterator ei;
  edge e;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_block = duplicate_block (bb, NULL, NULL);

  FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
    e->aux = NULL;

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_block->frequency = 0;
  rd->dup_block->count = 0;
}

/* Main data structure to hold information for duplicates of BB.  */

static hash_table <redirection_data> redirection_data;
/* Given an outgoing edge E, look up and return its entry in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  INCOMING_EDGE is added to the list of incoming
   edges associated with E in the hash table.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->path = path;
  elt->dup_block = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data.find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
         relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add INCOMING_EDGE
         to the list of incoming edges associated with E.  */
      if (insert)
        {
          struct el *el = XNEW (struct el);
          el->next = elt->incoming_edges;
          el->e = e;
          elt->incoming_edges = el;
        }

      return elt;
    }
}
/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
{
  gimple_stmt_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      source_location locus = gimple_phi_arg_location (phi, src_indx);
      add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
    }
}

/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with the edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2);
    }
}
/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
                                         basic_block bb)
{
  edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);
  e->probability = REG_BR_PROB_BASE;
  e->count = bb->count;

  /* We have to copy path -- which means creating a new vector as well
     as all the jump_thread_edge entries.  */
  if (rd->path->last ()->e->aux)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (rd->path->last ()->e);
      vec<jump_thread_edge *> *copy = new vec<jump_thread_edge *> ();

      /* Sadly, the elements of the vector are pointers and need to
         be copied as well.  */
      for (unsigned int i = 0; i < path->length (); i++)
        {
          jump_thread_edge *x
            = new jump_thread_edge ((*path)[i]->e, (*path)[i]->type);
          copy->safe_push (x);
        }
      e->aux = (void *)copy;
    }
  else
    {
      e->aux = NULL;
    }

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->path->last ()->e, e);
}
/* Wire up the outgoing edges from the duplicate block and
   update any PHIs as needed.  */
void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
                               ssa_local_info_t *local_info)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);

  /* If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
    {
      edge victim;
      edge e2;

      /* This updates the PHIs at the destination of the duplicate
         block.  */
      update_destination_phis (local_info->bb, rd->dup_block);

      /* Find the edge from the duplicate block to the block we're
         threading through.  That's the edge we want to redirect.  */
      victim = find_edge (rd->dup_block, (*path)[1]->e->dest);
      e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
      e2->count = path->last ()->e->count;

      /* If we redirected the edge, then we need to copy PHI arguments
         at the target.  If the edge already existed (e2 != victim case),
         then the PHIs in the target already have the correct arguments.  */
      if (e2 == victim)
        copy_phi_args (e2->dest, path->last ()->e, e2);
    }
  else
    {
      remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
      create_edge_and_update_destination_phis (rd, rd->dup_block);
    }
}
/* Hash table traversal callback routine to create duplicate blocks.  */

static int
ssa_create_duplicates (struct redirection_data **slot,
                       ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading (local_info->bb, rd);
      local_info->template_block = rd->dup_block;

      /* We do not create any outgoing edges for the template.  We will
         take care of that in a later traversal.  That way we do not
         create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
         block.  */
      ssa_fix_duplicate_block_edges (rd, local_info);
    }

  /* Keep walking the hash table.  */
  return 1;
}

/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */
inline int
ssa_fixup_template_block (struct redirection_data **slot,
                          ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* If this is the template block halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (rd->dup_block && rd->dup_block == local_info->template_block)
    {
      ssa_fix_duplicate_block_edges (rd, local_info);
      return 0;
    }

  return 1;
}
/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

static int
ssa_redirect_edges (struct redirection_data **slot,
                    ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this
     hash table entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      /* Go ahead and free this element from the list.  Doing this now
         avoids the need for another list walk when we destroy the hash
         table.  */
      next = el->next;
      free (el);

      thread_stats.num_threaded_edges++;

      if (rd->dup_block)
        {
          edge e2;

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                     e->src->index, e->dest->index, rd->dup_block->index);

          rd->dup_block->count += e->count;

          /* Excessive jump threading may make frequencies large enough so
             the computation overflows.  */
          if (rd->dup_block->frequency < BB_FREQ_MAX * 2)
            rd->dup_block->frequency += EDGE_FREQUENCY (e);

          /* In the case of threading through a joiner block, the outgoing
             edges from the duplicate block were updated when they were
             redirected during ssa_fix_duplicate_block_edges.  */
          if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
            EDGE_SUCC (rd->dup_block, 0)->count += e->count;

          /* Redirect the incoming edge (possibly to the joiner block) to the
             appropriate duplicate block.  */
          e2 = redirect_edge_and_branch (e, rd->dup_block);
          gcc_assert (e == e2);
          flush_pending_stmts (e2);
        }

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
         to clear it will cause all kinds of unpleasant problems later.  */
      for (unsigned int i = 0; i < path->length (); i++)
        delete (*path)[i];
      path->release ();
      e->aux = NULL;
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}
/* Return true if this block has no executable statements other than
   a simple ctrl flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Advance to the first executable statement.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi)
         && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
             || is_gimple_debug (gsi_stmt (gsi))
             || gimple_nop_p (gsi_stmt (gsi))))
    gsi_next (&gsi);

  /* Check if this is an empty block.  */
  if (gsi_end_p (gsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return gsi_stmt (gsi)
         && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
             || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
             || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}
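
/* For example (hypothetical GIMPLE, for illustration only), a block
   consisting solely of

       <bb 5>:
       if (a_2 != 0)
         goto <bb 6>;
       else
         goto <bb 7>;

   satisfies redirection_block_p: labels, debug statements and nops are
   skipped, and the only executable statement is the terminating control
   statement.  */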
/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   If NOLOOP_ONLY is true, we only perform the threading as long as it
   does not affect the structure of the loops in a nontrivial way.  */
static bool
thread_block (basic_block bb, bool noloop_only)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e, e2;
  edge_iterator ei;
  ssa_local_info_t local_info;
  struct loop *loop = bb->loop_father;

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data.create (EDGE_COUNT (bb->succs));

  /* If we thread the latch of the loop to its exit, the loop ceases to
     exist.  Make sure we do not restrict ourselves in order to preserve
     this loop.  */
  if (loop->header == bb)
    {
      e = loop_latch_edge (loop);
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      if (path)
        for (unsigned int i = 1; i < path->length (); i++)
          {
            edge e2 = (*path)[i]->e;

            if (loop_exit_edge_p (loop, e2))
              {
                loop->header = NULL;
                loop->latch = NULL;
                loops_state_set (LOOPS_NEED_FIXUP);
              }
          }
    }

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->aux == NULL)
        continue;

      vec<jump_thread_edge *> *path = THREAD_PATH (e);
      e2 = path->last ()->e;
      if (!e2 || noloop_only)
        {
          /* If NOLOOP_ONLY is true, we only allow threading through the
             header of a loop to exit edges.

             There are two cases to consider.  The first when BB is the
             loop header.  We will attempt to thread this elsewhere, so
             we can just continue here.  */

          if (bb == bb->loop_father->header
              && (!loop_exit_edge_p (bb->loop_father, e2)
                  || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
            continue;

          /* The second occurs when there was a loop header buried in a
             jump threading path.  We do not try to thread this elsewhere,
             so just cancel the jump threading request by clearing the AUX
             field now.  */
          if ((bb->loop_father != e2->src->loop_father
               && !loop_exit_edge_p (e2->src->loop_father, e2))
              || (e2->src->loop_father != e2->dest->loop_father
                  && !loop_exit_edge_p (e2->src->loop_father, e2)))
            {
              /* Since this case is not handled by our special code
                 to thread through a loop header, we must explicitly
                 cancel the threading request here.  */
              for (unsigned int i = 0; i < path->length (); i++)
                delete (*path)[i];
              path->release ();
              e->aux = NULL;
              continue;
            }
        }

      if (e->dest == e2->src)
        update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
                                         e->count, (*THREAD_PATH (e))[1]->e);

      /* Insert the outgoing edge into the hash table if it is not
         already in the hash table.  */
      lookup_redirection_data (e, INSERT);
    }

  /* We do not update dominance info.  */
  free_dominance_info (CDI_DOMINATORS);

  /* We know we only thread through the loop header to loop exits.
     Let the basic block duplication hook know we are not creating
     a multiple entry loop.  */
  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
                            (&local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes at the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
                            (&local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
                            (&local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  redirection_data.dispose ();

  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, NULL);

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}
/* Threads edge E through E->dest to the edge THREAD_TARGET (E).  Returns the
   copy of E->dest created during threading, or E->dest if it was not necessary
   to copy it (E is its single predecessor).  */

static basic_block
thread_single_edge (edge e)
{
  basic_block bb = e->dest;
  struct redirection_data rd;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge eto = (*path)[1]->e;

  for (unsigned int i = 0; i < path->length (); i++)
    delete (*path)[i];
  delete path;
  e->aux = NULL;

  thread_stats.num_threaded_edges++;

  if (single_pred_p (bb))
    {
      /* If BB has just a single predecessor, we should only remove the
         control statement at its end, and the successors except for ETO.  */
      remove_ctrl_stmt_and_useless_edges (bb, eto->dest);

      /* And fixup the flags on the single remaining edge.  */
      eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
      eto->flags |= EDGE_FALLTHRU;

      return bb;
    }

  /* Otherwise, we need to create a copy.  */
  if (e->dest == eto->src)
    update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);

  vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
  jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
  npath->safe_push (x);

  x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
  npath->safe_push (x);
  rd.path = npath;

  create_block_for_threading (bb, &rd);
  remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
  create_edge_and_update_destination_phis (&rd, rd.dup_block);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
             e->src->index, e->dest->index, rd.dup_block->index);

  rd.dup_block->count = e->count;
  rd.dup_block->frequency = EDGE_FREQUENCY (e);
  single_succ_edge (rd.dup_block)->count = e->count;
  redirect_edge_and_branch (e, rd.dup_block);
  flush_pending_stmts (e);

  return rd.dup_block;
}
/* Callback for dfs_enumerate_from.  Returns true if BB is different
   from STOP and DBDS_CE_STOP.  */

static basic_block dbds_ce_stop;
static bool
dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
{
  return (bb != (const_basic_block) stop
          && bb != dbds_ce_stop);
}

/* Evaluates the dominance relationship of latch of the LOOP and BB, and
   returns the state.  */

enum bb_dom_status
{
  /* BB does not dominate latch of the LOOP.  */
  DOMST_NONDOMINATING,

  /* The LOOP is broken (there is no path from the header to its latch).  */
  DOMST_LOOP_BROKEN,

  /* BB dominates the latch of the LOOP.  */
  DOMST_DOMINATING
};
static enum bb_dom_status
determine_bb_domination_status (struct loop *loop, basic_block bb)
{
  basic_block *bblocks;
  unsigned nblocks, i;
  bool bb_reachable = false;
  edge_iterator ei;
  edge e;

  /* This function assumes BB is a successor of LOOP->header.
     If that is not the case return DOMST_NONDOMINATING which
     is always safe.  */
    {
      bool ok = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->src == loop->header)
            {
              ok = true;
              break;
            }
        }

      if (!ok)
        return DOMST_NONDOMINATING;
    }

  if (bb == loop->latch)
    return DOMST_DOMINATING;

  /* Check that BB dominates LOOP->latch, and that it is back-reachable
     from it.  */

  bblocks = XCNEWVEC (basic_block, loop->num_nodes);
  dbds_ce_stop = loop->header;
  nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
                                bblocks, loop->num_nodes, bb);
  for (i = 0; i < nblocks; i++)
    FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
      {
        if (e->src == loop->header)
          {
            free (bblocks);
            return DOMST_NONDOMINATING;
          }
        if (e->src == bb)
          bb_reachable = true;
      }

  free (bblocks);
  return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
}
/* Return true if BB is part of the new pre-header that is created
   when threading the latch to DATA.  */

static bool
def_split_header_continue_p (const_basic_block bb, const void *data)
{
  const_basic_block new_header = (const_basic_block) data;
  const struct loop *l;

  if (bb == new_header
      || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
    return false;
  for (l = bb->loop_father; l; l = loop_outer (l))
    if (l == new_header->loop_father)
      return true;
  return false;
}
/* Thread jumps through the header of LOOP.  Returns true if cfg changes.
   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
   to the inside of the loop.  */

static bool
thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
{
  basic_block header = loop->header;
  edge e, tgt_edge, latch = loop_latch_edge (loop);
  edge_iterator ei;
  basic_block tgt_bb, atgt_bb;
  enum bb_dom_status domst;

  /* We have already threaded through headers to exits, so all the threading
     requests now are to the inside of the loop.  We need to avoid creating
     irreducible regions (i.e., loops with more than one entry block), and
     also loops with several latch edges, or new subloops of the loop
     (although there are cases where it might be appropriate, it is difficult
     to decide, and doing it wrongly may confuse other optimizers).

     We could handle more general cases here.  However, the intention is to
     preserve some information about the loop, which is impossible if its
     structure changes significantly, in a way that is not well understood.
     Thus we only handle a few important special cases, in which also updating
     of the loop-carried information should be feasible:

     1) Propagation of the latch edge to a block that dominates the latch
        block of a loop.  This aims to handle the following idiom:

        first = 1;
        while (1)
          {
            if (first)
              initialize;
            first = 0;
            body;
          }

        After threading the latch edge, this becomes

        first = 1;
        if (first)
          initialize;
        while (1)
          {
            first = 0;
            body;
          }

        The original header of the loop is moved out of it, and we may thread
        the remaining edges through it without further constraints.

     2) All entry edges are propagated to a single basic block that dominates
        the latch block of the loop.  This aims to handle the following idiom
        (normally created for "for" loops):

        i = 0;
        while (1)
          {
            if (i >= 100)
              break;
            body;
            i++;
          }

        This becomes

        i = 0;
        while (1)
          {
            body;
            i++;
            if (i >= 100)
              break;
          }
     */
  /* Threading through the header won't improve the code if the header has just
     one successor.  */
  if (single_succ_p (header))
    goto fail;

  if (latch->aux)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (latch);
      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
        goto fail;
      tgt_edge = (*path)[1]->e;
      tgt_bb = tgt_edge->dest;
    }
  else if (!may_peel_loop_headers
           && !redirection_block_p (loop->header))
    goto fail;
  else
    {
      tgt_bb = NULL;
      tgt_edge = NULL;
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (!e->aux)
            {
              if (e == latch)
                continue;

              /* If the latch is not threaded, and there is a header
                 edge that is not threaded, we would create a loop
                 with multiple entries.  */
              goto fail;
            }

          vec<jump_thread_edge *> *path = THREAD_PATH (e);

          if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
            goto fail;
          tgt_edge = (*path)[1]->e;
          atgt_bb = tgt_edge->dest;
          if (!tgt_bb)
            tgt_bb = atgt_bb;
          /* Two targets of threading would make us create a loop
             with multiple entries.  */
          else if (tgt_bb != atgt_bb)
            goto fail;
        }

      if (!tgt_bb)
        {
          /* There are no threading requests.  */
          return false;
        }

      /* Redirecting to the empty loop latch is useless.  */
      if (tgt_bb == loop->latch
          && empty_block_p (loop->latch))
        goto fail;
    }

  /* The target block must dominate the loop latch, otherwise we would be
     creating a subloop.  */
  domst = determine_bb_domination_status (loop, tgt_bb);
  if (domst == DOMST_NONDOMINATING)
    goto fail;
  if (domst == DOMST_LOOP_BROKEN)
    {
      /* If the loop ceased to exist, mark it as such, and thread through its
         original header.  */
      loop->header = NULL;
      loop->latch = NULL;
      loops_state_set (LOOPS_NEED_FIXUP);
      return thread_block (header, false);
    }

  if (tgt_bb->loop_father->header == tgt_bb)
    {
      /* If the target of the threading is a header of a subloop, we need
         to create a preheader for it, so that the headers of the two loops
         do not merge.  */
      if (EDGE_COUNT (tgt_bb->preds) > 2)
        {
          tgt_bb = create_preheader (tgt_bb->loop_father, 0);
          gcc_assert (tgt_bb != NULL);
        }
      else
        tgt_bb = split_edge (tgt_edge);
    }

  if (latch->aux)
    {
      basic_block *bblocks;
      unsigned nblocks, i;

      /* First handle the case where the latch edge is redirected.  We are
         copying the loop header but not creating a multiple entry loop.
         Make the cfg manipulation code aware of that fact.  */
      set_loop_copy (loop, loop);
      loop->latch = thread_single_edge (latch);
      set_loop_copy (loop, NULL);
      gcc_assert (single_succ (loop->latch) == tgt_bb);
      loop->header = tgt_bb;

      /* Remove the new pre-header blocks from our loop.  */
      bblocks = XCNEWVEC (basic_block, loop->num_nodes);
      nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
                                    bblocks, loop->num_nodes, tgt_bb);
      for (i = 0; i < nblocks; i++)
        if (bblocks[i]->loop_father == loop)
          {
            remove_bb_from_loops (bblocks[i]);
            add_bb_to_loop (bblocks[i], loop_outer (loop));
          }
      free (bblocks);

      /* If the new header has multiple latches mark it so.  */
      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src->loop_father == loop
            && e->src != loop->latch)
          {
            loop->latch = NULL;
            loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
          }

      /* Cancel remaining threading requests that would make the
         loop a multiple entry loop.  */
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          edge e2;

          if (e->aux == NULL)
            continue;

          vec<jump_thread_edge *> *path = THREAD_PATH (e);
          e2 = path->last ()->e;

          if (e->src->loop_father != e2->dest->loop_father
              && e2->dest != loop->header)
            {
              for (unsigned int i = 0; i < path->length (); i++)
                delete (*path)[i];
              path->release ();
              e->aux = NULL;
            }
        }

      /* Thread the remaining edges through the former header.  */
      thread_block (header, false);
    }
  else
    {
      basic_block new_preheader;

      /* Now consider the case where the entry edges are redirected to the
         new entry block.  Remember one entry edge, so that we can find the
         new preheader (its destination after threading).  */
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (e->aux)
            break;
        }

      /* The duplicate of the header is the new preheader of the loop.  Ensure
         that it is placed correctly in the loop hierarchy.  */
      set_loop_copy (loop, loop_outer (loop));

      thread_block (header, false);
      set_loop_copy (loop, NULL);
      new_preheader = e->dest;

      /* Create the new latch block.  This is always necessary, as the latch
         must have only a single successor, but the original header had at
         least two successors.  */
      loop->latch = NULL;
      mfb_kj_edge = single_succ_edge (new_preheader);
      loop->header = mfb_kj_edge->dest;
      latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  return true;

fail:
  /* We failed to thread anything.  Cancel the requests.  */
  FOR_EACH_EDGE (e, ei, header->preds)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      if (path)
        {
          for (unsigned int i = 0; i < path->length (); i++)
            delete (*path)[i];
          path->release ();
          e->aux = NULL;
        }
    }
  return false;
}
/* E1 and E2 are edges into the same basic block.  Return TRUE if the
   PHI arguments associated with those edges are equal or there are no
   PHI arguments, otherwise return FALSE.  */

static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
  gimple_stmt_iterator gsi;
  int indx1 = e1->dest_idx;
  int indx2 = e2->dest_idx;

  for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
                            gimple_phi_arg_def (phi, indx2), 0))
        return false;
    }
  return true;
}
/* Walk through the registered jump threads and convert them into a
   form convenient for this pass.

   Any block which has incoming edges threaded to outgoing edges
   will have its entry in THREADED_BLOCK set.

   Any threaded edge will have its new outgoing edge stored in the
   original edge's AUX field.

   This form avoids the need to walk all the edges in the CFG to
   discover blocks which need processing and avoids unnecessary
   hash table lookups to map from threaded edge to new target.  */
static void
mark_threaded_blocks (bitmap threaded_blocks)
{
  unsigned int i;
  bitmap_iterator bi;
  bitmap tmp = BITMAP_ALLOC (NULL);
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* It is possible to have jump threads in which one is a subpath
     of the other.  i.e., (A, B), (B, C), (C, D) where B is a joiner
     block and (B, C), (C, D) where no joiner block exists.

     When this occurs ignore the jump thread request with the joiner
     block.  It's totally subsumed by the simpler jump thread request.

     This results in less block copying and simpler CFGs.  More importantly,
     when we duplicate the joiner block, B, in this case we will create
     a new threading opportunity that we wouldn't be able to optimize
     until the next jump threading iteration.

     So first convert the jump thread requests which do not require a
     joiner block.  */
  for (i = 0; i < paths.length (); i++)
    {
      vec<jump_thread_edge *> *path = paths[i];

      if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
        {
          edge e = (*path)[0]->e;
          e->aux = (void *)path;
          bitmap_set_bit (tmp, e->dest->index);
        }
    }

  /* Now iterate again, converting cases where we threaded through
     a joiner block, but ignoring those where we have already
     threaded through the joiner block.  */
  for (i = 0; i < paths.length (); i++)
    {
      vec<jump_thread_edge *> *path = paths[i];

      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK
          && (*path)[0]->e->aux == NULL)
        {
          edge e = (*path)[0]->e;
          e->aux = path;
          bitmap_set_bit (tmp, e->dest->index);
        }
    }

  /* If we have a joiner block (J) which has two successors S1 and S2 and
     we are threading through S1 and the final destination of the thread
     is S2, then we must verify that any PHI nodes in S2 have the same
     PHI arguments for the edge J->S2 and J->S1->...->S2.

     We used to detect this prior to registering the jump thread, but
     that prohibits propagation of edge equivalences into non-dominated
     PHI nodes as the equivalency test might occur before propagation.

     This works for now, but will need improvement as part of the FSA
     optimization.

     Note since we've moved the thread request data to the edges,
     we have to iterate on those rather than the threaded_edges vector.  */
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      bb = BASIC_BLOCK (i);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->aux)
            {
              vec<jump_thread_edge *> *path = THREAD_PATH (e);
              bool have_joiner
                = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);

              if (have_joiner)
                {
                  basic_block joiner = e->dest;
                  edge final_edge = path->last ()->e;
                  basic_block final_dest = final_edge->dest;
                  edge e2 = find_edge (joiner, final_dest);

                  if (e2 && !phi_args_equal_on_edges (e2, final_edge))
                    {
                      for (unsigned int i = 0; i < path->length (); i++)
                        delete (*path)[i];
                      path->release ();
                      e->aux = NULL;
                    }
                }
            }
        }
    }

  /* If optimizing for size, only thread through block if we don't have
     to duplicate it or it's an otherwise empty redirection block.  */
  if (optimize_function_for_size_p (cfun))
    {
      EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
        {
          bb = BASIC_BLOCK (i);
          if (EDGE_COUNT (bb->preds) > 1
              && !redirection_block_p (bb))
            {
              FOR_EACH_EDGE (e, ei, bb->preds)
                {
                  if (e->aux)
                    {
                      vec<jump_thread_edge *> *path = THREAD_PATH (e);
                      for (unsigned int i = 0; i < path->length (); i++)
                        delete (*path)[i];
                      path->release ();
                      e->aux = NULL;
                    }
                }
            }
          else
            bitmap_set_bit (threaded_blocks, i);
        }
    }
  else
    bitmap_copy (threaded_blocks, tmp);

  BITMAP_FREE (tmp);
}
/* Walk through all blocks and thread incoming edges to the appropriate
   outgoing edge for each edge pair recorded in THREADED_EDGES.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
   loop headers if it does not simplify the loop.

   Returns true if one or more edges were threaded, false otherwise.  */

bool
thread_through_all_blocks (bool may_peel_loop_headers)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;
  bitmap threaded_blocks;
  struct loop *loop;
  loop_iterator li;

  /* We must know about loops in order to preserve them.  */
  gcc_assert (current_loops != NULL);

  if (!paths.exists ())
    return false;

  threaded_blocks = BITMAP_ALLOC (NULL);
  memset (&thread_stats, 0, sizeof (thread_stats));

  mark_threaded_blocks (threaded_blocks);

  initialize_original_copy_tables ();

  /* First perform the threading requests that do not affect
     loop structure.  */
  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK (i);

      if (EDGE_COUNT (bb->preds) > 0)
        retval |= thread_block (bb, true);
    }

  /* Then perform the threading through loop headers.  We start with the
     innermost loop, so that the changes in cfg we perform won't affect
     further threading.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (!loop->header
          || !bitmap_bit_p (threaded_blocks, loop->header->index))
        continue;

      retval |= thread_through_loop_header (loop, may_peel_loop_headers);
    }

  statistics_counter_event (cfun, "Jumps threaded",
                            thread_stats.num_threaded_edges);

  free_original_copy_tables ();

  BITMAP_FREE (threaded_blocks);
  threaded_blocks = NULL;
  paths.release ();

  if (retval)
    loops_state_set (LOOPS_NEED_FIXUP);

  return retval;
}
/* Dump a jump threading path, including annotations about each
   edge in the path.  */

static void
dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path)
{
  fprintf (dump_file,
           "  Registering jump thread: (%d, %d) incoming edge; ",
           path[0]->e->src->index, path[0]->e->dest->index);

  for (unsigned int i = 1; i < path.length (); i++)
    {
      /* We can get paths with a NULL edge when the final destination
         of a jump thread turns out to be a constant address.  We dump
         those paths when debugging, so we have to be prepared for that
         possibility here.  */
      if (path[i]->e == NULL)
        continue;

      if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
        fprintf (dump_file, " (%d, %d) joiner; ",
                 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_COPY_SRC_BLOCK)
        fprintf (dump_file, " (%d, %d) normal;",
                 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
        fprintf (dump_file, " (%d, %d) nocopy;",
                 path[i]->e->src->index, path[i]->e->dest->index);
    }
  fputc ('\n', dump_file);
}
/* Register a jump threading opportunity.  We queue up all the jump
   threading opportunities discovered by a pass and update the CFG
   and SSA form all at once.

   PATH describes the thread: its first entry holds the edge we can
   thread and its final entry holds the new target edge, i.e., we are
   effectively recording that the first edge's destination can be changed
   to the final edge's destination after fixing the SSA graph.  */

void
register_jump_thread (vec<jump_thread_edge *> *path)
{
  /* First make sure there are no NULL outgoing edges on the jump threading
     path.  That can happen for jumping to a constant address.  */
  for (unsigned int i = 0; i < path->length (); i++)
    if ((*path)[i]->e == NULL)
      {
        if (dump_file && (dump_flags & TDF_DETAILS))
          {
            fprintf (dump_file,
                     "Found NULL edge in jump threading path.  Cancelling jump thread:\n");
            dump_jump_thread_path (dump_file, *path);
          }

        for (unsigned int i = 0; i < path->length (); i++)
          delete (*path)[i];
        path->release ();
        return;
      }

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_jump_thread_path (dump_file, *path);

  if (!paths.exists ())
    paths.create (5);

  paths.safe_push (path);
}
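
/* A minimal sketch of how a client pass might build and register a
   two-edge jump thread, modeled on the allocations in thread_single_edge
   above; the edges E and E2 here are hypothetical placeholders:

       vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
       path->safe_push (new jump_thread_edge (e, EDGE_START_JUMP_THREAD));
       path->safe_push (new jump_thread_edge (e2, EDGE_COPY_SRC_BLOCK));
       register_jump_thread (path);

   Ownership of PATH then rests with this file; the request is freed when
   it is processed or cancelled during thread_through_all_blocks.  */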