[official-gcc.git] / gcc / tree-ssa-threadupdate.c
1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tree.h"
24 #include "flags.h"
25 #include "basic-block.h"
26 #include "function.h"
27 #include "gimple.h"
28 #include "gimple-ssa.h"
29 #include "tree-phinodes.h"
30 #include "tree-ssa.h"
31 #include "tree-ssa-threadupdate.h"
32 #include "dumpfile.h"
33 #include "cfgloop.h"
34 #include "hash-table.h"
35 #include "dbgcnt.h"
37 /* Given a block B, update the CFG and SSA graph to reflect redirecting
38 one or more in-edges to B to instead reach the destination of an
39 out-edge from B while preserving any side effects in B.
41 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
42 side effects of executing B.
44 1. Make a copy of B (including its outgoing edges and statements). Call
45 the copy B'. Note B' has no incoming edges or PHIs at this time.
47 2. Remove the control statement at the end of B' and all outgoing edges
48 except B'->C.
50 3. Add a new argument to each PHI in C with the same value as the existing
51 argument associated with edge B->C. Associate the new PHI arguments
52 with the edge B'->C.
54 4. For each PHI in B, find or create a PHI in B' with an identical
55 PHI_RESULT. Add an argument to the PHI in B' which has the same
56 value as the PHI in B associated with the edge A->B. Associate
57 the new argument in the PHI in B' with the edge A->B.
59 5. Change the edge A->B to A->B'.
61 5a. This automatically deletes any PHI arguments associated with the
62 edge A->B in B.
64 5b. This automatically associates each new argument added in step 4
65 with the edge A->B'.
67 6. Repeat for other incoming edges into B.
69 7. Put the duplicated resources in B and all the B' blocks into SSA form.
71 Note that block duplication can be minimized by first collecting the
72 set of unique destination blocks that the incoming edges should
73 be threaded to.
75 Block duplication can be further minimized by using B instead of
76 creating B' for one destination if all edges into B are going to be
77 threaded to a successor of B. We had code to do this at one time, but
78 I'm not convinced it is correct with the changes to avoid mucking up
79 the loop structure (which may cancel threading requests, thus a block
80 which we thought was going to become unreachable may still be reachable).
81 This code was also going to get ugly with the introduction of the ability
82 for a single jump thread request to bypass multiple blocks.
84 We further reduce the number of edges and statements we create by
85 not copying all the outgoing edges and the control statement in
86 step #1. We instead create a template block without the outgoing
87 edges and duplicate the template. */
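/* (Illustrative sketch, not taken from any particular test case:
   suppose the CFG contains A->B, B->C and B->D, and we have proven
   that whenever B is entered from A the edge B->C is taken.  Applying
   the steps above yields a copy B' of B whose only outgoing edge is
   B'->C, and the edge A->B is redirected to A->B'.  B keeps its other
   incoming edges, its control statement and both outgoing edges for
   the remaining, unthreaded paths.)  */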
90 /* Steps #5 and #6 of the above algorithm are best implemented by walking
91 all the incoming edges which thread to the same destination edge at
92 the same time. That avoids lots of table lookups to get information
93 for the destination edge.
95 To realize that implementation we create a list of incoming edges
96 which thread to the same outgoing edge. Thus to implement steps
97 #5 and #6 we traverse our hash table of outgoing edge information.
98 For each entry we walk the list of incoming edges which thread to
99 the current outgoing edge. */
101 struct el
102 {
103 edge e;
104 struct el *next;
105 };
107 /* Main data structure recording information regarding B's duplicate
108 blocks. */
110 /* We need to efficiently record the unique thread destinations of this
111 block and specific information associated with those destinations. We
112 may have many incoming edges threaded to the same outgoing edge. This
113 can be naturally implemented with a hash table. */
115 struct redirection_data : typed_free_remove<redirection_data>
116 {
117 /* A duplicate of B with the trailing control statement removed and which
118 targets a single successor of B. */
119 basic_block dup_block;
121 /* The jump threading path. */
122 vec<jump_thread_edge *> *path;
124 /* A list of incoming edges which we want to thread to the
125 same path. */
126 struct el *incoming_edges;
128 /* hash_table support. */
129 typedef redirection_data value_type;
130 typedef redirection_data compare_type;
131 static inline hashval_t hash (const value_type *);
132 static inline int equal (const value_type *, const compare_type *);
133 };
135 /* Simple hashing function. For any given incoming edge E, we're going
136 to be most concerned with the final destination of its jump thread
137 path. So hash on the index of the final edge's destination block. */
139 inline hashval_t
140 redirection_data::hash (const value_type *p)
142 vec<jump_thread_edge *> *path = p->path;
143 return path->last ()->e->dest->index;
146 /* Given two hash table entries, return true if they have the same
147 jump threading path. */
148 inline int
149 redirection_data::equal (const value_type *p1, const compare_type *p2)
151 vec<jump_thread_edge *> *path1 = p1->path;
152 vec<jump_thread_edge *> *path2 = p2->path;
154 if (path1->length () != path2->length ())
155 return false;
157 for (unsigned int i = 1; i < path1->length (); i++)
159 if ((*path1)[i]->type != (*path2)[i]->type
160 || (*path1)[i]->e != (*path2)[i]->e)
161 return false;
164 return true;
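/* (As an illustration of the two routines above: two paths that end in
   the same block, say ...->X->D and ...->Y->D, land in the same hash
   bucket because hash () only looks at the final destination D, but
   equal () treats them as the same redirection only if the paths have
   the same length and every edge and edge type from index 1 onward
   matches.  Index 0 is skipped because it holds the incoming edge,
   which differs between requests that should nevertheless share one
   duplicate block.)  */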
167 /* Data structure of information to pass to hash table traversal routines. */
168 struct ssa_local_info_t
169 {
170 /* The current block we are working on. */
171 basic_block bb;
173 /* A template copy of BB with no outgoing edges or control statement that
174 we use for creating copies. */
175 basic_block template_block;
177 /* TRUE if we thread one or more jumps, FALSE otherwise. */
178 bool jumps_threaded;
179 };
181 /* Passes which use the jump threading code register jump threading
182 opportunities as they are discovered. We keep the registered
183 jump threading opportunities in this vector, one jump thread
184 path per element. */
185 static vec<vec<jump_thread_edge *> *> paths;
187 /* When we start updating the CFG for threading, data necessary for jump
188 threading is attached to the AUX field for the incoming edge. Use these
189 macros to access the underlying structure attached to the AUX field. */
190 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
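/* (Illustrative note: a registered path stores the incoming edge at
   index 0 followed by the edges the thread passes through, so code in
   this file typically retrieves the thread's final destination as

     vec<jump_thread_edge *> *path = THREAD_PATH (e);
     edge final = path->last ()->e;

   where E is an incoming edge whose AUX field carries the path and
   FINAL is just a name used here for illustration.)  */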
192 /* Jump threading statistics. */
194 struct thread_stats_d
195 {
196 unsigned long num_threaded_edges;
197 };
199 struct thread_stats_d thread_stats;
202 /* Remove the last statement in block BB if it is a control statement.
203 Also remove all outgoing edges except the edge which reaches DEST_BB.
204 If DEST_BB is NULL, then remove all outgoing edges. */
206 static void
207 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
209 gimple_stmt_iterator gsi;
210 edge e;
211 edge_iterator ei;
213 gsi = gsi_last_bb (bb);
215 /* If the duplicate ends with a control statement, then remove it.
217 Note that if we are duplicating the template block rather than the
218 original basic block, then the duplicate might not have any real
219 statements in it. */
220 if (!gsi_end_p (gsi)
221 && gsi_stmt (gsi)
222 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
223 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
224 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
225 gsi_remove (&gsi, true);
227 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
229 if (e->dest != dest_bb)
230 remove_edge (e);
231 else
232 ei_next (&ei);
236 /* Create a duplicate of BB. Record the duplicate block in RD. */
238 static void
239 create_block_for_threading (basic_block bb, struct redirection_data *rd)
241 edge_iterator ei;
242 edge e;
244 /* We can use the generic block duplication code and simply remove
245 the stuff we do not need. */
246 rd->dup_block = duplicate_block (bb, NULL, NULL);
248 FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
249 e->aux = NULL;
251 /* Zero out the profile, since the block is unreachable for now. */
252 rd->dup_block->frequency = 0;
253 rd->dup_block->count = 0;
256 /* Main data structure to hold information for duplicates of BB. */
258 static hash_table <redirection_data> redirection_data;
260 /* Given an edge E that starts a jump thread path, lookup and return
262 the entry for that path in our hash table. If INSERT is true, then
263 the entry is created if it is not already present, and E is added to
264 the entry's list of incoming edges. */
266 static struct redirection_data *
267 lookup_redirection_data (edge e, enum insert_option insert)
269 struct redirection_data **slot;
270 struct redirection_data *elt;
271 vec<jump_thread_edge *> *path = THREAD_PATH (e);
273 /* Build a hash table element so we can see if E is already
274 in the table. */
275 elt = XNEW (struct redirection_data);
276 elt->path = path;
277 elt->dup_block = NULL;
278 elt->incoming_edges = NULL;
280 slot = redirection_data.find_slot (elt, insert);
282 /* This will only happen if INSERT is false and the entry is not
283 in the hash table. */
284 if (slot == NULL)
286 free (elt);
287 return NULL;
290 /* This will only happen if E was not in the hash table and
291 INSERT is true. */
292 if (*slot == NULL)
294 *slot = elt;
295 elt->incoming_edges = XNEW (struct el);
296 elt->incoming_edges->e = e;
297 elt->incoming_edges->next = NULL;
298 return elt;
300 /* E was in the hash table. */
301 else
303 /* Free ELT as we do not need it anymore; we will extract the
304 relevant entry from the hash table itself. */
305 free (elt);
307 /* Get the entry stored in the hash table. */
308 elt = *slot;
310 /* If insertion was requested, then we need to add E to the list
311 of incoming edges associated with this hash table entry. */
312 if (insert)
314 struct el *el = XNEW (struct el);
315 el->next = elt->incoming_edges;
316 el->e = e;
317 elt->incoming_edges = el;
320 return elt;
324 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E. */
326 static void
327 copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
329 gimple_stmt_iterator gsi;
330 int src_indx = src_e->dest_idx;
332 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
334 gimple phi = gsi_stmt (gsi);
335 source_location locus = gimple_phi_arg_location (phi, src_indx);
336 add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
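/* (For example, with hypothetical blocks and SSA names: if block C
   contains the PHI

     x_5 = PHI <x_1 (A), x_2 (B)>

   and copy_phi_args is called on C with SRC_E = B->C and TGT_E = B'->C,
   the PHI becomes

     x_5 = PHI <x_1 (A), x_2 (B), x_2 (B')>

   i.e. the new argument for B' simply mirrors the argument already
   associated with SRC_E.)  */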
340 /* We have recently made a copy of ORIG_BB, including its outgoing
341 edges. The copy is NEW_BB. Every PHI node in every direct successor of
342 ORIG_BB has a new argument associated with the edge from NEW_BB to the
343 successor. Initialize the PHI argument so that it is equal to the PHI
344 argument associated with the edge from ORIG_BB to the successor. */
346 static void
347 update_destination_phis (basic_block orig_bb, basic_block new_bb)
349 edge_iterator ei;
350 edge e;
352 FOR_EACH_EDGE (e, ei, orig_bb->succs)
354 edge e2 = find_edge (new_bb, e->dest);
355 copy_phi_args (e->dest, e, e2);
359 /* Given a duplicate block and its single destination (both stored
360 in RD), create an edge between the duplicate and its single
361 destination.
363 Add an additional argument to any PHI nodes at the single
364 destination. */
366 static void
367 create_edge_and_update_destination_phis (struct redirection_data *rd,
368 basic_block bb)
370 edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
372 rescan_loop_exit (e, true, false);
373 e->probability = REG_BR_PROB_BASE;
374 e->count = bb->count;
376 /* We have to copy path -- which means creating a new vector as well
377 as all the jump_thread_edge entries. */
378 if (rd->path->last ()->e->aux)
380 vec<jump_thread_edge *> *path = THREAD_PATH (rd->path->last ()->e);
381 vec<jump_thread_edge *> *copy = new vec<jump_thread_edge *> ();
383 /* Sadly, the elements of the vector are pointers and need to
384 be copied as well. */
385 for (unsigned int i = 0; i < path->length (); i++)
387 jump_thread_edge *x
388 = new jump_thread_edge ((*path)[i]->e, (*path)[i]->type);
389 copy->safe_push (x);
391 e->aux = (void *)copy;
393 else
395 e->aux = NULL;
398 /* If there are any PHI nodes at the destination of the outgoing edge
399 from the duplicate block, then we will need to add a new argument
400 to them. The argument should have the same value as the argument
401 associated with the outgoing edge stored in RD. */
402 copy_phi_args (e->dest, rd->path->last ()->e, e);
405 /* Wire up the outgoing edges from the duplicate block and
406 update any PHIs as needed. */
407 void
408 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
409 ssa_local_info_t *local_info)
411 edge e = rd->incoming_edges->e;
412 vec<jump_thread_edge *> *path = THREAD_PATH (e);
414 /* If we were threading through a joiner block, then we want
415 to keep its control statement and redirect an outgoing edge.
416 Else we want to remove the control statement & edges, then create
417 a new outgoing edge. In both cases we may need to update PHIs. */
418 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
420 edge victim;
421 edge e2;
423 /* This updates the PHIs at the destination of the duplicate
424 block. */
425 update_destination_phis (local_info->bb, rd->dup_block);
427 /* Find the edge from the duplicate block to the block we're
428 threading through. That's the edge we want to redirect. */
429 victim = find_edge (rd->dup_block, (*path)[1]->e->dest);
430 e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
431 e2->count = path->last ()->e->count;
433 /* If we redirected the edge, then we need to copy PHI arguments
434 at the target. If the edge already existed (e2 != victim case),
435 then the PHIs in the target already have the correct arguments. */
436 if (e2 == victim)
437 copy_phi_args (e2->dest, path->last ()->e, e2);
439 else
441 remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
442 create_edge_and_update_destination_phis (rd, rd->dup_block);
445 /* Hash table traversal callback routine to create duplicate blocks. */
447 static int
448 ssa_create_duplicates (struct redirection_data **slot,
449 ssa_local_info_t *local_info)
451 struct redirection_data *rd = *slot;
453 /* Create a template block if we have not done so already. Otherwise
454 use the template to create a new block. */
455 if (local_info->template_block == NULL)
457 create_block_for_threading (local_info->bb, rd);
458 local_info->template_block = rd->dup_block;
460 /* We do not create any outgoing edges for the template. We will
461 take care of that in a later traversal. That way we do not
462 create edges that are going to just be deleted. */
464 else
466 create_block_for_threading (local_info->template_block, rd);
468 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
469 block. */
470 ssa_fix_duplicate_block_edges (rd, local_info);
473 /* Keep walking the hash table. */
474 return 1;
477 /* We did not create any outgoing edges for the template block during
478 block creation. This hash table traversal callback creates the
479 outgoing edge for the template block. */
481 inline int
482 ssa_fixup_template_block (struct redirection_data **slot,
483 ssa_local_info_t *local_info)
485 struct redirection_data *rd = *slot;
487 /* If this is the template block, halt the traversal after updating
488 it appropriately.
490 If we were threading through a joiner block, then we want
491 to keep its control statement and redirect an outgoing edge.
492 Else we want to remove the control statement & edges, then create
493 a new outgoing edge. In both cases we may need to update PHIs. */
494 if (rd->dup_block && rd->dup_block == local_info->template_block)
496 ssa_fix_duplicate_block_edges (rd, local_info);
497 return 0;
500 return 1;
503 /* Hash table traversal callback to redirect each incoming edge
504 associated with this hash table element to its new destination. */
506 static int
507 ssa_redirect_edges (struct redirection_data **slot,
508 ssa_local_info_t *local_info)
510 struct redirection_data *rd = *slot;
511 struct el *next, *el;
513 /* Walk over all the incoming edges associated with this
514 hash table entry. */
515 for (el = rd->incoming_edges; el; el = next)
517 edge e = el->e;
518 vec<jump_thread_edge *> *path = THREAD_PATH (e);
520 /* Go ahead and free this element from the list. Doing this now
521 avoids the need for another list walk when we destroy the hash
522 table. */
523 next = el->next;
524 free (el);
526 thread_stats.num_threaded_edges++;
528 if (rd->dup_block)
530 edge e2;
532 if (dump_file && (dump_flags & TDF_DETAILS))
533 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
534 e->src->index, e->dest->index, rd->dup_block->index);
536 rd->dup_block->count += e->count;
538 /* Excessive jump threading may make frequencies large enough so
539 the computation overflows. */
540 if (rd->dup_block->frequency < BB_FREQ_MAX * 2)
541 rd->dup_block->frequency += EDGE_FREQUENCY (e);
543 /* In the case of threading through a joiner block, the outgoing
544 edges from the duplicate block were updated when they were
545 redirected during ssa_fix_duplicate_block_edges. */
546 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
547 EDGE_SUCC (rd->dup_block, 0)->count += e->count;
549 /* Redirect the incoming edge (possibly to the joiner block) to the
550 appropriate duplicate block. */
551 e2 = redirect_edge_and_branch (e, rd->dup_block);
552 gcc_assert (e == e2);
553 flush_pending_stmts (e2);
556 /* Go ahead and clear E->aux. It's not needed anymore and failure
557 to clear it will cause all kinds of unpleasant problems later. */
558 for (unsigned int i = 0; i < path->length (); i++)
559 delete (*path)[i];
560 path->release ();
561 e->aux = NULL;
565 /* Indicate that we actually threaded one or more jumps. */
566 if (rd->incoming_edges)
567 local_info->jumps_threaded = true;
569 return 1;
572 /* Return true if this block has no executable statements other than
573 a simple ctrl flow instruction. When the number of outgoing edges
574 is one, this is equivalent to a "forwarder" block. */
576 static bool
577 redirection_block_p (basic_block bb)
579 gimple_stmt_iterator gsi;
581 /* Advance to the first executable statement. */
582 gsi = gsi_start_bb (bb);
583 while (!gsi_end_p (gsi)
584 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
585 || is_gimple_debug (gsi_stmt (gsi))
586 || gimple_nop_p (gsi_stmt (gsi))))
587 gsi_next (&gsi);
589 /* Check if this is an empty block. */
590 if (gsi_end_p (gsi))
591 return true;
593 /* Test that we've reached the terminating control statement. */
594 return gsi_stmt (gsi)
595 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
596 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
597 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
600 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
601 is reached via one or more specific incoming edges, we know which
602 outgoing edge from BB will be traversed.
604 We want to redirect those incoming edges to the target of the
605 appropriate outgoing edge. Doing so avoids a conditional branch
606 and may expose new optimization opportunities. Note that we have
607 to update dominator tree and SSA graph after such changes.
609 The key to keeping the SSA graph update manageable is to duplicate
610 the side effects occurring in BB so that those side effects still
611 occur on the paths which bypass BB after redirecting edges.
613 We accomplish this by creating duplicates of BB and arranging for
614 the duplicates to unconditionally pass control to one specific
615 successor of BB. We then revector the incoming edges into BB to
616 the appropriate duplicate of BB.
618 If NOLOOP_ONLY is true, we only perform the threading as long as it
619 does not affect the structure of the loops in a nontrivial way.
621 If JOINERS is true, then thread through joiner blocks as well. */
623 static bool
624 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
626 /* E is an incoming edge into BB that we may or may not want to
627 redirect to a duplicate of BB. */
628 edge e, e2;
629 edge_iterator ei;
630 ssa_local_info_t local_info;
631 struct loop *loop = bb->loop_father;
633 /* To avoid scanning a linear array for the element we need, we instead
634 use a hash table. For normal code there should be no noticeable
635 difference. However, if we have a block with a large number of
636 incoming and outgoing edges, such linear searches can get expensive. */
637 redirection_data.create (EDGE_COUNT (bb->succs));
639 /* If we thread the latch of the loop to its exit, the loop ceases to
640 exist. Make sure we do not restrict ourselves in order to preserve
641 this loop. */
642 if (loop->header == bb)
644 e = loop_latch_edge (loop);
645 vec<jump_thread_edge *> *path = THREAD_PATH (e);
647 if (path
648 && (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && joiners)
649 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && !joiners)))
651 for (unsigned int i = 1; i < path->length (); i++)
653 edge e2 = (*path)[i]->e;
655 if (loop_exit_edge_p (loop, e2))
657 loop->header = NULL;
658 loop->latch = NULL;
659 loops_state_set (LOOPS_NEED_FIXUP);
665 /* Record each unique threaded destination into a hash table for
666 efficient lookups. */
667 FOR_EACH_EDGE (e, ei, bb->preds)
669 if (e->aux == NULL)
670 continue;
672 vec<jump_thread_edge *> *path = THREAD_PATH (e);
674 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
675 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
676 continue;
678 e2 = path->last ()->e;
679 if (!e2 || noloop_only)
681 /* If NOLOOP_ONLY is true, we only allow threading through the
682 header of a loop to exit edges.
684 There are two cases to consider. The first is when BB is the
685 loop header. We will attempt to thread this elsewhere, so
686 we can just continue here. */
688 if (bb == bb->loop_father->header
689 && (!loop_exit_edge_p (bb->loop_father, e2)
690 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
691 continue;
694 /* The second occurs when there was a loop header buried in a jump
695 threading path. We do not try to thread this elsewhere, so
696 just cancel the jump threading request by clearing the AUX
697 field now. */
698 if ((bb->loop_father != e2->src->loop_father
699 && !loop_exit_edge_p (e2->src->loop_father, e2))
700 || (e2->src->loop_father != e2->dest->loop_father
701 && !loop_exit_edge_p (e2->src->loop_father, e2)))
703 /* Since this case is not handled by our special code
704 to thread through a loop header, we must explicitly
705 cancel the threading request here. */
706 for (unsigned int i = 0; i < path->length (); i++)
707 delete (*path)[i];
708 path->release ();
709 e->aux = NULL;
710 continue;
714 if (e->dest == e2->src)
715 update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
716 e->count, (*THREAD_PATH (e))[1]->e);
718 /* Insert the outgoing edge into the hash table if it is not
719 already in the hash table. */
720 lookup_redirection_data (e, INSERT);
723 /* We do not update dominance info. */
724 free_dominance_info (CDI_DOMINATORS);
726 /* We know we only thread through the loop header to loop exits.
727 Let the basic block duplication hook know we are not creating
728 a multiple entry loop. */
729 if (noloop_only
730 && bb == bb->loop_father->header)
731 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
733 /* Now create duplicates of BB.
735 Note that for a block with a high outgoing degree we can waste
736 a lot of time and memory creating and destroying useless edges.
738 So we first duplicate BB and remove the control structure at the
739 tail of the duplicate as well as all outgoing edges from the
740 duplicate. We then use that duplicate block as a template for
741 the rest of the duplicates. */
742 local_info.template_block = NULL;
743 local_info.bb = bb;
744 local_info.jumps_threaded = false;
745 redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
746 (&local_info);
748 /* The template does not have an outgoing edge. Create that outgoing
749 edge and update PHI nodes at the edge's target as necessary.
751 We do this after creating all the duplicates to avoid creating
752 unnecessary edges. */
753 redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
754 (&local_info);
756 /* The hash table traversals above created the duplicate blocks (and the
757 statements within the duplicate blocks). This loop creates PHI nodes for
758 the duplicated blocks and redirects the incoming edges into BB to reach
759 the duplicates of BB. */
760 redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
761 (&local_info);
763 /* Done with this block. Clear REDIRECTION_DATA. */
764 redirection_data.dispose ();
766 if (noloop_only
767 && bb == bb->loop_father->header)
768 set_loop_copy (bb->loop_father, NULL);
770 /* Indicate to our caller whether or not any jumps were threaded. */
771 return local_info.jumps_threaded;
774 /* Wrapper for thread_block_1 so that we can first handle jump
775 thread paths which do not involve copying joiner blocks, then
776 handle jump thread paths which have joiner blocks.
778 By doing things this way we can be as aggressive as possible and
779 not worry that copying a joiner block will create a jump threading
780 opportunity. */
782 static bool
783 thread_block (basic_block bb, bool noloop_only)
785 bool retval;
786 retval = thread_block_1 (bb, noloop_only, false);
787 retval |= thread_block_1 (bb, noloop_only, true);
788 return retval;
792 /* Threads edge E through E->dest to the next edge on its jump thread path. Returns the
793 copy of E->dest created during threading, or E->dest if it was not necessary
794 to copy it (E is its single predecessor). */
796 static basic_block
797 thread_single_edge (edge e)
799 basic_block bb = e->dest;
800 struct redirection_data rd;
801 vec<jump_thread_edge *> *path = THREAD_PATH (e);
802 edge eto = (*path)[1]->e;
804 for (unsigned int i = 0; i < path->length (); i++)
805 delete (*path)[i];
806 delete path;
807 e->aux = NULL;
809 thread_stats.num_threaded_edges++;
811 if (single_pred_p (bb))
813 /* If BB has just a single predecessor, we should only remove the
814 control statement at its end, and all successor edges except ETO. */
815 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
817 /* And fixup the flags on the single remaining edge. */
818 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
819 eto->flags |= EDGE_FALLTHRU;
821 return bb;
824 /* Otherwise, we need to create a copy. */
825 if (e->dest == eto->src)
826 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
828 vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
829 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
830 npath->safe_push (x);
832 x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
833 npath->safe_push (x);
834 rd.path = npath;
836 create_block_for_threading (bb, &rd);
837 remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
838 create_edge_and_update_destination_phis (&rd, rd.dup_block);
840 if (dump_file && (dump_flags & TDF_DETAILS))
841 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
842 e->src->index, e->dest->index, rd.dup_block->index);
844 rd.dup_block->count = e->count;
845 rd.dup_block->frequency = EDGE_FREQUENCY (e);
846 single_succ_edge (rd.dup_block)->count = e->count;
847 redirect_edge_and_branch (e, rd.dup_block);
848 flush_pending_stmts (e);
850 return rd.dup_block;
853 /* Callback for dfs_enumerate_from. Returns true if BB is different
854 from STOP and DBDS_CE_STOP. */
856 static basic_block dbds_ce_stop;
857 static bool
858 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
860 return (bb != (const_basic_block) stop
861 && bb != dbds_ce_stop);
864 /* Evaluates the dominance relationship between BB and the latch of
865 LOOP, and returns the state. */
867 enum bb_dom_status
868 {
869 /* BB does not dominate latch of the LOOP. */
870 DOMST_NONDOMINATING,
871 /* The LOOP is broken (there is no path from the header to its latch). */
872 DOMST_LOOP_BROKEN,
873 /* BB dominates the latch of the LOOP. */
874 DOMST_DOMINATING
875 };
877 static enum bb_dom_status
878 determine_bb_domination_status (struct loop *loop, basic_block bb)
880 basic_block *bblocks;
881 unsigned nblocks, i;
882 bool bb_reachable = false;
883 edge_iterator ei;
884 edge e;
886 /* This function assumes BB is a successor of LOOP->header.
887 If that is not the case, return DOMST_NONDOMINATING, which
888 is always safe. */
890 bool ok = false;
892 FOR_EACH_EDGE (e, ei, bb->preds)
894 if (e->src == loop->header)
896 ok = true;
897 break;
901 if (!ok)
902 return DOMST_NONDOMINATING;
905 if (bb == loop->latch)
906 return DOMST_DOMINATING;
908 /* Check that BB dominates LOOP->latch, and that it is back-reachable
909 from it. */
911 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
912 dbds_ce_stop = loop->header;
913 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
914 bblocks, loop->num_nodes, bb);
915 for (i = 0; i < nblocks; i++)
916 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
918 if (e->src == loop->header)
920 free (bblocks);
921 return DOMST_NONDOMINATING;
923 if (e->src == bb)
924 bb_reachable = true;
927 free (bblocks);
928 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
931 /* Return true if BB is part of the new pre-header that is created
932 when threading the latch to DATA. */
934 static bool
935 def_split_header_continue_p (const_basic_block bb, const void *data)
937 const_basic_block new_header = (const_basic_block) data;
938 const struct loop *l;
940 if (bb == new_header
941 || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
942 return false;
943 for (l = bb->loop_father; l; l = loop_outer (l))
944 if (l == new_header->loop_father)
945 return true;
946 return false;
949 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
950 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
951 to the inside of the loop. */
953 static bool
954 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
956 basic_block header = loop->header;
957 edge e, tgt_edge, latch = loop_latch_edge (loop);
958 edge_iterator ei;
959 basic_block tgt_bb, atgt_bb;
960 enum bb_dom_status domst;
962 /* We have already threaded through headers to exits, so all the threading
963 requests now are to the inside of the loop. We need to avoid creating
964 irreducible regions (i.e., loops with more than one entry block), and
965 also loops with several latch edges, or new subloops of the loop (although
966 there are cases where it might be appropriate, it is difficult to decide,
967 and doing it wrongly may confuse other optimizers).
969 We could handle more general cases here. However, the intention is to
970 preserve some information about the loop, which is impossible if its
971 structure changes significantly, in a way that is not well understood.
972 Thus we only handle a few important special cases, in which updating
973 the loop-carried information should also be feasible:
975 1) Propagation of latch edge to a block that dominates the latch block
976 of a loop. This aims to handle the following idiom:
978 first = 1;
979 while (1)
980 {
981 if (first)
982 initialize;
983 first = 0;
984 body;
985 }
987 After threading the latch edge, this becomes
989 first = 1;
990 if (first)
991 initialize;
992 while (1)
993 {
994 first = 0;
995 body;
996 }
998 The original header of the loop is moved out of it, and we may thread
999 the remaining edges through it without further constraints.
1001 2) All entry edges are propagated to a single basic block that dominates
1002 the latch block of the loop. This aims to handle the following idiom
1003 (normally created for "for" loops):
1005 i = 0;
1006 while (1)
1007 {
1008 if (i >= 100)
1009 break;
1010 body;
1011 i++;
1012 }
1014 This becomes
1016 i = 0;
1017 while (1)
1018 {
1019 body;
1020 i++;
1021 if (i >= 100)
1022 break;
1023 }
1024 */
1026 /* Threading through the header won't improve the code if the header has just
1027 one successor. */
1028 if (single_succ_p (header))
1029 goto fail;
1031 if (latch->aux)
1033 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1034 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1035 goto fail;
1036 tgt_edge = (*path)[1]->e;
1037 tgt_bb = tgt_edge->dest;
1039 else if (!may_peel_loop_headers
1040 && !redirection_block_p (loop->header))
1041 goto fail;
1042 else
1044 tgt_bb = NULL;
1045 tgt_edge = NULL;
1046 FOR_EACH_EDGE (e, ei, header->preds)
1048 if (!e->aux)
1050 if (e == latch)
1051 continue;
1053 /* If latch is not threaded, and there is a header
1054 edge that is not threaded, we would create a loop
1055 with multiple entries. */
1056 goto fail;
1059 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1061 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1062 goto fail;
1063 tgt_edge = (*path)[1]->e;
1064 atgt_bb = tgt_edge->dest;
1065 if (!tgt_bb)
1066 tgt_bb = atgt_bb;
1067 /* Two targets of threading would make us create a loop
1068 with multiple entries. */
1069 else if (tgt_bb != atgt_bb)
1070 goto fail;
1073 if (!tgt_bb)
1075 /* There are no threading requests. */
1076 return false;
1079 /* Redirecting to an empty loop latch is useless. */
1080 if (tgt_bb == loop->latch
1081 && empty_block_p (loop->latch))
1082 goto fail;
1085 /* The target block must dominate the loop latch, otherwise we would be
1086 creating a subloop. */
1087 domst = determine_bb_domination_status (loop, tgt_bb);
1088 if (domst == DOMST_NONDOMINATING)
1089 goto fail;
1090 if (domst == DOMST_LOOP_BROKEN)
1092 /* If the loop ceased to exist, mark it as such, and thread through its
1093 original header. */
1094 loop->header = NULL;
1095 loop->latch = NULL;
1096 loops_state_set (LOOPS_NEED_FIXUP);
1097 return thread_block (header, false);
1100 if (tgt_bb->loop_father->header == tgt_bb)
1102 /* If the target of the threading is a header of a subloop, we need
1103 to create a preheader for it, so that the headers of the two loops
1104 do not merge. */
1105 if (EDGE_COUNT (tgt_bb->preds) > 2)
1107 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1108 gcc_assert (tgt_bb != NULL);
1110 else
1111 tgt_bb = split_edge (tgt_edge);
1114 if (latch->aux)
1116 basic_block *bblocks;
1117 unsigned nblocks, i;
1119 /* First handle the case where the latch edge is redirected. We are copying
1120 the loop header but not creating a multiple entry loop. Make the
1121 cfg manipulation code aware of that fact. */
1122 set_loop_copy (loop, loop);
1123 loop->latch = thread_single_edge (latch);
1124 set_loop_copy (loop, NULL);
1125 gcc_assert (single_succ (loop->latch) == tgt_bb);
1126 loop->header = tgt_bb;
1128 /* Remove the new pre-header blocks from our loop. */
1129 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1130 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1131 bblocks, loop->num_nodes, tgt_bb);
1132 for (i = 0; i < nblocks; i++)
1133 if (bblocks[i]->loop_father == loop)
1135 remove_bb_from_loops (bblocks[i]);
1136 add_bb_to_loop (bblocks[i], loop_outer (loop));
1138 free (bblocks);
1140 /* If the new header has multiple latches, mark it so. */
1141 FOR_EACH_EDGE (e, ei, loop->header->preds)
1142 if (e->src->loop_father == loop
1143 && e->src != loop->latch)
1145 loop->latch = NULL;
1146 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1149 /* Cancel remaining threading requests that would make the
1150 loop a multiple entry loop. */
1151 FOR_EACH_EDGE (e, ei, header->preds)
1153 edge e2;
1155 if (e->aux == NULL)
1156 continue;
1158 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1159 e2 = path->last ()->e;
1161 if (e->src->loop_father != e2->dest->loop_father
1162 && e2->dest != loop->header)
1164 for (unsigned int i = 0; i < path->length (); i++)
1165 delete (*path)[i];
1166 path->release ();
1167 e->aux = NULL;
1171 /* Thread the remaining edges through the former header. */
1172 thread_block (header, false);
1174 else
1176 basic_block new_preheader;
1178 /* Now consider the case where entry edges are redirected to the new entry
1179 block. Remember one entry edge, so that we can find the new
1180 preheader (its destination after threading). */
1181 FOR_EACH_EDGE (e, ei, header->preds)
1183 if (e->aux)
1184 break;
1187 /* The duplicate of the header is the new preheader of the loop. Ensure
1188 that it is placed correctly in the loop hierarchy. */
1189 set_loop_copy (loop, loop_outer (loop));
1191 thread_block (header, false);
1192 set_loop_copy (loop, NULL);
1193 new_preheader = e->dest;
1195 /* Create the new latch block. This is always necessary, as the latch
1196 must have only a single successor, but the original header had at
1197 least two successors. */
1198 loop->latch = NULL;
1199 mfb_kj_edge = single_succ_edge (new_preheader);
1200 loop->header = mfb_kj_edge->dest;
1201 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1202 loop->header = latch->dest;
1203 loop->latch = latch->src;
1206 return true;
1208 fail:
1209 /* We failed to thread anything. Cancel the requests. */
1210 FOR_EACH_EDGE (e, ei, header->preds)
1212 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1214 if (path)
1216 for (unsigned int i = 0; i < path->length (); i++)
1217 delete (*path)[i];
1218 path->release ();
1219 e->aux = NULL;
1222 return false;
1225 /* E1 and E2 are edges into the same basic block. Return TRUE if the
1226 PHI arguments associated with those edges are equal or there are no
1227 PHI arguments, otherwise return FALSE. */
1229 static bool
1230 phi_args_equal_on_edges (edge e1, edge e2)
1232 gimple_stmt_iterator gsi;
1233 int indx1 = e1->dest_idx;
1234 int indx2 = e2->dest_idx;
1236 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1238 gimple phi = gsi_stmt (gsi);
1240 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1241 gimple_phi_arg_def (phi, indx2), 0))
1242 return false;
1244 return true;
1247 /* Walk through the registered jump threads and convert them into a
1248 form convenient for this pass.
1250 Any block which has incoming edges threaded to outgoing edges
1251 will have its entry in THREADED_BLOCK set.
1253 Any threaded edge will have its jump thread path stored in the
1254 edge's AUX field.
1256 This form avoids the need to walk all the edges in the CFG to
1257 discover blocks which need processing and avoids unnecessary
1258 hash table lookups to map from threaded edge to new target. */
1260 static void
1261 mark_threaded_blocks (bitmap threaded_blocks)
1263 unsigned int i;
1264 bitmap_iterator bi;
1265 bitmap tmp = BITMAP_ALLOC (NULL);
1266 basic_block bb;
1267 edge e;
1268 edge_iterator ei;
1270 /* Move the jump threading requests from PATHS to each edge
1271 which starts a jump thread path. */
1272 for (i = 0; i < paths.length (); i++)
1274 vec<jump_thread_edge *> *path = paths[i];
1275 edge e = (*path)[0]->e;
1276 e->aux = (void *)path;
1277 bitmap_set_bit (tmp, e->dest->index);
1280 /* If we have a joiner block (J) which has two successors S1 and S2 and
1281 we are threading through S1 and the final destination of the thread
1282 is S2, then we must verify that any PHI nodes in S2 have the same
1283 PHI arguments for the edge J->S2 and J->S1->...->S2.
1285 We used to detect this prior to registering the jump thread, but
1286 that prohibits propagation of edge equivalences into non-dominated
1287 PHI nodes as the equivalency test might occur before propagation.
1289 This works for now, but will need improvement as part of the FSA
1290 optimization.
1292 Note since we've moved the thread request data to the edges,
1293 we have to iterate on those rather than the threaded_edges vector. */
1294 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1296 bb = BASIC_BLOCK (i);
1297 FOR_EACH_EDGE (e, ei, bb->preds)
1299 if (e->aux)
1301 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1302 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
1304 if (have_joiner)
1306 basic_block joiner = e->dest;
1307 edge final_edge = path->last ()->e;
1308 basic_block final_dest = final_edge->dest;
1309 edge e2 = find_edge (joiner, final_dest);
1311 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
1313 for (unsigned int i = 0; i < path->length (); i++)
1314 delete (*path)[i];
1315 path->release ();
1316 e->aux = NULL;
1324 /* If optimizing for size, only thread through a block if we don't have
1325 to duplicate it or it's an otherwise empty redirection block. */
1326 if (optimize_function_for_size_p (cfun))
1328 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1330 bb = BASIC_BLOCK (i);
1331 if (EDGE_COUNT (bb->preds) > 1
1332 && !redirection_block_p (bb))
1334 FOR_EACH_EDGE (e, ei, bb->preds)
1336 if (e->aux)
1338 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1339 for (unsigned int i = 0; i < path->length (); i++)
1340 delete (*path)[i];
1341 path->release ();
1342 e->aux = NULL;
1346 else
1347 bitmap_set_bit (threaded_blocks, i);
1350 else
1351 bitmap_copy (threaded_blocks, tmp);
1353 /* Look for jump threading paths which cross multiple loop headers.
1355 The code to thread through loop headers will change the CFG in ways
1356 that break assumptions made by the loop optimization code.
1358 We don't want to blindly cancel the requests. We can instead do better
1359 by trimming off the end of the jump thread path. */
1360 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1362 basic_block bb = BASIC_BLOCK (i);
1363 FOR_EACH_EDGE (e, ei, bb->preds)
1365 if (e->aux)
1367 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1369 /* Basically we're looking for a situation where we can see
1370 3 or more loop structures on a jump threading path. */
1372 struct loop *first_father = (*path)[0]->e->src->loop_father;
1373 struct loop *second_father = NULL;
1374 for (unsigned int i = 0; i < path->length (); i++)
1376 /* See if this is a loop father we have not seen before. */
1377 if ((*path)[i]->e->dest->loop_father != first_father
1378 && (*path)[i]->e->dest->loop_father != second_father)
1380 /* We've already seen two loop fathers, so we
1381 need to trim this jump threading path. */
1382 if (second_father != NULL)
1384 /* Trim from entry I onwards. */
1385 for (unsigned int j = i; j < path->length (); j++)
1386 delete (*path)[j];
1387 path->truncate (i);
1389 /* Now that we've truncated the path, make sure
1390 what's left is still valid. We need at least
1391 two edges on the path and the last edge can not
1392 be a joiner. This should never happen, but let's
1393 be safe. */
1394 if (path->length () < 2
1395 || (path->last ()->type
1396 == EDGE_COPY_SRC_JOINER_BLOCK))
1398 for (unsigned int i = 0; i < path->length (); i++)
1399 delete (*path)[i];
1400 path->release ();
1401 e->aux = NULL;
1403 break;
1405 else
1407 second_father = (*path)[i]->e->dest->loop_father;
1415 BITMAP_FREE (tmp);
1419 /* Walk through all blocks and thread incoming edges to the appropriate
1420 outgoing edge for each edge pair recorded in THREADED_EDGES.
1422 It is the caller's responsibility to fix the dominance information
1423 and rewrite duplicated SSA_NAMEs back into SSA form.
1425 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
1426 loop headers if it does not simplify the loop.
1428 Returns true if one or more edges were threaded, false otherwise. */
1430 bool
1431 thread_through_all_blocks (bool may_peel_loop_headers)
1433 bool retval = false;
1434 unsigned int i;
1435 bitmap_iterator bi;
1436 bitmap threaded_blocks;
1437 struct loop *loop;
1438 loop_iterator li;
1440 /* We must know about loops in order to preserve them. */
1441 gcc_assert (current_loops != NULL);
1443 if (!paths.exists ())
1444 return false;
1446 threaded_blocks = BITMAP_ALLOC (NULL);
1447 memset (&thread_stats, 0, sizeof (thread_stats));
1449 mark_threaded_blocks (threaded_blocks);
1451 initialize_original_copy_tables ();
1453 /* First perform the threading requests that do not affect
1454 loop structure. */
1455 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
1457 basic_block bb = BASIC_BLOCK (i);
1459 if (EDGE_COUNT (bb->preds) > 0)
1460 retval |= thread_block (bb, true);
1463 /* Then perform the threading through loop headers. We start with the
1464 innermost loop, so that the changes in cfg we perform won't affect
1465 further threading. */
1466 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1468 if (!loop->header
1469 || !bitmap_bit_p (threaded_blocks, loop->header->index))
1470 continue;
1472 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
1475 /* Assume we had a jump thread path which went from the latch to the exit
1476 and a path which goes from outside to inside the same loop.
1478 If the latch to exit was handled first, we will thread it and clear
1479 loop->header.
1481 The second path will be ignored by thread_block because we're going
1482 through a loop header. It will also be ignored by the loop above
1483 because loop->header is NULL.
1485 This results in the second path never being threaded. The failure
1486 mode is a dangling AUX field.
1488 This is inherently a bit of a pain to fix, so we just walk all the
1489 blocks and all the incoming edges to those blocks and clear their
1490 AUX fields. */
1491 basic_block bb;
1492 edge_iterator ei;
1493 edge e;
1494 FOR_EACH_BB (bb)
1496 FOR_EACH_EDGE (e, ei, bb->preds)
1497 if (e->aux)
1499 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1501 for (unsigned int i = 0; i < path->length (); i++)
1502 delete (*path)[i];
1503 path->release ();
1504 e->aux = NULL;
1508 statistics_counter_event (cfun, "Jumps threaded",
1509 thread_stats.num_threaded_edges);
1511 free_original_copy_tables ();
1513 BITMAP_FREE (threaded_blocks);
1514 threaded_blocks = NULL;
1515 paths.release ();
1517 if (retval)
1518 loops_state_set (LOOPS_NEED_FIXUP);
1520 return retval;
1523 /* Dump a jump threading path, including annotations about each
1524 edge in the path. */
1526 static void
1527 dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path)
1529 fprintf (dump_file,
1530 " Registering jump thread: (%d, %d) incoming edge; ",
1531 path[0]->e->src->index, path[0]->e->dest->index);
1533 for (unsigned int i = 1; i < path.length (); i++)
1535 /* We can get paths with a NULL edge when the final destination
1536 of a jump thread turns out to be a constant address. We dump
1537 those paths when debugging, so we have to be prepared for that
1538 possibility here. */
1539 if (path[i]->e == NULL)
1540 continue;
1542 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1543 fprintf (dump_file, " (%d, %d) joiner; ",
1544 path[i]->e->src->index, path[i]->e->dest->index);
1545 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
1546 fprintf (dump_file, " (%d, %d) normal;",
1547 path[i]->e->src->index, path[i]->e->dest->index);
1548 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
1549 fprintf (dump_file, " (%d, %d) nocopy;",
1550 path[i]->e->src->index, path[i]->e->dest->index);
1552 fputc ('\n', dump_file);
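/* (With hypothetical block numbers, the routine above produces dump
   lines of the form

     Registering jump thread: (2, 4) incoming edge;  (4, 6) joiner;  (6, 8) normal;

   which is the format to look for in -fdump-tree-<pass>-details output.)  */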
1555 /* Register a jump threading opportunity. We queue up all the jump
1556 threading opportunities discovered by a pass and update the CFG
1557 and SSA form all at once.
1559 PATH is the jump thread path to register: its first element holds the
1560 incoming edge we can thread and the remaining elements record the
1561 edges the thread follows to its final destination. */
1563 void
1564 register_jump_thread (vec<jump_thread_edge *> *path)
1566 if (!dbg_cnt (registered_jump_thread))
1568 for (unsigned int i = 0; i < path->length (); i++)
1569 delete (*path)[i];
1570 path->release ();
1571 return;
1574 /* First make sure there are no NULL outgoing edges on the jump threading
1575 path. That can happen for jumping to a constant address. */
1576 for (unsigned int i = 0; i < path->length (); i++)
1577 if ((*path)[i]->e == NULL)
1579 if (dump_file && (dump_flags & TDF_DETAILS))
1581 fprintf (dump_file,
1582 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
1583 dump_jump_thread_path (dump_file, *path);
1586 for (unsigned int i = 0; i < path->length (); i++)
1587 delete (*path)[i];
1588 path->release ();
1589 return;
1592 if (dump_file && (dump_flags & TDF_DETAILS))
1593 dump_jump_thread_path (dump_file, *path);
1595 if (!paths.exists ())
1596 paths.create (5);
1598 paths.safe_push (path);
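/* (A minimal usage sketch, for illustration only; the caller is
   hypothetical.  A client pass that has proven that entering E->dest
   via E always leaves through E2 could register the opportunity as

     vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
     path->safe_push (new jump_thread_edge (e, EDGE_START_JUMP_THREAD));
     path->safe_push (new jump_thread_edge (e2, EDGE_COPY_SRC_BLOCK));
     register_jump_thread (path);

   and later, once all opportunities have been recorded, call
   thread_through_all_blocks to update the CFG and SSA form in one
   pass.)  */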