gcc/tree-ssa-threadupdate.c
1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tree.h"
24 #include "flags.h"
25 #include "basic-block.h"
26 #include "function.h"
27 #include "hash-table.h"
28 #include "tree-ssa-alias.h"
29 #include "internal-fn.h"
30 #include "gimple-expr.h"
31 #include "is-a.h"
32 #include "gimple.h"
33 #include "gimple-iterator.h"
34 #include "gimple-ssa.h"
35 #include "tree-phinodes.h"
36 #include "tree-ssa.h"
37 #include "tree-ssa-threadupdate.h"
38 #include "ssa-iterators.h"
39 #include "dumpfile.h"
40 #include "cfgloop.h"
41 #include "dbgcnt.h"
42 #include "tree-cfg.h"
43 #include "tree-pass.h"
45 /* Given a block B, update the CFG and SSA graph to reflect redirecting
46 one or more in-edges to B to instead reach the destination of an
47 out-edge from B while preserving any side effects in B.
49 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
50 side effects of executing B.
52 1. Make a copy of B (including its outgoing edges and statements). Call
53 the copy B'. Note B' has no incoming edges or PHIs at this time.
55 2. Remove the control statement at the end of B' and all outgoing edges
56 except B'->C.
58 3. Add a new argument to each PHI in C with the same value as the existing
59 argument associated with edge B->C. Associate the new PHI arguments
60 with the edge B'->C.
62 4. For each PHI in B, find or create a PHI in B' with an identical
63 PHI_RESULT. Add an argument to the PHI in B' which has the same
64 value as the PHI in B associated with the edge A->B. Associate
65 the new argument in the PHI in B' with the edge A->B.
67 5. Change the edge A->B to A->B'.
69 5a. This automatically deletes any PHI arguments associated with the
70 edge A->B in B.
72 5b. This automatically associates each new argument added in step 4
73 with the edge A->B'.
75 6. Repeat for other incoming edges into B.
77 7. Put the duplicated resources in B and all the B' blocks into SSA form.
79 Note that block duplication can be minimized by first collecting the
80 set of unique destination blocks that the incoming edges should
81 be threaded to.
83 We reduce the number of edges and statements we create by not copying all
84 the outgoing edges and the control statement in step #1. We instead create
85 a template block without the outgoing edges and duplicate the template.
87 Another case this code handles is threading through a "joiner" block. In
88 this case, we do not know the destination of the joiner block, but one
89 of the outgoing edges from the joiner block leads to a threadable path. This
90 case largely works as outlined above, except the duplicate of the joiner
91 block still contains a full set of outgoing edges and its control statement.
92 We just redirect one of its outgoing edges to our jump threading path. */
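/* As an illustrative sketch (block names are hypothetical, matching the
   A/B/C naming used above), suppose we know that whenever A reaches B,
   the conditional at the end of B selects the edge B->C:

     before:  A -> B -> C        after:  A -> B' -> C
                   \                          B  -> C
                    -> D                      \
                                               -> D

   B' is a copy of B with its control statement removed and a single
   outgoing edge to C, so the path from A no longer evaluates B's
   conditional.  */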
95 /* Steps #5 and #6 of the above algorithm are best implemented by walking
96 all the incoming edges which thread to the same destination edge at
97 the same time. That avoids lots of table lookups to get information
98 for the destination edge.
100 To realize that implementation we create a list of incoming edges
101 which thread to the same outgoing edge. Thus to implement steps
102 #5 and #6 we traverse our hash table of outgoing edge information.
103 For each entry we walk the list of incoming edges which thread to
104 the current outgoing edge. */
106 struct el
108 edge e;
109 struct el *next;
112 /* Main data structure recording information regarding B's duplicate
113 blocks. */
115 /* We need to efficiently record the unique thread destinations of this
116 block and specific information associated with those destinations. We
117 may have many incoming edges threaded to the same outgoing edge. This
118 can be naturally implemented with a hash table. */
120 struct redirection_data : typed_free_remove<redirection_data>
122 /* We support wiring up two block duplicates in a jump threading path.
124 One is a normal block copy where we remove the control statement
125 and wire up its single remaining outgoing edge to the thread path.
127 The other is a joiner block where we leave the control statement
128 in place, but wire one of the outgoing edges to a thread path.
130 In theory we could have multiple block duplicates in a jump
131 threading path, but I haven't tried that.
133 The duplicate blocks appear in this array in the same order in
134 which they appear in the jump thread path. */
135 basic_block dup_blocks[2];
137 /* The jump threading path. */
138 vec<jump_thread_edge *> *path;
140 /* A list of incoming edges which we want to thread to the
141 same path. */
142 struct el *incoming_edges;
144 /* hash_table support. */
145 typedef redirection_data value_type;
146 typedef redirection_data compare_type;
147 static inline hashval_t hash (const value_type *);
148 static inline int equal (const value_type *, const compare_type *);
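/* For illustration, these members let redirection_data serve as its own
   hash traits: the table declared below as
   "static hash_table <redirection_data> redirection_data;" invokes
   redirection_data::hash and redirection_data::equal whenever
   lookup_redirection_data calls redirection_data.find_slot (elt, insert).  */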
151 /* Dump a jump threading path, including annotations about each
152 edge in the path. */
154 static void
155 dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
156 bool registering)
158 fprintf (dump_file,
159 " %s jump thread: (%d, %d) incoming edge; ",
160 (registering ? "Registering" : "Cancelling"),
161 path[0]->e->src->index, path[0]->e->dest->index);
163 for (unsigned int i = 1; i < path.length (); i++)
165 /* We can get paths with a NULL edge when the final destination
166 of a jump thread turns out to be a constant address. We dump
167 those paths when debugging, so we have to be prepared for that
168 possibility here. */
169 if (path[i]->e == NULL)
170 continue;
172 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
173 fprintf (dump_file, " (%d, %d) joiner; ",
174 path[i]->e->src->index, path[i]->e->dest->index);
175 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
176 fprintf (dump_file, " (%d, %d) normal;",
177 path[i]->e->src->index, path[i]->e->dest->index);
178 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
179 fprintf (dump_file, " (%d, %d) nocopy;",
180 path[i]->e->src->index, path[i]->e->dest->index);
182 fputc ('\n', dump_file);
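/* For example, with detailed dumps enabled (TDF_DETAILS) a registered path
   is printed roughly as follows (the block indices are hypothetical):

     Registering jump thread: (3, 5) incoming edge;  (5, 7) joiner;  (7, 9) normal;

   matching the format strings above.  */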
185 /* Simple hashing function. For any given incoming edge E, we're going
186 to be most concerned with the final destination of its jump thread
187 path. So hash on the block index of the final edge in the path. */
189 inline hashval_t
190 redirection_data::hash (const value_type *p)
192 vec<jump_thread_edge *> *path = p->path;
193 return path->last ()->e->dest->index;
196 /* Given two hash table entries, return true if they have the same
197 jump threading path. */
198 inline int
199 redirection_data::equal (const value_type *p1, const compare_type *p2)
201 vec<jump_thread_edge *> *path1 = p1->path;
202 vec<jump_thread_edge *> *path2 = p2->path;
204 if (path1->length () != path2->length ())
205 return false;
207 for (unsigned int i = 1; i < path1->length (); i++)
209 if ((*path1)[i]->type != (*path2)[i]->type
210 || (*path1)[i]->e != (*path2)[i]->e)
211 return false;
214 return true;
217 /* Data structure of information to pass to hash table traversal routines. */
218 struct ssa_local_info_t
220 /* The current block we are working on. */
221 basic_block bb;
223 /* We only create a template block for the first duplicated block in a
224 jump threading path as we may need many duplicates of that block.
226 The second duplicate block in a path is specific to that path. Creating
227 and sharing a template for that block is considerably more difficult. */
228 basic_block template_block;
230 /* TRUE if we thread one or more jumps, FALSE otherwise. */
231 bool jumps_threaded;
234 /* Passes which use the jump threading code register jump threading
235 opportunities as they are discovered. We keep the registered
236 jump threading opportunities in this vector; each entry is a
237 complete jump threading path (a vector of jump_thread_edge pointers). */
238 static vec<vec<jump_thread_edge *> *> paths;
240 /* When we start updating the CFG for threading, data necessary for jump
241 threading is attached to the AUX field for the incoming edge. Use these
242 macros to access the underlying structure attached to the AUX field. */
243 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
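/* A sketch of the typical access pattern (it appears throughout this file;
   the variable names are only illustrative):

     if (e->aux)
       {
	 vec<jump_thread_edge *> *path = THREAD_PATH (e);
	 edge final = path->last ()->e;
	 ...
       }

   i.e. the AUX field of a threaded incoming edge carries the entire jump
   threading path, and the last element of that path gives the final
   destination of the thread.  */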
245 /* Jump threading statistics. */
247 struct thread_stats_d
249 unsigned long num_threaded_edges;
252 struct thread_stats_d thread_stats;
255 /* Remove the last statement in block BB if it is a control statement.
256 Also remove all outgoing edges except the edge which reaches DEST_BB.
257 If DEST_BB is NULL, then remove all outgoing edges. */
259 static void
260 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
262 gimple_stmt_iterator gsi;
263 edge e;
264 edge_iterator ei;
266 gsi = gsi_last_bb (bb);
268 /* If the duplicate ends with a control statement, then remove it.
270 Note that if we are duplicating the template block rather than the
271 original basic block, then the duplicate might not have any real
272 statements in it. */
273 if (!gsi_end_p (gsi)
274 && gsi_stmt (gsi)
275 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
276 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
277 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
278 gsi_remove (&gsi, true);
280 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
282 if (e->dest != dest_bb)
283 remove_edge (e);
284 else
285 ei_next (&ei);
289 /* Create a duplicate of BB. Record the duplicate block in an array
290 indexed by COUNT stored in RD. */
292 static void
293 create_block_for_threading (basic_block bb,
294 struct redirection_data *rd,
295 unsigned int count)
297 edge_iterator ei;
298 edge e;
300 /* We can use the generic block duplication code and simply remove
301 the stuff we do not need. */
302 rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
304 FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
305 e->aux = NULL;
307 /* Zero out the profile, since the block is unreachable for now. */
308 rd->dup_blocks[count]->frequency = 0;
309 rd->dup_blocks[count]->count = 0;
312 /* Main data structure to hold information for duplicates of BB. */
314 static hash_table <redirection_data> redirection_data;
316 /* Given an edge E, look up and return the entry for E's jump threading
318 path in our hash table. If INSERT is true, then we insert the entry
319 into the hash table if it is not already present. E is added to the
320 list of incoming edges associated with the entry in the hash table. */
322 static struct redirection_data *
323 lookup_redirection_data (edge e, enum insert_option insert)
325 struct redirection_data **slot;
326 struct redirection_data *elt;
327 vec<jump_thread_edge *> *path = THREAD_PATH (e);
329 /* Build a hash table element so we can see if E is already
330 in the table. */
331 elt = XNEW (struct redirection_data);
332 elt->path = path;
333 elt->dup_blocks[0] = NULL;
334 elt->dup_blocks[1] = NULL;
335 elt->incoming_edges = NULL;
337 slot = redirection_data.find_slot (elt, insert);
339 /* This will only happen if INSERT is false and the entry is not
340 in the hash table. */
341 if (slot == NULL)
343 free (elt);
344 return NULL;
347 /* This will only happen if E was not in the hash table and
348 INSERT is true. */
349 if (*slot == NULL)
351 *slot = elt;
352 elt->incoming_edges = XNEW (struct el);
353 elt->incoming_edges->e = e;
354 elt->incoming_edges->next = NULL;
355 return elt;
357 /* E was in the hash table. */
358 else
360 /* Free ELT as we do not need it anymore, we will extract the
361 relevant entry from the hash table itself. */
362 free (elt);
364 /* Get the entry stored in the hash table. */
365 elt = *slot;
367 /* If insertion was requested, then we need to add E to the list
368 of incoming edges associated with this hash table entry. */
369 if (insert)
371 struct el *el = XNEW (struct el);
372 el->next = elt->incoming_edges;
373 el->e = e;
374 elt->incoming_edges = el;
377 return elt;
381 /* Similar to copy_phi_args, except that the PHI arg already exists;
382 it just does not have a value associated with it. */
384 static void
385 copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
387 int src_idx = src_e->dest_idx;
388 int tgt_idx = tgt_e->dest_idx;
390 /* Iterate over each PHI in e->dest. */
391 for (gimple_stmt_iterator gsi = gsi_start_phis (src_e->dest),
392 gsi2 = gsi_start_phis (tgt_e->dest);
393 !gsi_end_p (gsi);
394 gsi_next (&gsi), gsi_next (&gsi2))
396 gimple src_phi = gsi_stmt (gsi);
397 gimple dest_phi = gsi_stmt (gsi2);
398 tree val = gimple_phi_arg_def (src_phi, src_idx);
399 source_location locus = gimple_phi_arg_location (src_phi, src_idx);
401 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
402 gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
406 /* Given ssa_name DEF, backtrack jump threading PATH from node IDX
407 to see if it has a constant value in a flow sensitive manner.  Set
408 LOCUS to the location of the constant phi arg and return the value.
409 Return DEF directly if PATH is NULL or IDX is zero. */
411 static tree
412 get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
413 basic_block bb, int idx, source_location *locus)
415 tree arg;
416 gimple def_phi;
417 basic_block def_bb;
419 if (path == NULL || idx == 0)
420 return def;
422 def_phi = SSA_NAME_DEF_STMT (def);
423 if (gimple_code (def_phi) != GIMPLE_PHI)
424 return def;
426 def_bb = gimple_bb (def_phi);
427 /* Don't propagate loop invariants into deeper loops. */
428 if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
429 return def;
431 /* Backtrack jump threading path from IDX to see if def has constant
432 value. */
433 for (int j = idx - 1; j >= 0; j--)
435 edge e = (*path)[j]->e;
436 if (e->dest == def_bb)
438 arg = gimple_phi_arg_def (def_phi, e->dest_idx);
439 if (is_gimple_min_invariant (arg))
441 *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
442 return arg;
444 break;
448 return def;
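/* A hypothetical example of the backtracking above: if the jump threading
   path entered DEF_BB via the edge from block A, and DEF is

     x_1 = PHI <0 (A), x_2 (D)>

   then we return the constant 0 along with the location of that PHI
   argument, letting copy_phi_args propagate the constant instead of x_1
   along the threaded path.  */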
451 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
452 Try to backtrack jump threading PATH from node IDX to see if the arg
453 has a constant value; if so, copy the constant value instead of the
454 argument itself. */
456 static void
457 copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
458 vec<jump_thread_edge *> *path, int idx)
460 gimple_stmt_iterator gsi;
461 int src_indx = src_e->dest_idx;
463 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
465 gimple phi = gsi_stmt (gsi);
466 tree def = gimple_phi_arg_def (phi, src_indx);
467 source_location locus = gimple_phi_arg_location (phi, src_indx);
469 if (TREE_CODE (def) == SSA_NAME
470 && !virtual_operand_p (gimple_phi_result (phi)))
471 def = get_value_locus_in_path (def, path, bb, idx, &locus);
473 add_phi_arg (phi, def, tgt_e, locus);
477 /* We have recently made a copy of ORIG_BB, including its outgoing
478 edges. The copy is NEW_BB. Every PHI node in every direct successor of
479 ORIG_BB has a new argument associated with edge from NEW_BB to the
480 successor. Initialize the PHI argument so that it is equal to the PHI
481 argument associated with the edge from ORIG_BB to the successor.
482 PATH and IDX are used to check if the new PHI argument has constant
483 value in a flow sensitive manner. */
485 static void
486 update_destination_phis (basic_block orig_bb, basic_block new_bb,
487 vec<jump_thread_edge *> *path, int idx)
489 edge_iterator ei;
490 edge e;
492 FOR_EACH_EDGE (e, ei, orig_bb->succs)
494 edge e2 = find_edge (new_bb, e->dest);
495 copy_phi_args (e->dest, e, e2, path, idx);
499 /* Given a duplicate block and its single destination (both stored
500 in RD), create an edge between the duplicate and its single
501 destination.
503 Add an additional argument to any PHI nodes at the single
504 destination.  IDX is the node in the jump threading path at which
505 we start checking whether the new PHI argument has a constant
506 value along the jump threading path. */
508 static void
509 create_edge_and_update_destination_phis (struct redirection_data *rd,
510 basic_block bb, int idx)
512 edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
514 rescan_loop_exit (e, true, false);
515 e->probability = REG_BR_PROB_BASE;
516 e->count = bb->count;
518 /* We used to copy the thread path here. That was added in 2007
519 and dutifully updated through the representation changes in 2013.
521 In 2013 we added code to thread from an interior node through
522 the backedge to another interior node. That runs after the code
523 to thread through loop headers from outside the loop.
525 The latter may delete edges in the CFG, including those
526 which appeared in the jump threading path we copied here. Thus
527 we'd end up using a dangling pointer.
529 After reviewing the 2007/2011 code, I can't see how anything
530 depended on copying the AUX field and clearly copying the jump
531 threading path is problematical due to embedded edge pointers.
532 It has been removed. */
533 e->aux = NULL;
535 /* If there are any PHI nodes at the destination of the outgoing edge
536 from the duplicate block, then we will need to add a new argument
537 to them. The argument should have the same value as the argument
538 associated with the outgoing edge stored in RD. */
539 copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
542 /* Look through PATH beginning at START and return TRUE if there are
543 any additional blocks that need to be duplicated. Otherwise,
544 return FALSE. */
545 static bool
546 any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
547 unsigned int start)
549 for (unsigned int i = start + 1; i < path->length (); i++)
551 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
552 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
553 return true;
555 return false;
558 /* Wire up the outgoing edges from the duplicate blocks and
559 update any PHIs as needed. */
560 void
561 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
562 ssa_local_info_t *local_info)
564 bool multi_incomings = (rd->incoming_edges->next != NULL);
565 edge e = rd->incoming_edges->e;
566 vec<jump_thread_edge *> *path = THREAD_PATH (e);
568 for (unsigned int count = 0, i = 1; i < path->length (); i++)
570 /* If we were threading through a joiner block, then we want
571 to keep its control statement and redirect an outgoing edge.
572 Else we want to remove the control statement & edges, then create
573 a new outgoing edge. In both cases we may need to update PHIs. */
574 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
576 edge victim;
577 edge e2;
579 /* This updates the PHIs at the destination of the duplicate
580 block. Pass 0 instead of i if we are threading a path which
581 has multiple incoming edges. */
582 update_destination_phis (local_info->bb, rd->dup_blocks[count],
583 path, multi_incomings ? 0 : i);
585 /* Find the edge from the duplicate block to the block we're
586 threading through. That's the edge we want to redirect. */
587 victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
589 /* If there are no remaining blocks on the path to duplicate,
590 then redirect VICTIM to the final destination of the jump
591 threading path. */
592 if (!any_remaining_duplicated_blocks (path, i))
594 e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
595 e2->count = path->last ()->e->count;
596 /* If we redirected the edge, then we need to copy PHI arguments
597 at the target. If the edge already existed (e2 != victim
598 case), then the PHIs in the target already have the correct
599 arguments. */
600 if (e2 == victim)
601 copy_phi_args (e2->dest, path->last ()->e, e2,
602 path, multi_incomings ? 0 : i);
604 else
606 /* Redirect VICTIM to the next duplicated block in the path. */
607 e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
609 /* We need to update the PHIs in the next duplicated block. We
610 want the new PHI args to have the same value as they had
611 in the source of the next duplicate block.
613 Thus, we need to know which edge we traversed into the
614 source of the duplicate. Furthermore, we may have
615 traversed many edges to reach the source of the duplicate.
617 Walk through the path starting at element I until we
618 hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
619 the edge from the prior element. */
620 for (unsigned int j = i + 1; j < path->length (); j++)
622 if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
624 copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
625 break;
629 count++;
631 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
633 remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
634 create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
635 multi_incomings ? 0 : i);
636 if (count == 1)
637 single_succ_edge (rd->dup_blocks[1])->aux = NULL;
638 count++;
643 /* Hash table traversal callback routine to create duplicate blocks. */
646 ssa_create_duplicates (struct redirection_data **slot,
647 ssa_local_info_t *local_info)
649 struct redirection_data *rd = *slot;
651 /* The second duplicated block in a jump threading path is specific
652 to the path. So it gets stored in RD rather than in LOCAL_DATA.
654 Each time we're called, we have to look through the path and see
655 if a second block needs to be duplicated.
657 Note the search starts with the third edge on the path. The first
658 edge is the incoming edge, the second edge always has its source
659 duplicated. Thus we start our search with the third edge. */
660 vec<jump_thread_edge *> *path = rd->path;
661 for (unsigned int i = 2; i < path->length (); i++)
663 if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
664 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
666 create_block_for_threading ((*path)[i]->e->src, rd, 1);
667 break;
671 /* Create a template block if we have not done so already. Otherwise
672 use the template to create a new block. */
673 if (local_info->template_block == NULL)
675 create_block_for_threading ((*path)[1]->e->src, rd, 0);
676 local_info->template_block = rd->dup_blocks[0];
678 /* We do not create any outgoing edges for the template. We will
679 take care of that in a later traversal. That way we do not
680 create edges that are going to just be deleted. */
682 else
684 create_block_for_threading (local_info->template_block, rd, 0);
686 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
687 block. */
688 ssa_fix_duplicate_block_edges (rd, local_info);
691 /* Keep walking the hash table. */
692 return 1;
695 /* We did not create any outgoing edges for the template block during
696 block creation. This hash table traversal callback creates the
697 outgoing edge for the template block. */
699 inline int
700 ssa_fixup_template_block (struct redirection_data **slot,
701 ssa_local_info_t *local_info)
703 struct redirection_data *rd = *slot;
705 /* If this is the template block, halt the traversal after updating
706 it appropriately.
708 If we were threading through a joiner block, then we want
709 to keep its control statement and redirect an outgoing edge.
710 Else we want to remove the control statement & edges, then create
711 a new outgoing edge. In both cases we may need to update PHIs. */
712 if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
714 ssa_fix_duplicate_block_edges (rd, local_info);
715 return 0;
718 return 1;
721 /* Hash table traversal callback to redirect each incoming edge
722 associated with this hash table element to its new destination. */
725 ssa_redirect_edges (struct redirection_data **slot,
726 ssa_local_info_t *local_info)
728 struct redirection_data *rd = *slot;
729 struct el *next, *el;
731 /* Walk over all the incoming edges associated with this
732 hash table entry. */
733 for (el = rd->incoming_edges; el; el = next)
735 edge e = el->e;
736 vec<jump_thread_edge *> *path = THREAD_PATH (e);
738 /* Go ahead and free this element from the list. Doing this now
739 avoids the need for another list walk when we destroy the hash
740 table. */
741 next = el->next;
742 free (el);
744 thread_stats.num_threaded_edges++;
746 if (rd->dup_blocks[0])
748 edge e2;
750 if (dump_file && (dump_flags & TDF_DETAILS))
751 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
752 e->src->index, e->dest->index, rd->dup_blocks[0]->index);
754 rd->dup_blocks[0]->count += e->count;
756 /* Excessive jump threading may make frequencies large enough so
757 the computation overflows. */
758 if (rd->dup_blocks[0]->frequency < BB_FREQ_MAX * 2)
759 rd->dup_blocks[0]->frequency += EDGE_FREQUENCY (e);
761 /* In the case of threading through a joiner block, the outgoing
762 edges from the duplicate block were updated when they were
763 redirected during ssa_fix_duplicate_block_edges. */
764 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
765 EDGE_SUCC (rd->dup_blocks[0], 0)->count += e->count;
767 /* Redirect the incoming edge (possibly to the joiner block) to the
768 appropriate duplicate block. */
769 e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
770 gcc_assert (e == e2);
771 flush_pending_stmts (e2);
774 /* Go ahead and clear E->aux. It's not needed anymore and failure
775 to clear it will cause all kinds of unpleasant problems later. */
776 delete_jump_thread_path (path);
777 e->aux = NULL;
781 /* Indicate that we actually threaded one or more jumps. */
782 if (rd->incoming_edges)
783 local_info->jumps_threaded = true;
785 return 1;
788 /* Return true if this block has no executable statements other than
789 a simple ctrl flow instruction. When the number of outgoing edges
790 is one, this is equivalent to a "forwarder" block. */
792 static bool
793 redirection_block_p (basic_block bb)
795 gimple_stmt_iterator gsi;
797 /* Advance to the first executable statement. */
798 gsi = gsi_start_bb (bb);
799 while (!gsi_end_p (gsi)
800 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
801 || is_gimple_debug (gsi_stmt (gsi))
802 || gimple_nop_p (gsi_stmt (gsi))))
803 gsi_next (&gsi);
805 /* Check if this is an empty block. */
806 if (gsi_end_p (gsi))
807 return true;
809 /* Test that we've reached the terminating control statement. */
810 return gsi_stmt (gsi)
811 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
812 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
813 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
816 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
817 is reached via one or more specific incoming edges, we know which
818 outgoing edge from BB will be traversed.
820 We want to redirect those incoming edges to the target of the
821 appropriate outgoing edge. Doing so avoids a conditional branch
822 and may expose new optimization opportunities. Note that we have
823 to update dominator tree and SSA graph after such changes.
825 The key to keeping the SSA graph update manageable is to duplicate
826 the side effects occurring in BB so that those side effects still
827 occur on the paths which bypass BB after redirecting edges.
829 We accomplish this by creating duplicates of BB and arranging for
830 the duplicates to unconditionally pass control to one specific
831 successor of BB. We then revector the incoming edges into BB to
832 the appropriate duplicate of BB.
834 If NOLOOP_ONLY is true, we only perform the threading as long as it
835 does not affect the structure of the loops in a nontrivial way.
837 If JOINERS is true, then thread through joiner blocks as well. */
839 static bool
840 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
842 /* E is an incoming edge into BB that we may or may not want to
843 redirect to a duplicate of BB. */
844 edge e, e2;
845 edge_iterator ei;
846 ssa_local_info_t local_info;
847 struct loop *loop = bb->loop_father;
849 /* To avoid scanning a linear array for the element we need we instead
850 use a hash table. For normal code there should be no noticeable
851 difference. However, if we have a block with a large number of
852 incoming and outgoing edges such linear searches can get expensive. */
853 redirection_data.create (EDGE_COUNT (bb->succs));
855 /* If we thread the latch of the loop to its exit, the loop ceases to
856 exist. Make sure we do not restrict ourselves in order to preserve
857 this loop. */
858 if (loop->header == bb)
860 e = loop_latch_edge (loop);
861 vec<jump_thread_edge *> *path = THREAD_PATH (e);
863 if (path
864 && (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && joiners)
865 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && !joiners)))
867 for (unsigned int i = 1; i < path->length (); i++)
869 edge e2 = (*path)[i]->e;
871 if (loop_exit_edge_p (loop, e2))
873 loop->header = NULL;
874 loop->latch = NULL;
875 loops_state_set (LOOPS_NEED_FIXUP);
881 /* Record each unique threaded destination into a hash table for
882 efficient lookups. */
883 FOR_EACH_EDGE (e, ei, bb->preds)
885 if (e->aux == NULL)
886 continue;
888 vec<jump_thread_edge *> *path = THREAD_PATH (e);
890 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
891 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
892 continue;
894 e2 = path->last ()->e;
895 if (!e2 || noloop_only)
897 /* If NOLOOP_ONLY is true, we only allow threading through the
898 header of a loop to exit edges. */
900 /* One case occurs when there was a loop header buried in a jump
901 threading path that crosses loop boundaries.  We do not try
902 to thread this elsewhere, so just cancel the jump threading
903 request by clearing the AUX field now. */
904 if ((bb->loop_father != e2->src->loop_father
905 && !loop_exit_edge_p (e2->src->loop_father, e2))
906 || (e2->src->loop_father != e2->dest->loop_father
907 && !loop_exit_edge_p (e2->src->loop_father, e2)))
909 /* Since this case is not handled by our special code
910 to thread through a loop header, we must explicitly
911 cancel the threading request here. */
912 delete_jump_thread_path (path);
913 e->aux = NULL;
914 continue;
917 /* Another case occurs when trying to thread through our
918 own loop header, possibly from inside the loop. We will
919 thread these later. */
920 unsigned int i;
921 for (i = 1; i < path->length (); i++)
923 if ((*path)[i]->e->src == bb->loop_father->header
924 && (!loop_exit_edge_p (bb->loop_father, e2)
925 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
926 break;
929 if (i != path->length ())
930 continue;
933 if (e->dest == e2->src)
934 update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
935 e->count, (*THREAD_PATH (e))[1]->e);
937 /* Insert the outgoing edge into the hash table if it is not
938 already in the hash table. */
939 lookup_redirection_data (e, INSERT);
942 /* We do not update dominance info. */
943 free_dominance_info (CDI_DOMINATORS);
945 /* We know we only thread through the loop header to loop exits.
946 Let the basic block duplication hook know we are not creating
947 a multiple entry loop. */
948 if (noloop_only
949 && bb == bb->loop_father->header)
950 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
952 /* Now create duplicates of BB.
954 Note that for a block with a high outgoing degree we can waste
955 a lot of time and memory creating and destroying useless edges.
957 So we first duplicate BB and remove the control structure at the
958 tail of the duplicate as well as all outgoing edges from the
959 duplicate. We then use that duplicate block as a template for
960 the rest of the duplicates. */
961 local_info.template_block = NULL;
962 local_info.bb = bb;
963 local_info.jumps_threaded = false;
964 redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
965 (&local_info);
967 /* The template does not have an outgoing edge. Create that outgoing
968 edge and update PHI nodes at the edge's target as necessary.
970 We do this after creating all the duplicates to avoid creating
971 unnecessary edges. */
972 redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
973 (&local_info);
975 /* The hash table traversals above created the duplicate blocks (and the
976 statements within the duplicate blocks). This loop creates PHI nodes for
977 the duplicated blocks and redirects the incoming edges into BB to reach
978 the duplicates of BB. */
979 redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
980 (&local_info);
982 /* Done with this block. Clear REDIRECTION_DATA. */
983 redirection_data.dispose ();
985 if (noloop_only
986 && bb == bb->loop_father->header)
987 set_loop_copy (bb->loop_father, NULL);
989 /* Indicate to our caller whether or not any jumps were threaded. */
990 return local_info.jumps_threaded;
993 /* Wrapper for thread_block_1 so that we can first handle jump
994 thread paths which do not involve copying joiner blocks, then
995 handle jump thread paths which have joiner blocks.
997 By doing things this way we can be as aggressive as possible and
998 not worry that copying a joiner block will create a jump threading
999 opportunity. */
1001 static bool
1002 thread_block (basic_block bb, bool noloop_only)
1004 bool retval;
1005 retval = thread_block_1 (bb, noloop_only, false);
1006 retval |= thread_block_1 (bb, noloop_only, true);
1007 return retval;
1011 /* Threads edge E through E->dest to the second edge on E's jump threading
1012 path. Returns the copy of E->dest created during threading, or E->dest
1013 if it was not necessary to copy it (E is its single predecessor). */
1015 static basic_block
1016 thread_single_edge (edge e)
1018 basic_block bb = e->dest;
1019 struct redirection_data rd;
1020 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1021 edge eto = (*path)[1]->e;
1023 for (unsigned int i = 0; i < path->length (); i++)
1024 delete (*path)[i];
1025 delete path;
1026 e->aux = NULL;
1028 thread_stats.num_threaded_edges++;
1030 if (single_pred_p (bb))
1032 /* If BB has just a single predecessor, we should only remove the
1033 control statements at its end, and successors except for ETO. */
1034 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
1036 /* And fixup the flags on the single remaining edge. */
1037 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
1038 eto->flags |= EDGE_FALLTHRU;
1040 return bb;
1043 /* Otherwise, we need to create a copy. */
1044 if (e->dest == eto->src)
1045 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
1047 vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
1048 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1049 npath->safe_push (x);
1051 x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
1052 npath->safe_push (x);
1053 rd.path = npath;
1055 create_block_for_threading (bb, &rd, 0);
1056 remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
1057 create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0], 0);
1059 if (dump_file && (dump_flags & TDF_DETAILS))
1060 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
1061 e->src->index, e->dest->index, rd.dup_blocks[0]->index);
1063 rd.dup_blocks[0]->count = e->count;
1064 rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
1065 single_succ_edge (rd.dup_blocks[0])->count = e->count;
1066 redirect_edge_and_branch (e, rd.dup_blocks[0]);
1067 flush_pending_stmts (e);
1069 return rd.dup_blocks[0];
1072 /* Callback for dfs_enumerate_from. Returns true if BB is different
1073 from STOP and DBDS_CE_STOP. */
1075 static basic_block dbds_ce_stop;
1076 static bool
1077 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
1079 return (bb != (const_basic_block) stop
1080 && bb != dbds_ce_stop);
1083 /* Evaluates the dominance relationship of latch of the LOOP and BB, and
1084 returns the state. */
1086 enum bb_dom_status
1088 /* BB does not dominate latch of the LOOP. */
1089 DOMST_NONDOMINATING,
1090 /* The LOOP is broken (there is no path from the header to its latch). */
1091 DOMST_LOOP_BROKEN,
1092 /* BB dominates the latch of the LOOP. */
1093 DOMST_DOMINATING
1096 static enum bb_dom_status
1097 determine_bb_domination_status (struct loop *loop, basic_block bb)
1099 basic_block *bblocks;
1100 unsigned nblocks, i;
1101 bool bb_reachable = false;
1102 edge_iterator ei;
1103 edge e;
1105 /* This function assumes BB is a successor of LOOP->header.
1106 If that is not the case return DOMST_NONDOMINATING which
1107 is always safe. */
1109 bool ok = false;
1111 FOR_EACH_EDGE (e, ei, bb->preds)
1113 if (e->src == loop->header)
1115 ok = true;
1116 break;
1120 if (!ok)
1121 return DOMST_NONDOMINATING;
1124 if (bb == loop->latch)
1125 return DOMST_DOMINATING;
1127 /* Check that BB dominates LOOP->latch, and that it is back-reachable
1128 from it. */
1130 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1131 dbds_ce_stop = loop->header;
1132 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1133 bblocks, loop->num_nodes, bb);
1134 for (i = 0; i < nblocks; i++)
1135 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1137 if (e->src == loop->header)
1139 free (bblocks);
1140 return DOMST_NONDOMINATING;
1142 if (e->src == bb)
1143 bb_reachable = true;
1146 free (bblocks);
1147 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1150 /* Return true if BB is part of the new pre-header that is created
1151 when threading the latch to DATA. */
1153 static bool
1154 def_split_header_continue_p (const_basic_block bb, const void *data)
1156 const_basic_block new_header = (const_basic_block) data;
1157 const struct loop *l;
1159 if (bb == new_header
1160 || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
1161 return false;
1162 for (l = bb->loop_father; l; l = loop_outer (l))
1163 if (l == new_header->loop_father)
1164 return true;
1165 return false;
1168 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
1169 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1170 to the inside of the loop. */
1172 static bool
1173 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1175 basic_block header = loop->header;
1176 edge e, tgt_edge, latch = loop_latch_edge (loop);
1177 edge_iterator ei;
1178 basic_block tgt_bb, atgt_bb;
1179 enum bb_dom_status domst;
1181 /* We have already threaded through headers to exits, so all the threading
1182 requests now are to the inside of the loop. We need to avoid creating
1183 irreducible regions (i.e., loops with more than one entry block), and
1184 also loops with several latch edges, or new subloops of the loop (although
1185 there are cases where it might be appropriate, it is difficult to decide,
1186 and doing it wrongly may confuse other optimizers).
1188 We could handle more general cases here. However, the intention is to
1189 preserve some information about the loop, which is impossible if its
1190 structure changes significantly, in a way that is not well understood.
1191 Thus we only handle a few important special cases, in which also updating
1192 of the loop-carried information should be feasible:
1194 1) Propagation of latch edge to a block that dominates the latch block
1195 of a loop. This aims to handle the following idiom:
1197 first = 1;
1198 while (1)
1200 if (first)
1201 initialize;
1202 first = 0;
1203 body;
1206 After threading the latch edge, this becomes
1208 first = 1;
1209 if (first)
1210 initialize;
1211 while (1)
1213 first = 0;
1214 body;
1217 The original header of the loop is moved out of it, and we may thread
1218 the remaining edges through it without further constraints.
1220 2) All entry edges are propagated to a single basic block that dominates
1221 the latch block of the loop. This aims to handle the following idiom
1222 (normally created for "for" loops):
1224 i = 0;
1225 while (1)
1227 if (i >= 100)
1228 break;
1229 body;
1230 i++;
1233 This becomes
1235 i = 0;
1236 while (1)
1238 body;
1239 i++;
1240 if (i >= 100)
1241 break;
1245 /* Threading through the header won't improve the code if the header has just
1246 one successor. */
1247 if (single_succ_p (header))
1248 goto fail;
1250 /* If we threaded the latch using a joiner block, we cancel the
1251 threading opportunity out of an abundance of caution. However,
1252 still allow threading from outside to inside the loop. */
1253 if (latch->aux)
1255 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1256 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1258 delete_jump_thread_path (path);
1259 latch->aux = NULL;
1263 if (latch->aux)
1265 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1266 tgt_edge = (*path)[1]->e;
1267 tgt_bb = tgt_edge->dest;
1269 else if (!may_peel_loop_headers
1270 && !redirection_block_p (loop->header))
1271 goto fail;
1272 else
1274 tgt_bb = NULL;
1275 tgt_edge = NULL;
1276 FOR_EACH_EDGE (e, ei, header->preds)
1278 if (!e->aux)
1280 if (e == latch)
1281 continue;
1283 /* If latch is not threaded, and there is a header
1284 edge that is not threaded, we would create a loop
1285 with multiple entries. */
1286 goto fail;
1289 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1291 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1292 goto fail;
1293 tgt_edge = (*path)[1]->e;
1294 atgt_bb = tgt_edge->dest;
1295 if (!tgt_bb)
1296 tgt_bb = atgt_bb;
1297 /* Two targets of threading would make us create a loop
1298 with multiple entries. */
1299 else if (tgt_bb != atgt_bb)
1300 goto fail;
1303 if (!tgt_bb)
1305 /* There are no threading requests. */
1306 return false;
1309 /* Redirecting to an empty loop latch is useless. */
1310 if (tgt_bb == loop->latch
1311 && empty_block_p (loop->latch))
1312 goto fail;
1315 /* The target block must dominate the loop latch, otherwise we would be
1316 creating a subloop. */
1317 domst = determine_bb_domination_status (loop, tgt_bb);
1318 if (domst == DOMST_NONDOMINATING)
1319 goto fail;
1320 if (domst == DOMST_LOOP_BROKEN)
1322 /* If the loop ceased to exist, mark it as such, and thread through its
1323 original header. */
1324 loop->header = NULL;
1325 loop->latch = NULL;
1326 loops_state_set (LOOPS_NEED_FIXUP);
1327 return thread_block (header, false);
1330 if (tgt_bb->loop_father->header == tgt_bb)
1332 /* If the target of the threading is a header of a subloop, we need
1333 to create a preheader for it, so that the headers of the two loops
1334 do not merge. */
1335 if (EDGE_COUNT (tgt_bb->preds) > 2)
1337 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1338 gcc_assert (tgt_bb != NULL);
1340 else
1341 tgt_bb = split_edge (tgt_edge);
1344 if (latch->aux)
1346 basic_block *bblocks;
1347 unsigned nblocks, i;
1349 /* First handle the case where the latch edge is redirected.  We are copying
1350 the loop header but not creating a multiple entry loop. Make the
1351 cfg manipulation code aware of that fact. */
1352 set_loop_copy (loop, loop);
1353 loop->latch = thread_single_edge (latch);
1354 set_loop_copy (loop, NULL);
1355 gcc_assert (single_succ (loop->latch) == tgt_bb);
1356 loop->header = tgt_bb;
1358 /* Remove the new pre-header blocks from our loop. */
1359 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1360 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1361 bblocks, loop->num_nodes, tgt_bb);
1362 for (i = 0; i < nblocks; i++)
1363 if (bblocks[i]->loop_father == loop)
1365 remove_bb_from_loops (bblocks[i]);
1366 add_bb_to_loop (bblocks[i], loop_outer (loop));
1368 free (bblocks);
1370 /* If the new header has multiple latches mark it so. */
1371 FOR_EACH_EDGE (e, ei, loop->header->preds)
1372 if (e->src->loop_father == loop
1373 && e->src != loop->latch)
1375 loop->latch = NULL;
1376 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1379 /* Cancel remaining threading requests that would make the
1380 loop a multiple entry loop. */
1381 FOR_EACH_EDGE (e, ei, header->preds)
1383 edge e2;
1385 if (e->aux == NULL)
1386 continue;
1388 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1389 e2 = path->last ()->e;
1391 if (e->src->loop_father != e2->dest->loop_father
1392 && e2->dest != loop->header)
1394 delete_jump_thread_path (path);
1395 e->aux = NULL;
1399 /* Thread the remaining edges through the former header. */
1400 thread_block (header, false);
1402 else
1404 basic_block new_preheader;
1406 /* Now consider the case where entry edges are redirected to the new entry
1407 block. Remember one entry edge, so that we can find the new
1408 preheader (its destination after threading). */
1409 FOR_EACH_EDGE (e, ei, header->preds)
1411 if (e->aux)
1412 break;
1415 /* The duplicate of the header is the new preheader of the loop. Ensure
1416 that it is placed correctly in the loop hierarchy. */
1417 set_loop_copy (loop, loop_outer (loop));
1419 thread_block (header, false);
1420 set_loop_copy (loop, NULL);
1421 new_preheader = e->dest;
1423 /* Create the new latch block. This is always necessary, as the latch
1424 must have only a single successor, but the original header had at
1425 least two successors. */
1426 loop->latch = NULL;
1427 mfb_kj_edge = single_succ_edge (new_preheader);
1428 loop->header = mfb_kj_edge->dest;
1429 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1430 loop->header = latch->dest;
1431 loop->latch = latch->src;
1434 return true;
1436 fail:
1437 /* We failed to thread anything. Cancel the requests. */
1438 FOR_EACH_EDGE (e, ei, header->preds)
1440 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1442 if (path)
1444 delete_jump_thread_path (path);
1445 e->aux = NULL;
1448 return false;
1451 /* E1 and E2 are edges into the same basic block. Return TRUE if the
1452 PHI arguments associated with those edges are equal or there are no
1453 PHI arguments, otherwise return FALSE. */
1455 static bool
1456 phi_args_equal_on_edges (edge e1, edge e2)
1458 gimple_stmt_iterator gsi;
1459 int indx1 = e1->dest_idx;
1460 int indx2 = e2->dest_idx;
1462 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1464 gimple phi = gsi_stmt (gsi);
1466 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1467 gimple_phi_arg_def (phi, indx2), 0))
1468 return false;
1470 return true;
1473 /* Walk through the registered jump threads and convert them into a
1474 form convenient for this pass.
1476 Any block which has incoming edges threaded to outgoing edges
1477 will have its entry in THREADED_BLOCK set.
1479 Any threaded edge will have its new outgoing edge stored in the
1480 original edge's AUX field.
1482 This form avoids the need to walk all the edges in the CFG to
1483 discover blocks which need processing and avoids unnecessary
1484 hash table lookups to map from threaded edge to new target. */
1486 static void
1487 mark_threaded_blocks (bitmap threaded_blocks)
1489 unsigned int i;
1490 bitmap_iterator bi;
1491 bitmap tmp = BITMAP_ALLOC (NULL);
1492 basic_block bb;
1493 edge e;
1494 edge_iterator ei;
1496 /* It is possible to have jump threads in which one is a subpath
1497 of the other, i.e., (A, B), (B, C), (C, D) where B is a joiner
1498 block and (B, C), (C, D) where no joiner block exists.
1500 When this occurs ignore the jump thread request with the joiner
1501 block. It's totally subsumed by the simpler jump thread request.
1503 This results in less block copying and simpler CFGs. More importantly,
1504 when we duplicate the joiner block, B, in this case we will create
1505 a new threading opportunity that we wouldn't be able to optimize
1506 until the next jump threading iteration.
1508 So first convert the jump thread requests which do not require a
1509 joiner block. */
1510 for (i = 0; i < paths.length (); i++)
1512 vec<jump_thread_edge *> *path = paths[i];
1514 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
1516 edge e = (*path)[0]->e;
1517 e->aux = (void *)path;
1518 bitmap_set_bit (tmp, e->dest->index);
1522 /* Now iterate again, converting cases where we want to thread
1523 through a joiner block, but only if no other edge on the path
1524 already has a jump thread attached to it. */
1525 for (i = 0; i < paths.length (); i++)
1527 vec<jump_thread_edge *> *path = paths[i];
1529 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1531 unsigned int j;
1533 for (j = 0; j < path->length (); j++)
1534 if ((*path)[j]->e->aux != NULL)
1535 break;
1537 /* If we iterated through the entire path without exiting the loop,
1538 then we are good to go, attach the path to the starting edge. */
1539 if (j == path->length ())
1541 edge e = (*path)[0]->e;
1542 e->aux = path;
1543 bitmap_set_bit (tmp, e->dest->index);
1545 else if (dump_file && (dump_flags & TDF_DETAILS))
1547 dump_jump_thread_path (dump_file, *path, false);
1553 /* If optimizing for size, only thread through block if we don't have
1554 to duplicate it or it's an otherwise empty redirection block. */
1555 if (optimize_function_for_size_p (cfun))
1557 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1559 bb = BASIC_BLOCK_FOR_FN (cfun, i);
1560 if (EDGE_COUNT (bb->preds) > 1
1561 && !redirection_block_p (bb))
1563 FOR_EACH_EDGE (e, ei, bb->preds)
1565 if (e->aux)
1567 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1568 delete_jump_thread_path (path);
1569 e->aux = NULL;
1573 else
1574 bitmap_set_bit (threaded_blocks, i);
1577 else
1578 bitmap_copy (threaded_blocks, tmp);
1580 /* Look for jump threading paths which cross multiple loop headers.
1582 The code to thread through loop headers will change the CFG in ways
1583 that break assumptions made by the loop optimization code.
1585 We don't want to blindly cancel the requests. We can instead do better
1586 by trimming off the end of the jump thread path. */
1587 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1589 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1590 FOR_EACH_EDGE (e, ei, bb->preds)
1592 if (e->aux)
1594 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1596 for (unsigned int i = 0, crossed_headers = 0;
1597 i < path->length ();
1598 i++)
1600 basic_block dest = (*path)[i]->e->dest;
1601 crossed_headers += (dest == dest->loop_father->header);
1602 if (crossed_headers > 1)
1604 /* Trim from entry I onwards. */
1605 for (unsigned int j = i; j < path->length (); j++)
1606 delete (*path)[j];
1607 path->truncate (i);
1609 /* Now that we've truncated the path, make sure
1610 what's left is still valid. We need at least
1611 two edges on the path and the last edge can not
1612 be a joiner. This should never happen, but let's
1613 be safe. */
1614 if (path->length () < 2
1615 || (path->last ()->type
1616 == EDGE_COPY_SRC_JOINER_BLOCK))
1618 delete_jump_thread_path (path);
1619 e->aux = NULL;
1621 break;
1628 /* If we have a joiner block (J) which has two successors S1 and S2 and
1629 we are threading though S1 and the final destination of the thread
1630 is S2, then we must verify that any PHI nodes in S2 have the same
1631 PHI arguments for the edge J->S2 and J->S1->...->S2.
1633 We used to detect this prior to registering the jump thread, but
1634 that prohibits propagation of edge equivalences into non-dominated
1635 PHI nodes as the equivalency test might occur before propagation.
1637 This must also occur after we truncate any jump threading paths
1638 as this scenario may only show up after truncation.
1640 This works for now, but will need improvement as part of the FSA
1641 optimization.
1643 Note since we've moved the thread request data to the edges,
1644 we have to iterate on those rather than the threaded_edges vector. */
1645 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1647 bb = BASIC_BLOCK_FOR_FN (cfun, i);
1648 FOR_EACH_EDGE (e, ei, bb->preds)
1650 if (e->aux)
1652 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1653 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
1655 if (have_joiner)
1657 basic_block joiner = e->dest;
1658 edge final_edge = path->last ()->e;
1659 basic_block final_dest = final_edge->dest;
1660 edge e2 = find_edge (joiner, final_dest);
1662 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
1664 delete_jump_thread_path (path);
1665 e->aux = NULL;
1672 BITMAP_FREE (tmp);
1676 /* Return TRUE if BB ends with a switch statement or a computed goto.
1677 Otherwise return false. */
1678 static bool
1679 bb_ends_with_multiway_branch (basic_block bb ATTRIBUTE_UNUSED)
1681 gimple stmt = last_stmt (bb);
1682 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
1683 return true;
1684 if (stmt && gimple_code (stmt) == GIMPLE_GOTO
1685 && TREE_CODE (gimple_goto_dest (stmt)) == SSA_NAME)
1686 return true;
1687 return false;
1690 /* Walk through all blocks and thread incoming edges to the appropriate
1691 outgoing edge for each edge pair recorded in THREADED_EDGES.
1693 It is the caller's responsibility to fix the dominance information
1694 and rewrite duplicated SSA_NAMEs back into SSA form.
1696 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
1697 loop headers if it does not simplify the loop.
1699 Returns true if one or more edges were threaded, false otherwise. */
1701 bool
1702 thread_through_all_blocks (bool may_peel_loop_headers)
1704 bool retval = false;
1705 unsigned int i;
1706 bitmap_iterator bi;
1707 bitmap threaded_blocks;
1708 struct loop *loop;
1710 /* We must know about loops in order to preserve them. */
1711 gcc_assert (current_loops != NULL);
1713 if (!paths.exists ())
1714 return false;
1716 threaded_blocks = BITMAP_ALLOC (NULL);
1717 memset (&thread_stats, 0, sizeof (thread_stats));
1719 mark_threaded_blocks (threaded_blocks);
1721 initialize_original_copy_tables ();
1723 /* First perform the threading requests that do not affect
1724 loop structure. */
1725 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
1727 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
1729 if (EDGE_COUNT (bb->preds) > 0)
1730 retval |= thread_block (bb, true);
1733 /* Then perform the threading through loop headers. We start with the
1734 innermost loop, so that the changes in cfg we perform won't affect
1735 further threading. */
1736 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1738 if (!loop->header
1739 || !bitmap_bit_p (threaded_blocks, loop->header->index))
1740 continue;
1742 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
1745 /* Any jump threading paths that are still attached to edges at this
1746 point must be one of two cases.
1748 First, we could have a jump threading path which went from outside
1749 a loop to inside a loop that was ignored because a prior jump thread
1750 across a backedge was realized (which indirectly causes the loop
1751 above to ignore the latter thread). We can detect these because the
1752 loop structures will be different and we do not currently try to
1753 optimize this case.
1755 Second, we could be threading across a backedge to a point within the
1756 same loop. This occurs for the FSA/FSM optimization and we would
1757 like to optimize it. However, we have to be very careful as this
1758 may completely scramble the loop structures, with the result being
1759 irreducible loops causing us to throw away our loop structure.
1761 As a compromise for the latter case, if the thread path ends in
1762 a block where the last statement is a multiway branch, then go
1763 ahead and thread it, else ignore it. */
1764 basic_block bb;
1765 edge e;
1766 FOR_EACH_BB_FN (bb, cfun)
1768 /* If we do end up threading here, we can remove elements from
1769 BB->preds. Thus we can not use the FOR_EACH_EDGE iterator. */
1770 for (edge_iterator ei = ei_start (bb->preds);
1771 (e = ei_safe_edge (ei));)
1772 if (e->aux)
1774 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1776 /* Case 1, threading from outside to inside the loop
1777 after we'd already threaded through the header. */
1778 if ((*path)[0]->e->dest->loop_father
1779 != path->last ()->e->src->loop_father)
1781 delete_jump_thread_path (path);
1782 e->aux = NULL;
1783 ei_next (&ei);
1785 else if (bb_ends_with_multiway_branch (path->last ()->e->src))
1787 /* The code to thread through loop headers may have
1788 split a block with jump threads attached to it.
1790 We can identify this with a disjoint jump threading
1791 path. If found, just remove it. */
1792 for (unsigned int i = 0; i < path->length () - 1; i++)
1793 if ((*path)[i]->e->dest != (*path)[i + 1]->e->src)
1795 delete_jump_thread_path (path);
1796 e->aux = NULL;
1797 ei_next (&ei);
1798 break;
1801 /* Our path is still valid, thread it. */
1802 if (e->aux)
1804 struct loop *loop = (*path)[0]->e->dest->loop_father;
1806 if (thread_block ((*path)[0]->e->dest, false))
1808 /* This jump thread likely totally scrambled this loop.
1809 So arrange for it to be fixed up. */
1810 loop->header = NULL;
1811 loop->latch = NULL;
1812 e->aux = NULL;
1814 else
1816 delete_jump_thread_path (path);
1817 e->aux = NULL;
1818 ei_next (&ei);
1822 else
1824 delete_jump_thread_path (path);
1825 e->aux = NULL;
1826 ei_next (&ei);
1829 else
1830 ei_next (&ei);
1833 statistics_counter_event (cfun, "Jumps threaded",
1834 thread_stats.num_threaded_edges);
1836 free_original_copy_tables ();
1838 BITMAP_FREE (threaded_blocks);
1839 threaded_blocks = NULL;
1840 paths.release ();
1842 if (retval)
1843 loops_state_set (LOOPS_NEED_FIXUP);
1845 return retval;
1848 /* Delete the jump threading path PATH. We have to explicitly delete
1849 each entry in the vector, then the container. */
1851 void
1852 delete_jump_thread_path (vec<jump_thread_edge *> *path)
1854 for (unsigned int i = 0; i < path->length (); i++)
1855 delete (*path)[i];
1856 path->release ();
1859 /* Register a jump threading opportunity. We queue up all the jump
1860 threading opportunities discovered by a pass and update the CFG
1861 and SSA form all at once.
1863 PATH is the sequence of edges to thread; we are effectively recording
1864 that the destination of the first edge in PATH can be changed to the
1865 destination of its last edge after fixing the SSA graph. */
1867 void
1868 register_jump_thread (vec<jump_thread_edge *> *path)
1870 if (!dbg_cnt (registered_jump_thread))
1872 delete_jump_thread_path (path);
1873 return;
1876 /* First make sure there are no NULL outgoing edges on the jump threading
1877 path. That can happen for jumping to a constant address. */
1878 for (unsigned int i = 0; i < path->length (); i++)
1879 if ((*path)[i]->e == NULL)
1881 if (dump_file && (dump_flags & TDF_DETAILS))
1883 fprintf (dump_file,
1884 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
1885 dump_jump_thread_path (dump_file, *path, false);
1888 delete_jump_thread_path (path);
1889 return;
1892 if (dump_file && (dump_flags & TDF_DETAILS))
1893 dump_jump_thread_path (dump_file, *path, true);
1895 if (!paths.exists ())
1896 paths.create (5);
1898 paths.safe_push (path);
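/* A sketch of how a client pass might use this interface (the edges
   involved are hypothetical):

     vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
     path->safe_push (new jump_thread_edge (incoming_edge,
					    EDGE_START_JUMP_THREAD));
     path->safe_push (new jump_thread_edge (outgoing_edge,
					    EDGE_COPY_SRC_BLOCK));
     register_jump_thread (path);

   Later, a single call to thread_through_all_blocks updates the CFG and
   SSA form for all of the registered paths at once.  */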