/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-ssa.h"
#include "tree-ssa-threadupdate.h"
#include "cfgloop.h"
#include "dbgcnt.h"
#include "tree-cfg.h"
#include "tree-vectorizer.h"
/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
	the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
	except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
	argument associated with edge B->C.  Associate the new PHI arguments
	with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
	PHI_RESULT.  Add an argument to the PHI in B' which has the same
	value as the PHI in B associated with the edge A->B.  Associate
	the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

	5a. This automatically deletes any PHI arguments associated with the
	    edge A->B in B.

	5b. This automatically associates each new argument added in step 4
	    with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   We reduce the number of edges and statements we create by not copying all
   the outgoing edges and the control statement in step #1.  We instead create
   a template block without the outgoing edges and duplicate the template.

   Another case this code handles is threading through a "joiner" block.  In
   this case, we do not know the destination of the joiner block, but one
   of the outgoing edges from the joiner block leads to a threadable path.  This
   case largely works as outlined above, except the duplicate of the joiner
   block still contains a full set of outgoing edges and its control statement.
   We just redirect one of its outgoing edges to our jump threading path.  */
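/* As a concrete illustration of the steps above (the block names are
   arbitrary), suppose the edge A->B is threaded to B's successor C,
   while another predecessor D of B is left alone:

	 A   D                  A    D
	  \ /                   |    |
	   B          =>        B'   B
	  / \                    \  / \
	 C   E                    C    E

   B' is the copy made in step #1: it preserves B's side effects, loses
   B's control statement, and unconditionally transfers control to C.
   The original B remains to serve the unthreaded edge D->B.  */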
/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */
struct el
{
  edge e;
  struct el *next;
};
/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data : free_ptr_hash<redirection_data>
{
  /* We support wiring up two block duplicates in a jump threading path.

     One is a normal block copy where we remove the control statement
     and wire up its single remaining outgoing edge to the thread path.

     The other is a joiner block where we leave the control statement
     in place, but wire one of the outgoing edges to a thread path.

     In theory we could have multiple block duplicates in a jump
     threading path, but I haven't tried that.

     The duplicate blocks appear in this array in the same order in
     which they appear in the jump thread path.  */
  basic_block dup_blocks[2];

  /* The jump threading path.  */
  vec<jump_thread_edge *> *path;

  /* A list of incoming edges which we want to thread to the
     same path.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  static inline hashval_t hash (const redirection_data *);
  static inline int equal (const redirection_data *, const redirection_data *);
};
/* Dump a jump threading path, including annotations about each
   edge in the path.  */

static void
dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
		       bool registering)
{
  fprintf (dump_file,
	   "  %s%s jump thread: (%d, %d) incoming edge; ",
	   (registering ? "Registering" : "Cancelling"),
	   (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
	   path[0]->e->src->index, path[0]->e->dest->index);

  for (unsigned int i = 1; i < path.length (); i++)
    {
      /* We can get paths with a NULL edge when the final destination
	 of a jump thread turns out to be a constant address.  We dump
	 those paths when debugging, so we have to be prepared for that
	 possibility here.  */
      if (path[i]->e == NULL)
	continue;

      if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	fprintf (dump_file, " (%d, %d) joiner; ",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) normal;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) nocopy;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[0]->type == EDGE_FSM_THREAD)
	fprintf (dump_file, " (%d, %d) ",
		 path[i]->e->src->index, path[i]->e->dest->index);
    }
  fputc ('\n', dump_file);
}
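/* For example (the block indices here are hypothetical), the routine
   above emits dump lines of the form:

     Registering jump thread: (2, 4) incoming edge;  (4, 6) joiner;  (6, 8) normal;

   built from the (src->index, dest->index) pair of each edge on the
   path together with its jump_thread_edge type.  */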
/* Simple hashing function.  For any given incoming edge E, we're going
   to be most concerned with the final destination of its jump thread
   path.  So hash on the block index of the final edge in the path.  */

inline hashval_t
redirection_data::hash (const redirection_data *p)
{
  vec<jump_thread_edge *> *path = p->path;
  return path->last ()->e->dest->index;
}
/* Given two hash table entries, return true if they have the same
   jump threading path.  */
inline int
redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
{
  vec<jump_thread_edge *> *path1 = p1->path;
  vec<jump_thread_edge *> *path2 = p2->path;

  if (path1->length () != path2->length ())
    return false;

  for (unsigned int i = 1; i < path1->length (); i++)
    {
      if ((*path1)[i]->type != (*path2)[i]->type
	  || (*path1)[i]->e != (*path2)[i]->e)
	return false;
    }

  return true;
}
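/* Note the comparison above starts at index 1: element 0 of a jump
   threading path is the incoming edge, which deliberately differs
   among the many incoming edges hashed to the same entry, while
   elements 1..N describe the threaded route that must match.  */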
/* Rather than search all the edges in jump thread paths each time
   DOM is able to simplify a control statement, we build a hash table
   with the deleted edges.  We only care about the address of the edge,
   not its contents.  */
struct removed_edges : nofree_ptr_hash<edge_def>
{
  static hashval_t hash (edge e) { return htab_hash_pointer (e); }
  static bool equal (edge e1, edge e2) { return e1 == e2; }
};

static hash_table<removed_edges> *removed_edges;
/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* We only create a template block for the first duplicated block in a
     jump threading path as we may need many duplicates of that block.

     The second duplicate block in a path is specific to that path.  Creating
     and sharing a template for that block is considerably more difficult.  */
  basic_block template_block;

  /* Blocks duplicated for the thread.  */
  bitmap duplicate_blocks;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;

  /* When we have multiple paths through a joiner which reach different
     final destinations, then we may need to correct for potential
     profile insanities.  */
  bool need_profile_correction;
};
/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge pairs
   (original_edge, target_edge).  */
static vec<vec<jump_thread_edge *> *> paths;

/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)

/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;
/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
	{
	  free_dom_edge_info (e);
	  remove_edge (e);
	}
      else
	{
	  e->probability = profile_probability::always ();
	  e->count = bb->count;
	  ei_next (&ei);
	}
    }

  /* If the remaining edge is a loop exit, there must have been a
     removed edge that was not a loop exit.

     In that case BB and possibly other blocks were previously
     in the loop, but are now outside the loop.  Thus, we need
     to update the loop structures.  */
  if (single_succ_p (bb)
      && loop_outer (bb->loop_father)
      && loop_exit_edge_p (bb->loop_father, single_succ_edge (bb)))
    loops_state_set (LOOPS_NEED_FIXUP);
}
/* Create a duplicate of BB.  Record the duplicate block in an array
   indexed by COUNT stored in RD.  */

static void
create_block_for_threading (basic_block bb,
			    struct redirection_data *rd,
			    unsigned int count,
			    bitmap *duplicate_blocks)
{
  edge_iterator ei;
  edge e;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);

  FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
    e->aux = NULL;

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_blocks[count]->frequency = 0;
  rd->dup_blocks[count]->count = profile_count::uninitialized ();
  if (duplicate_blocks)
    bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
}
/* Main data structure to hold information for duplicates of BB.  */

static hash_table<redirection_data> *redirection_data;
/* Given an outgoing edge E lookup and return its entry in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  INCOMING_EDGE is added to the list of incoming
   edges associated with E in the hash table.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->path = path;
  elt->dup_blocks[0] = NULL;
  elt->dup_blocks[1] = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data->find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
	 relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add INCOMING_EDGE
	 to the list of incoming edges associated with E.  */
      if (insert)
	{
	  struct el *el = XNEW (struct el);
	  el->next = elt->incoming_edges;
	  el->e = e;
	  elt->incoming_edges = el;
	}

      return elt;
    }
}
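/* Usage sketch: in thread_block_1 below, every incoming edge E that
   carries a jump threading path in E->aux is passed to
   lookup_redirection_data (e, INSERT), so edges sharing one threaded
   route accumulate on a single entry's incoming_edges list and later
   share that entry's dup_blocks.  */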
/* Similar to copy_phi_args, except that the PHI arg exists, it just
   does not have a value associated with it.  */

static void
copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
{
  int src_idx = src_e->dest_idx;
  int tgt_idx = tgt_e->dest_idx;

  /* Iterate over each PHI in SRC_E->dest.  */
  for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
		     gsi2 = gsi_start_phis (tgt_e->dest);
       !gsi_end_p (gsi);
       gsi_next (&gsi), gsi_next (&gsi2))
    {
      gphi *src_phi = gsi.phi ();
      gphi *dest_phi = gsi2.phi ();
      tree val = gimple_phi_arg_def (src_phi, src_idx);
      source_location locus = gimple_phi_arg_location (src_phi, src_idx);

      SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
      gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
    }
}
/* Given ssa_name DEF, backtrack jump threading PATH from node IDX
   to see if it has a constant value in a flow sensitive manner.  Set
   LOCUS to the location of the constant phi arg and return the value.
   Return DEF directly if PATH is NULL or IDX is zero.  */

static tree
get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
			 basic_block bb, int idx, source_location *locus)
{
  tree arg;
  gphi *def_phi;
  basic_block def_bb;

  if (path == NULL || idx == 0)
    return def;

  def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
  if (!def_phi)
    return def;

  def_bb = gimple_bb (def_phi);
  /* Don't propagate loop invariants into deeper loops.  */
  if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
    return def;

  /* Backtrack jump threading path from IDX to see if def has a constant
     value.  */
  for (int j = idx - 1; j >= 0; j--)
    {
      edge e = (*path)[j]->e;
      if (e->dest == def_bb)
	{
	  arg = gimple_phi_arg_def (def_phi, e->dest_idx);
	  if (is_gimple_min_invariant (arg))
	    {
	      *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
	      return arg;
	    }
	  break;
	}
    }

  return def;
}
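/* A minimal sketch of the backtracking above (block and SSA names are
   hypothetical): given the path  path[0]: A->B, path[1]: B->C  and a
   DEF x_1 defined by the PHI  x_1 = PHI <7(A), y_2(D)>  in B, walking
   backwards from IDX finds path[0] entering B, selects the PHI
   argument for A's dest_idx, and returns the constant 7 together with
   that argument's location.  */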
/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
   Try to backtrack jump threading PATH from node IDX to see if the arg
   has a constant value; if so, copy the constant value instead of the
   argument itself.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
	       vec<jump_thread_edge *> *path, int idx)
{
  gphi_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree def = gimple_phi_arg_def (phi, src_indx);
      source_location locus = gimple_phi_arg_location (phi, src_indx);

      if (TREE_CODE (def) == SSA_NAME
	  && !virtual_operand_p (gimple_phi_result (phi)))
	def = get_value_locus_in_path (def, path, bb, idx, &locus);

      add_phi_arg (phi, def, tgt_e, locus);
    }
}
/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.
   PATH and IDX are used to check if the new PHI argument has constant
   value in a flow sensitive manner.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb,
			 vec<jump_thread_edge *> *path, int idx)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2, path, idx);
    }
}
/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  IDX is the node in the jump threading path where
   we start checking whether the new PHI argument has a constant
   value along the jump threading path.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
					 basic_block bb, int idx)
{
  edge e = make_single_succ_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);

  /* We used to copy the thread path here.  That was added in 2007
     and dutifully updated through the representation changes in 2013.

     In 2013 we added code to thread from an interior node through
     the backedge to another interior node.  That runs after the code
     to thread through loop headers from outside the loop.

     The latter may delete edges in the CFG, including those
     which appeared in the jump threading path we copied here.  Thus
     we'd end up using a dangling pointer.

     After reviewing the 2007/2011 code, I can't see how anything
     depended on copying the AUX field and clearly copying the jump
     threading path is problematical due to embedded edge pointers.
     It has been removed.  */
  e->aux = NULL;

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
}
/* Look through PATH beginning at START and return TRUE if there are
   any additional blocks that need to be duplicated.  Otherwise,
   return FALSE.  */
static bool
any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
				 unsigned int start)
{
  for (unsigned int i = start + 1; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	return true;
    }

  return false;
}
/* Compute the amount of profile count/frequency coming into the jump threading
   path stored in RD that we are duplicating, returned in PATH_IN_COUNT_PTR and
   PATH_IN_FREQ_PTR, as well as the amount of counts flowing out of the
   duplicated path, returned in PATH_OUT_COUNT_PTR.  LOCAL_INFO is used to
   identify blocks duplicated for jump threading, which have duplicated
   edges that need to be ignored in the analysis.  Return true if path contains
   a joiner, false otherwise.

   In the non-joiner case, this is straightforward - all the counts/frequency
   flowing into the jump threading path should flow through the duplicated
   block and out of the duplicated path.

   In the joiner case, it is very tricky.  Some of the counts flowing into
   the original path go offpath at the joiner.  The problem is that while
   we know how much total count goes off-path in the original control flow,
   we don't know how many of the counts corresponding to just the jump
   threading path go offpath at the joiner.

   For example, assume we have the following control flow and identified
   jump threading paths:

	       A     B     C
	        \    |    /
	      Ea \   |Eb / Ec
	          \  |  /
	           v v v
	             J        <-- Joiner
	            / \
	       Eoff/   \Eon
	          v     v
	       Soff     Son   <--- Normal
	               / \
	            Ed/   \Ee
	             v     v
	             D     E

	   Jump threading paths: A -> J -> Son -> D (path 1)
				 C -> J -> Son -> E (path 2)

   Note that the control flow could be more complicated:
   - Each jump threading path may have more than one incoming edge.  I.e. A and
     Ea could represent multiple incoming blocks/edges that are included in
     path 1.
   - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
     before or after the "normal" copy block).  These are not duplicated onto
     the jump threading path, as they are single-successor.
   - Any of the blocks along the path may have other incoming edges that
     are not part of any jump threading path, but add profile counts along
     the path.

   In the above example, after all jump threading is complete, we will
   end up with the following control flow:

	      A         B            C
	      |         |            |
	    Ea|         |Eb          |Ec
	      |         |            |
	      v         v            v
	     Ja         J            Jc
	     / \       / \Eon'      / \
	Eona/   \  ---/---\--------    \Eonc
	   /     \ /  /    \            \
	  v       v  v      v            v
	Sona    Soff       Son          Sonc
	   \                /\          /
	    \___________   /  \  _____/
	                \ /    \ /
	                 v      v
	                 D      E

   The main issue to notice here is that when we are processing path 1
   (A->J->Son->D) we need to figure out the outgoing edge weights to
   the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
   sum of the incoming weights to D remain Ed.  The problem with simply
   assuming that Ja (and Jc when processing path 2) has the same outgoing
   probabilities to its successors as the original block J, is that after
   all paths are processed and other edges/counts removed (e.g. none
   of Ec will reach D after processing path 2), we may end up with not
   enough count flowing along duplicated edge Sona->D.

   Therefore, in the case of a joiner, we keep track of all counts
   coming in along the current path, as well as from predecessors not
   on any jump threading path (Eb in the above example).  While we
   first assume that the duplicated Eona for Ja->Sona has the same
   probability as the original, we later compensate for other jump
   threading paths that may eliminate edges.  We do that by keeping track
   of all counts coming into the original path that are not in a jump
   thread (Eb in the above example, but as noted earlier, there could
   be other predecessors incoming to the path at various points, such
   as at Son).  Call this cumulative non-path count coming into the path
   before D as Enonpath.  We then ensure that the count from Sona->D is
   at least as big as (Ed - Enonpath), but no bigger than the minimum
   weight along the jump threading path.  The probabilities of both the
   original and duplicated joiner block J and Ja will be adjusted
   accordingly after the updates.  */
static bool
compute_path_counts (struct redirection_data *rd,
		     ssa_local_info_t *local_info,
		     profile_count *path_in_count_ptr,
		     profile_count *path_out_count_ptr,
		     int *path_in_freq_ptr)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  profile_count nonpath_count = profile_count::zero ();
  bool has_joiner = false;
  profile_count path_in_count = profile_count::zero ();
  int path_in_freq = 0;

  /* Start by accumulating incoming edge counts to the path's first bb
     into a couple buckets:
	path_in_count: total count of incoming edges that flow into the
		  current path.
	nonpath_count: total count of incoming edges that are not
		  flowing along *any* path.  These are the counts
		  that will still flow along the original path after
		  all path duplication is done by potentially multiple
		  calls to this routine.
     (any other incoming edge counts are for a different jump threading
     path that will be handled by a later call to this routine.)
     To make this easier, start by recording all incoming edges that flow into
     the current path in a bitmap.  We could add up the path's incoming edge
     counts here, but we still need to walk all the first bb's incoming edges
     below to add up the counts of the other edges not included in this jump
     threading path.  */
  struct el *next, *el;
  auto_bitmap in_edge_srcs;
  for (el = rd->incoming_edges; el; el = next)
    {
      next = el->next;
      bitmap_set_bit (in_edge_srcs, el->e->src->index);
    }
  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
      /* Simply check the incoming edge src against the set captured above.  */
      if (ein_path
	  && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
	{
	  /* It is necessary but not sufficient that the last path edges
	     are identical.  There may be different paths that share the
	     same last path edge in the case where the last edge has a nocopy
	     source block.  */
	  gcc_assert (ein_path->last ()->e == elast);
	  path_in_count += ein->count;
	  path_in_freq += EDGE_FREQUENCY (ein);
	}
      else if (!ein_path)
	{
	  /* Keep track of the incoming edges that are not on any jump-threading
	     path.  These counts will still flow out of original path after all
	     jump threading is complete.  */
	  nonpath_count += ein->count;
	}
    }

  /* This is needed due to insane incoming frequencies.  */
  if (path_in_freq > BB_FREQ_MAX)
    path_in_freq = BB_FREQ_MAX;

  /* Now compute the fraction of the total count coming into the first
     path bb that is from the current threading path.  */
  profile_count total_count = e->dest->count;
  /* Handle incoming profile insanities.  */
  if (total_count < path_in_count)
    path_in_count = total_count;
  int onpath_scale
    = path_in_count.probability_in (total_count).to_reg_br_prob_base ();

  /* Walk the entire path to do some more computation in order to estimate
     how much of the path_in_count will flow out of the duplicated threading
     path.  In the non-joiner case this is straightforward (it should be
     the same as path_in_count, although we will handle incoming profile
     insanities by setting it equal to the minimum count along the path).

     In the joiner case, we need to estimate how much of the path_in_count
     will stay on the threading path after the joiner's conditional branch.
     We don't really know for sure how much of the counts
     associated with this path go to each successor of the joiner, but we'll
     estimate based on the fraction of the total count coming into the path
     bb that was from the threading paths (computed above in onpath_scale).
     Afterwards, we will need to do some fixup to account for other threading
     paths and possible profile insanities.

     In order to estimate the joiner case's counts we also need to update
     nonpath_count with any additional counts coming into the path.  Other
     blocks along the path may have additional predecessors from outside
     the path.  */
  profile_count path_out_count = path_in_count;
  profile_count min_path_count = path_in_count;
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      profile_count cur_count = epath->count;
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  has_joiner = true;
	  cur_count = cur_count.apply_probability (onpath_scale);
	}
      /* In the joiner case we need to update nonpath_count for any edges
	 coming into the path that will contribute to the count flowing
	 into the path successor.  */
      if (has_joiner && epath != elast)
	{
	  /* Look for other incoming edges after joiner.  */
	  FOR_EACH_EDGE (ein, ei, epath->dest->preds)
	    {
	      if (ein != epath
		  /* Ignore in edges from blocks we have duplicated for a
		     threading path, which have duplicated edge counts until
		     they are redirected by an invocation of this routine.  */
		  && !bitmap_bit_p (local_info->duplicate_blocks,
				    ein->src->index))
		nonpath_count += ein->count;
	    }
	}
      if (cur_count < path_out_count)
	path_out_count = cur_count;
      if (epath->count < min_path_count)
	min_path_count = epath->count;
    }

  /* We computed path_out_count above assuming that this path targeted
     the joiner's on-path successor with the same likelihood as it
     reached the joiner.  However, other thread paths through the joiner
     may take a different path through the normal copy source block
     (i.e. they have a different elast), meaning that they do not
     contribute any counts to this path's elast.  As a result, it may
     turn out that this path must have more count flowing to the on-path
     successor of the joiner.  Essentially, all of this path's elast
     count must be contributed by this path and any nonpath counts
     (since any path through the joiner with a different elast will not
     include a copy of this elast in its duplicated path).
     So ensure that this path's path_out_count is at least the
     difference between elast->count and nonpath_count.  Otherwise the edge
     counts after threading will not be sane.  */
  if (local_info->need_profile_correction
      && has_joiner && path_out_count < elast->count - nonpath_count)
    {
      path_out_count = elast->count - nonpath_count;
      /* But neither can we go above the minimum count along the path
	 we are duplicating.  This can be an issue due to profile
	 insanities coming in to this pass.  */
      if (path_out_count > min_path_count)
	path_out_count = min_path_count;
    }

  *path_in_count_ptr = path_in_count;
  *path_out_count_ptr = path_out_count;
  *path_in_freq_ptr = path_in_freq;
  return has_joiner;
}
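/* Illustrative arithmetic for the joiner case (the numbers are
   invented): if the path's first block has total incoming count 100,
   of which 60 arrives along edges belonging to this path, then
   onpath_scale represents 60%.  A joiner out-edge with count 50 is
   then estimated to carry 50 * 60% = 30 of this path's count, so
   path_out_count becomes 30, subject to the
   (elast->count - nonpath_count) lower bound and the min_path_count
   upper bound applied above.  */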
/* Update the counts and frequencies for both an original path
   edge EPATH and its duplicate EDUP.  The duplicate source block
   will get a count/frequency of PATH_IN_COUNT and PATH_IN_FREQ,
   and the duplicate edge EDUP will have a count of PATH_OUT_COUNT.  */
static void
update_profile (edge epath, edge edup, profile_count path_in_count,
		profile_count path_out_count, int path_in_freq)
{

  /* First update the duplicated block's count / frequency.  */
  if (edup)
    {
      basic_block dup_block = edup->src;
      gcc_assert (!dup_block->count.initialized_p ());
      gcc_assert (dup_block->frequency == 0);
      dup_block->count = path_in_count;
      dup_block->frequency = path_in_freq;
    }

  /* Now update the original block's count and frequency in the
     opposite manner - remove the counts/freq that will flow
     into the duplicated block.  Handle underflow due to precision/
     rounding issues.  */
  epath->src->count -= path_in_count;
  epath->src->frequency -= path_in_freq;
  if (epath->src->frequency < 0)
    epath->src->frequency = 0;

  /* Next update this path edge's original and duplicated counts.  We know
     that the duplicated path will have path_out_count flowing
     out of it (in the joiner case this is the count along the duplicated path
     out of the duplicated joiner).  This count can then be removed from the
     original path edge.  */
  if (edup)
    edup->count = path_out_count;
  epath->count -= path_out_count;
  /* FIXME: can epath->count be legally uninitialized here?  */
}
/* The duplicate and original joiner blocks may end up with different
   probabilities (different from both the original and from each other).
   Recompute the probabilities here once we have updated the edge
   counts and frequencies.  */

static void
recompute_probabilities (basic_block bb)
{
  edge esucc;
  edge_iterator ei;
  FOR_EACH_EDGE (esucc, ei, bb->succs)
    {
      if (!(bb->count > 0))
	continue;

      /* Prevent overflow computation due to insane profiles.  */
      if (esucc->count < bb->count)
	esucc->probability = esucc->count.probability_in (bb->count).guessed ();
      else
	/* Can happen with missing/guessed probabilities, since we
	   may determine that more is flowing along duplicated
	   path than joiner succ probabilities allowed.
	   Counts and freqs will be insane after jump threading,
	   at least make sure probability is sane or we will
	   get a flow verification error.
	   Not much we can do to make counts/freqs sane without
	   redoing the profile estimation.  */
	esucc->probability = profile_probability::guessed_always ();
    }
}
/* Update the counts of the original and duplicated edges from a joiner
   that go off path, given that we have already determined that the
   duplicate joiner DUP_BB has incoming count PATH_IN_COUNT and
   outgoing count along the path PATH_OUT_COUNT.  The original (on-)path
   edge from joiner is EPATH.  */

static void
update_joiner_offpath_counts (edge epath, basic_block dup_bb,
			      profile_count path_in_count,
			      profile_count path_out_count)
{
  /* Compute the count that currently flows off path from the joiner.
     In other words, the total count of joiner's out edges other than
     epath.  Compute this by walking the successors instead of
     subtracting epath's count from the joiner bb count, since there
     are sometimes slight insanities where the total out edge count is
     larger than the bb count (possibly due to rounding/truncation
     errors).  */
  profile_count total_orig_off_path_count = profile_count::zero ();
  edge enonpath;
  edge_iterator ei;
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      if (enonpath == epath)
	continue;
      total_orig_off_path_count += enonpath->count;
    }

  /* For the path that we are duplicating, the amount that will flow
     off path from the duplicated joiner is the delta between the
     path's cumulative in count and the portion of that count we
     estimated above as flowing from the joiner along the duplicated
     path.  */
  profile_count total_dup_off_path_count = path_in_count - path_out_count;

  /* Now do the actual updates of the off-path edges.  */
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      /* Look for edges going off of the threading path.  */
      if (enonpath == epath)
	continue;

      /* Find the corresponding edge out of the duplicated joiner.  */
      edge enonpathdup = find_edge (dup_bb, enonpath->dest);
      gcc_assert (enonpathdup);

      /* We can't use the original probability of the joiner's out
	 edges, since the probabilities of the original branch
	 and the duplicated branches may vary after all threading is
	 complete.  But apportion the duplicated joiner's off-path
	 total edge count computed earlier (total_dup_off_path_count)
	 among the duplicated off-path edges based on their original
	 ratio to the full off-path count (total_orig_off_path_count).
	 */
      int scale = enonpath->count.probability_in (total_orig_off_path_count)
		    .to_reg_br_prob_base ();
      /* Give the duplicated offpath edge a portion of the duplicated
	 total.  */
      enonpathdup->count = total_dup_off_path_count.apply_probability (scale);
      /* Now update the original offpath edge count, handling underflow
	 due to rounding errors.  */
      enonpath->count -= enonpathdup->count;
    }
}
/* Check if the paths through RD all have estimated frequencies but zero
   profile counts.  This is more accurate than checking the entry block
   for a zero profile count, since profile insanities sometimes creep in.  */

static bool
estimated_freqs_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  bool non_zero_freq = false;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      if (ein->count > 0)
	return false;
      non_zero_freq |= ein->src->frequency != 0;
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      if (epath->src->count > 0)
	return false;
      non_zero_freq |= epath->src->frequency != 0;
      edge esucc;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	{
	  if (esucc->count > 0)
	    return false;
	  non_zero_freq |= esucc->src->frequency != 0;
	}
    }
  return non_zero_freq;
}
/* Invoked for routines that have guessed frequencies and no profile
   counts to record the block and edge frequencies for paths through RD
   in the profile count fields of those blocks and edges.  This is because
   ssa_fix_duplicate_block_edges incrementally updates the block and
   edge counts as edges are redirected, and it is difficult to do that
   for edge frequencies which are computed on the fly from the source
   block frequency and probability.  When a block frequency is updated
   its outgoing edge frequencies are affected and become difficult to
   adjust.  */

static void
freqs_to_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the probability when the frequencies are very
	 small.  */
      if (ein->probability.initialized_p ())
	ein->count = profile_count::from_gcov_type
	  (apply_probability (ein->src->frequency * REG_BR_PROB_BASE,
			      ein->probability
				.to_reg_br_prob_base ())).guessed ();
      else
	/* FIXME: this is a hack; we should track uninitialized values.  */
	ein->count = profile_count::zero ();
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      edge esucc;
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the edge probability when the frequencies are very
	 small.  */
      epath->src->count =
	profile_count::from_gcov_type
	  (epath->src->frequency * REG_BR_PROB_BASE);
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count =
	  esucc->src->count.apply_probability (esucc->probability);
    }
}
/* For routines that have guessed frequencies and no profile counts, where we
   used freqs_to_counts_path to record block and edge frequencies for paths
   through RD, we clear the counts after completing all updates for RD.
   The updates in ssa_fix_duplicate_block_edges are based off the count fields,
   but the block frequencies and edge probabilities were updated as well,
   so we can simply clear the count fields.  */

static void
clear_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein, esucc;
  edge_iterator ei;
  profile_count val = profile_count::uninitialized ();
  if (profile_status_for_fn (cfun) == PROFILE_READ)
    val = profile_count::zero ();

  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    ein->count = val;

  /* First clear counts along original path.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count = val;
      epath->src->count = val;
    }
  /* Also need to clear the counts along duplicated path.  */
  for (unsigned int i = 0; i < 2; i++)
    {
      basic_block dup = rd->dup_blocks[i];
      if (!dup)
	continue;
      FOR_EACH_EDGE (esucc, ei, dup->succs)
	esucc->count = val;
      dup->count = val;
    }
}
/* Wire up the outgoing edges from the duplicate blocks and
   update any PHIs as needed.  Also update the profile counts
   on the original and duplicate blocks and edges.  */
void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
			       ssa_local_info_t *local_info)
{
  bool multi_incomings = (rd->incoming_edges->next != NULL);
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  profile_count path_in_count = profile_count::zero ();
  profile_count path_out_count = profile_count::zero ();
  int path_in_freq = 0;

  /* This routine updates profile counts, frequencies, and probabilities
     incrementally.  Since it is difficult to do the incremental updates
     using frequencies/probabilities alone, for routines without profile
     data we first take a snapshot of the existing block and edge frequencies
     by copying them into the empty profile count fields.  These counts are
     then used to do the incremental updates, and cleared at the end of this
     routine.  If the function is marked as having a profile, we still check
     to see if the paths through RD are using estimated frequencies because
     the routine had zero profile counts.  */
  bool do_freqs_to_counts = (profile_status_for_fn (cfun) != PROFILE_READ
			     || estimated_freqs_path (rd));
  if (do_freqs_to_counts)
    freqs_to_counts_path (rd);

  /* First determine how much profile count to move from original
     path to the duplicate path.  This is tricky in the presence of
     a joiner (see comments for compute_path_counts), where some portion
     of the path's counts will flow off-path from the joiner.  In the
     non-joiner case the path_in_count and path_out_count should be the
     same.  */
  bool has_joiner = compute_path_counts (rd, local_info,
					 &path_in_count, &path_out_count,
					 &path_in_freq);

  int cur_path_freq = path_in_freq;
  for (unsigned int count = 0, i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;

      /* If we were threading through a joiner block, then we want
	 to keep its control statement and redirect an outgoing edge.
	 Else we want to remove the control statement & edges, then create
	 a new outgoing edge.  In both cases we may need to update PHIs.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  edge victim;
	  edge e2;

	  gcc_assert (has_joiner);

	  /* This updates the PHIs at the destination of the duplicate
	     block.  Pass 0 instead of i if we are threading a path which
	     has multiple incoming edges.  */
	  update_destination_phis (local_info->bb, rd->dup_blocks[count],
				   path, multi_incomings ? 0 : i);

	  /* Find the edge from the duplicate block to the block we're
	     threading through.  That's the edge we want to redirect.  */
	  victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);

	  /* If there are no remaining blocks on the path to duplicate,
	     then redirect VICTIM to the final destination of the jump
	     threading path.  */
	  if (!any_remaining_duplicated_blocks (path, i))
	    {
	      e2 = redirect_edge_and_branch (victim, elast->dest);
	      /* If we redirected the edge, then we need to copy PHI arguments
		 at the target.  If the edge already existed (e2 != victim
		 case), then the PHIs in the target already have the correct
		 arguments.  */
	      if (e2 == victim)
		copy_phi_args (e2->dest, elast, e2,
			       path, multi_incomings ? 0 : i);
	    }
	  else
	    {
	      /* Redirect VICTIM to the next duplicated block in the path.  */
	      e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);

	      /* We need to update the PHIs in the next duplicated block.  We
		 want the new PHI args to have the same value as they had
		 in the source of the next duplicate block.

		 Thus, we need to know which edge we traversed into the
		 source of the duplicate.  Furthermore, we may have
		 traversed many edges to reach the source of the duplicate.

		 Walk through the path starting at element I until we
		 hit an edge marked with EDGE_COPY_SRC_BLOCK.  We want
		 the edge from the prior element.  */
	      for (unsigned int j = i + 1; j < path->length (); j++)
		{
		  if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
		    {
		      copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
		      break;
		    }
		}
	    }

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  The path duplicate's
	     incoming count and frequency are the totals for all edges
	     incoming to this jump threading path computed earlier.
	     And we know that the duplicated path will have path_out_count
	     flowing out of it (i.e. along the duplicated path out of the
	     duplicated joiner).  */
	  update_profile (epath, e2, path_in_count, path_out_count,
			  path_in_freq);

	  /* Next we need to update the counts of the original and duplicated
	     edges from the joiner that go off path.  */
	  update_joiner_offpath_counts (epath, e2->src, path_in_count,
					path_out_count);

	  /* Finally, we need to set the probabilities on the duplicated
	     edges out of the duplicated joiner (e2->src).  The probabilities
	     along the original path will all be updated below after we finish
	     processing the whole path.  */
	  recompute_probabilities (e2->src);

	  /* Record the frequency flowing to the downstream duplicated
	     path blocks.  */
	  cur_path_freq = EDGE_FREQUENCY (e2);
	}
      else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
	  create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
						   multi_incomings ? 0 : i);
	  if (count == 1)
	    single_succ_edge (rd->dup_blocks[1])->aux = NULL;

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  Since we are now after
	     any joiner that may have existed on the path, the count
	     flowing along the duplicated threaded path is path_out_count.
	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	  update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
			  path_out_count, path_out_count,
			  cur_path_freq);
	}
      else
	{
	  /* No copy case.  In this case we don't have an equivalent block
	     on the duplicated thread path to update, but we do need
	     to remove the portion of the counts/freqs that were moved
	     to the duplicated path from the counts/freqs flowing through
	     this block on the original path.  Since all the no-copy edges
	     are after any joiner, the removed count is the same as
	     path_out_count.

	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	  update_profile (epath, NULL, path_out_count, path_out_count,
			  cur_path_freq);
	}

      /* Increment the index into the duplicated path when we processed
	 a duplicated block.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  count++;
	}
    }

  /* Now walk orig blocks and update their probabilities, since the
     counts and freqs should be updated properly by above loop.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      recompute_probabilities (epath->src);
    }

  /* Done with all profile and frequency updates, clear counts if they
     were copied.  */
  if (do_freqs_to_counts)
    clear_counts_path (rd);
}
/* Hash table traversal callback routine to create duplicate blocks.  */

int
ssa_create_duplicates (struct redirection_data **slot,
		       ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* The second duplicated block in a jump threading path is specific
     to the path.  So it gets stored in RD rather than in LOCAL_DATA.

     Each time we're called, we have to look through the path and see
     if a second block needs to be duplicated.

     Note the search starts with the third edge on the path.  The first
     edge is the incoming edge, the second edge always has its source
     duplicated.  Thus we start our search with the third edge.  */
  vec<jump_thread_edge *> *path = rd->path;
  for (unsigned int i = 2; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  create_block_for_threading ((*path)[i]->e->src, rd, 1,
				      &local_info->duplicate_blocks);
	  break;
	}
    }

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading ((*path)[1]->e->src, rd, 0,
				  &local_info->duplicate_blocks);
      local_info->template_block = rd->dup_blocks[0];

      /* We do not create any outgoing edges for the template.  We will
	 take care of that in a later traversal.  That way we do not
	 create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd, 0,
				  &local_info->duplicate_blocks);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
	 block.  */
      ssa_fix_duplicate_block_edges (rd, local_info);
    }

  /* Keep walking the hash table.  */
  return 1;
}
/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

inline int
ssa_fixup_template_block (struct redirection_data **slot,
			  ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* If this is the template block halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
    {
      ssa_fix_duplicate_block_edges (rd, local_info);
      return 0;
    }

  return 1;
}
/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

int
ssa_redirect_edges (struct redirection_data **slot,
		    ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this hash table
     entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      /* Go ahead and free this element from the list.  Doing this now
	 avoids the need for another list walk when we destroy the hash
	 table.  */
      next = el->next;
      free (el);

      thread_stats.num_threaded_edges++;

      if (rd->dup_blocks[0])
	{
	  edge e2;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
		     e->src->index, e->dest->index, rd->dup_blocks[0]->index);

	  /* Redirect the incoming edge (possibly to the joiner block) to the
	     appropriate duplicate block.  */
	  e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
	  gcc_assert (e == e2);
	  flush_pending_stmts (e2);
	}

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
	 to clear it will cause all kinds of unpleasant problems later.  */
      delete_jump_thread_path (path);
      e->aux = NULL;
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}
/* Return true if this block has no executable statements other than
   a simple ctrl flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Advance to the first executable statement.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     || is_gimple_debug (gsi_stmt (gsi))
	     || gimple_nop_p (gsi_stmt (gsi))
	     || gimple_clobber_p (gsi_stmt (gsi))))
    gsi_next (&gsi);

  /* Check if this is an empty block.  */
  if (gsi_end_p (gsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return gsi_stmt (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}
/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   If NOLOOP_ONLY is true, we only perform the threading as long as it
   does not affect the structure of the loops in a nontrivial way.

   If JOINERS is true, then thread through joiner blocks as well.  */
static bool
thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e, e2;
  edge_iterator ei;
  ssa_local_info_t local_info;

  local_info.duplicate_blocks = BITMAP_ALLOC (NULL);
  local_info.need_profile_correction = false;

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data
    = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  edge last = NULL;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->aux == NULL)
	continue;

      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
	  || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
	continue;

      e2 = path->last ()->e;
      if (!e2 || noloop_only)
	{
	  /* If NOLOOP_ONLY is true, we only allow threading through the
	     header of a loop to exit edges.  */

	  /* One case occurs when there was a loop header buried in a jump
	     threading path that crosses loop boundaries.  We do not try
	     and thread this elsewhere, so just cancel the jump threading
	     request by clearing the AUX field now.  */
	  if (bb->loop_father != e2->src->loop_father
	      && !loop_exit_edge_p (e2->src->loop_father, e2))
	    {
	      /* Since this case is not handled by our special code
		 to thread through a loop header, we must explicitly
		 cancel the threading request here.  */
	      delete_jump_thread_path (path);
	      e->aux = NULL;
	      continue;
	    }

	  /* Another case occurs when trying to thread through our
	     own loop header, possibly from inside the loop.  We will
	     thread these later.  */
	  unsigned int i;
	  for (i = 1; i < path->length (); i++)
	    {
	      if ((*path)[i]->e->src == bb->loop_father->header
		  && (!loop_exit_edge_p (bb->loop_father, e2)
		      || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
		break;
	    }

	  if (i != path->length ())
	    continue;
	}

      /* Insert the outgoing edge into the hash table if it is not
	 already in the hash table.  */
      lookup_redirection_data (e, INSERT);

      /* When we have thread paths through a common joiner with different
	 final destinations, then we may need corrections to deal with
	 profile insanities.  See the big comment before compute_path_counts.  */
      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  if (!last)
	    last = e2;
	  else if (e2 != last)
	    local_info.need_profile_correction = true;
	}
    }

  /* We do not update dominance info.  */
  free_dominance_info (CDI_DOMINATORS);

  /* We know we only thread through the loop header to loop exits.
     Let the basic block duplication hook know we are not creating
     a multiple entry loop.  */
  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
    (&local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes as the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
    (&local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
    (&local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  delete redirection_data;
  redirection_data = NULL;

  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, NULL);

  BITMAP_FREE (local_info.duplicate_blocks);
  local_info.duplicate_blocks = NULL;

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}
/* Wrapper for thread_block_1 so that we can first handle jump
   thread paths which do not involve copying joiner blocks, then
   handle jump thread paths which have joiner blocks.

   By doing things this way we can be as aggressive as possible and
   not worry that copying a joiner block will create a jump threading
   opportunity.  */

static bool
thread_block (basic_block bb, bool noloop_only)
{
  bool retval;
  retval = thread_block_1 (bb, noloop_only, false);
  retval |= thread_block_1 (bb, noloop_only, true);
  return retval;
}
/* Callback for dfs_enumerate_from.  Returns true if BB is different
   from STOP and DBDS_CE_STOP.  */

static basic_block dbds_ce_stop;
static bool
dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
{
  return (bb != (const_basic_block) stop
          && bb != dbds_ce_stop);
}
/* Evaluates the dominance relationship between the latch of LOOP and BB,
   and returns the resulting state.  */

enum bb_dom_status
determine_bb_domination_status (struct loop *loop, basic_block bb)
{
  basic_block *bblocks;
  unsigned nblocks, i;
  bool bb_reachable = false;
  edge_iterator ei;
  edge e;

  /* This function assumes BB is a successor of LOOP->header.
     If that is not the case return DOMST_NONDOMINATING which
     is always safe.  */
    {
      bool ok = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->src == loop->header)
            {
              ok = true;
              break;
            }
        }

      if (!ok)
        return DOMST_NONDOMINATING;
    }

  if (bb == loop->latch)
    return DOMST_DOMINATING;

  /* Check that BB dominates LOOP->latch, and that it is back-reachable
     from it.  */

  bblocks = XCNEWVEC (basic_block, loop->num_nodes);
  dbds_ce_stop = loop->header;
  nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
                                bblocks, loop->num_nodes, bb);
  for (i = 0; i < nblocks; i++)
    FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
      {
        if (e->src == loop->header)
          {
            free (bblocks);
            return DOMST_NONDOMINATING;
          }
        if (e->src == bb)
          bb_reachable = true;
      }

  free (bblocks);
  return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
}
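/* For illustration (a hypothetical CFG, not from a testcase): if the loop
   header ends in "if (c) goto B1; else goto B2;" and both B1 and B2 can
   reach the latch, then neither B1 nor B2 dominates the latch, so the
   function above returns DOMST_NONDOMINATING for either block and
   threading into the loop is rejected.  */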
/* Thread jumps through the header of LOOP.  Returns true if cfg changes.
   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
   to the inside of the loop.  */

static bool
thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
{
  basic_block header = loop->header;
  edge e, tgt_edge, latch = loop_latch_edge (loop);
  edge_iterator ei;
  basic_block tgt_bb, atgt_bb;
  enum bb_dom_status domst;

  /* We have already threaded through headers to exits, so all the threading
     requests now are to the inside of the loop.  We need to avoid creating
     irreducible regions (i.e., loops with more than one entry block), and
     also loops with several latch edges, or new subloops of the loop
     (although there are cases where it might be appropriate, it is
     difficult to decide, and doing it wrongly may confuse other
     optimizers).

     We could handle more general cases here.  However, the intention is to
     preserve some information about the loop, which is impossible if its
     structure changes significantly, in a way that is not well understood.
     Thus we only handle a few important special cases, in which updating
     of the loop-carried information should also be feasible:

     1) Propagation of the latch edge to a block that dominates the latch
        block of a loop.  This aims to handle the following idiom:

        first = 1;
        while (1)
          {
            if (first)
              initialize;
            first = 0;
            body;
          }

        After threading the latch edge, this becomes

        first = 1;
        if (first)
          initialize;
        while (1)
          {
            first = 0;
            body;
          }

        The original header of the loop is moved out of it, and we may thread
        the remaining edges through it without further constraints.

     2) All entry edges are propagated to a single basic block that dominates
        the latch block of the loop.  This aims to handle the following idiom
        (normally created for "for" loops):

        i = 0;
        while (1)
          {
            if (i >= 100)
              break;
            body;
            i++;
          }

        This becomes

        i = 0;
        while (1)
          {
            body;
            i++;
            if (i >= 100)
              break;
          }
   */

  /* Threading through the header won't improve the code if the header has
     just one successor.  */
  if (single_succ_p (header))
    goto fail;

  if (!may_peel_loop_headers && !redirection_block_p (loop->header))
    goto fail;
  else
    {
      tgt_bb = NULL;
      tgt_edge = NULL;
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (!e->aux)
            {
              if (e == latch)
                continue;

              /* If the latch is not threaded, and there is a header
                 edge that is not threaded, we would create a loop
                 with multiple entries.  */
              goto fail;
            }

          vec<jump_thread_edge *> *path = THREAD_PATH (e);

          if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
            goto fail;
          tgt_edge = (*path)[1]->e;
          atgt_bb = tgt_edge->dest;
          if (!tgt_bb)
            tgt_bb = atgt_bb;
          /* Two targets of threading would make us create a loop
             with multiple entries.  */
          else if (tgt_bb != atgt_bb)
            goto fail;
        }

      if (!tgt_bb)
        {
          /* There are no threading requests.  */
          return false;
        }

      /* Redirecting to an empty loop latch is useless.  */
      if (tgt_bb == loop->latch
          && empty_block_p (loop->latch))
        goto fail;
    }

  /* The target block must dominate the loop latch, otherwise we would be
     creating a subloop.  */
  domst = determine_bb_domination_status (loop, tgt_bb);
  if (domst == DOMST_NONDOMINATING)
    goto fail;
  if (domst == DOMST_LOOP_BROKEN)
    {
      /* If the loop ceased to exist, mark it as such, and thread through its
         original header.  */
      mark_loop_for_removal (loop);
      return thread_block (header, false);
    }

  if (tgt_bb->loop_father->header == tgt_bb)
    {
      /* If the target of the threading is a header of a subloop, we need
         to create a preheader for it, so that the headers of the two loops
         do not merge.  */
      if (EDGE_COUNT (tgt_bb->preds) > 2)
        {
          tgt_bb = create_preheader (tgt_bb->loop_father, 0);
          gcc_assert (tgt_bb != NULL);
        }
      else
        tgt_bb = split_edge (tgt_edge);
    }

  basic_block new_preheader;

  /* Now consider the case where entry edges are redirected to the new entry
     block.  Remember one entry edge, so that we can find the new
     preheader (its destination after threading).  */
  FOR_EACH_EDGE (e, ei, header->preds)
    {
      if (e->aux)
        break;
    }

  /* The duplicate of the header is the new preheader of the loop.  Ensure
     that it is placed correctly in the loop hierarchy.  */
  set_loop_copy (loop, loop_outer (loop));

  thread_block (header, false);
  set_loop_copy (loop, NULL);
  new_preheader = e->dest;

  /* Create the new latch block.  This is always necessary, as the latch
     must have only a single successor, but the original header had at
     least two successors.  */
  loop->latch = NULL;
  mfb_kj_edge = single_succ_edge (new_preheader);
  loop->header = mfb_kj_edge->dest;
  latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
  loop->header = latch->dest;
  loop->latch = latch->src;
  return true;

fail:
  /* We failed to thread anything.  Cancel the requests.  */
  FOR_EACH_EDGE (e, ei, header->preds)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      if (path)
        {
          delete_jump_thread_path (path);
          e->aux = NULL;
        }
    }
  return false;
}
/* E1 and E2 are edges into the same basic block.  Return TRUE if the
   PHI arguments associated with those edges are equal or there are no
   PHI arguments, otherwise return FALSE.  */

static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
  gphi_iterator gsi;
  int indx1 = e1->dest_idx;
  int indx2 = e2->dest_idx;

  for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
                            gimple_phi_arg_def (phi, indx2), 0))
        return false;
    }
  return true;
}
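/* For example (a sketch in GIMPLE-like notation): if E1 and E2 both
   enter a block whose only PHI is

     x_1 = PHI <5 (E1->src), 5 (E2->src)>

   then phi_args_equal_on_edges returns true; had the two arguments been
   different constants or different SSA names, it would return false.  */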
/* Walk through the registered jump threads and convert them into a
   form convenient for this pass.

   Any block which has incoming edges threaded to outgoing edges
   will have its entry in THREADED_BLOCK set.

   Any threaded edge will have its new outgoing edge stored in the
   original edge's AUX field.

   This form avoids the need to walk all the edges in the CFG to
   discover blocks which need processing and avoids unnecessary
   hash table lookups to map from threaded edge to new target.  */

static void
mark_threaded_blocks (bitmap threaded_blocks)
{
  unsigned int i;
  bitmap_iterator bi;
  auto_bitmap tmp;
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* It is possible to have jump threads in which one is a subpath
     of the other.  I.e., (A, B), (B, C), (C, D) where B is a joiner
     block and (B, C), (C, D) where no joiner block exists.

     When this occurs ignore the jump thread request with the joiner
     block.  It's totally subsumed by the simpler jump thread request.

     This results in less block copying and simpler CFGs.  More importantly,
     when we duplicate the joiner block, B, in this case we will create
     a new threading opportunity that we wouldn't be able to optimize
     until the next jump threading iteration.

     So first convert the jump thread requests which do not require a
     joiner block.  */
  for (i = 0; i < paths.length (); i++)
    {
      vec<jump_thread_edge *> *path = paths[i];

      if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
        {
          edge e = (*path)[0]->e;
          e->aux = (void *)path;
          bitmap_set_bit (tmp, e->dest->index);
        }
    }

  /* Now iterate again, converting cases where we want to thread
     through a joiner block, but only if no other edge on the path
     already has a jump thread attached to it.  We do this in two passes,
     to avoid situations where the order in the paths vec can hide
     overlapping threads (the path is recorded on the incoming edge, so we
     would miss cases where the second path starts at a downstream edge on
     the same path).  First record all joiner paths, deleting any in the
     unexpected case where there is already a path for that incoming
     edge.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];

      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
        {
          /* Attach the path to the starting edge if none is yet
             recorded.  */
          if ((*path)[0]->e->aux == NULL)
            {
              (*path)[0]->e->aux = path;
              i++;
            }
          else
            {
              paths.unordered_remove (i);
              if (dump_file && (dump_flags & TDF_DETAILS))
                dump_jump_thread_path (dump_file, *path, false);
              delete_jump_thread_path (path);
            }
        }
      else
        {
          i++;
        }
    }

  /* Second, look for paths that have any other jump thread attached to
     them, and either finish converting them or cancel them.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];
      edge e = (*path)[0]->e;

      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
        {
          unsigned int j;
          for (j = 1; j < path->length (); j++)
            if ((*path)[j]->e->aux != NULL)
              break;

          /* If we iterated through the entire path without exiting the
             loop, then we are good to go, record it.  */
          if (j == path->length ())
            {
              bitmap_set_bit (tmp, e->dest->index);
              i++;
            }
          else
            {
              e->aux = NULL;
              paths.unordered_remove (i);
              if (dump_file && (dump_flags & TDF_DETAILS))
                dump_jump_thread_path (dump_file, *path, false);
              delete_jump_thread_path (path);
            }
        }
      else
        {
          i++;
        }
    }

  /* If optimizing for size, only thread through a block if we don't have
     to duplicate it or it's an otherwise empty redirection block.  */
  if (optimize_function_for_size_p (cfun))
    {
      EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
        {
          bb = BASIC_BLOCK_FOR_FN (cfun, i);
          if (EDGE_COUNT (bb->preds) > 1
              && !redirection_block_p (bb))
            {
              FOR_EACH_EDGE (e, ei, bb->preds)
                {
                  if (e->aux)
                    {
                      vec<jump_thread_edge *> *path = THREAD_PATH (e);
                      delete_jump_thread_path (path);
                      e->aux = NULL;
                    }
                }
            }
          else
            bitmap_set_bit (threaded_blocks, i);
        }
    }
  else
    bitmap_copy (threaded_blocks, tmp);

  /* If we have a joiner block (J) which has two successors S1 and S2 and
     we are threading through S1 and the final destination of the thread
     is S2, then we must verify that any PHI nodes in S2 have the same
     PHI arguments for the edge J->S2 and J->S1->...->S2.

     We used to detect this prior to registering the jump thread, but
     that prohibits propagation of edge equivalences into non-dominated
     PHI nodes as the equivalency test might occur before propagation.

     This must also occur after we truncate any jump threading paths
     as this scenario may only show up after truncation.

     This works for now, but will need improvement as part of the FSM
     optimization.

     Note since we've moved the thread request data to the edges,
     we have to iterate on those rather than the threaded_edges vector.  */
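  /* For example (a sketch): if S2 contains "v_2 = PHI <0 (J), 1 (S1)>",
     then threading J->S1->...->S2 would deliver 1 to V_2 where the direct
     edge J->S2 delivers 0, so such a path must be cancelled below.  */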
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->aux)
            {
              vec<jump_thread_edge *> *path = THREAD_PATH (e);
              bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);

              if (have_joiner)
                {
                  basic_block joiner = e->dest;
                  edge final_edge = path->last ()->e;
                  basic_block final_dest = final_edge->dest;
                  edge e2 = find_edge (joiner, final_dest);

                  if (e2 && !phi_args_equal_on_edges (e2, final_edge))
                    {
                      delete_jump_thread_path (path);
                      e->aux = NULL;
                    }
                }
            }
        }
    }

  /* Look for jump threading paths which cross multiple loop headers.

     The code to thread through loop headers will change the CFG in ways
     that invalidate the cached loop iteration information.  So we must
     detect that case and wipe the cached information.  */
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->aux)
            {
              vec<jump_thread_edge *> *path = THREAD_PATH (e);

              for (unsigned int i = 0, crossed_headers = 0;
                   i < path->length ();
                   i++)
                {
                  basic_block dest = (*path)[i]->e->dest;
                  basic_block src = (*path)[i]->e->src;
                  /* If we enter a loop.  */
                  if (flow_loop_nested_p (src->loop_father, dest->loop_father))
                    ++crossed_headers;
                  /* If we step from a block outside an irreducible region
                     to a block inside an irreducible region, then we have
                     crossed into a loop.  */
                  else if (! (src->flags & BB_IRREDUCIBLE_LOOP)
                           && (dest->flags & BB_IRREDUCIBLE_LOOP))
                    ++crossed_headers;
                  if (crossed_headers > 1)
                    {
                      vect_free_loop_info_assumptions
                        ((*path)[path->length () - 1]->e->dest->loop_father);
                      break;
                    }
                }
            }
        }
    }
}
/* Verify that the REGION is a valid jump thread.  A jump thread is a
   special case of a SEME (Single Entry Multiple Exits) region in which
   all nodes in the REGION have exactly one incoming edge.  The only
   exception is the first block that may not have been connected to the
   rest of the cfg yet.  */

DEBUG_FUNCTION void
verify_jump_thread (basic_block *region, unsigned n_region)
{
  for (unsigned i = 0; i < n_region; i++)
    gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
}
/* Return true when BB is one of the first N items in BBS.  */

static inline bool
bb_in_bbs (basic_block bb, basic_block *bbs, int n)
{
  for (int i = 0; i < n; i++)
    if (bb == bbs[i])
      return true;

  return false;
}
/* Duplicates a jump-thread path of N_REGION basic blocks.
   The ENTRY edge is redirected to the duplicate of the region.

   Remove the last conditional statement in the last basic block in the
   REGION, and create a single fallthru edge pointing to the same
   destination as the EXIT edge.

   The new basic blocks are stored to REGION_COPY in the same order as they
   had in REGION, provided that REGION_COPY is not NULL.

   Returns false if it is unable to copy the region, true otherwise.  */

static bool
duplicate_thread_path (edge entry, edge exit,
                       basic_block *region, unsigned n_region,
                       basic_block *region_copy)
{
  unsigned i;
  bool free_region_copy = false;
  struct loop *loop = entry->dest->loop_father;
  edge exit_copy;
  edge redirected;
  int curr_freq;
  profile_count curr_count;

  if (!can_copy_bbs_p (region, n_region))
    return false;

  /* Some sanity checking.  Note that we do not check for all possible
     misuses of the functions.  I.e. if you ask to copy something weird,
     it will work, but the state of structures probably will not be
     correct.  */
  for (i = 0; i < n_region; i++)
    {
      /* We do not handle subloops, i.e. all the blocks must belong to the
         same loop.  */
      if (region[i]->loop_father != loop)
        return false;
    }

  initialize_original_copy_tables ();

  set_loop_copy (loop, loop);

  if (!region_copy)
    {
      region_copy = XNEWVEC (basic_block, n_region);
      free_region_copy = true;
    }

  copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
            split_edge_bb_loc (entry), false);

  /* Fix up: copy_bbs redirects all edges pointing to copied blocks.  The
     following code ensures that all the edges exiting the jump-thread path
     are redirected back to the original code: these edges are exceptions
     invalidating the property that is propagated by executing all the
     blocks of the jump-thread path in order.  */

  curr_count = entry->count;
  curr_freq = EDGE_FREQUENCY (entry);

  for (i = 0; i < n_region; i++)
    {
      edge e;
      edge_iterator ei;
      basic_block bb = region_copy[i];

      /* Watch inconsistent profile.  */
      if (curr_count > region[i]->count)
        curr_count = region[i]->count;
      if (curr_freq > region[i]->frequency)
        curr_freq = region[i]->frequency;
      /* Scale current BB.  */
      if (region[i]->count > 0 && curr_count.initialized_p ())
        {
          /* In the middle of the path we only scale the frequencies.
             In the last BB we need to update probabilities of outgoing
             edges because we know which one is taken along the threaded
             path.  */
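          /* For instance (hypothetical numbers): if CURR_COUNT is 40 while
             region[i]->count is 100, the original block keeps 60/100 of
             its profile and the copy is scaled to the 40 that flows along
             the threaded path.  */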
          if (i + 1 != n_region)
            scale_bbs_frequencies_profile_count (region + i, 1,
                                                 region[i]->count - curr_count,
                                                 region[i]->count);
          else
            update_bb_profile_for_threading (region[i],
                                             curr_freq, curr_count,
                                             exit);
          scale_bbs_frequencies_profile_count (region_copy + i, 1, curr_count,
                                               region_copy[i]->count);
        }
      else if (region[i]->frequency)
        {
          if (i + 1 != n_region)
            scale_bbs_frequencies_int (region + i, 1,
                                       region[i]->frequency - curr_freq,
                                       region[i]->frequency);
          else
            update_bb_profile_for_threading (region[i],
                                             curr_freq, curr_count,
                                             exit);
          scale_bbs_frequencies_int (region_copy + i, 1, curr_freq,
                                     region_copy[i]->frequency);
        }

      if (single_succ_p (bb))
        {
          /* Make sure the successor is the next node in the path.  */
          gcc_assert (i + 1 == n_region
                      || region_copy[i + 1] == single_succ_edge (bb)->dest);
          if (i + 1 != n_region)
            {
              curr_freq = EDGE_FREQUENCY (single_succ_edge (bb));
              curr_count = single_succ_edge (bb)->count;
            }
          continue;
        }

      /* Special case the last block on the path: make sure that it does not
         jump back on the copied path, including back to itself.  */
      if (i + 1 == n_region)
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (bb_in_bbs (e->dest, region_copy, n_region))
              {
                basic_block orig = get_bb_original (e->dest);
                if (orig)
                  redirect_edge_and_branch_force (e, orig);
              }
          continue;
        }

      /* Redirect all other edges jumping to non-adjacent blocks back to the
         original code.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (region_copy[i + 1] != e->dest)
          {
            basic_block orig = get_bb_original (e->dest);
            if (orig)
              redirect_edge_and_branch_force (e, orig);
          }
        else
          {
            curr_freq = EDGE_FREQUENCY (e);
            curr_count = e->count;
          }
    }

  if (flag_checking)
    verify_jump_thread (region_copy, n_region);

  /* Remove the last branch in the jump thread path.  */
  remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);

  /* And fixup the flags on the single remaining edge.  */
  edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
  fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
  fix_e->flags |= EDGE_FALLTHRU;

  edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);

  if (e)
    {
      rescan_loop_exit (e, true, false);
      e->probability = profile_probability::always ();
      e->count = region_copy[n_region - 1]->count;
    }

  /* Redirect the entry and add the phi node arguments.  */
  if (entry->dest == loop->header)
    mark_loop_for_removal (loop);
  redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
  gcc_assert (redirected != NULL);
  flush_pending_stmts (entry);

  /* Add the other PHI node arguments.  */
  add_phi_args_after_copy (region_copy, n_region, NULL);

  if (free_region_copy)
    free (region_copy);

  free_original_copy_tables ();
  return true;
}
/* Return true when PATH is a valid jump-thread path.  */

static bool
valid_jump_thread_path (vec<jump_thread_edge *> *path)
{
  unsigned len = path->length ();

  /* Check that the path is connected.  */
  for (unsigned int j = 0; j < len - 1; j++)
    {
      edge e = (*path)[j]->e;
      if (e->dest != (*path)[j+1]->e->src)
        return false;
    }
  return true;
}
/* Remove any queued jump threads that include edge E.

   We don't actually remove them here, just record the edges into a
   hash table.  That way we can do the search once per iteration of
   DOM/VRP rather than for every case where DOM optimizes away a
   COND_EXPR.  */

void
remove_jump_threads_including (edge_def *e)
{
  if (!paths.exists ())
    return;

  if (!removed_edges)
    removed_edges = new hash_table<struct removed_edges> (17);

  edge *slot = removed_edges->find_slot (e, INSERT);
  *slot = e;
}
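/* A typical usage sketch (illustrative; the actual call sites live in
   the consumers of this interface, such as the DOM and VRP passes):
   when an optimization removes or redirects edge E, it calls
   remove_jump_threads_including (e) so that any queued path containing
   E is dropped before thread_through_all_blocks realizes the remaining
   requests.  */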
/* Walk through all blocks and thread incoming edges to the appropriate
   outgoing edge for each edge pair recorded in THREADED_EDGES.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
   loop headers if it does not simplify the loop.

   Returns true if one or more edges were threaded, false otherwise.  */

bool
thread_through_all_blocks (bool may_peel_loop_headers)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;
  struct loop *loop;
  auto_bitmap threaded_blocks;

  if (!paths.exists ())
    {
      retval = false;
      goto out;
    }

  memset (&thread_stats, 0, sizeof (thread_stats));

  /* Remove any paths that referenced removed edges.  */
  if (removed_edges)
    for (i = 0; i < paths.length (); )
      {
        unsigned int j;
        vec<jump_thread_edge *> *path = paths[i];

        for (j = 0; j < path->length (); j++)
          {
            edge e = (*path)[j]->e;
            if (removed_edges->find_slot (e, NO_INSERT))
              break;
          }

        if (j != path->length ())
          {
            delete_jump_thread_path (path);
            paths.unordered_remove (i);
            continue;
          }
        i++;
      }

  /* Jump-thread all FSM threads before other jump-threads.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];
      edge entry = (*path)[0]->e;

      /* Only code-generate FSM jump-threads in this loop.  */
      if ((*path)[0]->type != EDGE_FSM_THREAD)
        {
          i++;
          continue;
        }

      /* Do not jump-thread twice from the same block.  */
      if (bitmap_bit_p (threaded_blocks, entry->src->index)
          /* We may not want to realize this jump thread path
             for various reasons.  So check it first.  */
          || !valid_jump_thread_path (path))
        {
          /* Remove invalid FSM jump-thread paths.  */
          delete_jump_thread_path (path);
          paths.unordered_remove (i);
          continue;
        }

      unsigned len = path->length ();
      edge exit = (*path)[len - 1]->e;
      basic_block *region = XNEWVEC (basic_block, len - 1);

      for (unsigned int j = 0; j < len - 1; j++)
        region[j] = (*path)[j]->e->dest;

      if (duplicate_thread_path (entry, exit, region, len - 1, NULL))
        {
          /* We do not update dominance info.  */
          free_dominance_info (CDI_DOMINATORS);
          bitmap_set_bit (threaded_blocks, entry->src->index);
          retval = true;
          thread_stats.num_threaded_edges++;
        }

      delete_jump_thread_path (path);
      paths.unordered_remove (i);
      free (region);
    }

  /* Remove from PATHS all the jump-threads starting with an edge already
     jump-threaded.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];
      edge entry = (*path)[0]->e;

      /* Do not jump-thread twice from the same block.  */
      if (bitmap_bit_p (threaded_blocks, entry->src->index))
        {
          delete_jump_thread_path (path);
          paths.unordered_remove (i);
        }
      else
        i++;
    }

  bitmap_clear (threaded_blocks);

  mark_threaded_blocks (threaded_blocks);

  initialize_original_copy_tables ();

  /* First perform the threading requests that do not affect
     loop structure.  */
  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);

      if (EDGE_COUNT (bb->preds) > 0)
        retval |= thread_block (bb, true);
    }

  /* Then perform the threading through loop headers.  We start with the
     innermost loop, so that the changes in the CFG we perform won't affect
     further threading.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      if (!loop->header
          || !bitmap_bit_p (threaded_blocks, loop->header->index))
        continue;

      retval |= thread_through_loop_header (loop, may_peel_loop_headers);
    }

  /* All jump threading paths should have been resolved at this
     point.  Verify that is the case.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      edge_iterator ei;
      edge e;
      FOR_EACH_EDGE (e, ei, bb->preds)
        gcc_assert (e->aux == NULL);
    }

  statistics_counter_event (cfun, "Jumps threaded",
                            thread_stats.num_threaded_edges);

  free_original_copy_tables ();

  paths.release ();

  if (retval)
    loops_state_set (LOOPS_NEED_FIXUP);

 out:
  delete removed_edges;
  removed_edges = NULL;
  return retval;
}
/* Delete the jump threading path PATH.  We have to explicitly delete
   each entry in the vector, then the container.  */

void
delete_jump_thread_path (vec<jump_thread_edge *> *path)
{
  for (unsigned int i = 0; i < path->length (); i++)
    delete (*path)[i];
  path->release ();
  delete path;
}
/* Register a jump threading opportunity.  We queue up all the jump
   threading opportunities discovered by a pass and update the CFG
   and SSA form all at once.

   PATH describes the jump thread: its first entry holds the edge into
   the path and its last entry the outgoing edge to ultimately follow.
   We are effectively recording that the destination of the first edge
   can be changed to the destination of the last edge after fixing the
   SSA graph.  */

void
register_jump_thread (vec<jump_thread_edge *> *path)
{
  if (!dbg_cnt (registered_jump_thread))
    {
      delete_jump_thread_path (path);
      return;
    }

  /* First make sure there are no NULL outgoing edges on the jump threading
     path.  That can happen for jumping to a constant address.  */
  for (unsigned int i = 0; i < path->length (); i++)
    {
      if ((*path)[i]->e == NULL)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file,
                       "Found NULL edge in jump threading path.  Cancelling jump thread:\n");
              dump_jump_thread_path (dump_file, *path, false);
            }

          delete_jump_thread_path (path);
          return;
        }

      /* Only the FSM threader is allowed to thread across
         backedges in the CFG.  */
      if (flag_checking
          && (*path)[0]->type != EDGE_FSM_THREAD)
        gcc_assert (((*path)[i]->e->flags & EDGE_DFS_BACK) == 0);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_jump_thread_path (dump_file, *path, true);

  if (!paths.exists ())
    paths.create (5);

  paths.safe_push (path);
}
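/* A minimal registration sketch (illustrative only; real clients such as
   the DOM and VRP threaders construct richer multi-edge paths):

     vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
     path->safe_push (new jump_thread_edge (e, EDGE_START_JUMP_THREAD));
     path->safe_push (new jump_thread_edge (e2, EDGE_COPY_SRC_BLOCK));
     register_jump_thread (path);

   Here E is the incoming edge to thread and E2 an outgoing edge of
   E->dest to follow; register_jump_thread takes ownership of PATH.  */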