/* Extraction metadata: gcc/cfgloopmanip.c from official-gcc.git,
   blob b5f6a47fb80553a2b083df636370d6d159381d42.  The commit title
   "[Ada] Missing range check on assignment to bit-packed array" comes
   from the scraped page header and is unrelated to this file.  */
1 /* Loop manipulation code for GNU compiler.
2 Copyright (C) 2002-2019 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "cfganal.h"
29 #include "cfgloop.h"
30 #include "gimple-iterator.h"
31 #include "gimplify-me.h"
32 #include "tree-ssa-loop-manip.h"
33 #include "dumpfile.h"
35 static void copy_loops_to (struct loop **, int,
36 struct loop *);
37 static void loop_redirect_edge (edge, basic_block);
38 static void remove_bbs (basic_block *, int);
39 static bool rpe_enum_p (const_basic_block, const void *);
40 static int find_path (edge, basic_block **);
41 static void fix_loop_placements (struct loop *, bool *);
42 static bool fix_bb_placement (basic_block);
43 static void fix_bb_placements (basic_block, bool *, bitmap);
45 /* Checks whether basic block BB is dominated by DATA. */
46 static bool
47 rpe_enum_p (const_basic_block bb, const void *data)
49 return dominated_by_p (CDI_DOMINATORS, bb, (const_basic_block) data);
52 /* Remove basic blocks BBS. NBBS is the number of the basic blocks. */
54 static void
55 remove_bbs (basic_block *bbs, int nbbs)
57 int i;
59 for (i = 0; i < nbbs; i++)
60 delete_basic_block (bbs[i]);
63 /* Find path -- i.e. the basic blocks dominated by edge E and put them
64 into array BBS, that will be allocated large enough to contain them.
65 E->dest must have exactly one predecessor for this to work (it is
66 easy to achieve and we do not put it here because we do not want to
67 alter anything by this function). The number of basic blocks in the
68 path is returned. */
69 static int
70 find_path (edge e, basic_block **bbs)
72 gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
74 /* Find bbs in the path. */
75 *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
76 return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
77 n_basic_blocks_for_fn (cfun), e->dest);
80 /* Fix placement of basic block BB inside loop hierarchy --
81 Let L be a loop to that BB belongs. Then every successor of BB must either
82 1) belong to some superloop of loop L, or
83 2) be a header of loop K such that K->outer is superloop of L
84 Returns true if we had to move BB into other loop to enforce this condition,
85 false if the placement of BB was already correct (provided that placements
86 of its successors are correct). */
87 static bool
88 fix_bb_placement (basic_block bb)
90 edge e;
91 edge_iterator ei;
92 struct loop *loop = current_loops->tree_root, *act;
94 FOR_EACH_EDGE (e, ei, bb->succs)
96 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
97 continue;
99 act = e->dest->loop_father;
100 if (act->header == e->dest)
101 act = loop_outer (act);
103 if (flow_loop_nested_p (loop, act))
104 loop = act;
107 if (loop == bb->loop_father)
108 return false;
110 remove_bb_from_loops (bb);
111 add_bb_to_loop (bb, loop);
113 return true;
116 /* Fix placement of LOOP inside loop tree, i.e. find the innermost superloop
117 of LOOP to that leads at least one exit edge of LOOP, and set it
118 as the immediate superloop of LOOP. Return true if the immediate superloop
119 of LOOP changed.
121 IRRED_INVALIDATED is set to true if a change in the loop structures might
122 invalidate the information about irreducible regions. */
124 static bool
125 fix_loop_placement (struct loop *loop, bool *irred_invalidated)
127 unsigned i;
128 edge e;
129 vec<edge> exits = get_loop_exit_edges (loop);
130 struct loop *father = current_loops->tree_root, *act;
131 bool ret = false;
133 FOR_EACH_VEC_ELT (exits, i, e)
135 act = find_common_loop (loop, e->dest->loop_father);
136 if (flow_loop_nested_p (father, act))
137 father = act;
140 if (father != loop_outer (loop))
142 for (act = loop_outer (loop); act != father; act = loop_outer (act))
143 act->num_nodes -= loop->num_nodes;
144 flow_loop_tree_node_remove (loop);
145 flow_loop_tree_node_add (father, loop);
147 /* The exit edges of LOOP no longer exits its original immediate
148 superloops; remove them from the appropriate exit lists. */
149 FOR_EACH_VEC_ELT (exits, i, e)
151 /* We may need to recompute irreducible loops. */
152 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
153 *irred_invalidated = true;
154 rescan_loop_exit (e, false, false);
157 ret = true;
160 exits.release ();
161 return ret;
164 /* Fix placements of basic blocks inside loop hierarchy stored in loops; i.e.
165 enforce condition stated in description of fix_bb_placement. We
166 start from basic block FROM that had some of its successors removed, so that
167 his placement no longer has to be correct, and iteratively fix placement of
168 its predecessors that may change if placement of FROM changed. Also fix
169 placement of subloops of FROM->loop_father, that might also be altered due
170 to this change; the condition for them is similar, except that instead of
171 successors we consider edges coming out of the loops.
173 If the changes may invalidate the information about irreducible regions,
174 IRRED_INVALIDATED is set to true.
176 If LOOP_CLOSED_SSA_INVLIDATED is non-zero then all basic blocks with
177 changed loop_father are collected there. */
179 static void
180 fix_bb_placements (basic_block from,
181 bool *irred_invalidated,
182 bitmap loop_closed_ssa_invalidated)
184 basic_block *queue, *qtop, *qbeg, *qend;
185 struct loop *base_loop, *target_loop;
186 edge e;
188 /* We pass through blocks back-reachable from FROM, testing whether some
189 of their successors moved to outer loop. It may be necessary to
190 iterate several times, but it is finite, as we stop unless we move
191 the basic block up the loop structure. The whole story is a bit
192 more complicated due to presence of subloops, those are moved using
193 fix_loop_placement. */
195 base_loop = from->loop_father;
196 /* If we are already in the outermost loop, the basic blocks cannot be moved
197 outside of it. If FROM is the header of the base loop, it cannot be moved
198 outside of it, either. In both cases, we can end now. */
199 if (base_loop == current_loops->tree_root
200 || from == base_loop->header)
201 return;
203 auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
204 bitmap_clear (in_queue);
205 bitmap_set_bit (in_queue, from->index);
206 /* Prevent us from going out of the base_loop. */
207 bitmap_set_bit (in_queue, base_loop->header->index);
209 queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
210 qtop = queue + base_loop->num_nodes + 1;
211 qbeg = queue;
212 qend = queue + 1;
213 *qbeg = from;
215 while (qbeg != qend)
217 edge_iterator ei;
218 from = *qbeg;
219 qbeg++;
220 if (qbeg == qtop)
221 qbeg = queue;
222 bitmap_clear_bit (in_queue, from->index);
224 if (from->loop_father->header == from)
226 /* Subloop header, maybe move the loop upward. */
227 if (!fix_loop_placement (from->loop_father, irred_invalidated))
228 continue;
229 target_loop = loop_outer (from->loop_father);
230 if (loop_closed_ssa_invalidated)
232 basic_block *bbs = get_loop_body (from->loop_father);
233 for (unsigned i = 0; i < from->loop_father->num_nodes; ++i)
234 bitmap_set_bit (loop_closed_ssa_invalidated, bbs[i]->index);
235 free (bbs);
238 else
240 /* Ordinary basic block. */
241 if (!fix_bb_placement (from))
242 continue;
243 target_loop = from->loop_father;
244 if (loop_closed_ssa_invalidated)
245 bitmap_set_bit (loop_closed_ssa_invalidated, from->index);
248 FOR_EACH_EDGE (e, ei, from->succs)
250 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
251 *irred_invalidated = true;
254 /* Something has changed, insert predecessors into queue. */
255 FOR_EACH_EDGE (e, ei, from->preds)
257 basic_block pred = e->src;
258 struct loop *nca;
260 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
261 *irred_invalidated = true;
263 if (bitmap_bit_p (in_queue, pred->index))
264 continue;
266 /* If it is subloop, then it either was not moved, or
267 the path up the loop tree from base_loop do not contain
268 it. */
269 nca = find_common_loop (pred->loop_father, base_loop);
270 if (pred->loop_father != base_loop
271 && (nca == base_loop
272 || nca != pred->loop_father))
273 pred = pred->loop_father->header;
274 else if (!flow_loop_nested_p (target_loop, pred->loop_father))
276 /* If PRED is already higher in the loop hierarchy than the
277 TARGET_LOOP to that we moved FROM, the change of the position
278 of FROM does not affect the position of PRED, so there is no
279 point in processing it. */
280 continue;
283 if (bitmap_bit_p (in_queue, pred->index))
284 continue;
286 /* Schedule the basic block. */
287 *qend = pred;
288 qend++;
289 if (qend == qtop)
290 qend = queue;
291 bitmap_set_bit (in_queue, pred->index);
294 free (queue);
297 /* Removes path beginning at edge E, i.e. remove basic blocks dominated by E
298 and update loop structures and dominators. Return true if we were able
299 to remove the path, false otherwise (and nothing is affected then). */
300 bool
301 remove_path (edge e, bool *irred_invalidated,
302 bitmap loop_closed_ssa_invalidated)
304 edge ae;
305 basic_block *rem_bbs, *bord_bbs, from, bb;
306 vec<basic_block> dom_bbs;
307 int i, nrem, n_bord_bbs;
308 bool local_irred_invalidated = false;
309 edge_iterator ei;
310 struct loop *l, *f;
312 if (! irred_invalidated)
313 irred_invalidated = &local_irred_invalidated;
315 if (!can_remove_branch_p (e))
316 return false;
318 /* Keep track of whether we need to update information about irreducible
319 regions. This is the case if the removed area is a part of the
320 irreducible region, or if the set of basic blocks that belong to a loop
321 that is inside an irreducible region is changed, or if such a loop is
322 removed. */
323 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
324 *irred_invalidated = true;
326 /* We need to check whether basic blocks are dominated by the edge
327 e, but we only have basic block dominators. This is easy to
328 fix -- when e->dest has exactly one predecessor, this corresponds
329 to blocks dominated by e->dest, if not, split the edge. */
330 if (!single_pred_p (e->dest))
331 e = single_pred_edge (split_edge (e));
333 /* It may happen that by removing path we remove one or more loops
334 we belong to. In this case first unloop the loops, then proceed
335 normally. We may assume that e->dest is not a header of any loop,
336 as it now has exactly one predecessor. */
337 for (l = e->src->loop_father; loop_outer (l); l = f)
339 f = loop_outer (l);
340 if (dominated_by_p (CDI_DOMINATORS, l->latch, e->dest))
341 unloop (l, irred_invalidated, loop_closed_ssa_invalidated);
344 /* Identify the path. */
345 nrem = find_path (e, &rem_bbs);
347 n_bord_bbs = 0;
348 bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
349 auto_sbitmap seen (last_basic_block_for_fn (cfun));
350 bitmap_clear (seen);
352 /* Find "border" hexes -- i.e. those with predecessor in removed path. */
353 for (i = 0; i < nrem; i++)
354 bitmap_set_bit (seen, rem_bbs[i]->index);
355 if (!*irred_invalidated)
356 FOR_EACH_EDGE (ae, ei, e->src->succs)
357 if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
358 && !bitmap_bit_p (seen, ae->dest->index)
359 && ae->flags & EDGE_IRREDUCIBLE_LOOP)
361 *irred_invalidated = true;
362 break;
365 for (i = 0; i < nrem; i++)
367 FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
368 if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
369 && !bitmap_bit_p (seen, ae->dest->index))
371 bitmap_set_bit (seen, ae->dest->index);
372 bord_bbs[n_bord_bbs++] = ae->dest;
374 if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
375 *irred_invalidated = true;
379 /* Remove the path. */
380 from = e->src;
381 remove_branch (e);
382 dom_bbs.create (0);
384 /* Cancel loops contained in the path. */
385 for (i = 0; i < nrem; i++)
386 if (rem_bbs[i]->loop_father->header == rem_bbs[i])
387 cancel_loop_tree (rem_bbs[i]->loop_father);
389 remove_bbs (rem_bbs, nrem);
390 free (rem_bbs);
392 /* Find blocks whose dominators may be affected. */
393 bitmap_clear (seen);
394 for (i = 0; i < n_bord_bbs; i++)
396 basic_block ldom;
398 bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
399 if (bitmap_bit_p (seen, bb->index))
400 continue;
401 bitmap_set_bit (seen, bb->index);
403 for (ldom = first_dom_son (CDI_DOMINATORS, bb);
404 ldom;
405 ldom = next_dom_son (CDI_DOMINATORS, ldom))
406 if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
407 dom_bbs.safe_push (ldom);
410 /* Recount dominators. */
411 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
412 dom_bbs.release ();
413 free (bord_bbs);
415 /* Fix placements of basic blocks inside loops and the placement of
416 loops in the loop tree. */
417 fix_bb_placements (from, irred_invalidated, loop_closed_ssa_invalidated);
418 fix_loop_placements (from->loop_father, irred_invalidated);
420 if (local_irred_invalidated
421 && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
422 mark_irreducible_loops ();
424 return true;
427 /* Creates place for a new LOOP in loops structure of FN. */
429 void
430 place_new_loop (struct function *fn, struct loop *loop)
432 loop->num = number_of_loops (fn);
433 vec_safe_push (loops_for_fn (fn)->larray, loop);
436 /* Given LOOP structure with filled header and latch, find the body of the
437 corresponding loop and add it to loops tree. Insert the LOOP as a son of
438 outer. */
440 void
441 add_loop (struct loop *loop, struct loop *outer)
443 basic_block *bbs;
444 int i, n;
445 struct loop *subloop;
446 edge e;
447 edge_iterator ei;
449 /* Add it to loop structure. */
450 place_new_loop (cfun, loop);
451 flow_loop_tree_node_add (outer, loop);
453 /* Find its nodes. */
454 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
455 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
457 for (i = 0; i < n; i++)
459 if (bbs[i]->loop_father == outer)
461 remove_bb_from_loops (bbs[i]);
462 add_bb_to_loop (bbs[i], loop);
463 continue;
466 loop->num_nodes++;
468 /* If we find a direct subloop of OUTER, move it to LOOP. */
469 subloop = bbs[i]->loop_father;
470 if (loop_outer (subloop) == outer
471 && subloop->header == bbs[i])
473 flow_loop_tree_node_remove (subloop);
474 flow_loop_tree_node_add (loop, subloop);
478 /* Update the information about loop exit edges. */
479 for (i = 0; i < n; i++)
481 FOR_EACH_EDGE (e, ei, bbs[i]->succs)
483 rescan_loop_exit (e, false, false);
487 free (bbs);
490 /* Scale profile of loop by P. */
492 void
493 scale_loop_frequencies (struct loop *loop, profile_probability p)
495 basic_block *bbs;
497 bbs = get_loop_body (loop);
498 scale_bbs_frequencies (bbs, loop->num_nodes, p);
499 free (bbs);
502 /* Scale profile in LOOP by P.
503 If ITERATION_BOUND is non-zero, scale even further if loop is predicted
504 to iterate too many times.
505 Before caling this function, preheader block profile should be already
506 scaled to final count. This is necessary because loop iterations are
507 determined by comparing header edge count to latch ege count and thus
508 they need to be scaled synchronously. */
510 void
511 scale_loop_profile (struct loop *loop, profile_probability p,
512 gcov_type iteration_bound)
514 edge e, preheader_e;
515 edge_iterator ei;
517 if (dump_file && (dump_flags & TDF_DETAILS))
519 fprintf (dump_file, ";; Scaling loop %i with scale ",
520 loop->num);
521 p.dump (dump_file);
522 fprintf (dump_file, " bounding iterations to %i\n",
523 (int)iteration_bound);
526 /* Scale the probabilities. */
527 scale_loop_frequencies (loop, p);
529 if (iteration_bound == 0)
530 return;
532 gcov_type iterations = expected_loop_iterations_unbounded (loop, NULL, true);
534 if (dump_file && (dump_flags & TDF_DETAILS))
536 fprintf (dump_file, ";; guessed iterations after scaling %i\n",
537 (int)iterations);
540 /* See if loop is predicted to iterate too many times. */
541 if (iterations <= iteration_bound)
542 return;
544 preheader_e = loop_preheader_edge (loop);
546 /* We could handle also loops without preheaders, but bounding is
547 currently used only by optimizers that have preheaders constructed. */
548 gcc_checking_assert (preheader_e);
549 profile_count count_in = preheader_e->count ();
551 if (count_in > profile_count::zero ()
552 && loop->header->count.initialized_p ())
554 profile_count count_delta = profile_count::zero ();
556 e = single_exit (loop);
557 if (e)
559 edge other_e;
560 FOR_EACH_EDGE (other_e, ei, e->src->succs)
561 if (!(other_e->flags & (EDGE_ABNORMAL | EDGE_FAKE))
562 && e != other_e)
563 break;
565 /* Probability of exit must be 1/iterations. */
566 count_delta = e->count ();
567 e->probability = profile_probability::always ()
568 .apply_scale (1, iteration_bound);
569 other_e->probability = e->probability.invert ();
571 /* In code below we only handle the following two updates. */
572 if (other_e->dest != loop->header
573 && other_e->dest != loop->latch
574 && (dump_file && (dump_flags & TDF_DETAILS)))
576 fprintf (dump_file, ";; giving up on update of paths from "
577 "exit condition to latch\n");
580 else
581 if (dump_file && (dump_flags & TDF_DETAILS))
582 fprintf (dump_file, ";; Loop has multiple exit edges; "
583 "giving up on exit condition update\n");
585 /* Roughly speaking we want to reduce the loop body profile by the
586 difference of loop iterations. We however can do better if
587 we look at the actual profile, if it is available. */
588 p = profile_probability::always ();
590 count_in = count_in.apply_scale (iteration_bound, 1);
591 p = count_in.probability_in (loop->header->count);
592 if (!(p > profile_probability::never ()))
593 p = profile_probability::very_unlikely ();
595 if (p == profile_probability::always ()
596 || !p.initialized_p ())
597 return;
599 /* If latch exists, change its count, since we changed
600 probability of exit. Theoretically we should update everything from
601 source of exit edge to latch, but for vectorizer this is enough. */
602 if (loop->latch && loop->latch != e->src)
603 loop->latch->count += count_delta;
605 /* Scale the probabilities. */
606 scale_loop_frequencies (loop, p);
608 /* Change latch's count back. */
609 if (loop->latch && loop->latch != e->src)
610 loop->latch->count -= count_delta;
612 if (dump_file && (dump_flags & TDF_DETAILS))
613 fprintf (dump_file, ";; guessed iterations are now %i\n",
614 (int)expected_loop_iterations_unbounded (loop, NULL, true));
618 /* Recompute dominance information for basic blocks outside LOOP. */
620 static void
621 update_dominators_in_loop (struct loop *loop)
623 vec<basic_block> dom_bbs = vNULL;
624 basic_block *body;
625 unsigned i;
627 auto_sbitmap seen (last_basic_block_for_fn (cfun));
628 bitmap_clear (seen);
629 body = get_loop_body (loop);
631 for (i = 0; i < loop->num_nodes; i++)
632 bitmap_set_bit (seen, body[i]->index);
634 for (i = 0; i < loop->num_nodes; i++)
636 basic_block ldom;
638 for (ldom = first_dom_son (CDI_DOMINATORS, body[i]);
639 ldom;
640 ldom = next_dom_son (CDI_DOMINATORS, ldom))
641 if (!bitmap_bit_p (seen, ldom->index))
643 bitmap_set_bit (seen, ldom->index);
644 dom_bbs.safe_push (ldom);
648 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
649 free (body);
650 dom_bbs.release ();
653 /* Creates an if region as shown above. CONDITION is used to create
654 the test for the if.
657 | ------------- -------------
658 | | pred_bb | | pred_bb |
659 | ------------- -------------
660 | | |
661 | | | ENTRY_EDGE
662 | | ENTRY_EDGE V
663 | | ====> -------------
664 | | | cond_bb |
665 | | | CONDITION |
666 | | -------------
667 | V / \
668 | ------------- e_false / \ e_true
669 | | succ_bb | V V
670 | ------------- ----------- -----------
671 | | false_bb | | true_bb |
672 | ----------- -----------
673 | \ /
674 | \ /
675 | V V
676 | -------------
677 | | join_bb |
678 | -------------
679 | | exit_edge (result)
681 | -----------
682 | | succ_bb |
683 | -----------
687 edge
688 create_empty_if_region_on_edge (edge entry_edge, tree condition)
691 basic_block cond_bb, true_bb, false_bb, join_bb;
692 edge e_true, e_false, exit_edge;
693 gcond *cond_stmt;
694 tree simple_cond;
695 gimple_stmt_iterator gsi;
697 cond_bb = split_edge (entry_edge);
699 /* Insert condition in cond_bb. */
700 gsi = gsi_last_bb (cond_bb);
701 simple_cond =
702 force_gimple_operand_gsi (&gsi, condition, true, NULL,
703 false, GSI_NEW_STMT);
704 cond_stmt = gimple_build_cond_from_tree (simple_cond, NULL_TREE, NULL_TREE);
705 gsi = gsi_last_bb (cond_bb);
706 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
708 join_bb = split_edge (single_succ_edge (cond_bb));
710 e_true = single_succ_edge (cond_bb);
711 true_bb = split_edge (e_true);
713 e_false = make_edge (cond_bb, join_bb, 0);
714 false_bb = split_edge (e_false);
716 e_true->flags &= ~EDGE_FALLTHRU;
717 e_true->flags |= EDGE_TRUE_VALUE;
718 e_false->flags &= ~EDGE_FALLTHRU;
719 e_false->flags |= EDGE_FALSE_VALUE;
721 set_immediate_dominator (CDI_DOMINATORS, cond_bb, entry_edge->src);
722 set_immediate_dominator (CDI_DOMINATORS, true_bb, cond_bb);
723 set_immediate_dominator (CDI_DOMINATORS, false_bb, cond_bb);
724 set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
726 exit_edge = single_succ_edge (join_bb);
728 if (single_pred_p (exit_edge->dest))
729 set_immediate_dominator (CDI_DOMINATORS, exit_edge->dest, join_bb);
731 return exit_edge;
734 /* create_empty_loop_on_edge
736 | - pred_bb - ------ pred_bb ------
737 | | | | iv0 = initial_value |
738 | -----|----- ---------|-----------
739 | | ______ | entry_edge
740 | | entry_edge / | |
741 | | ====> | -V---V- loop_header -------------
742 | V | | iv_before = phi (iv0, iv_after) |
743 | - succ_bb - | ---|-----------------------------
744 | | | | |
745 | ----------- | ---V--- loop_body ---------------
746 | | | iv_after = iv_before + stride |
747 | | | if (iv_before < upper_bound) |
748 | | ---|--------------\--------------
749 | | | \ exit_e
750 | | V \
751 | | - loop_latch - V- succ_bb -
752 | | | | | |
753 | | /------------- -----------
754 | \ ___ /
756 Creates an empty loop as shown above, the IV_BEFORE is the SSA_NAME
757 that is used before the increment of IV. IV_BEFORE should be used for
758 adding code to the body that uses the IV. OUTER is the outer loop in
759 which the new loop should be inserted.
761 Both INITIAL_VALUE and UPPER_BOUND expressions are gimplified and
762 inserted on the loop entry edge. This implies that this function
763 should be used only when the UPPER_BOUND expression is a loop
764 invariant. */
766 struct loop *
767 create_empty_loop_on_edge (edge entry_edge,
768 tree initial_value,
769 tree stride, tree upper_bound,
770 tree iv,
771 tree *iv_before,
772 tree *iv_after,
773 struct loop *outer)
775 basic_block loop_header, loop_latch, succ_bb, pred_bb;
776 struct loop *loop;
777 gimple_stmt_iterator gsi;
778 gimple_seq stmts;
779 gcond *cond_expr;
780 tree exit_test;
781 edge exit_e;
783 gcc_assert (entry_edge && initial_value && stride && upper_bound && iv);
785 /* Create header, latch and wire up the loop. */
786 pred_bb = entry_edge->src;
787 loop_header = split_edge (entry_edge);
788 loop_latch = split_edge (single_succ_edge (loop_header));
789 succ_bb = single_succ (loop_latch);
790 make_edge (loop_header, succ_bb, 0);
791 redirect_edge_succ_nodup (single_succ_edge (loop_latch), loop_header);
793 /* Set immediate dominator information. */
794 set_immediate_dominator (CDI_DOMINATORS, loop_header, pred_bb);
795 set_immediate_dominator (CDI_DOMINATORS, loop_latch, loop_header);
796 set_immediate_dominator (CDI_DOMINATORS, succ_bb, loop_header);
798 /* Initialize a loop structure and put it in a loop hierarchy. */
799 loop = alloc_loop ();
800 loop->header = loop_header;
801 loop->latch = loop_latch;
802 add_loop (loop, outer);
804 /* TODO: Fix counts. */
805 scale_loop_frequencies (loop, profile_probability::even ());
807 /* Update dominators. */
808 update_dominators_in_loop (loop);
810 /* Modify edge flags. */
811 exit_e = single_exit (loop);
812 exit_e->flags = EDGE_LOOP_EXIT | EDGE_FALSE_VALUE;
813 single_pred_edge (loop_latch)->flags = EDGE_TRUE_VALUE;
815 /* Construct IV code in loop. */
816 initial_value = force_gimple_operand (initial_value, &stmts, true, iv);
817 if (stmts)
819 gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
820 gsi_commit_edge_inserts ();
823 upper_bound = force_gimple_operand (upper_bound, &stmts, true, NULL);
824 if (stmts)
826 gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
827 gsi_commit_edge_inserts ();
830 gsi = gsi_last_bb (loop_header);
831 create_iv (initial_value, stride, iv, loop, &gsi, false,
832 iv_before, iv_after);
834 /* Insert loop exit condition. */
835 cond_expr = gimple_build_cond
836 (LT_EXPR, *iv_before, upper_bound, NULL_TREE, NULL_TREE);
838 exit_test = gimple_cond_lhs (cond_expr);
839 exit_test = force_gimple_operand_gsi (&gsi, exit_test, true, NULL,
840 false, GSI_NEW_STMT);
841 gimple_cond_set_lhs (cond_expr, exit_test);
842 gsi = gsi_last_bb (exit_e->src);
843 gsi_insert_after (&gsi, cond_expr, GSI_NEW_STMT);
845 split_block_after_labels (loop_header);
847 return loop;
850 /* Make area between HEADER_EDGE and LATCH_EDGE a loop by connecting
851 latch to header and update loop tree and dominators
852 accordingly. Everything between them plus LATCH_EDGE destination must
853 be dominated by HEADER_EDGE destination, and back-reachable from
854 LATCH_EDGE source. HEADER_EDGE is redirected to basic block SWITCH_BB,
855 FALSE_EDGE of SWITCH_BB to original destination of HEADER_EDGE and
856 TRUE_EDGE of SWITCH_BB to original destination of LATCH_EDGE.
857 Returns the newly created loop. Frequencies and counts in the new loop
858 are scaled by FALSE_SCALE and in the old one by TRUE_SCALE. */
860 struct loop *
861 loopify (edge latch_edge, edge header_edge,
862 basic_block switch_bb, edge true_edge, edge false_edge,
863 bool redirect_all_edges, profile_probability true_scale,
864 profile_probability false_scale)
866 basic_block succ_bb = latch_edge->dest;
867 basic_block pred_bb = header_edge->src;
868 struct loop *loop = alloc_loop ();
869 struct loop *outer = loop_outer (succ_bb->loop_father);
870 profile_count cnt;
872 loop->header = header_edge->dest;
873 loop->latch = latch_edge->src;
875 cnt = header_edge->count ();
877 /* Redirect edges. */
878 loop_redirect_edge (latch_edge, loop->header);
879 loop_redirect_edge (true_edge, succ_bb);
881 /* During loop versioning, one of the switch_bb edge is already properly
882 set. Do not redirect it again unless redirect_all_edges is true. */
883 if (redirect_all_edges)
885 loop_redirect_edge (header_edge, switch_bb);
886 loop_redirect_edge (false_edge, loop->header);
888 /* Update dominators. */
889 set_immediate_dominator (CDI_DOMINATORS, switch_bb, pred_bb);
890 set_immediate_dominator (CDI_DOMINATORS, loop->header, switch_bb);
893 set_immediate_dominator (CDI_DOMINATORS, succ_bb, switch_bb);
895 /* Compute new loop. */
896 add_loop (loop, outer);
898 /* Add switch_bb to appropriate loop. */
899 if (switch_bb->loop_father)
900 remove_bb_from_loops (switch_bb);
901 add_bb_to_loop (switch_bb, outer);
903 /* Fix counts. */
904 if (redirect_all_edges)
906 switch_bb->count = cnt;
908 scale_loop_frequencies (loop, false_scale);
909 scale_loop_frequencies (succ_bb->loop_father, true_scale);
910 update_dominators_in_loop (loop);
912 return loop;
915 /* Remove the latch edge of a LOOP and update loops to indicate that
916 the LOOP was removed. After this function, original loop latch will
917 have no successor, which caller is expected to fix somehow.
919 If this may cause the information about irreducible regions to become
920 invalid, IRRED_INVALIDATED is set to true.
922 LOOP_CLOSED_SSA_INVALIDATED, if non-NULL, is a bitmap where we store
923 basic blocks that had non-trivial update on their loop_father.*/
925 void
926 unloop (struct loop *loop, bool *irred_invalidated,
927 bitmap loop_closed_ssa_invalidated)
929 basic_block *body;
930 struct loop *ploop;
931 unsigned i, n;
932 basic_block latch = loop->latch;
933 bool dummy = false;
935 if (loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP)
936 *irred_invalidated = true;
938 /* This is relatively straightforward. The dominators are unchanged, as
939 loop header dominates loop latch, so the only thing we have to care of
940 is the placement of loops and basic blocks inside the loop tree. We
941 move them all to the loop->outer, and then let fix_bb_placements do
942 its work. */
944 body = get_loop_body (loop);
945 n = loop->num_nodes;
946 for (i = 0; i < n; i++)
947 if (body[i]->loop_father == loop)
949 remove_bb_from_loops (body[i]);
950 add_bb_to_loop (body[i], loop_outer (loop));
952 free (body);
954 while (loop->inner)
956 ploop = loop->inner;
957 flow_loop_tree_node_remove (ploop);
958 flow_loop_tree_node_add (loop_outer (loop), ploop);
961 /* Remove the loop and free its data. */
962 delete_loop (loop);
964 remove_edge (single_succ_edge (latch));
966 /* We do not pass IRRED_INVALIDATED to fix_bb_placements here, as even if
967 there is an irreducible region inside the cancelled loop, the flags will
968 be still correct. */
969 fix_bb_placements (latch, &dummy, loop_closed_ssa_invalidated);
972 /* Fix placement of superloops of LOOP inside loop tree, i.e. ensure that
973 condition stated in description of fix_loop_placement holds for them.
974 It is used in case when we removed some edges coming out of LOOP, which
975 may cause the right placement of LOOP inside loop tree to change.
977 IRRED_INVALIDATED is set to true if a change in the loop structures might
978 invalidate the information about irreducible regions. */
980 static void
981 fix_loop_placements (struct loop *loop, bool *irred_invalidated)
983 struct loop *outer;
985 while (loop_outer (loop))
987 outer = loop_outer (loop);
988 if (!fix_loop_placement (loop, irred_invalidated))
989 break;
991 /* Changing the placement of a loop in the loop tree may alter the
992 validity of condition 2) of the description of fix_bb_placement
993 for its preheader, because the successor is the header and belongs
994 to the loop. So call fix_bb_placements to fix up the placement
995 of the preheader and (possibly) of its predecessors. */
996 fix_bb_placements (loop_preheader_edge (loop)->src,
997 irred_invalidated, NULL);
998 loop = outer;
/* Duplicate loop bounds and other information we store about
   the loop into its duplicate.  */

void
copy_loop_info (struct loop *loop, struct loop *target)
{
  /* TARGET must be a freshly allocated loop: it must not already carry
     bound or estimate information.  */
  gcc_checking_assert (!target->any_upper_bound && !target->any_estimate);
  /* Iteration-count bounds and estimates.  */
  target->any_upper_bound = loop->any_upper_bound;
  target->nb_iterations_upper_bound = loop->nb_iterations_upper_bound;
  target->any_likely_upper_bound = loop->any_likely_upper_bound;
  target->nb_iterations_likely_upper_bound
    = loop->nb_iterations_likely_upper_bound;
  target->any_estimate = loop->any_estimate;
  target->nb_iterations_estimate = loop->nb_iterations_estimate;
  target->estimate_state = loop->estimate_state;
  /* Vectorizer / parallelization hints.  */
  target->safelen = loop->safelen;
  target->simdlen = loop->simdlen;
  target->constraints = loop->constraints;
  target->can_be_parallel = loop->can_be_parallel;
  /* OR rather than plain copy: once a warning was issued for either
     loop, do not warn again for the copy.  */
  target->warned_aggressive_loop_optimizations
    |= loop->warned_aggressive_loop_optimizations;
  target->dont_vectorize = loop->dont_vectorize;
  target->force_vectorize = loop->force_vectorize;
  target->in_oacc_kernels_region = loop->in_oacc_kernels_region;
  target->unroll = loop->unroll;
  target->owned_clique = loop->owned_clique;
}
1030 /* Copies copy of LOOP as subloop of TARGET loop, placing newly
1031 created loop into loops structure. If AFTER is non-null
1032 the new loop is added at AFTER->next, otherwise in front of TARGETs
1033 sibling list. */
1034 struct loop *
1035 duplicate_loop (struct loop *loop, struct loop *target, struct loop *after)
1037 struct loop *cloop;
1038 cloop = alloc_loop ();
1039 place_new_loop (cfun, cloop);
1041 copy_loop_info (loop, cloop);
1043 /* Mark the new loop as copy of LOOP. */
1044 set_loop_copy (loop, cloop);
1046 /* Add it to target. */
1047 flow_loop_tree_node_add (target, cloop, after);
1049 return cloop;
1052 /* Copies structure of subloops of LOOP into TARGET loop, placing
1053 newly created loops into loop tree at the end of TARGETs sibling
1054 list in the original order. */
1055 void
1056 duplicate_subloops (struct loop *loop, struct loop *target)
1058 struct loop *aloop, *cloop, *tail;
1060 for (tail = target->inner; tail && tail->next; tail = tail->next)
1062 for (aloop = loop->inner; aloop; aloop = aloop->next)
1064 cloop = duplicate_loop (aloop, target, tail);
1065 tail = cloop;
1066 gcc_assert(!tail->next);
1067 duplicate_subloops (aloop, cloop);
1071 /* Copies structure of subloops of N loops, stored in array COPIED_LOOPS,
1072 into TARGET loop, placing newly created loops into loop tree adding
1073 them to TARGETs sibling list at the end in order. */
1074 static void
1075 copy_loops_to (struct loop **copied_loops, int n, struct loop *target)
1077 struct loop *aloop, *tail;
1078 int i;
1080 for (tail = target->inner; tail && tail->next; tail = tail->next)
1082 for (i = 0; i < n; i++)
1084 aloop = duplicate_loop (copied_loops[i], target, tail);
1085 tail = aloop;
1086 gcc_assert(!tail->next);
1087 duplicate_subloops (copied_loops[i], aloop);
1091 /* Redirects edge E to basic block DEST. */
1092 static void
1093 loop_redirect_edge (edge e, basic_block dest)
1095 if (e->dest == dest)
1096 return;
1098 redirect_edge_and_branch_force (e, dest);
1101 /* Check whether LOOP's body can be duplicated. */
1102 bool
1103 can_duplicate_loop_p (const struct loop *loop)
1105 int ret;
1106 basic_block *bbs = get_loop_body (loop);
1108 ret = can_copy_bbs_p (bbs, loop->num_nodes);
1109 free (bbs);
1111 return ret;
/* Duplicates body of LOOP to given edge E NDUPL times.  Takes care of updating
   loop structure and dominators (order of inner subloops is retained).
   E's destination must be LOOP header for this to work, i.e. it must be entry
   or latch edge of this loop; these are unique, as the loops must have
   preheaders for this function to work correctly (in case E is latch, the
   function unrolls the loop, if E is entry edge, it peels the loop).  Store
   edges created by copying ORIG edge from copies corresponding to set bits in
   WONT_EXIT bitmap (bit 0 corresponds to original LOOP body, the other copies
   are numbered in order given by control flow through them) into TO_REMOVE
   array.  Returns false if duplication is
   impossible.  */

bool
duplicate_loop_to_header_edge (struct loop *loop, edge e,
			       unsigned int ndupl, sbitmap wont_exit,
			       edge orig, vec<edge> *to_remove,
			       int flags)
{
  struct loop *target, *aloop;
  struct loop **orig_loops;
  unsigned n_orig_loops;
  basic_block header = loop->header, latch = loop->latch;
  basic_block *new_bbs, *bbs, *first_active;
  basic_block new_bb, bb, first_active_latch = NULL;
  edge ae, latch_edge;
  edge spec_edges[2], new_spec_edges[2];
  const int SE_LATCH = 0;
  const int SE_ORIG = 1;
  unsigned i, j, n;
  int is_latch = (latch == e->src);
  profile_probability *scale_step = NULL;
  profile_probability scale_main = profile_probability::always ();
  profile_probability scale_act = profile_probability::always ();
  profile_count after_exit_num = profile_count::zero (),
		after_exit_den = profile_count::zero ();
  bool scale_after_exit = false;
  int add_irreducible_flag;
  basic_block place_after;
  bitmap bbs_to_scale = NULL;
  bitmap_iterator bi;

  gcc_assert (e->dest == loop->header);
  gcc_assert (ndupl > 0);

  if (orig)
    {
      /* Orig must be edge out of the loop.  */
      gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
      gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
    }

  n = loop->num_nodes;
  bbs = get_loop_body_in_dom_order (loop);
  gcc_assert (bbs[0] == loop->header);
  gcc_assert (bbs[n - 1] == loop->latch);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return false;
    }
  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  /* In case we are doing loop peeling and the loop is in the middle of
     irreducible region, the peeled copies will be inside it too.  */
  add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
  gcc_assert (!is_latch || !add_irreducible_flag);

  /* Find edge from latch.  */
  latch_edge = loop_latch_edge (loop);

  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      /* Calculate coefficients by that we have to scale counts
	 of duplicated loop bodies.  */
      profile_count count_in = header->count;
      profile_count count_le = latch_edge->count ();
      profile_count count_out_orig = orig ? orig->count () : count_in - count_le;
      profile_probability prob_pass_thru = count_le.probability_in (count_in);
      profile_probability prob_pass_wont_exit =
	      (count_le + count_out_orig).probability_in (count_in);

      if (orig && orig->probability.initialized_p ()
	  && !(orig->probability == profile_probability::always ()))
	{
	  /* The blocks that are dominated by a removed exit edge ORIG have
	     frequencies scaled by this.  */
	  if (orig->count ().initialized_p ())
	    {
	      after_exit_num = orig->src->count;
	      after_exit_den = after_exit_num - orig->count ();
	      scale_after_exit = true;
	    }
	  bbs_to_scale = BITMAP_ALLOC (NULL);
	  for (i = 0; i < n; i++)
	    {
	      if (bbs[i] != orig->src
		  && dominated_by_p (CDI_DOMINATORS, bbs[i], orig->src))
		bitmap_set_bit (bbs_to_scale, i);
	    }
	}

      scale_step = XNEWVEC (profile_probability, ndupl);

      /* SCALE_STEP[I-1] is the probability with which control reaches the
	 header of copy I from the header of the previous copy.  */
      for (i = 1; i <= ndupl; i++)
	scale_step[i - 1] = bitmap_bit_p (wont_exit, i)
				? prob_pass_wont_exit
				: prob_pass_thru;

      /* Complete peeling is special as the probability of exit in last
	 copy becomes 1.  */
      if (flags & DLTHE_FLAG_COMPLETTE_PEEL)
	{
	  profile_count wanted_count = e->count ();

	  gcc_assert (!is_latch);
	  /* First copy has count of incoming edge.  Each subsequent
	     count should be reduced by prob_pass_wont_exit.  Caller
	     should've managed the flags so all except for original loop
	     has won't exist set.  */
	  scale_act = wanted_count.probability_in (count_in);
	  /* Now simulate the duplication adjustments and compute header
	     frequency of the last copy.  */
	  for (i = 0; i < ndupl; i++)
	    wanted_count = wanted_count.apply_probability (scale_step [i]);
	  scale_main = wanted_count.probability_in (count_in);
	}
      /* Here we insert loop bodies inside the loop itself (for loop unrolling).
	 First iteration will be original loop followed by duplicated bodies.
	 It is necessary to scale down the original so we get right overall
	 number of iterations.  */
      else if (is_latch)
	{
	  profile_probability prob_pass_main = bitmap_bit_p (wont_exit, 0)
					? prob_pass_wont_exit
					: prob_pass_thru;
	  profile_probability p = prob_pass_main;
	  profile_count scale_main_den = count_in;
	  for (i = 0; i < ndupl; i++)
	    {
	      scale_main_den += count_in.apply_probability (p);
	      p = p * scale_step[i];
	    }
	  /* If original loop is executed COUNT_IN times, the unrolled
	     loop will account SCALE_MAIN_DEN times.  */
	  scale_main = count_in.probability_in (scale_main_den);
	  scale_act = scale_main * prob_pass_main;
	}
      else
	{
	  /* Peeling: the first copy executes with the preheader edge's
	     frequency; the original body is scaled by the product of all
	     per-copy pass-through probabilities.  */
	  profile_count preheader_count = e->count ();
	  for (i = 0; i < ndupl; i++)
	    scale_main = scale_main * scale_step[i];
	  scale_act = preheader_count.probability_in (count_in);
	}
    }

  /* Loop the new bbs will belong to.  */
  target = e->src->loop_father;

  /* Original loops.  */
  n_orig_loops = 0;
  for (aloop = loop->inner; aloop; aloop = aloop->next)
    n_orig_loops++;
  orig_loops = XNEWVEC (struct loop *, n_orig_loops);
  for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
    orig_loops[i] = aloop;

  set_loop_copy (loop, target);

  first_active = XNEWVEC (basic_block, n);
  if (is_latch)
    {
      /* When unrolling, the original body is the first copy in control
	 flow order.  */
      memcpy (first_active, bbs, n * sizeof (basic_block));
      first_active_latch = latch;
    }

  spec_edges[SE_ORIG] = orig;
  spec_edges[SE_LATCH] = latch_edge;

  place_after = e->src;
  for (j = 0; j < ndupl; j++)
    {
      /* Copy loops.  */
      copy_loops_to (orig_loops, n_orig_loops, target);

      /* Copy bbs.  */
      copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop,
		place_after, true);
      place_after = new_spec_edges[SE_LATCH]->src;

      if (flags & DLTHE_RECORD_COPY_NUMBER)
	for (i = 0; i < n; i++)
	  {
	    gcc_assert (!new_bbs[i]->aux);
	    new_bbs[i]->aux = (void *)(size_t)(j + 1);
	  }

      /* Note whether the blocks and edges belong to an irreducible loop.  */
      if (add_irreducible_flag)
	{
	  /* BB_DUPLICATED temporarily marks the new blocks so that only
	     edges internal to this copy get EDGE_IRREDUCIBLE_LOOP.  */
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags |= BB_DUPLICATED;
	  for (i = 0; i < n; i++)
	    {
	      edge_iterator ei;
	      new_bb = new_bbs[i];
	      if (new_bb->loop_father == target)
		new_bb->flags |= BB_IRREDUCIBLE_LOOP;

	      FOR_EACH_EDGE (ae, ei, new_bb->succs)
		if ((ae->dest->flags & BB_DUPLICATED)
		    && (ae->src->loop_father == target
			|| ae->dest->loop_father == target))
		  ae->flags |= EDGE_IRREDUCIBLE_LOOP;
	    }
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags &= ~BB_DUPLICATED;
	}

      /* Redirect the special edges.  */
      if (is_latch)
	{
	  redirect_edge_and_branch_force (latch_edge, new_bbs[0]);
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch);
	  latch = loop->latch = new_bbs[n - 1];
	  e = latch_edge = new_spec_edges[SE_LATCH];
	}
      else
	{
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  redirect_edge_and_branch_force (e, new_bbs[0]);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src);
	  e = new_spec_edges[SE_LATCH];
	}

      /* Record exit edge in this copy.  */
      if (orig && bitmap_bit_p (wont_exit, j + 1))
	{
	  if (to_remove)
	    to_remove->safe_push (new_spec_edges[SE_ORIG]);
	  force_edge_cold (new_spec_edges[SE_ORIG], true);

	  /* Scale the frequencies of the blocks dominated by the exit.  */
	  if (bbs_to_scale && scale_after_exit)
	    {
	      EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
		scale_bbs_frequencies_profile_count (new_bbs + i, 1, after_exit_num,
						     after_exit_den);
	    }
	}

      /* Record the first copy in the control flow order if it is not
	 the original loop (i.e. in case of peeling).  */
      if (!first_active_latch)
	{
	  memcpy (first_active, new_bbs, n * sizeof (basic_block));
	  first_active_latch = new_bbs[n - 1];
	}

      /* Set counts and frequencies.  */
      if (flags & DLTHE_FLAG_UPDATE_FREQ)
	{
	  scale_bbs_frequencies (new_bbs, n, scale_act);
	  scale_act = scale_act * scale_step[j];
	}
    }
  free (new_bbs);
  free (orig_loops);

  /* Record the exit edge in the original loop body, and update the frequencies.  */
  if (orig && bitmap_bit_p (wont_exit, 0))
    {
      if (to_remove)
	to_remove->safe_push (orig);
      force_edge_cold (orig, true);

      /* Scale the frequencies of the blocks dominated by the exit.  */
      if (bbs_to_scale && scale_after_exit)
	{
	  EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
	    scale_bbs_frequencies_profile_count (bbs + i, 1, after_exit_num,
						 after_exit_den);
	}
    }

  /* Update the original loop.  */
  if (!is_latch)
    set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src);
  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      scale_bbs_frequencies (bbs, n, scale_main);
      free (scale_step);
    }

  /* Update dominators of outer blocks if affected.  */
  for (i = 0; i < n; i++)
    {
      basic_block dominated, dom_bb;
      vec<basic_block> dom_bbs;
      unsigned j;

      bb = bbs[i];
      bb->aux = 0;

      dom_bbs = get_dominated_by (CDI_DOMINATORS, bb);
      FOR_EACH_VEC_ELT (dom_bbs, j, dominated)
	{
	  if (flow_bb_inside_loop_p (loop, dominated))
	    continue;
	  dom_bb = nearest_common_dominator (
			CDI_DOMINATORS, first_active[i], first_active_latch);
	  set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
	}
      dom_bbs.release ();
    }
  free (first_active);

  free (bbs);
  BITMAP_FREE (bbs_to_scale);

  return true;
}
/* A callback for make_forwarder block, to redirect all edges except for
   MFB_KJ_EDGE to the entry part.  E is the edge for that we should decide
   whether to redirect it.  */

edge mfb_kj_edge;

/* Return true iff E is not the edge stored in MFB_KJ_EDGE, i.e. iff
   make_forwarder_block should redirect E.  */
bool
mfb_keep_just (edge e)
{
  return e != mfb_kj_edge;
}
1453 /* True when a candidate preheader BLOCK has predecessors from LOOP. */
1455 static bool
1456 has_preds_from_loop (basic_block block, struct loop *loop)
1458 edge e;
1459 edge_iterator ei;
1461 FOR_EACH_EDGE (e, ei, block->preds)
1462 if (e->src->loop_father == loop)
1463 return true;
1464 return false;
/* Creates a pre-header for a LOOP.  Returns newly created block.  Unless
   CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have single
   entry; otherwise we also force preheader block to have only one successor.
   When CP_FALLTHRU_PREHEADERS is set in FLAGS, we force the preheader block
   to be a fallthru predecessor to the loop header and to have only
   predecessors from outside of the loop.
   The function also updates dominators.  */

basic_block
create_preheader (struct loop *loop, int flags)
{
  edge e;
  basic_block dummy;
  int nentry = 0;
  bool irred = false;
  bool latch_edge_was_fallthru;
  edge one_succ_pred = NULL, single_entry = NULL;
  edge_iterator ei;

  /* Count the entry edges (header predecessors other than the latch),
     remembering the last one and whether any is irreducible.  */
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    {
      if (e->src == loop->latch)
	continue;
      irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
      nentry++;
      single_entry = e;
      if (single_succ_p (e->src))
	one_succ_pred = e;
    }
  gcc_assert (nentry);
  if (nentry == 1)
    {
      bool need_forwarder_block = false;

      /* We do not allow entry block to be the loop preheader, since we
	 cannot emit code there.  */
      if (single_entry->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	need_forwarder_block = true;
      else
	{
	  /* If we want simple preheaders, also force the preheader to have
	     just a single successor.  */
	  if ((flags & CP_SIMPLE_PREHEADERS)
	      && !single_succ_p (single_entry->src))
	    need_forwarder_block = true;
	  /* If we want fallthru preheaders, also create forwarder block when
	     preheader ends with a jump or has predecessors from loop.  */
	  else if ((flags & CP_FALLTHRU_PREHEADERS)
		   && (JUMP_P (BB_END (single_entry->src))
		       || has_preds_from_loop (single_entry->src, loop)))
	    need_forwarder_block = true;
	}
      /* A single suitable entry edge already acts as a preheader.  */
      if (! need_forwarder_block)
	return NULL;
    }

  mfb_kj_edge = loop_latch_edge (loop);
  latch_edge_was_fallthru = (mfb_kj_edge->flags & EDGE_FALLTHRU) != 0;
  if (nentry == 1
      && ((flags & CP_FALLTHRU_PREHEADERS) == 0
	  || (single_entry->flags & EDGE_CROSSING) == 0))
    dummy = split_edge (single_entry);
  else
    {
      /* Multiple entries (or a crossing entry with fallthru preheaders
	 requested): split the header, keeping only the latch edge on the
	 new header.  */
      edge fallthru = make_forwarder_block (loop->header, mfb_keep_just, NULL);
      dummy = fallthru->src;
      loop->header = fallthru->dest;
    }

  /* Try to be clever in placing the newly created preheader.  The idea is to
     avoid breaking any "fallthruness" relationship between blocks.

     The preheader was created just before the header and all incoming edges
     to the header were redirected to the preheader, except the latch edge.
     So the only problematic case is when this latch edge was a fallthru
     edge: it is not anymore after the preheader creation so we have broken
     the fallthruness.  We're therefore going to look for a better place.  */
  if (latch_edge_was_fallthru)
    {
      if (one_succ_pred)
	e = one_succ_pred;
      else
	e = EDGE_PRED (dummy, 0);

      move_block_after (dummy, e->src);
    }

  if (irred)
    {
      dummy->flags |= BB_IRREDUCIBLE_LOOP;
      single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  if (dump_file)
    fprintf (dump_file, "Created preheader block for loop %i\n",
	     loop->num);

  if (flags & CP_FALLTHRU_PREHEADERS)
    gcc_assert ((single_succ_edge (dummy)->flags & EDGE_FALLTHRU)
		&& !JUMP_P (BB_END (dummy)));

  return dummy;
}
1571 /* Create preheaders for each loop; for meaning of FLAGS see create_preheader. */
1573 void
1574 create_preheaders (int flags)
1576 struct loop *loop;
1578 if (!current_loops)
1579 return;
1581 FOR_EACH_LOOP (loop, 0)
1582 create_preheader (loop, flags);
1583 loops_state_set (LOOPS_HAVE_PREHEADERS);
1586 /* Forces all loop latches to have only single successor. */
1588 void
1589 force_single_succ_latches (void)
1591 struct loop *loop;
1592 edge e;
1594 FOR_EACH_LOOP (loop, 0)
1596 if (loop->latch != loop->header && single_succ_p (loop->latch))
1597 continue;
1599 e = find_edge (loop->latch, loop->header);
1600 gcc_checking_assert (e != NULL);
1602 split_edge (e);
1604 loops_state_set (LOOPS_HAVE_SIMPLE_LATCHES);
/* This function is called from loop_version.  It splits the entry edge
   of the loop we want to version, adds the versioning condition, and
   adjust the edges to the two versions of the loop appropriately.
   e is an incoming edge.  Returns the basic block containing the
   condition.

   --- edge e ---- > [second_head]

   Split it and insert new conditional expression and adjust edges.

    --- edge e ---> [cond expr] ---> [first_head]
			|
			+---------> [second_head]

   THEN_PROB is the probability of then branch of the condition.
   ELSE_PROB is the probability of else branch.  Note that they may be both
   REG_BR_PROB_BASE when condition is IFN_LOOP_VECTORIZED or
   IFN_LOOP_DIST_ALIAS.  */

static basic_block
lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
			   edge e, void *cond_expr,
			   profile_probability then_prob,
			   profile_probability else_prob)
{
  basic_block new_head = NULL;
  edge e1;

  gcc_assert (e->dest == second_head);

  /* Split edge 'e'.  This will create a new basic block, where we can
     insert conditional expr.  */
  new_head = split_edge (e);

  /* COND_EXPR is opaque here (tree or RTL depending on IR); the hook
     emits it at the end of NEW_HEAD.  */
  lv_add_condition_to_bb (first_head, second_head, new_head,
			  cond_expr);

  /* Don't set EDGE_TRUE_VALUE in RTL mode, as it's invalid there.  */
  e = single_succ_edge (new_head);
  e1 = make_edge (new_head, first_head,
		  current_ir_type () == IR_GIMPLE ? EDGE_TRUE_VALUE : 0);
  e1->probability = then_prob;
  e->probability = else_prob;

  /* Both loop heads are now reached only through the condition block.  */
  set_immediate_dominator (CDI_DOMINATORS, first_head, new_head);
  set_immediate_dominator (CDI_DOMINATORS, second_head, new_head);

  /* Adjust loop header phi nodes.  */
  lv_adjust_loop_header_phi (first_head, second_head, new_head, e1);

  return new_head;
}
/* Main entry point for Loop Versioning transformation.

   This transformation given a condition and a loop, creates
   -if (condition) { loop_copy1 } else { loop_copy2 },
   where loop_copy1 is the loop transformed in one way, and loop_copy2
   is the loop transformed in another way (or unchanged).  COND_EXPR
   may be a run time test for things that were not resolved by static
   analysis (overlapping ranges (anti-aliasing), alignment, etc.).

   If non-NULL, CONDITION_BB is set to the basic block containing the
   condition.

   THEN_PROB is the probability of the then edge of the if.  THEN_SCALE
   is the ratio by that the frequencies in the original loop should
   be scaled.  ELSE_SCALE is the ratio by that the frequencies in the
   new loop should be scaled.

   If PLACE_AFTER is true, we place the new loop after LOOP in the
   instruction stream, otherwise it is placed before LOOP.  */

struct loop *
loop_version (struct loop *loop,
	      void *cond_expr, basic_block *condition_bb,
	      profile_probability then_prob, profile_probability else_prob,
	      profile_probability then_scale, profile_probability else_scale,
	      bool place_after)
{
  basic_block first_head, second_head;
  edge entry, latch_edge, true_edge, false_edge;
  int irred_flag;
  struct loop *nloop;
  basic_block cond_bb;

  /* Record entry and latch edges for the loop */
  entry = loop_preheader_edge (loop);
  irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
  /* Clear the flag during the transformation; it is restored below.  */
  entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;

  /* Note down head of loop as first_head.  */
  first_head = entry->dest;

  /* Duplicate loop.  */
  if (!cfg_hook_duplicate_loop_to_header_edge (loop, entry, 1,
					       NULL, NULL, NULL, 0))
    {
      entry->flags |= irred_flag;
      return NULL;
    }

  /* After duplication entry edge now points to new loop head block.
     Note down new head as second_head.  */
  second_head = entry->dest;

  /* Split loop entry edge and insert new block with cond expr.  */
  cond_bb = lv_adjust_loop_entry_edge (first_head, second_head,
				       entry, cond_expr, then_prob, else_prob);
  if (condition_bb)
    *condition_bb = cond_bb;

  if (!cond_bb)
    {
      entry->flags |= irred_flag;
      return NULL;
    }

  latch_edge = single_succ_edge (get_bb_copy (loop->latch));

  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  /* Turn the duplicated body into a proper loop in the loop tree.  */
  nloop = loopify (latch_edge,
		   single_pred_edge (get_bb_copy (loop->header)),
		   cond_bb, true_edge, false_edge,
		   false /* Do not redirect all edges.  */,
		   then_scale, else_scale);

  copy_loop_info (loop, nloop);

  /* loopify redirected latch_edge.  Update its PENDING_STMTS.  */
  lv_flush_pending_stmts (latch_edge);

  /* loopify redirected condition_bb's succ edge.  Update its PENDING_STMTS.  */
  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  lv_flush_pending_stmts (false_edge);
  /* Adjust irreducible flag.  */
  if (irred_flag)
    {
      cond_bb->flags |= BB_IRREDUCIBLE_LOOP;
      loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      single_pred_edge (cond_bb)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  if (place_after)
    {
      /* Move the new loop's blocks after the original loop's latch, in
	 dominance order, so the copies follow LOOP in the insn stream.  */
      basic_block *bbs = get_loop_body_in_dom_order (nloop), after;
      unsigned i;

      after = loop->latch;

      for (i = 0; i < nloop->num_nodes; i++)
	{
	  move_block_after (bbs[i], after);
	  after = bbs[i];
	}
      free (bbs);
    }

  /* At this point condition_bb is loop preheader with two successors,
     first_head and second_head.   Make sure that loop preheader has only
     one successor.  */
  split_edge (loop_preheader_edge (loop));
  split_edge (loop_preheader_edge (nloop));

  return nloop;
}