Small fix for -fdump-ada-spec
[official-gcc.git] / gcc / tree-ssa-loop-split.cc
blobfca678113eb8e33dd815e27634db6b014deeaf23
1 /* Loop splitting.
2 Copyright (C) 2015-2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "tree-pass.h"
27 #include "ssa.h"
28 #include "fold-const.h"
29 #include "tree-cfg.h"
30 #include "tree-ssa.h"
31 #include "tree-ssa-loop-niter.h"
32 #include "tree-ssa-loop.h"
33 #include "tree-ssa-loop-manip.h"
34 #include "tree-into-ssa.h"
35 #include "tree-inline.h"
36 #include "tree-cfgcleanup.h"
37 #include "cfgloop.h"
38 #include "tree-scalar-evolution.h"
39 #include "gimple-iterator.h"
40 #include "gimple-pretty-print.h"
41 #include "cfghooks.h"
42 #include "gimple-fold.h"
43 #include "gimplify-me.h"
45 /* This file implements two kinds of loop splitting.
47 One transformation of loops like:
49 for (i = 0; i < 100; i++)
51 if (i < 50)
53 else
57 into:
59 for (i = 0; i < 50; i++)
63 for (; i < 100; i++)
/* Return true when BB inside LOOP is a potential iteration space
   split point, i.e. ends with a condition like "IV < comp", which
   is true on one side of the iteration space and false on the other,
   and the split point can be computed.  If so, also return the border
   point in *BORDER and the comparison induction variable in IV.
   Returns the SSA name of the controlling IV (the condition's first
   operand after canonicalization), or NULL_TREE when BB is not a
   usable split point.  */

static tree
split_at_bb_p (class loop *loop, basic_block bb, tree *border, affine_iv *iv)
{
  gimple *last;
  gcond *stmt;
  affine_iv iv2;

  /* BB must end in a simple conditional jump.  */
  last = last_stmt (bb);
  if (!last || gimple_code (last) != GIMPLE_COND)
    return NULL_TREE;
  stmt = as_a <gcond *> (last);

  enum tree_code code = gimple_cond_code (stmt);

  /* Only handle relational comparisons, for equality and non-equality
     we'd have to split the loop into two loops and a middle statement.  */
  switch (code)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      break;
    default:
      return NULL_TREE;
    }

  /* An exit condition does not partition the iteration space; it ends
     the loop, so it cannot serve as a split point.  */
  if (loop_exits_from_bb_p (loop, bb))
    return NULL_TREE;

  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  class loop *useloop = loop_containing_stmt (stmt);

  /* Both operands must be affine IVs with respect to LOOP for the
     analysis below to make sense.  */
  if (!simple_iv (loop, useloop, op0, iv, false))
    return NULL_TREE;
  if (!simple_iv (loop, useloop, op1, &iv2, false))
    return NULL_TREE;

  /* Make it so that the first argument of the condition is
     the looping one.  Note this rewrites the condition in place
     (operands swapped, comparison mirrored) when needed.  */
  if (!integer_zerop (iv2.step))
    {
      std::swap (op0, op1);
      std::swap (*iv, iv2);
      code = swap_tree_comparison (code);
      gimple_cond_set_condition (stmt, code, op0, op1);
      update_stmt (stmt);
    }
  else if (integer_zerop (iv->step))
    /* Neither operand evolves in the loop: not a split point.  */
    return NULL_TREE;
  /* After the canonicalization above the second operand must be
     loop-invariant, and the first must be a non-wrapping IV.  */
  if (!integer_zerop (iv2.step))
    return NULL_TREE;
  if (!iv->no_overflow)
    return NULL_TREE;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Found potential split point: ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, " { ");
      print_generic_expr (dump_file, iv->base, TDF_SLIM);
      fprintf (dump_file, " + I*");
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
      fprintf (dump_file, " } %s ", get_tree_code_name (code));
      print_generic_expr (dump_file, iv2.base, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  *border = iv2.base;
  return op0;
}
150 /* Given a GUARD conditional stmt inside LOOP, which we want to make always
151 true or false depending on INITIAL_TRUE, and adjusted values NEXTVAL
152 (a post-increment IV) and NEWBOUND (the comparator) adjust the loop
153 exit test statement to loop back only if the GUARD statement will
154 also be true/false in the next iteration. */
156 static void
157 patch_loop_exit (class loop *loop, gcond *guard, tree nextval, tree newbound,
158 bool initial_true)
160 edge exit = single_exit (loop);
161 gcond *stmt = as_a <gcond *> (last_stmt (exit->src));
162 gimple_cond_set_condition (stmt, gimple_cond_code (guard),
163 nextval, newbound);
164 update_stmt (stmt);
166 edge stay = EDGE_SUCC (exit->src, EDGE_SUCC (exit->src, 0) == exit);
168 exit->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
169 stay->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
171 if (initial_true)
173 exit->flags |= EDGE_FALSE_VALUE;
174 stay->flags |= EDGE_TRUE_VALUE;
176 else
178 exit->flags |= EDGE_TRUE_VALUE;
179 stay->flags |= EDGE_FALSE_VALUE;
183 /* Give an induction variable GUARD_IV, and its affine descriptor IV,
184 find the loop phi node in LOOP defining it directly, or create
185 such phi node. Return that phi node. */
187 static gphi *
188 find_or_create_guard_phi (class loop *loop, tree guard_iv, affine_iv * /*iv*/)
190 gimple *def = SSA_NAME_DEF_STMT (guard_iv);
191 gphi *phi;
192 if ((phi = dyn_cast <gphi *> (def))
193 && gimple_bb (phi) == loop->header)
194 return phi;
196 /* XXX Create the PHI instead. */
197 return NULL;
200 /* Returns true if the exit values of all loop phi nodes can be
201 determined easily (i.e. that connect_loop_phis can determine them). */
203 static bool
204 easy_exit_values (class loop *loop)
206 edge exit = single_exit (loop);
207 edge latch = loop_latch_edge (loop);
208 gphi_iterator psi;
210 /* Currently we regard the exit values as easy if they are the same
211 as the value over the backedge. Which is the case if the definition
212 of the backedge value dominates the exit edge. */
213 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
215 gphi *phi = psi.phi ();
216 tree next = PHI_ARG_DEF_FROM_EDGE (phi, latch);
217 basic_block bb;
218 if (TREE_CODE (next) == SSA_NAME
219 && (bb = gimple_bb (SSA_NAME_DEF_STMT (next)))
220 && !dominated_by_p (CDI_DOMINATORS, exit->src, bb))
221 return false;
224 return true;
/* This function updates the SSA form after connect_loops made a new
   edge NEW_E leading from LOOP1 exit to LOOP2 (via an intermediate
   conditional).  I.e. the second loop can now be entered either
   via the original entry or via NEW_E, so the entry values of LOOP2
   phi nodes are either the original ones or those at the exit
   of LOOP1.  Insert new phi nodes in LOOP2 pre-header reflecting
   this.  The loops need to fulfill easy_exit_values().  */

static void
connect_loop_phis (class loop *loop1, class loop *loop2, edge new_e)
{
  basic_block rest = loop_preheader_edge (loop2)->src;
  gcc_assert (new_e->dest == rest);
  /* The other predecessor edge of REST: the path that skipped LOOP1.  */
  edge skip_first = EDGE_PRED (rest, EDGE_PRED (rest, 0) == new_e);

  edge firste = loop_preheader_edge (loop1);
  edge seconde = loop_preheader_edge (loop2);
  edge firstn = loop_latch_edge (loop1);
  gphi_iterator psi_first, psi_second;
  /* LOOP2 is a copy of LOOP1, so the header PHIs iterate in lockstep.  */
  for (psi_first = gsi_start_phis (loop1->header),
       psi_second = gsi_start_phis (loop2->header);
       !gsi_end_p (psi_first);
       gsi_next (&psi_first), gsi_next (&psi_second))
    {
      tree init, next, new_init;
      use_operand_p op;
      gphi *phi_first = psi_first.phi ();
      gphi *phi_second = psi_second.phi ();

      /* INIT is the pre-loop value, NEXT the value on LOOP1's backedge;
	 by easy_exit_values NEXT is also the value live at LOOP1's exit.  */
      init = PHI_ARG_DEF_FROM_EDGE (phi_first, firste);
      next = PHI_ARG_DEF_FROM_EDGE (phi_first, firstn);
      op = PHI_ARG_DEF_PTR_FROM_EDGE (phi_second, seconde);
      gcc_assert (operand_equal_for_phi_arg_p (init, USE_FROM_PTR (op)));

      /* Prefer using original variable as a base for the new ssa name.
	 This is necessary for virtual ops, and useful in order to avoid
	 losing debug info for real ops.  */
      if (TREE_CODE (next) == SSA_NAME
	  && useless_type_conversion_p (TREE_TYPE (next),
					TREE_TYPE (init)))
	new_init = copy_ssa_name (next);
      else if (TREE_CODE (init) == SSA_NAME
	       && useless_type_conversion_p (TREE_TYPE (init),
					     TREE_TYPE (next)))
	new_init = copy_ssa_name (init);
      else if (useless_type_conversion_p (TREE_TYPE (next),
					  TREE_TYPE (init)))
	new_init = make_temp_ssa_name (TREE_TYPE (next), NULL,
				       "unrinittmp");
      else
	new_init = make_temp_ssa_name (TREE_TYPE (init), NULL,
				       "unrinittmp");

      /* Merge the two possible entry values in REST and make LOOP2's
	 header PHI use the merged name.  */
      gphi * newphi = create_phi_node (new_init, rest);
      add_phi_arg (newphi, init, skip_first, UNKNOWN_LOCATION);
      add_phi_arg (newphi, next, new_e, UNKNOWN_LOCATION);
      SET_USE (op, new_init);
    }
}
/* The two loops LOOP1 and LOOP2 were just created by loop versioning,
   they are still equivalent and placed in two arms of a diamond, like so:

               .------if (cond)------.
               v                     v
             pre1                   pre2
               |                     |
       .--->h1                      h2<----.
       |      |                      |     |
       |     ex1---.            .---ex2    |
       |    /      |            |      \   |
       '---l1      X            |      l2---'
                   |            |
                   |            v
                   '--->join<---'

   This function transforms the program such that LOOP1 is conditionally
   falling through to LOOP2, or skipping it.  This is done by splitting
   the ex1->join edge at X in the diagram above, and inserting a condition
   whose one arm goes to pre2, resulting in this situation:

               .------if (cond)------.
               v                     v
             pre1       .---------->pre2
               |        |            |
       .--->h1          |           h2<----.
       |      |         |            |     |
       |     ex1---.    |       .---ex2    |
       |    /      v    |       |      \   |
       '---l1   skip---'        |      l2---'
                   |            |
                   v            v
                   '--->join<---'

   The condition used is the exit condition of LOOP1, which effectively means
   that when the first loop exits (for whatever reason) but the real original
   exit expression is still false the second loop will be entered.
   The function returns the new edge cond->pre2.

   This doesn't update the SSA form, see connect_loop_phis for that.  */

static edge
connect_loops (class loop *loop1, class loop *loop2)
{
  edge exit = single_exit (loop1);
  /* SKIP_BB is the new block on the former exit edge; after split_edge
     EXIT leads from the exit test block into SKIP_BB.  */
  basic_block skip_bb = split_edge (exit);
  gcond *skip_stmt;
  gimple_stmt_iterator gsi;
  edge new_e, skip_e;

  /* Duplicate LOOP1's exit condition into SKIP_BB.  */
  gimple *stmt = last_stmt (exit->src);
  skip_stmt = gimple_build_cond (gimple_cond_code (stmt),
				 gimple_cond_lhs (stmt),
				 gimple_cond_rhs (stmt),
				 NULL_TREE, NULL_TREE);
  gsi = gsi_last_bb (skip_bb);
  gsi_insert_after (&gsi, skip_stmt, GSI_NEW_STMT);

  /* The single successor of SKIP_BB (to join) becomes one arm of the
     new condition; the other arm is a new edge to LOOP2's preheader.  */
  skip_e = EDGE_SUCC (skip_bb, 0);
  skip_e->flags &= ~EDGE_FALLTHRU;
  new_e = make_edge (skip_bb, loop_preheader_edge (loop2)->src, 0);
  /* Keep the same truth sense as the original exit: the copied condition
     being "exit-true" skips LOOP2, otherwise we fall into it.  */
  if (exit->flags & EDGE_TRUE_VALUE)
    {
      skip_e->flags |= EDGE_TRUE_VALUE;
      new_e->flags |= EDGE_FALSE_VALUE;
    }
  else
    {
      skip_e->flags |= EDGE_FALSE_VALUE;
      new_e->flags |= EDGE_TRUE_VALUE;
    }

  new_e->probability = profile_probability::likely ();
  skip_e->probability = new_e->probability.invert ();

  return new_e;
}
/* This returns the new bound for iterations given the original iteration
   space in NITER, an arbitrary new bound BORDER, assumed to be some
   comparison value with a different IV, the initial value GUARD_INIT of
   that other IV, and the comparison code GUARD_CODE that compares
   that other IV with BORDER.  We return an SSA name, and place any
   necessary statements for that computation into *STMTS.

   For example for such a loop:

     for (i = beg, j = guard_init; i < end; i++, j++)
       if (j < border)  // this is supposed to be true/false
         ...

   we want to return a new bound (on j) that makes the loop iterate
   as long as the condition j < border stays true.  We also don't want
   to iterate more often than the original loop, so we have to introduce
   some cut-off as well (via min/max), effectively resulting in:

     newend = min (end+guard_init-beg, border)
     for (i = beg; j = guard_init; j < newend; i++, j++)
       if (j < c)
         ...

   Depending on the direction of the IVs and if the exit tests
   are strict or non-strict we need to use MIN or MAX,
   and add or subtract 1.  This routine computes newend above.  */

static tree
compute_new_first_bound (gimple_seq *stmts, class tree_niter_desc *niter,
			 tree border,
			 enum tree_code guard_code, tree guard_init)
{
  /* The niter structure contains the after-increment IV, we need
     the loop-enter base, so subtract STEP once.  */
  tree controlbase = force_gimple_operand (niter->control.base,
					   stmts, true, NULL_TREE);
  tree controlstep = niter->control.step;
  tree enddiff;
  if (POINTER_TYPE_P (TREE_TYPE (controlbase)))
    {
      /* Pointers only have POINTER_PLUS_EXPR, so subtract by adding
	 the negated step.  */
      controlstep = gimple_build (stmts, NEGATE_EXPR,
				  TREE_TYPE (controlstep), controlstep);
      enddiff = gimple_build (stmts, POINTER_PLUS_EXPR,
			      TREE_TYPE (controlbase),
			      controlbase, controlstep);
    }
  else
    enddiff = gimple_build (stmts, MINUS_EXPR,
			    TREE_TYPE (controlbase),
			    controlbase, controlstep);

  /* Compute end-beg.  */
  gimple_seq stmts2;
  tree end = force_gimple_operand (niter->bound, &stmts2,
				   true, NULL_TREE);
  gimple_seq_add_seq_without_update (stmts, stmts2);
  if (POINTER_TYPE_P (TREE_TYPE (enddiff)))
    {
      /* Again express end - enddiff via POINTER_PLUS_EXPR with a
	 negated sizetype offset.  */
      tree tem = gimple_convert (stmts, sizetype, enddiff);
      tem = gimple_build (stmts, NEGATE_EXPR, sizetype, tem);
      enddiff = gimple_build (stmts, POINTER_PLUS_EXPR,
			      TREE_TYPE (enddiff),
			      end, tem);
    }
  else
    enddiff = gimple_build (stmts, MINUS_EXPR, TREE_TYPE (enddiff),
			    end, enddiff);

  /* Compute guard_init + (end-beg).  */
  tree newbound;
  enddiff = gimple_convert (stmts, TREE_TYPE (guard_init), enddiff);
  if (POINTER_TYPE_P (TREE_TYPE (guard_init)))
    {
      enddiff = gimple_convert (stmts, sizetype, enddiff);
      newbound = gimple_build (stmts, POINTER_PLUS_EXPR,
			       TREE_TYPE (guard_init),
			       guard_init, enddiff);
    }
  else
    newbound = gimple_build (stmts, PLUS_EXPR, TREE_TYPE (guard_init),
			     guard_init, enddiff);

  /* Depending on the direction of the IVs the new bound for the first
     loop is the minimum or maximum of old bound and border.
     Also, if the guard condition isn't strictly less or greater,
     we need to adjust the bound.  */
  int addbound = 0;
  enum tree_code minmax;
  if (niter->cmp == LT_EXPR)
    {
      /* GT and LE are the same, inverted.  */
      if (guard_code == GT_EXPR || guard_code == LE_EXPR)
	addbound = -1;
      minmax = MIN_EXPR;
    }
  else
    {
      gcc_assert (niter->cmp == GT_EXPR);
      if (guard_code == GE_EXPR || guard_code == LT_EXPR)
	addbound = 1;
      minmax = MAX_EXPR;
    }

  if (addbound)
    {
      /* sizetype is the offset type for pointer adjustment.  */
      tree type2 = TREE_TYPE (newbound);
      if (POINTER_TYPE_P (type2))
	type2 = sizetype;
      newbound = gimple_build (stmts,
			       POINTER_TYPE_P (TREE_TYPE (newbound))
			       ? POINTER_PLUS_EXPR : PLUS_EXPR,
			       TREE_TYPE (newbound),
			       newbound,
			       build_int_cst (type2, addbound));
    }

  tree newend = gimple_build (stmts, minmax, TREE_TYPE (border),
			      border, newbound);
  return newend;
}
487 /* Fix the two loop's bb count after split based on the split edge probability,
488 don't adjust the bbs dominated by true branches of that loop to avoid
489 dropping 1s down. */
490 static void
491 fix_loop_bb_probability (class loop *loop1, class loop *loop2, edge true_edge,
492 edge false_edge)
494 /* Proportion first loop's bb counts except those dominated by true
495 branch to avoid drop 1s down. */
496 basic_block *bbs1, *bbs2;
497 bbs1 = get_loop_body (loop1);
498 unsigned j;
499 for (j = 0; j < loop1->num_nodes; j++)
500 if (bbs1[j] == loop1->latch
501 || !dominated_by_p (CDI_DOMINATORS, bbs1[j], true_edge->dest))
502 bbs1[j]->count
503 = bbs1[j]->count.apply_probability (true_edge->probability);
504 free (bbs1);
506 /* Proportion second loop's bb counts except those dominated by false
507 branch to avoid drop 1s down. */
508 basic_block bbi_copy = get_bb_copy (false_edge->dest);
509 bbs2 = get_loop_body (loop2);
510 for (j = 0; j < loop2->num_nodes; j++)
511 if (bbs2[j] == loop2->latch
512 || !dominated_by_p (CDI_DOMINATORS, bbs2[j], bbi_copy))
513 bbs2[j]->count
514 = bbs2[j]->count.apply_probability (true_edge->probability.invert ());
515 free (bbs2);
/* Checks if LOOP contains a conditional block whose condition
   depends on which side in the iteration space it is, and if so
   splits the iteration space into two loops.  Returns true if the
   loop was split.  */

static bool
split_loop (class loop *loop1)
{
  class tree_niter_desc niter;
  basic_block *bbs;
  unsigned i;
  bool changed = false;
  tree guard_iv;
  tree border = NULL_TREE;
  affine_iv iv;
  edge exit1;

  /* Require a single-exit loop with an empty latch and an analyzable,
     non-NE-controlled iteration count.  */
  if (!(exit1 = single_exit (loop1))
      || EDGE_COUNT (exit1->src->succs) != 2
      /* ??? We could handle non-empty latches when we split the latch edge
	 (not the exit edge), and put the new exit condition in the new block.
	 OTOH this executes some code unconditionally that might have been
	 skipped by the original exit before.  */
      || !empty_block_p (loop1->latch)
      || !easy_exit_values (loop1)
      || !number_of_iterations_exit (loop1, exit1, &niter, false, true)
      || niter.cmp == ERROR_MARK
      /* We can't yet handle loops controlled by a != predicate.  */
      || niter.cmp == NE_EXPR)
    return false;

  bbs = get_loop_body (loop1);

  if (!can_copy_bbs_p (bbs, loop1->num_nodes))
    {
      free (bbs);
      return false;
    }

  /* Find a splitting opportunity.  */
  for (i = 0; i < loop1->num_nodes; i++)
    if ((guard_iv = split_at_bb_p (loop1, bbs[i], &border, &iv)))
      {
	/* Handling opposite steps is not implemented yet.  Neither
	   is handling different step sizes.  */
	if ((tree_int_cst_sign_bit (iv.step)
	     != tree_int_cst_sign_bit (niter.control.step))
	    || !tree_int_cst_equal (iv.step, niter.control.step))
	  continue;

	/* Find a loop PHI node that defines guard_iv directly,
	   or create one doing that.  */
	gphi *phi = find_or_create_guard_phi (loop1, guard_iv, &iv);
	if (!phi)
	  continue;
	gcond *guard_stmt = as_a<gcond *> (last_stmt (bbs[i]));
	tree guard_init = PHI_ARG_DEF_FROM_EDGE (phi,
						 loop_preheader_edge (loop1));
	enum tree_code guard_code = gimple_cond_code (guard_stmt);

	/* Loop splitting is implemented by versioning the loop, placing
	   the new loop after the old loop, make the first loop iterate
	   as long as the conditional stays true (or false) and let the
	   second (new) loop handle the rest of the iterations.

	   First we need to determine if the condition will start being true
	   or false in the first loop.  */
	bool initial_true;
	switch (guard_code)
	  {
	  case LT_EXPR:
	  case LE_EXPR:
	    initial_true = !tree_int_cst_sign_bit (iv.step);
	    break;
	  case GT_EXPR:
	  case GE_EXPR:
	    initial_true = tree_int_cst_sign_bit (iv.step);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	/* Build a condition that will skip the first loop when the
	   guard condition won't ever be true (or false).  */
	gimple_seq stmts2;
	border = force_gimple_operand (border, &stmts2, true, NULL_TREE);
	if (stmts2)
	  gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop1),
					    stmts2);
	tree cond = build2 (guard_code, boolean_type_node, guard_init, border);
	if (!initial_true)
	  cond = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, cond);

	edge true_edge, false_edge;
	extract_true_false_edges_from_block (bbs[i], &true_edge, &false_edge);

	/* Now version the loop, placing loop2 after loop1 connecting
	   them, and fix up SSA form for that.  */
	initialize_original_copy_tables ();
	basic_block cond_bb;

	class loop *loop2 = loop_version (loop1, cond, &cond_bb,
					  true_edge->probability,
					  true_edge->probability.invert (),
					  profile_probability::always (),
					  profile_probability::always (),
					  true);
	gcc_assert (loop2);

	edge new_e = connect_loops (loop1, loop2);
	connect_loop_phis (loop1, loop2, new_e);

	/* The iterations of the second loop is now already
	   exactly those that the first loop didn't do, but the
	   iteration space of the first loop is still the original one.
	   Compute the new bound for the guarding IV and patch the
	   loop exit to use it instead of original IV and bound.  */
	gimple_seq stmts = NULL;
	tree newend = compute_new_first_bound (&stmts, &niter, border,
					       guard_code, guard_init);
	if (stmts)
	  gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop1),
					    stmts);
	tree guard_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop1));
	patch_loop_exit (loop1, guard_stmt, guard_next, newend, initial_true);

	fix_loop_bb_probability (loop1, loop2, true_edge, false_edge);

	/* Fix first loop's exit probability after scaling.  */
	edge exit_to_latch1;
	if (EDGE_SUCC (exit1->src, 0) == exit1)
	  exit_to_latch1 = EDGE_SUCC (exit1->src, 1);
	else
	  exit_to_latch1 = EDGE_SUCC (exit1->src, 0);
	exit_to_latch1->probability *= true_edge->probability;
	exit1->probability = exit_to_latch1->probability.invert ();

	/* Finally patch out the two copies of the condition to be always
	   true/false (or opposite).  */
	gcond *force_true = as_a<gcond *> (last_stmt (bbs[i]));
	gcond *force_false = as_a<gcond *> (last_stmt (get_bb_copy (bbs[i])));
	if (!initial_true)
	  std::swap (force_true, force_false);
	gimple_cond_make_true (force_true);
	gimple_cond_make_false (force_false);
	update_stmt (force_true);
	update_stmt (force_false);

	free_original_copy_tables ();

	changed = true;
	if (dump_file && (dump_flags & TDF_DETAILS))
	  fprintf (dump_file, ";; Loop split.\n");

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, guard_stmt, "loop split\n");

	/* Only deal with the first opportunity.  */
	break;
      }

  free (bbs);
  return changed;
}
684 /* Another transformation of loops like:
686 for (i = INIT (); CHECK (i); i = NEXT ())
688 if (expr (a_1, a_2, ..., a_n)) // expr is pure
689 a_j = ...; // change at least one a_j
690 else
691 S; // not change any a_j
694 into:
696 for (i = INIT (); CHECK (i); i = NEXT ())
698 if (expr (a_1, a_2, ..., a_n))
699 a_j = ...;
700 else
703 i = NEXT ();
704 break;
708 for (; CHECK (i); i = NEXT ())
715 /* Data structure to hold temporary information during loop split upon
716 semi-invariant conditional statement. */
717 class split_info {
718 public:
719 /* Array of all basic blocks in a loop, returned by get_loop_body(). */
720 basic_block *bbs;
722 /* All memory store/clobber statements in a loop. */
723 auto_vec<gimple *> memory_stores;
725 /* Whether above memory stores vector has been filled. */
726 int need_init;
728 /* Control dependencies of basic blocks in a loop. */
729 auto_vec<hash_set<basic_block> *> control_deps;
731 split_info () : bbs (NULL), need_init (true) { }
733 ~split_info ()
735 if (bbs)
736 free (bbs);
738 for (unsigned i = 0; i < control_deps.length (); i++)
739 delete control_deps[i];
/* Find all statements with memory-write effect in LOOP, including memory
   store and non-pure function call, and keep those in a vector.  This work
   is only done one time, for the vector should be constant during analysis
   stage of semi-invariant condition.  */

static void
find_vdef_in_loop (struct loop *loop)
{
  split_info *info = (split_info *) loop->aux;
  gphi *vphi = get_virtual_phi (loop->header);

  /* Indicate memory store vector has been filled.  */
  info->need_init = false;

  /* If loop contains memory operation, there must be a virtual PHI node in
     loop header basic block.  */
  if (vphi == NULL)
    return;

  /* All virtual SSA names inside the loop are connected to be a cyclic
     graph via virtual PHI nodes.  The virtual PHI node in loop header just
     links the first and the last virtual SSA names, by using the last as
     PHI operand to define the first.  */
  const edge latch = loop_latch_edge (loop);
  const tree first = gimple_phi_result (vphi);
  const tree last = PHI_ARG_DEF_FROM_EDGE (vphi, latch);

  /* The virtual SSA cyclic graph might consist of only one SSA name, who
     is defined by itself.

       .MEM_1 = PHI <.MEM_2(loop entry edge), .MEM_1(latch edge)>

     This means the loop contains only memory loads, so we can skip it.  */
  if (first == last)
    return;

  auto_vec<gimple *> other_stores;
  auto_vec<tree> worklist;
  auto_bitmap visited;

  bitmap_set_bit (visited, SSA_NAME_VERSION (first));
  bitmap_set_bit (visited, SSA_NAME_VERSION (last));
  worklist.safe_push (last);

  do
    {
      tree vuse = worklist.pop ();
      gimple *stmt = SSA_NAME_DEF_STMT (vuse);

      /* We mark the first and last SSA names as visited at the beginning,
	 and reversely start the process from the last SSA name towards the
	 first, which ensures that this do-while will not touch SSA names
	 defined outside the loop.  */
      gcc_assert (gimple_bb (stmt)
		  && flow_bb_inside_loop_p (loop, gimple_bb (stmt)));

      if (gimple_code (stmt) == GIMPLE_PHI)
	{
	  gphi *phi = as_a <gphi *> (stmt);

	  /* Walk every incoming virtual operand of the PHI.  */
	  for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
	    {
	      tree arg = gimple_phi_arg_def (stmt, i);

	      if (bitmap_set_bit (visited, SSA_NAME_VERSION (arg)))
		worklist.safe_push (arg);
	    }
	}
      else
	{
	  tree prev = gimple_vuse (stmt);

	  /* Non-pure call statement is conservatively assumed to impact all
	     memory locations.  So place call statements ahead of other memory
	     stores in the vector with an idea of using them as shortcut
	     terminators to memory alias analysis.  */
	  if (gimple_code (stmt) == GIMPLE_CALL)
	    info->memory_stores.safe_push (stmt);
	  else
	    other_stores.safe_push (stmt);

	  if (bitmap_set_bit (visited, SSA_NAME_VERSION (prev)))
	    worklist.safe_push (prev);
	}
    } while (!worklist.is_empty ());

  /* Calls first, then the remaining stores.  */
  info->memory_stores.safe_splice (other_stores);
}
832 /* Two basic blocks have equivalent control dependency if one dominates to
833 the other, and it is post-dominated by the latter. Given a basic block
834 BB in LOOP, find farest equivalent dominating basic block. For BB, there
835 is a constraint that BB does not post-dominate loop header of LOOP, this
836 means BB is control-dependent on at least one basic block in LOOP. */
838 static basic_block
839 get_control_equiv_head_block (struct loop *loop, basic_block bb)
841 while (!bb->aux)
843 basic_block dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
845 gcc_checking_assert (dom_bb && flow_bb_inside_loop_p (loop, dom_bb));
847 if (!dominated_by_p (CDI_POST_DOMINATORS, dom_bb, bb))
848 break;
850 bb = dom_bb;
852 return bb;
/* Given a BB in LOOP, find out all basic blocks in LOOP that BB is control-
   dependent on.  */

static hash_set<basic_block> *
find_control_dep_blocks (struct loop *loop, basic_block bb)
{
  /* BB has same control dependency as loop header, then it is not control-
     dependent on any basic block in LOOP.  */
  if (dominated_by_p (CDI_POST_DOMINATORS, loop->header, bb))
    return NULL;

  basic_block equiv_head = get_control_equiv_head_block (loop, bb);

  if (equiv_head->aux)
    {
      /* There is a basic block containing control dependency equivalent
	 to BB.  No need to recompute that, and also set this information
	 to other equivalent basic blocks.  */
      for (; bb != equiv_head;
	   bb = get_immediate_dominator (CDI_DOMINATORS, bb))
	bb->aux = equiv_head->aux;
      return (hash_set<basic_block> *) equiv_head->aux;
    }

  /* A basic block X is control-dependent on another Y iff there exists
     a path from X to Y, in which every basic block other than X and Y
     is post-dominated by Y, but X is not post-dominated by Y.

     According to this rule, traverse basic blocks in the loop backwards
     starting from BB, if a basic block is post-dominated by BB, extend
     current post-dominating path to this block, otherwise it is another
     one that BB is control-dependent on.  */
  auto_vec<basic_block> pdom_worklist;
  hash_set<basic_block> pdom_visited;
  hash_set<basic_block> *dep_bbs = new hash_set<basic_block>;

  pdom_worklist.safe_push (equiv_head);

  do
    {
      basic_block pdom_bb = pdom_worklist.pop ();
      edge_iterator ei;
      edge e;

      if (pdom_visited.add (pdom_bb))
	continue;

      FOR_EACH_EDGE (e, ei, pdom_bb->preds)
	{
	  basic_block pred_bb = e->src;

	  /* A predecessor not post-dominated by BB is a control
	     dependence of BB.  */
	  if (!dominated_by_p (CDI_POST_DOMINATORS, pred_bb, bb))
	    {
	      dep_bbs->add (pred_bb);
	      continue;
	    }

	  pred_bb = get_control_equiv_head_block (loop, pred_bb);

	  if (pdom_visited.contains (pred_bb))
	    continue;

	  if (!pred_bb->aux)
	    {
	      pdom_worklist.safe_push (pred_bb);
	      continue;
	    }

	  /* If control dependency of basic block is available, fast extend
	     post-dominating path using the information instead of advancing
	     forward step-by-step.  */
	  hash_set<basic_block> *pred_dep_bbs
	    = (hash_set<basic_block> *) pred_bb->aux;

	  for (hash_set<basic_block>::iterator iter = pred_dep_bbs->begin ();
	       iter != pred_dep_bbs->end (); ++iter)
	    {
	      basic_block pred_dep_bb = *iter;

	      /* Basic blocks can either be in control dependency of BB, or
		 must be post-dominated by BB, if so, extend the path from
		 these basic blocks.  */
	      if (!dominated_by_p (CDI_POST_DOMINATORS, pred_dep_bb, bb))
		dep_bbs->add (pred_dep_bb);
	      else if (!pdom_visited.contains (pred_dep_bb))
		pdom_worklist.safe_push (pred_dep_bb);
	    }
	}
    } while (!pdom_worklist.is_empty ());

  /* Record computed control dependencies in loop so that we can reach them
     when reclaiming resources.  */
  ((split_info *) loop->aux)->control_deps.safe_push (dep_bbs);

  /* Associate control dependence with related equivalent basic blocks.  */
  for (equiv_head->aux = dep_bbs; bb != equiv_head;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    bb->aux = dep_bbs;

  return dep_bbs;
}
958 /* Forward declaration */
960 static bool
961 stmt_semi_invariant_p_1 (struct loop *loop, gimple *stmt,
962 const_basic_block skip_head,
963 hash_map<gimple *, bool> &stmt_stat);
965 /* Given STMT, memory load or pure call statement, check whether it is impacted
966 by some memory store in LOOP, excluding trace starting from SKIP_HEAD (the
967 trace is composed of SKIP_HEAD and those basic block dominated by it, always
968 corresponds to one branch of a conditional statement). If SKIP_HEAD is
969 NULL, all basic blocks of LOOP are checked. */
971 static bool
972 vuse_semi_invariant_p (struct loop *loop, gimple *stmt,
973 const_basic_block skip_head)
975 split_info *info = (split_info *) loop->aux;
976 tree rhs = NULL_TREE;
977 ao_ref ref;
978 gimple *store;
979 unsigned i;
981 /* Collect memory store/clobber statements if haven't done that. */
982 if (info->need_init)
983 find_vdef_in_loop (loop);
985 if (is_gimple_assign (stmt))
986 rhs = gimple_assign_rhs1 (stmt);
988 ao_ref_init (&ref, rhs);
990 FOR_EACH_VEC_ELT (info->memory_stores, i, store)
992 /* Skip basic blocks dominated by SKIP_HEAD, if non-NULL. */
993 if (skip_head
994 && dominated_by_p (CDI_DOMINATORS, gimple_bb (store), skip_head))
995 continue;
997 if (!ref.ref || stmt_may_clobber_ref_p_1 (store, &ref))
998 return false;
1001 return true;
1004 /* Suppose one condition branch, led by SKIP_HEAD, is not executed since
1005 certain iteration of LOOP, check whether an SSA name (NAME) remains
1006 unchanged in next iteration. We call this characteristic semi-
1007 invariantness. SKIP_HEAD might be NULL, if so, nothing excluded, all basic
1008 blocks and control flows in the loop will be considered. Semi-invariant
1009 state of checked statement is cached in hash map STMT_STAT to avoid
1010 redundant computation in possible following re-check. */
1012 static inline bool
1013 ssa_semi_invariant_p (struct loop *loop, tree name,
1014 const_basic_block skip_head,
1015 hash_map<gimple *, bool> &stmt_stat)
1017 gimple *def = SSA_NAME_DEF_STMT (name);
1018 const_basic_block def_bb = gimple_bb (def);
1020 /* An SSA name defined outside loop is definitely semi-invariant. */
1021 if (!def_bb || !flow_bb_inside_loop_p (loop, def_bb))
1022 return true;
1024 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
1025 return false;
1027 return stmt_semi_invariant_p_1 (loop, def, skip_head, stmt_stat);
1030 /* Check whether a loop iteration PHI node (LOOP_PHI) defines a value that is
1031 semi-invariant in LOOP. Basic blocks dominated by SKIP_HEAD (if non-NULL),
1032 are excluded from LOOP. */
1034 static bool
1035 loop_iter_phi_semi_invariant_p (struct loop *loop, gphi *loop_phi,
1036 const_basic_block skip_head)
1038 const_edge latch = loop_latch_edge (loop);
1039 tree name = gimple_phi_result (loop_phi);
1040 tree from = PHI_ARG_DEF_FROM_EDGE (loop_phi, latch);
1042 gcc_checking_assert (from);
1044 /* Loop iteration PHI node locates in loop header, and it has two source
1045 operands, one is an initial value coming from outside the loop, the other
1046 is a value through latch of the loop, which is derived in last iteration,
1047 we call the latter latch value. From the PHI node to definition of latch
1048 value, if excluding branch trace starting from SKIP_HEAD, except copy-
1049 assignment or likewise, there is no other kind of value redefinition, SSA
1050 name defined by the PHI node is semi-invariant.
1052 loop entry
1053 | .--- latch ---.
1054 | | |
1055 v v |
1056 x_1 = PHI <x_0, x_3> |
1059 .------- if (cond) -------. |
1060 | | |
1061 | [ SKIP ] |
1062 | | |
1063 | x_2 = ... |
1064 | | |
1065 '---- T ---->.<---- F ----' |
1068 x_3 = PHI <x_1, x_2> |
1070 '----------------------'
1072 Suppose in certain iteration, execution flow in above graph goes through
1073 true branch, which means that one source value to define x_3 in false
1074 branch (x_2) is skipped, x_3 only comes from x_1, and x_1 in next
1075 iterations is defined by x_3, we know that x_1 will never changed if COND
1076 always chooses true branch from then on. */
/* Walk the use-def chain backwards from the latch value towards the PHI
   result NAME.  The walk succeeds only if every step yields exactly one
   live source value and finally reaches NAME itself.  */
1078 while (from != name)
1080 /* A new value comes from a CONSTANT. */
1081 if (TREE_CODE (from) != SSA_NAME)
1082 return false;
1084 gimple *stmt = SSA_NAME_DEF_STMT (from);
1085 const_basic_block bb = gimple_bb (stmt);
1087 /* A new value comes from outside the loop. */
1088 if (!bb || !flow_bb_inside_loop_p (loop, bb))
1089 return false;
/* Reset FROM; the PHI/copy cases below re-derive the single live source
   of STMT, if there is one.  */
1091 from = NULL_TREE;
1093 if (gimple_code (stmt) == GIMPLE_PHI)
1095 gphi *phi = as_a <gphi *> (stmt);
1097 for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
1099 if (skip_head)
1101 const_edge e = gimple_phi_arg_edge (phi, i);
1103 /* Don't consider redefinitions in excluded basic blocks. */
1104 if (dominated_by_p (CDI_DOMINATORS, e->src, skip_head))
1105 continue;
1108 tree arg = gimple_phi_arg_def (phi, i);
1110 if (!from)
1111 from = arg;
1112 else if (!operand_equal_p (from, arg, 0))
1113 /* There are more than one source operands that provide
1114 different values to the SSA name, it is variant. */
1115 return false;
1118 else if (gimple_code (stmt) == GIMPLE_ASSIGN)
1120 /* For simple value copy, check its rhs instead. */
1121 if (gimple_assign_ssa_name_copy_p (stmt))
1122 from = gimple_assign_rhs1 (stmt);
1125 /* Any other kind of definition is deemed to introduce a new value
1126 to the SSA name. */
1127 if (!from)
1128 return false;
1130 return true;
1133 /* Check whether conditional predicates that BB is control-dependent on, are
1134 semi-invariant in LOOP. Basic blocks dominated by SKIP_HEAD (if non-NULL),
1135 are excluded from LOOP. Semi-invariant state of checked statement is cached
1136 in hash map STMT_STAT. */
1138 static bool
1139 control_dep_semi_invariant_p (struct loop *loop, basic_block bb,
1140 const_basic_block skip_head,
1141 hash_map<gimple *, bool> &stmt_stat)
1143 hash_set<basic_block> *dep_bbs = find_control_dep_blocks (loop, bb);
1145 if (!dep_bbs)
1146 return true;
1148 for (hash_set<basic_block>::iterator iter = dep_bbs->begin ();
1149 iter != dep_bbs->end (); ++iter)
1151 gimple *last = last_stmt (*iter);
1153 if (!last)
1154 return false;
1156 /* Only check condition predicates. */
1157 if (gimple_code (last) != GIMPLE_COND
1158 && gimple_code (last) != GIMPLE_SWITCH)
1159 return false;
1161 if (!stmt_semi_invariant_p_1 (loop, last, skip_head, stmt_stat))
1162 return false;
1165 return true;
1168 /* Check whether STMT is semi-invariant in LOOP, iff all its operands are
1169 semi-invariant, consequently, all its defined values are semi-invariant.
1170 Basic blocks dominated by SKIP_HEAD (if non-NULL), are excluded from LOOP.
1171 Semi-invariant state of checked statement is cached in hash map
1172 STMT_STAT. */
1174 static bool
1175 stmt_semi_invariant_p_1 (struct loop *loop, gimple *stmt,
1176 const_basic_block skip_head,
1177 hash_map<gimple *, bool> &stmt_stat)
1179 bool existed;
1180 bool &invar = stmt_stat.get_or_insert (stmt, &existed);
/* Cached result from an earlier (possibly still in-progress) check.  */
1182 if (existed)
1183 return invar;
1185 /* A statement might depend on itself, which is treated as variant. So set
1186 state of statement under check to be variant to ensure that. */
1187 invar = false;
1189 if (gimple_code (stmt) == GIMPLE_PHI)
1191 gphi *phi = as_a <gphi *> (stmt);
1193 if (gimple_bb (stmt) == loop->header)
1195 /* If the entry value is subject to abnormal coalescing
1196 avoid the transform since we're going to duplicate the
1197 loop header and thus likely introduce overlapping life-ranges
1198 between the PHI def and the entry on the path when the
1199 first loop is skipped. */
1200 tree entry_def
1201 = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
1202 if (TREE_CODE (entry_def) == SSA_NAME
1203 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (entry_def))
1204 return false;
1205 invar = loop_iter_phi_semi_invariant_p (loop, phi, skip_head);
1206 return invar;
1209 /* For a loop PHI node that does not locate in loop header, it is semi-
1210 invariant only if two conditions are met. The first is its source
1211 values are derived from CONSTANT (including loop-invariant value), or
1212 from SSA name defined by semi-invariant loop iteration PHI node. The
1213 second is its source incoming edges are control-dependent on semi-
1214 invariant conditional predicates. */
1215 for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
1217 const_edge e = gimple_phi_arg_edge (phi, i);
1218 tree arg = gimple_phi_arg_def (phi, i);
1220 if (TREE_CODE (arg) == SSA_NAME)
1222 if (!ssa_semi_invariant_p (loop, arg, skip_head, stmt_stat))
1223 return false;
1225 /* If source value is defined in location from where the source
1226 edge comes in, no need to check control dependency again
1227 since this has been done in above SSA name check stage. */
1228 if (e->src == gimple_bb (SSA_NAME_DEF_STMT (arg)))
1229 continue;
1232 if (!control_dep_semi_invariant_p (loop, e->src, skip_head,
1233 stmt_stat))
1234 return false;
1237 else
1239 ssa_op_iter iter;
1240 tree use;
1242 /* Volatile memory load or return of normal (non-const/non-pure) call
1243 should not be treated as constant in each iteration of loop. */
1244 if (gimple_has_side_effects (stmt))
1245 return false;
1247 /* Check if any memory store may kill memory load at this place. */
1248 if (gimple_vuse (stmt) && !vuse_semi_invariant_p (loop, stmt, skip_head))
1249 return false;
1251 /* Although operand of a statement might be SSA name, CONSTANT or
1252 VARDECL, here we only need to check SSA name operands. This is
1253 because check on VARDECL operands, which involve memory loads,
1254 must have been done prior to invocation of this function in
1255 vuse_semi_invariant_p. */
1256 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
1257 if (!ssa_semi_invariant_p (loop, use, skip_head, stmt_stat))
1258 return false;
/* Finally, the statement itself must be control-dependent only on
   semi-invariant predicates.  */
1261 if (!control_dep_semi_invariant_p (loop, gimple_bb (stmt), skip_head,
1262 stmt_stat))
1263 return false;
1265 /* Here we SHOULD NOT use invar = true, since hash map might be changed due
1266 to new insertion, and thus invar may point to invalid memory. */
1267 stmt_stat.put (stmt, true);
1268 return true;
1271 /* A helper function to check whether STMT is semi-invariant in LOOP. Basic
1272 blocks dominated by SKIP_HEAD (if non-NULL), are excluded from LOOP. */
1274 static bool
1275 stmt_semi_invariant_p (struct loop *loop, gimple *stmt,
1276 const_basic_block skip_head)
1278 hash_map<gimple *, bool> stmt_stat;
1279 return stmt_semi_invariant_p_1 (loop, stmt, skip_head, stmt_stat);
1282 /* Determine when conditional statement never transfers execution to one of its
1283 branch, whether we can remove the branch's leading basic block (BRANCH_BB)
1284 and those basic blocks dominated by BRANCH_BB. */
1286 static bool
1287 branch_removable_p (basic_block branch_bb)
1289 edge_iterator ei;
1290 edge e;
1292 if (single_pred_p (branch_bb))
1293 return true;
1295 FOR_EACH_EDGE (e, ei, branch_bb->preds)
1297 if (dominated_by_p (CDI_DOMINATORS, e->src, branch_bb))
1298 continue;
1300 if (dominated_by_p (CDI_DOMINATORS, branch_bb, e->src))
1301 continue;
1303 /* The branch can be reached from opposite branch, or from some
1304 statement not dominated by the conditional statement. */
1305 return false;
1308 return true;
1311 /* Find out which branch of a conditional statement (COND) is invariant in the
1312 execution context of LOOP. That is: once the branch is selected in certain
1313 iteration of the loop, any operand that contributes to computation of the
1314 conditional statement remains unchanged in all following iterations. */
1316 static edge
1317 get_cond_invariant_branch (struct loop *loop, gcond *cond)
1319 basic_block cond_bb = gimple_bb (cond);
1320 basic_block targ_bb[2];
1321 bool invar[2];
1322 unsigned invar_checks = 0;
1324 for (unsigned i = 0; i < 2; i++)
1326 targ_bb[i] = EDGE_SUCC (cond_bb, i)->dest;
1328 /* One branch directs to loop exit, no need to perform loop split upon
1329 this conditional statement. Firstly, it is trivial if the exit branch
1330 is semi-invariant, for the statement is just to break loop. Secondly,
1331 if the opposite branch is semi-invariant, it means that the statement
1332 is real loop-invariant, which is covered by loop unswitch. */
1333 if (!flow_bb_inside_loop_p (loop, targ_bb[i]))
1334 return NULL;
1337 for (unsigned i = 0; i < 2; i++)
1339 invar[!i] = false;
1341 if (!branch_removable_p (targ_bb[i]))
1342 continue;
1344 /* Given a semi-invariant branch, if its opposite branch dominates
1345 loop latch, it and its following trace will only be executed in
1346 final iteration of loop, namely it is not part of repeated body
1347 of the loop. Similar to the above case that the branch is loop
1348 exit, no need to split loop. */
1349 if (dominated_by_p (CDI_DOMINATORS, loop->latch, targ_bb[i]))
1350 continue;
1352 invar[!i] = stmt_semi_invariant_p (loop, cond, targ_bb[i]);
1353 invar_checks++;
1356 /* With both branches being invariant (handled by loop unswitch) or
1357 variant is not what we want. */
1358 if (invar[0] ^ !invar[1])
1359 return NULL;
1361 /* Found a real loop-invariant condition, do nothing. */
1362 if (invar_checks < 2 && stmt_semi_invariant_p (loop, cond, NULL))
1363 return NULL;
1365 return EDGE_SUCC (cond_bb, invar[0] ? 0 : 1);
1368 /* Calculate increased code size measured by estimated insn number if applying
1369 loop split upon certain branch (BRANCH_EDGE) of a conditional statement. */
1371 static int
1372 compute_added_num_insns (struct loop *loop, const_edge branch_edge)
1374 basic_block cond_bb = branch_edge->src;
1375 unsigned branch = EDGE_SUCC (cond_bb, 1) == branch_edge;
1376 basic_block opposite_bb = EDGE_SUCC (cond_bb, !branch)->dest;
1377 basic_block *bbs = ((split_info *) loop->aux)->bbs;
1378 int num = 0;
1380 for (unsigned i = 0; i < loop->num_nodes; i++)
1382 /* Do no count basic blocks only in opposite branch. */
1383 if (dominated_by_p (CDI_DOMINATORS, bbs[i], opposite_bb))
1384 continue;
1386 num += estimate_num_insns_seq (bb_seq (bbs[i]), &eni_size_weights);
1389 /* It is unnecessary to evaluate expression of the conditional statement
1390 in new loop that contains only invariant branch. This expression should
1391 be constant value (either true or false). Exclude code size of insns
1392 that contribute to computation of the expression. */
1394 auto_vec<gimple *> worklist;
1395 hash_set<gimple *> removed;
1396 gimple *stmt = last_stmt (cond_bb);
1398 worklist.safe_push (stmt);
1399 removed.add (stmt);
1400 num -= estimate_num_insns (stmt, &eni_size_weights);
1404 ssa_op_iter opnd_iter;
1405 use_operand_p opnd_p;
1407 stmt = worklist.pop ();
1408 FOR_EACH_PHI_OR_STMT_USE (opnd_p, stmt, opnd_iter, SSA_OP_USE)
1410 tree opnd = USE_FROM_PTR (opnd_p);
1412 if (TREE_CODE (opnd) != SSA_NAME || SSA_NAME_IS_DEFAULT_DEF (opnd))
1413 continue;
1415 gimple *opnd_stmt = SSA_NAME_DEF_STMT (opnd);
1416 use_operand_p use_p;
1417 imm_use_iterator use_iter;
1419 if (removed.contains (opnd_stmt)
1420 || !flow_bb_inside_loop_p (loop, gimple_bb (opnd_stmt)))
1421 continue;
1423 FOR_EACH_IMM_USE_FAST (use_p, use_iter, opnd)
1425 gimple *use_stmt = USE_STMT (use_p);
1427 if (!is_gimple_debug (use_stmt) && !removed.contains (use_stmt))
1429 opnd_stmt = NULL;
1430 break;
1434 if (opnd_stmt)
1436 worklist.safe_push (opnd_stmt);
1437 removed.add (opnd_stmt);
1438 num -= estimate_num_insns (opnd_stmt, &eni_size_weights);
1441 } while (!worklist.is_empty ());
1443 gcc_assert (num >= 0);
1444 return num;
1447 /* Find out loop-invariant branch of a conditional statement (COND) if it has,
1448 and check whether it is eligible and profitable to perform loop split upon
1449 this branch in LOOP. */
1451 static edge
1452 get_cond_branch_to_split_loop (struct loop *loop, gcond *cond)
1454 edge invar_branch = get_cond_invariant_branch (loop, cond);
1455 if (!invar_branch)
1456 return NULL;
1458 /* When accurate profile information is available, and execution
1459 frequency of the branch is too low, just let it go. */
1460 profile_probability prob = invar_branch->probability;
1461 if (prob.reliable_p ())
1463 int thres = param_min_loop_cond_split_prob;
1465 if (prob < profile_probability::always ().apply_scale (thres, 100))
1466 return NULL;
1469 /* Add a threshold for increased code size to disable loop split. */
1470 if (compute_added_num_insns (loop, invar_branch) > param_max_peeled_insns)
1471 return NULL;
1473 return invar_branch;
1476 /* Given a loop (LOOP1) with a loop-invariant branch (INVAR_BRANCH) of some
1477 conditional statement, perform loop split transformation illustrated
1478 as the following graph.
1480 .-------T------ if (true) ------F------.
1481 | .---------------. |
1482 | | | |
1483 v | v v
1484 pre-header | pre-header
1485 | .------------. | | .------------.
1486 | | | | | | |
1487 | v | | | v |
1488 header | | header |
1489 | | | | |
1490 .--- if (cond) ---. | | .--- if (true) ---. |
1491 | | | | | | |
1492 invariant | | | invariant | |
1493 | | | | | | |
1494 '---T--->.<---F---' | | '---T--->.<---F---' |
1495 | | / | |
1496 stmts | / stmts |
1497 | F T | |
1498 / \ | / / \ |
1499 .-------* * [ if (cond) ] .-------* * |
1500 | | | | | |
1501 | latch | | latch |
1502 | | | | | |
1503 | '------------' | '------------'
1504 '------------------------. .-----------'
1505 loop1 | | loop2
1507 exits
1509 In the graph, loop1 represents the part derived from original one, and
1510 loop2 is duplicated using loop_version (), which corresponds to the part
1511 of original one being split out. In the original latch edge of loop1, we
1512 insert a new conditional statement duplicated from the semi-invariant cond,
1513 and one of its branch goes back to loop1 header as a latch edge, and the
1514 other branch goes to loop2 pre-header as an entry edge. And also in loop2,
1515 we abandon the variant branch of the conditional statement by setting a
1516 constant bool condition, based on which branch is semi-invariant. */
1518 static bool
1519 do_split_loop_on_cond (struct loop *loop1, edge invar_branch)
1521 basic_block cond_bb = invar_branch->src;
1522 bool true_invar = !!(invar_branch->flags & EDGE_TRUE_VALUE);
1523 gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
1525 gcc_assert (cond_bb->loop_father == loop1);
1527 if (dump_enabled_p ())
1528 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, cond,
1529 "loop split on semi-invariant condition at %s branch\n",
1530 true_invar ? "true" : "false");
1532 initialize_original_copy_tables ();
1534 struct loop *loop2 = loop_version (loop1, boolean_true_node, NULL,
1535 invar_branch->probability.invert (),
1536 invar_branch->probability,
1537 profile_probability::always (),
1538 profile_probability::always (),
1539 true);
/* loop_version may return NULL when versioning is not possible; bail out
   without changing the IL in that case.  */
1540 if (!loop2)
1542 free_original_copy_tables ();
1543 return false;
1546 basic_block cond_bb_copy = get_bb_copy (cond_bb);
1547 gcond *cond_copy = as_a<gcond *> (last_stmt (cond_bb_copy));
1549 /* Replace the condition in loop2 with a bool constant to let PassManager
1550 remove the variant branch after current pass completes. */
1551 if (true_invar)
1552 gimple_cond_make_true (cond_copy);
1553 else
1554 gimple_cond_make_false (cond_copy);
1556 update_stmt (cond_copy);
1558 /* Insert a new conditional statement on latch edge of loop1, its condition
1559 is duplicated from the semi-invariant. This statement acts as a switch
1560 to transfer execution from loop1 to loop2, when loop1 enters into
1561 invariant state. */
1562 basic_block latch_bb = split_edge (loop_latch_edge (loop1));
1563 basic_block break_bb = split_edge (single_pred_edge (latch_bb));
1564 gimple *break_cond = gimple_build_cond (gimple_cond_code(cond),
1565 gimple_cond_lhs (cond),
1566 gimple_cond_rhs (cond),
1567 NULL_TREE, NULL_TREE);
1569 gimple_stmt_iterator gsi = gsi_last_bb (break_bb);
1570 gsi_insert_after (&gsi, break_cond, GSI_NEW_STMT);
1572 edge to_loop1 = single_succ_edge (break_bb);
1573 edge to_loop2 = make_edge (break_bb, loop_preheader_edge (loop2)->src, 0);
/* The new edge keeps iterating loop1 while the original condition holds
   its invariant value, and diverts to loop2 once it flips.  */
1575 to_loop1->flags &= ~EDGE_FALLTHRU;
1576 to_loop1->flags |= true_invar ? EDGE_FALSE_VALUE : EDGE_TRUE_VALUE;
1577 to_loop2->flags |= true_invar ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE;
1579 /* Due to introduction of a control flow edge from loop1 latch to loop2
1580 pre-header, we should update PHIs in loop2 to reflect this connection
1581 between loop1 and loop2. */
1582 connect_loop_phis (loop1, loop2, to_loop2);
/* Rescale block profiles of the two loops using the edges that skip the
   invariant branch in each.  NOTE(review): exact scaling semantics live
   in fix_loop_bb_probability — confirm there.  */
1584 edge true_edge, false_edge, skip_edge1, skip_edge2;
1585 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1587 skip_edge1 = true_invar ? false_edge : true_edge;
1588 skip_edge2 = true_invar ? true_edge : false_edge;
1589 fix_loop_bb_probability (loop1, loop2, skip_edge1, skip_edge2);
1591 /* Fix first loop's exit probability after scaling. */
1592 to_loop1->probability = invar_branch->probability.invert ();
1593 to_loop2->probability = invar_branch->probability;
1595 free_original_copy_tables ();
1597 return true;
1600 /* Traverse all conditional statements in LOOP, to find out a good candidate
1601 upon which we can do loop split. */
1603 static bool
1604 split_loop_on_cond (struct loop *loop)
1606 split_info *info = new split_info ();
1607 basic_block *bbs = info->bbs = get_loop_body (loop);
1608 bool do_split = false;
1610 /* Allocate an area to keep temporary info, and associate its address
1611 with loop aux field. */
1612 loop->aux = info;
1614 for (unsigned i = 0; i < loop->num_nodes; i++)
1615 bbs[i]->aux = NULL;
1617 for (unsigned i = 0; i < loop->num_nodes; i++)
1619 basic_block bb = bbs[i];
1621 /* We only consider conditional statement, which be executed at most once
1622 in each iteration of the loop. So skip statements in inner loops. */
1623 if ((bb->loop_father != loop) || (bb->flags & BB_IRREDUCIBLE_LOOP))
1624 continue;
1626 /* Actually this check is not a must constraint. With it, we can ensure
1627 conditional statement will always be executed in each iteration. */
1628 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1629 continue;
1631 gimple *last = last_stmt (bb);
1633 if (!last || gimple_code (last) != GIMPLE_COND)
1634 continue;
1636 gcond *cond = as_a <gcond *> (last);
1637 edge branch_edge = get_cond_branch_to_split_loop (loop, cond);
1639 if (branch_edge)
1641 do_split_loop_on_cond (loop, branch_edge);
1642 do_split = true;
1643 break;
1647 delete info;
1648 loop->aux = NULL;
1650 return do_split;
1653 /* Main entry point. Perform loop splitting on all suitable loops. */
1655 static unsigned int
1656 tree_ssa_split_loops (void)
1658 bool changed = false;
1660 gcc_assert (scev_initialized_p ());
1662 calculate_dominance_info (CDI_POST_DOMINATORS);
1664 for (auto loop : loops_list (cfun, LI_INCLUDE_ROOT))
1665 loop->aux = NULL;
1667 /* Go through all loops starting from innermost. */
1668 for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
1670 if (loop->aux)
1672 /* If any of our inner loops was split, don't split us,
1673 and mark our containing loop as having had splits as well.
1674 This allows for delaying SSA update. */
1675 loop_outer (loop)->aux = loop;
1676 continue;
1679 if (optimize_loop_for_size_p (loop))
1680 continue;
1682 if (split_loop (loop) || split_loop_on_cond (loop))
1684 /* Mark our containing loop as having had some split inner loops. */
1685 loop_outer (loop)->aux = loop;
1686 changed = true;
1690 for (auto loop : loops_list (cfun, LI_INCLUDE_ROOT))
1691 loop->aux = NULL;
1693 clear_aux_for_blocks ();
1695 free_dominance_info (CDI_POST_DOMINATORS);
1697 if (changed)
1699 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1700 return TODO_cleanup_cfg;
1702 return 0;
1705 /* Loop splitting pass. */
1707 namespace {
1709 const pass_data pass_data_loop_split =
1711 GIMPLE_PASS, /* type */
1712 "lsplit", /* name */
1713 OPTGROUP_LOOP, /* optinfo_flags */
1714 TV_LOOP_SPLIT, /* tv_id */
1715 PROP_cfg, /* properties_required */
1716 0, /* properties_provided */
1717 0, /* properties_destroyed */
1718 0, /* todo_flags_start */
1719 0, /* todo_flags_finish */
/* Pass object wiring the loop-splitting transform into the pass
   manager.  */
1722 class pass_loop_split : public gimple_opt_pass
1724 public:
1725 pass_loop_split (gcc::context *ctxt)
1726 : gimple_opt_pass (pass_data_loop_split, ctxt)
1729 /* opt_pass methods: */
/* Run only when loop splitting is enabled (flag_split_loops).  */
1730 bool gate (function *) final override { return flag_split_loops != 0; }
1731 unsigned int execute (function *) final override;
1733 }; // class pass_loop_split
1735 unsigned int
1736 pass_loop_split::execute (function *fun)
1738 if (number_of_loops (fun) <= 1)
1739 return 0;
1741 return tree_ssa_split_loops ();
1744 } // anon namespace
1746 gimple_opt_pass *
1747 make_pass_loop_split (gcc::context *ctxt)
1749 return new pass_loop_split (ctxt);