/* Loop splitting.
   Copyright (C) 2015-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "fold-const.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-ssa-loop-manip.h"
#include "tree-into-ssa.h"
#include "tree-scalar-evolution.h"
#include "gimple-iterator.h"
#include "gimple-pretty-print.h"
#include "gimple-fold.h"
#include "gimplify-me.h"
/* This file implements loop splitting, i.e. transformation of loops like

     for (i = 0; i < 100; i++)
       {
         if (i < 50)
           A;
         else
           B;
       }

   into:

     for (i = 0; i < 50; i++)
       {
         A;
       }
     for (; i < 100; i++)
       {
         B;
       }

   */
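/* Illustrative only (not part of the original sources): a minimal C test
   case of the shape this pass targets, compiled with -fsplit-loops, could
   look like

     void
     f (int *a, int *b, int n, int border)
     {
       for (int i = 0; i < n; i++)
         {
           if (i < border)
             a[i] = 1;
           else
             b[i] = 2;
         }
     }

   assuming BORDER does not change inside the loop.  The guard "i < border"
   compares an induction variable against a loop-invariant bound, which is
   what split_at_bb_p below looks for.  */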
/* Return true when BB inside LOOP is a potential iteration space
   split point, i.e. ends with a condition like "IV < comp", which
   is true on one side of the iteration space and false on the other,
   and the split point can be computed.  If so, also return the border
   point in *BORDER and the comparison induction variable in IV.  */
static tree
split_at_bb_p (struct loop *loop, basic_block bb, tree *border, affine_iv *iv)
{
  gimple *last;
  gcond *stmt;
  affine_iv iv2;

  /* BB must end in a simple conditional jump.  */
  last = last_stmt (bb);
  if (!last || gimple_code (last) != GIMPLE_COND)
    return NULL_TREE;

  stmt = as_a <gcond *> (last);

  enum tree_code code = gimple_cond_code (stmt);
  /* Only handle relational comparisons; for equality and non-equality
     we'd have to split the loop into two loops and a middle statement.  */
  if (code != LT_EXPR && code != LE_EXPR && code != GT_EXPR && code != GE_EXPR)
    return NULL_TREE;

  if (loop_exits_from_bb_p (loop, bb))
    return NULL_TREE;
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  struct loop *useloop = loop_containing_stmt (stmt);

  if (!simple_iv (loop, useloop, op0, iv, false))
    return NULL_TREE;
  if (!simple_iv (loop, useloop, op1, &iv2, false))
    return NULL_TREE;
  /* Make it so that the first argument of the condition is
     the looping one.  */
  if (!integer_zerop (iv2.step))
    {
      std::swap (op0, op1);
      std::swap (*iv, iv2);
      code = swap_tree_comparison (code);
      gimple_cond_set_condition (stmt, code, op0, op1);
      update_stmt (stmt);
    }
  else if (integer_zerop (iv->step))
    return NULL_TREE;
  if (!integer_zerop (iv2.step))
    return NULL_TREE;
  if (!iv->no_overflow)
    return NULL_TREE;
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Found potential split point: ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, " { ");
      print_generic_expr (dump_file, iv->base, TDF_SLIM);
      fprintf (dump_file, " + I*");
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
      fprintf (dump_file, " } %s ", get_tree_code_name (code));
      print_generic_expr (dump_file, iv2.base, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  *border = op1;
  return op0;
}
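/* Illustrative only (an assumed example, not from the original sources):
   for a loop "for (i = 0; i < n; i++) if (i < border) ...;" the guard
   "i < border" is a potential split point, and the dump code above would
   describe it roughly as

     Found potential split point: if (i_1 < border_5)
      { 0 + I*1 } lt_expr border_5

   i.e. an affine induction variable { base + I*step } compared against a
   loop-invariant border.  */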
/* Given a GUARD conditional stmt inside LOOP, which we want to make always
   true or false depending on INITIAL_TRUE, and adjusted values NEXTVAL
   (a post-increment IV) and NEWBOUND (the comparator), adjust the loop
   exit test statement to loop back only if the GUARD statement will
   also be true/false in the next iteration.  */
static void
patch_loop_exit (struct loop *loop, gcond *guard, tree nextval, tree newbound,
		 bool initial_true)
{
  edge exit = single_exit (loop);
  gcond *stmt = as_a <gcond *> (last_stmt (exit->src));

  gimple_cond_set_condition (stmt, gimple_cond_code (guard),
			     nextval, newbound);
  update_stmt (stmt);

  edge stay = EDGE_SUCC (exit->src, EDGE_SUCC (exit->src, 0) == exit);

  exit->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
  stay->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);

  if (initial_true)
    {
      exit->flags |= EDGE_FALSE_VALUE;
      stay->flags |= EDGE_TRUE_VALUE;
    }
  else
    {
      exit->flags |= EDGE_TRUE_VALUE;
      stay->flags |= EDGE_FALSE_VALUE;
    }
}
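/* Illustrative only (assumed SSA names, not from the original sources):
   if the first loop originally ended with "if (i_7 < 100)" and the guard
   inside the body is "if (j_9 < border_5)", then patch_loop_exit rewrites
   the exit test into roughly "if (j_next_11 < newbound_13)", where
   j_next_11 is the value of the guard IV after the increment and
   newbound_13 comes from compute_new_first_bound below; the
   EDGE_TRUE_VALUE/EDGE_FALSE_VALUE flags are then reassigned so the loop
   keeps iterating exactly while the guard stays true (or false).  */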
/* Given an induction variable GUARD_IV, and its affine descriptor IV,
   find the loop phi node in LOOP defining it directly, or create
   such a phi node.  Return that phi node.  */
static gphi *
find_or_create_guard_phi (struct loop *loop, tree guard_iv, affine_iv * /*iv*/)
{
  gimple *def = SSA_NAME_DEF_STMT (guard_iv);
  gphi *phi;
  if ((phi = dyn_cast <gphi *> (def))
      && gimple_bb (phi) == loop->header)
    return phi;

  /* XXX Create the PHI instead.  */
  return NULL;
}
/* Returns true if the exit values of all loop phi nodes can be
   determined easily (i.e. that connect_loop_phis can determine them).  */
static bool
easy_exit_values (struct loop *loop)
{
  edge exit = single_exit (loop);
  edge latch = loop_latch_edge (loop);
  gphi_iterator psi;

  /* Currently we regard the exit values as easy if they are the same
     as the value over the backedge.  Which is the case if the definition
     of the backedge value dominates the exit edge.  */
  for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
    {
      gphi *phi = psi.phi ();
      tree next = PHI_ARG_DEF_FROM_EDGE (phi, latch);
      basic_block bb;
      if (TREE_CODE (next) == SSA_NAME
	  && (bb = gimple_bb (SSA_NAME_DEF_STMT (next)))
	  && !dominated_by_p (CDI_DOMINATORS, exit->src, bb))
	return false;
    }

  return true;
}
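/* Illustrative only (assumed GIMPLE, not from the original sources): the
   "easy" case above covers reductions such as

     # sum_1 = PHI <0(preheader), sum_2(latch)>
     ...
     sum_2 = sum_1 + x_3;   <- defined in a block dominating the exit edge

   Here the value of sum at the loop exit equals the backedge value sum_2,
   so connect_loop_phis can simply reuse it when wiring the second loop.  */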
/* This function updates the SSA form after connect_loops made a new
   edge NEW_E leading from LOOP1 exit to LOOP2 (via an intermediate
   conditional).  I.e. the second loop can now be entered either
   via the original entry or via NEW_E, so the entry values of LOOP2
   phi nodes are either the original ones or those at the exit
   of LOOP1.  Insert new phi nodes in LOOP2 pre-header reflecting
   this.  The loops need to fulfill easy_exit_values ().  */
static void
connect_loop_phis (struct loop *loop1, struct loop *loop2, edge new_e)
{
  basic_block rest = loop_preheader_edge (loop2)->src;
  gcc_assert (new_e->dest == rest);
  edge skip_first = EDGE_PRED (rest, EDGE_PRED (rest, 0) == new_e);

  edge firste = loop_preheader_edge (loop1);
  edge seconde = loop_preheader_edge (loop2);
  edge firstn = loop_latch_edge (loop1);
  gphi_iterator psi_first, psi_second;
  for (psi_first = gsi_start_phis (loop1->header),
       psi_second = gsi_start_phis (loop2->header);
       !gsi_end_p (psi_first);
       gsi_next (&psi_first), gsi_next (&psi_second))
    {
      tree init, next, new_init;
      use_operand_p op;
      gphi *phi_first = psi_first.phi ();
      gphi *phi_second = psi_second.phi ();

      init = PHI_ARG_DEF_FROM_EDGE (phi_first, firste);
      next = PHI_ARG_DEF_FROM_EDGE (phi_first, firstn);
      op = PHI_ARG_DEF_PTR_FROM_EDGE (phi_second, seconde);
      gcc_assert (operand_equal_for_phi_arg_p (init, USE_FROM_PTR (op)));
      /* Prefer using the original variable as a base for the new ssa name.
	 This is necessary for virtual ops, and useful in order to avoid
	 losing debug info for real ops.  */
      if (TREE_CODE (next) == SSA_NAME
	  && useless_type_conversion_p (TREE_TYPE (next),
					TREE_TYPE (init)))
	new_init = copy_ssa_name (next);
      else if (TREE_CODE (init) == SSA_NAME
	       && useless_type_conversion_p (TREE_TYPE (init),
					     TREE_TYPE (next)))
	new_init = copy_ssa_name (init);
      else if (useless_type_conversion_p (TREE_TYPE (next),
					  TREE_TYPE (init)))
	new_init = make_temp_ssa_name (TREE_TYPE (next), NULL,
				       "unrinittmp");
      else
	new_init = make_temp_ssa_name (TREE_TYPE (init), NULL,
				       "unrinittmp");
      gphi *newphi = create_phi_node (new_init, rest);
      add_phi_arg (newphi, init, skip_first, UNKNOWN_LOCATION);
      add_phi_arg (newphi, next, new_e, UNKNOWN_LOCATION);
      SET_USE (op, new_init);
    }
}
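/* Illustrative only (assumed SSA names, not from the original sources):
   if LOOP1's header has

     i_1 = PHI <0(preheader), i_next_2(latch)>

   then the code above inserts into the block between the two loops

     i_3 = PHI <0(skip_first), i_next_2(new_e)>

   and rewires LOOP2's header phi to take i_3 on its entry edge, so LOOP2
   starts either from the original initial value (when LOOP1 was skipped)
   or from wherever LOOP1 stopped.  */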
/* The two loops LOOP1 and LOOP2 were just created by loop versioning,
   they are still equivalent and placed in two arms of a diamond, like so
   (simplified CFG sketch; pre1/pre2 are the preheaders and ex1/ex2 the
   exit blocks of the two loop copies):

	 .------if (cond)------.
	 v                     v
       pre1                   pre2
	 |                     |
       loop1                 loop2
	 |                     |
	ex1                   ex2
	 |                     |
	 '---------.-----------'
		   |
		  join

   This function transforms the program such that LOOP1 is conditionally
   falling through to LOOP2, or skipping it.  This is done by splitting
   the ex1->join edge, inserting there a new block (skip) that holds a
   condition whose one arm goes to pre2, resulting in this situation:

	 .------if (cond)------.
	 |                     |
       pre1        .-------->pre2
	 |         |           |
       loop1       |         loop2
	 |         |           |
	ex1----->skip         ex2
		   |           |
		   '--->join<--'

   The condition used is the exit condition of LOOP1, which effectively means
   that when the first loop exits (for whatever reason) but the real original
   exit expression is still false the second loop will be entered.
   The function returns the new edge cond->pre2.

   This doesn't update the SSA form, see connect_loop_phis for that.  */
static edge
connect_loops (struct loop *loop1, struct loop *loop2)
{
  edge exit = single_exit (loop1);
  basic_block skip_bb = split_edge (exit);
  gcond *skip_stmt;
  gimple_stmt_iterator gsi;
  edge new_e, skip_e;

  gimple *stmt = last_stmt (exit->src);
  skip_stmt = gimple_build_cond (gimple_cond_code (stmt),
				 gimple_cond_lhs (stmt),
				 gimple_cond_rhs (stmt),
				 NULL_TREE, NULL_TREE);
  gsi = gsi_last_bb (skip_bb);
  gsi_insert_after (&gsi, skip_stmt, GSI_NEW_STMT);

  skip_e = EDGE_SUCC (skip_bb, 0);
  skip_e->flags &= ~EDGE_FALLTHRU;
  new_e = make_edge (skip_bb, loop_preheader_edge (loop2)->src, 0);
  if (exit->flags & EDGE_TRUE_VALUE)
    {
      skip_e->flags |= EDGE_TRUE_VALUE;
      new_e->flags |= EDGE_FALSE_VALUE;
    }
  else
    {
      skip_e->flags |= EDGE_FALSE_VALUE;
      new_e->flags |= EDGE_TRUE_VALUE;
    }

  new_e->probability = profile_probability::likely ();
  skip_e->probability = new_e->probability.invert ();

  return new_e;
}
/* This returns the new bound for iterations given the original iteration
   space in NITER, an arbitrary new bound BORDER, assumed to be some
   comparison value with a different IV, the initial value GUARD_INIT of
   that other IV, and the comparison code GUARD_CODE that compares
   that other IV with BORDER.  We return an SSA name, and place any
   necessary statements for that computation into *STMTS.

   For example for such a loop:

     for (i = beg, j = guard_init; i < end; i++, j++)
       if (j < border)  // this is supposed to be true/false
         ...

   we want to return a new bound (on j) that makes the loop iterate
   as long as the condition j < border stays true.  We also don't want
   to iterate more often than the original loop, so we have to introduce
   some cut-off as well (via min/max), effectively resulting in:

     newend = min (end + guard_init - beg, border)
     for (i = beg, j = guard_init; j < newend; i++, j++)
       ...

   Depending on the direction of the IVs and whether the exit tests
   are strict or non-strict we need to use MIN or MAX,
   and add or subtract 1.  This routine computes newend above.  */
static tree
compute_new_first_bound (gimple_seq *stmts, struct tree_niter_desc *niter,
			 tree border,
			 enum tree_code guard_code, tree guard_init)
{
  /* The niter structure contains the after-increment IV, we need
     the loop-enter base, so subtract STEP once.  */
  tree controlbase = force_gimple_operand (niter->control.base,
					   stmts, true, NULL_TREE);
  tree controlstep = niter->control.step;
  tree enddiff;
  if (POINTER_TYPE_P (TREE_TYPE (controlbase)))
    {
      controlstep = gimple_build (stmts, NEGATE_EXPR,
				  TREE_TYPE (controlstep), controlstep);
      enddiff = gimple_build (stmts, POINTER_PLUS_EXPR,
			      TREE_TYPE (controlbase),
			      controlbase, controlstep);
    }
  else
    enddiff = gimple_build (stmts, MINUS_EXPR,
			    TREE_TYPE (controlbase),
			    controlbase, controlstep);
  /* Compute end-beg.  */
  gimple_seq stmts2;
  tree end = force_gimple_operand (niter->bound, &stmts2,
				   true, NULL_TREE);
  gimple_seq_add_seq_without_update (stmts, stmts2);
  if (POINTER_TYPE_P (TREE_TYPE (enddiff)))
    {
      tree tem = gimple_convert (stmts, sizetype, enddiff);
      tem = gimple_build (stmts, NEGATE_EXPR, sizetype, tem);
      enddiff = gimple_build (stmts, POINTER_PLUS_EXPR,
			      TREE_TYPE (enddiff),
			      end, tem);
    }
  else
    enddiff = gimple_build (stmts, MINUS_EXPR, TREE_TYPE (enddiff),
			    end, enddiff);
  /* Compute guard_init + (end-beg).  */
  tree newbound;
  enddiff = gimple_convert (stmts, TREE_TYPE (guard_init), enddiff);
  if (POINTER_TYPE_P (TREE_TYPE (guard_init)))
    {
      enddiff = gimple_convert (stmts, sizetype, enddiff);
      newbound = gimple_build (stmts, POINTER_PLUS_EXPR,
			       TREE_TYPE (guard_init),
			       guard_init, enddiff);
    }
  else
    newbound = gimple_build (stmts, PLUS_EXPR, TREE_TYPE (guard_init),
			     guard_init, enddiff);
  /* Depending on the direction of the IVs the new bound for the first
     loop is the minimum or maximum of old bound and border.
     Also, if the guard condition isn't strictly less or greater,
     we need to adjust the bound.  */
  int addbound = 0;
  enum tree_code minmax;
  if (niter->cmp == LT_EXPR)
    {
      /* GT and LE are the same, inverted.  */
      if (guard_code == GT_EXPR || guard_code == LE_EXPR)
	addbound = -1;
      minmax = MIN_EXPR;
    }
  else
    {
      gcc_assert (niter->cmp == GT_EXPR);
      if (guard_code == GE_EXPR || guard_code == LT_EXPR)
	addbound = 1;
      minmax = MAX_EXPR;
    }

  if (addbound)
    {
      tree type2 = TREE_TYPE (newbound);
      if (POINTER_TYPE_P (type2))
	type2 = sizetype;
      newbound = gimple_build (stmts,
			       POINTER_TYPE_P (TREE_TYPE (newbound))
			       ? POINTER_PLUS_EXPR : PLUS_EXPR,
			       TREE_TYPE (newbound),
			       newbound,
			       build_int_cst (type2, addbound));
    }

  tree newend = gimple_build (stmts, minmax, TREE_TYPE (border),
			      newbound, border);
  return newend;
}
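/* Worked example (illustrative only, values assumed): for

     for (i = 0, j = 10; i < 100; i++, j++)
       if (j < 60)
         ...

   we have beg = 0, end = 100, guard_init = 10, border = 60 and
   guard_code = LT_EXPR, so NITER->cmp == LT_EXPR, minmax = MIN_EXPR and
   addbound = 0.  Then

     newbound = guard_init + (end - beg) = 10 + 100 = 110
     newend   = MIN (110, 60) = 60

   and the first loop is made to run while j < 60, i.e. for i = 0..49,
   leaving i = 50..99 for the second loop.  */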
/* Checks if LOOP contains a conditional block whose condition
   depends on which side of the iteration space it is in, and if so
   splits the iteration space into two loops.  Returns true if the
   loop was split.  NITER must contain the iteration descriptor for the
   single exit of LOOP.  */
static bool
split_loop (struct loop *loop1, struct tree_niter_desc *niter)
{
  basic_block *bbs;
  unsigned i;
  bool changed = false;
  tree guard_iv;
  tree border = NULL_TREE;
  affine_iv iv;

  bbs = get_loop_body (loop1);

  /* Find a splitting opportunity.  */
  for (i = 0; i < loop1->num_nodes; i++)
    if ((guard_iv = split_at_bb_p (loop1, bbs[i], &border, &iv)))
      {
	/* Handling opposite steps is not implemented yet.  Neither
	   is handling different step sizes.  */
	if ((tree_int_cst_sign_bit (iv.step)
	     != tree_int_cst_sign_bit (niter->control.step))
	    || !tree_int_cst_equal (iv.step, niter->control.step))
	  continue;

	/* Find a loop PHI node that defines guard_iv directly,
	   or create one doing that.  */
	gphi *phi = find_or_create_guard_phi (loop1, guard_iv, &iv);
	if (!phi)
	  continue;
	gcond *guard_stmt = as_a <gcond *> (last_stmt (bbs[i]));
	tree guard_init = PHI_ARG_DEF_FROM_EDGE (phi,
						 loop_preheader_edge (loop1));
	enum tree_code guard_code = gimple_cond_code (guard_stmt);
	/* Loop splitting is implemented by versioning the loop, placing
	   the new loop after the old loop, making the first loop iterate
	   as long as the conditional stays true (or false) and letting the
	   second (new) loop handle the rest of the iterations.

	   First we need to determine if the condition will start being true
	   or false in the first loop.  */
	bool initial_true;
	switch (guard_code)
	  {
	    case LT_EXPR:
	    case LE_EXPR:
	      initial_true = !tree_int_cst_sign_bit (iv.step);
	      break;
	    case GT_EXPR:
	    case GE_EXPR:
	      initial_true = tree_int_cst_sign_bit (iv.step);
	      break;
	    default:
	      gcc_unreachable ();
	  }
	/* Build a condition that will skip the first loop when the
	   guard condition won't ever be true (or false).  */
	gimple_seq stmts2;
	border = force_gimple_operand (border, &stmts2, true, NULL_TREE);
	if (stmts2)
	  gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop1),
					    stmts2);
	tree cond = build2 (guard_code, boolean_type_node, guard_init, border);
	if (!initial_true)
	  cond = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, cond);
	/* Now version the loop, placing loop2 after loop1, connecting
	   them, and fix up the SSA form for that.  */
	initialize_original_copy_tables ();
	basic_block cond_bb;

	struct loop *loop2 = loop_version (loop1, cond, &cond_bb,
					   profile_probability::always (),
					   profile_probability::always (),
					   profile_probability::always (),
					   profile_probability::always (),
					   true);
	update_ssa (TODO_update_ssa);

	edge new_e = connect_loops (loop1, loop2);
	connect_loop_phis (loop1, loop2, new_e);
	/* The iterations of the second loop are now already
	   exactly those that the first loop didn't do, but the
	   iteration space of the first loop is still the original one.
	   Compute the new bound for the guarding IV and patch the
	   loop exit to use it instead of original IV and bound.  */
	gimple_seq stmts = NULL;
	tree newend = compute_new_first_bound (&stmts, niter, border,
					       guard_code, guard_init);
	if (stmts)
	  gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop1),
					    stmts);
	tree guard_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop1));
	patch_loop_exit (loop1, guard_stmt, guard_next, newend, initial_true);
	/* Finally patch out the two copies of the condition to be always
	   true/false (or opposite).  */
	gcond *force_true = as_a <gcond *> (last_stmt (bbs[i]));
	gcond *force_false = as_a <gcond *> (last_stmt (get_bb_copy (bbs[i])));
	if (!initial_true)
	  std::swap (force_true, force_false);
	gimple_cond_make_true (force_true);
	gimple_cond_make_false (force_false);
	update_stmt (force_true);
	update_stmt (force_false);

	free_original_copy_tables ();

	/* We destroyed LCSSA form above.  Eventually we might be able
	   to fix it on the fly, for now simply punt and use the helper.  */
	rewrite_into_loop_closed_ssa_1 (NULL, 0, SSA_OP_USE, loop1);

	changed = true;
	if (dump_file && (dump_flags & TDF_DETAILS))
	  fprintf (dump_file, ";; Loop split.\n");
	/* Only deal with the first opportunity.  */
	break;
      }

  free (bbs);
  return changed;
}
/* Main entry point.  Perform loop splitting on all suitable loops.  */

static unsigned int
tree_ssa_split_loops (void)
{
  struct loop *loop;
  bool changed = false;

  gcc_assert (scev_initialized_p ());
  FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
    loop->aux = NULL;

  /* Go through all loops starting from innermost.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      struct tree_niter_desc niter;

      if (loop->aux)
	{
	  /* If any of our inner loops was split, don't split us,
	     and mark our containing loop as having had splits as well.  */
	  loop_outer (loop)->aux = loop;
	  continue;
	}
      if (single_exit (loop)
	  /* ??? We could handle non-empty latches when we split
	     the latch edge (not the exit edge), and put the new
	     exit condition in the new block.  OTOH this executes some
	     code unconditionally that might have been skipped by the
	     original exit before.  */
	  && empty_block_p (loop->latch)
	  && !optimize_loop_for_size_p (loop)
	  && easy_exit_values (loop)
	  && number_of_iterations_exit (loop, single_exit (loop), &niter,
					false, true)
	  && niter.cmp != ERROR_MARK
	  /* We can't yet handle loops controlled by a != predicate.  */
	  && niter.cmp != NE_EXPR)
	{
	  if (split_loop (loop, &niter))
	    {
	      /* Mark our containing loop as having had some split inner
		 loops.  */
	      loop_outer (loop)->aux = loop;
	      changed = true;
	    }
	}
    }

  FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
    loop->aux = NULL;

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}
/* Loop splitting pass.  */

namespace {

const pass_data pass_data_loop_split =
{
  GIMPLE_PASS, /* type */
  "lsplit", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_LOOP_SPLIT, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_loop_split : public gimple_opt_pass
{
public:
  pass_loop_split (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_loop_split, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_split_loops != 0; }
  virtual unsigned int execute (function *);

}; // class pass_loop_split
unsigned int
pass_loop_split::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return tree_ssa_split_loops ();
}

} // anon namespace

gimple_opt_pass *
make_pass_loop_split (gcc::context *ctxt)
{
  return new pass_loop_split (ctxt);
}
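/* Illustrative note (not part of the original sources): the factory above
   is what the pass manager calls to instantiate the pass; in the GCC
   sources the pass is scheduled from passes.def, roughly as

     NEXT_PASS (pass_loop_split);

   inside the loop optimization sub-pipeline, and it only does work when
   -fsplit-loops (flag_split_loops) is enabled, as the gate above shows.  */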