/* Loop unrolling.
   Copyright (C) 2002-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
/* This pass performs loop unrolling.  We only perform this
   optimization on innermost loops (with a single exception) because
   the impact on performance is greatest there, and we want to avoid
   unnecessary code size growth.  The gain comes from greater sequentiality
   of code, better opportunities for later optimization passes and, in some
   cases, fewer tests of exit conditions.  The main problem is code growth,
   which impacts performance negatively due to cache effects.

   What we do:

   -- unrolling of loops that roll a constant number of times; this is almost
      always a win, as we get rid of the exit condition tests.
   -- unrolling of loops whose number of iterations can be computed at
      runtime; we also get rid of the exit condition tests here, but there
      is the extra expense of calculating the number of iterations.
   -- simple unrolling of the remaining loops; this is performed only if we
      are asked to, as the gain is questionable in this case and it may
      even slow down the code.

   For more detailed descriptions of each of those, see the comments at the
   appropriate functions below.

   There are a number of parameters (defined and described in params.def)
   that control how much we unroll.

   ??? A great problem is that we don't have a good way to determine how
   many times we should unroll a loop; the experiments I have made showed
   that this choice may affect performance on the order of several %.  */
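/* For illustration only (not part of the implementation): unrolling a loop
   that provably rolls a constant number of times trades code size for fewer
   exit-condition tests.  Assuming a simple counted loop and an unroll factor
   of four, the source-level effect is roughly

     for (i = 0; i < 8; i++)        for (i = 0; i < 8; i += 4)
       body (i);               ==>    {
                                        body (i);     body (i + 1);
                                        body (i + 2); body (i + 3);
                                      }

   so the exit condition is evaluated once per four copies of the body
   instead of once per copy.  */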
/* Information about induction variables to split.  */

struct iv_to_split
{
  rtx_insn *insn;      /* The insn in which the induction variable occurs.  */
  rtx orig_var;        /* The variable (register) for the IV before split.  */
  rtx base_var;        /* The variable on which the values in the further
                          iterations are based.  */
  rtx step;            /* Step of the induction variable.  */
  struct iv_to_split *next;  /* Next entry in walking order.  */
};
/* Information about accumulators to expand.  */

struct var_to_expand
{
  rtx_insn *insn;      /* The insn in which the variable expansion occurs.  */
  rtx reg;             /* The accumulator which is expanded.  */
  vec<rtx> var_expansions;   /* The copies of the accumulator which is
                                expanded.  */
  struct var_to_expand *next;  /* Next entry in walking order.  */
  enum rtx_code op;    /* The type of the accumulation - addition, subtraction
                          or multiplication.  */
  int expansion_count; /* Count the number of expansions generated so far.  */
  int reuse_expansion; /* The expansion we intend to reuse to expand
                          the accumulator.  If REUSE_EXPANSION is 0 reuse
                          the original accumulator.  Else use
                          var_expansions[REUSE_EXPANSION - 1].  */
};
/* Hashtable helper for iv_to_split.  */

struct iv_split_hasher : free_ptr_hash <iv_to_split>
{
  static inline hashval_t hash (const iv_to_split *);
  static inline bool equal (const iv_to_split *, const iv_to_split *);
};

/* A hash function for information about insns to split.  */

inline hashval_t
iv_split_hasher::hash (const iv_to_split *ivts)
{
  return (hashval_t) INSN_UID (ivts->insn);
}

/* An equality function for information about insns to split.  */

inline bool
iv_split_hasher::equal (const iv_to_split *i1, const iv_to_split *i2)
{
  return i1->insn == i2->insn;
}

/* Hashtable helper for var_to_expand.  */

struct var_expand_hasher : free_ptr_hash <var_to_expand>
{
  static inline hashval_t hash (const var_to_expand *);
  static inline bool equal (const var_to_expand *, const var_to_expand *);
};

/* Return a hash for VES.  */

inline hashval_t
var_expand_hasher::hash (const var_to_expand *ves)
{
  return (hashval_t) INSN_UID (ves->insn);
}

/* Return true if I1 and I2 refer to the same instruction.  */

inline bool
var_expand_hasher::equal (const var_to_expand *i1, const var_to_expand *i2)
{
  return i1->insn == i2->insn;
}
/* Information about optimization applied in
   the unrolled loop.  */

struct opt_info
{
  hash_table<iv_split_hasher> *insns_to_split;  /* A hashtable of insns to
                                                   split.  */
  struct iv_to_split *iv_to_split_head;  /* The first iv to split.  */
  struct iv_to_split **iv_to_split_tail;  /* Pointer to the tail of the list.  */
  hash_table<var_expand_hasher> *insns_with_var_to_expand;  /* A hashtable of
                                        insns with accumulators to expand.  */
  struct var_to_expand *var_to_expand_head;  /* The first var to expand.  */
  struct var_to_expand **var_to_expand_tail;  /* Pointer to the tail of the list.  */
  unsigned first_new_block;     /* The first basic block that was
                                   duplicated.  */
  basic_block loop_exit;        /* The loop exit basic block.  */
  basic_block loop_preheader;   /* The loop preheader basic block.  */
};
static void decide_unroll_stupid (class loop *, int);
static void decide_unroll_constant_iterations (class loop *, int);
static void decide_unroll_runtime_iterations (class loop *, int);
static void unroll_loop_stupid (class loop *);
static void decide_unrolling (int);
static void unroll_loop_constant_iterations (class loop *);
static void unroll_loop_runtime_iterations (class loop *);
static struct opt_info *analyze_insns_in_loop (class loop *);
static void opt_info_start_duplication (struct opt_info *);
static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
static void free_opt_info (struct opt_info *);
static struct var_to_expand *analyze_insn_to_expand_var (class loop *, rtx_insn *);
static bool referenced_in_one_insn_in_loop_p (class loop *, rtx, int *);
static struct iv_to_split *analyze_iv_to_split_insn (rtx_insn *);
static void expand_var_during_unrolling (struct var_to_expand *, rtx_insn *);
static void insert_var_expansion_initialization (struct var_to_expand *,
                                                 basic_block);
static void combine_var_copies_in_loop_exit (struct var_to_expand *,
                                             basic_block);
static rtx get_expansion (struct var_to_expand *);
/* Emit a message summarizing the unroll that will be
   performed for LOOP, along with the loop's location LOCUS, if
   appropriate given the dump or -fopt-info settings.  */

static void
report_unroll (class loop *loop, dump_location_t locus)
{
  dump_flags_t report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS;

  if (loop->lpt_decision.decision == LPT_NONE)
    return;

  if (!dump_enabled_p ())
    return;

  dump_metadata_t metadata (report_flags, locus.get_impl_location ());
  dump_printf_loc (metadata, locus.get_user_location (),
                   "loop unrolled %d times",
                   loop->lpt_decision.times);
  if (profile_info && loop->header->count.initialized_p ())
    dump_printf (metadata,
                 " (header execution count %d)",
                 (int)loop->header->count.to_gcov_type ());

  dump_printf (metadata, "\n");
}
/* Decide whether to unroll loops and how much.  */

static void
decide_unrolling (int flags)
{
  /* Scan the loops, inner ones first.  */
  for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
    {
      loop->lpt_decision.decision = LPT_NONE;
      dump_user_location_t locus = get_loop_location (loop);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, locus,
                         "considering unrolling loop %d at BB %d\n",
                         loop->num, loop->header->index);

      if (loop->unroll == 1)
        {
          if (dump_file)
            fprintf (dump_file,
                     ";; Not unrolling loop, user didn't want it unrolled\n");
          continue;
        }

      /* Do not peel cold areas.  */
      if (optimize_loop_for_size_p (loop))
        {
          if (dump_file)
            fprintf (dump_file, ";; Not considering loop, cold area\n");
          continue;
        }

      /* Can the loop be manipulated?  */
      if (!can_duplicate_loop_p (loop))
        {
          if (dump_file)
            fprintf (dump_file,
                     ";; Not considering loop, cannot duplicate\n");
          continue;
        }

      /* Skip non-innermost loops.  */
      if (loop->inner)
        {
          if (dump_file)
            fprintf (dump_file, ";; Not considering loop, is not innermost\n");
          continue;
        }

      loop->ninsns = num_loop_insns (loop);
      loop->av_ninsns = average_num_loop_insns (loop);

      /* Try transformations one by one in decreasing order of priority.  */
      decide_unroll_constant_iterations (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
        decide_unroll_runtime_iterations (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
        decide_unroll_stupid (loop, flags);

      report_unroll (loop, locus);
    }
}
/* Unroll LOOPs.  */

unsigned int
unroll_loops (int flags)
{
  bool changed = false;

  /* Now decide rest of unrolling.  */
  decide_unrolling (flags);

  /* Scan the loops, inner ones first.  */
  for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
    {
      /* And perform the appropriate transformations.  */
      switch (loop->lpt_decision.decision)
        {
        case LPT_UNROLL_CONSTANT:
          unroll_loop_constant_iterations (loop);
          changed = true;
          break;
        case LPT_UNROLL_RUNTIME:
          unroll_loop_runtime_iterations (loop);
          changed = true;
          break;
        case LPT_UNROLL_STUPID:
          unroll_loop_stupid (loop);
          changed = true;
          break;
        case LPT_NONE:
          break;
        default:
          gcc_unreachable ();
        }
    }

  if (changed)
    {
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
    }

  return changed ? TODO_cleanup_cfg : 0;
}
/* Check whether exit of the LOOP is at the end of loop body.  */

static bool
loop_exit_at_end_p (class loop *loop)
{
  class niter_desc *desc = get_simple_loop_desc (loop);
  rtx_insn *insn;

  /* We should never have a conditional in the latch block.  */
  gcc_assert (desc->in_edge->dest != loop->header);

  if (desc->in_edge->dest != loop->latch)
    return false;

  /* Check that the latch is empty.  */
  FOR_BB_INSNS (loop->latch, insn)
    if (INSN_P (insn) && active_insn_p (insn))
      return false;

  return true;
}
/* Decide whether to unroll LOOP iterating a constant number of times
   and how much.  */

static void
decide_unroll_constant_iterations (class loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
  class niter_desc *desc;
  widest_int iterations;

  /* If we were not asked to unroll this loop, just return silently.  */
  if (!(flags & UAP_UNROLL) && !loop->unroll)
    return;

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
                 "considering unrolling loop with constant "
                 "number of iterations\n");

  /* nunroll = total number of copies of the original loop body in
     unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
  nunroll = param_max_unrolled_insns / loop->ninsns;
  nunroll_by_av
    = param_max_average_unrolled_insns / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) param_max_unroll_times)
    nunroll = param_max_unroll_times;

  if (targetm.loop_unroll_adjust)
    nunroll = targetm.loop_unroll_adjust (nunroll, loop);

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check number of iterations.  */
  if (!desc->simple_p || !desc->const_iter || desc->assumptions)
    {
      if (dump_file)
        fprintf (dump_file,
                 ";; Unable to prove that the loop iterates constant times\n");
      return;
    }

  /* Check for an explicit unrolling factor.  */
  if (loop->unroll > 0 && loop->unroll < USHRT_MAX)
    {
      /* However we cannot unroll completely at the RTL level a loop with
         constant number of iterations; it should have been peeled instead.  */
      if (desc->niter == 0 || (unsigned) loop->unroll > desc->niter - 1)
        {
          if (dump_file)
            fprintf (dump_file, ";; Loop should have been peeled\n");
        }
      else
        {
          loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
          loop->lpt_decision.times = loop->unroll - 1;
        }
      return;
    }

  /* Check whether the loop rolls enough to consider.
     Consult also loop bounds and profile; in case the loop has more
     than one exit it may well loop less than the determined maximal number
     of iterations.  */
  if (desc->niter < 2 * nunroll
      || ((get_estimated_loop_iterations (loop, &iterations)
           || get_likely_max_loop_iterations (loop, &iterations))
          && wi::ltu_p (iterations, 2 * nunroll)))
    {
      if (dump_file)
        fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success; now compute the number of iterations to unroll.  We alter
     nunroll so that as few copies of the loop body as possible are
     necessary, while still not decreasing the number of unrollings
     too much (at most by 1).  */
  best_copies = 2 * nunroll + 10;

  i = 2 * nunroll + 2;
  if (i > desc->niter - 2)
    i = desc->niter - 2;

  for (; i >= nunroll - 1; i--)
    {
      unsigned exit_mod = desc->niter % (i + 1);

      if (!loop_exit_at_end_p (loop))
        n_copies = exit_mod + i + 1;
      else if (exit_mod != (unsigned) i
               || desc->noloop_assumptions != NULL_RTX)
        n_copies = exit_mod + i + 2;
      else
        n_copies = i + 1;

      if (n_copies < best_copies)
        {
          best_copies = n_copies;
          best_unroll = i;
        }
    }

  loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
  loop->lpt_decision.times = best_unroll;
}
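/* A worked example of the selection above (numbers assumed for
   illustration): with desc->niter == 102 and nunroll == 4, candidate
   factors i + 1 from 11 down to 4 are tried.  When the exit is not at the
   end of the body, the cost is exit_mod + i + 1; i == 7 (eight copies)
   costs 102 % 8 + 7 + 1 == 14 body copies in total, whereas i == 5 (six
   copies) costs 102 % 6 + 5 + 1 == 6 because 102 divides evenly, so the
   six-copy factor is chosen.  */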
/* Unroll LOOP with constant number of iterations LOOP->LPT_DECISION.TIMES times.
   The transformation does this:

   for (i = 0; i < 102; i++)
     body;

   ==> (LOOP->LPT_DECISION.TIMES == 3)

   i = 0;
   body; i++;
   body; i++;
   while (i < 102)
     {
       body; i++;
       body; i++;
       body; i++;
       body; i++;
     }
  */
static void
unroll_loop_constant_iterations (class loop *loop)
{
  unsigned HOST_WIDE_INT niter;
  unsigned exit_mod;
  unsigned i;
  edge e;
  unsigned max_unroll = loop->lpt_decision.times;
  class niter_desc *desc = get_simple_loop_desc (loop);
  bool exit_at_end = loop_exit_at_end_p (loop);
  struct opt_info *opt_info = NULL;
  bool ok;
  bool flat = maybe_flat_loop_profile (loop);
  profile_count orig_exit_count = desc->out_edge->count ();

  niter = desc->niter;

  /* Should not get here (such loop should be peeled instead).  */
  gcc_assert (niter > max_unroll + 1);

  exit_mod = niter % (max_unroll + 1);

  auto_sbitmap wont_exit (max_unroll + 2);
  bitmap_ones (wont_exit);

  auto_vec<edge> remove_edges;
  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  if (!exit_at_end)
    {
      /* The exit is not at the end of the loop; leave exit test
         in the first copy, so that the loops that start with test
         of exit condition have continuous body after unrolling.  */

      if (dump_file)
        fprintf (dump_file, ";; Condition at beginning of loop.\n");

      /* Peel exit_mod iterations.  */
      bitmap_clear_bit (wont_exit, 0);
      if (desc->noloop_assumptions)
        bitmap_clear_bit (wont_exit, 1);

      if (exit_mod)
        {
          opt_info_start_duplication (opt_info);
          ok = duplicate_loop_body_to_header_edge (
            loop, loop_preheader_edge (loop), exit_mod, wont_exit,
            desc->out_edge, &remove_edges,
            DLTHE_FLAG_UPDATE_FREQ
              | (opt_info && exit_mod > 1 ? DLTHE_RECORD_COPY_NUMBER : 0));
          gcc_assert (ok);

          if (opt_info && exit_mod > 1)
            apply_opt_in_copies (opt_info, exit_mod, false, false);

          desc->noloop_assumptions = NULL_RTX;
          desc->niter -= exit_mod;
          loop->nb_iterations_upper_bound -= exit_mod;
          if (loop->any_estimate
              && wi::leu_p (exit_mod, loop->nb_iterations_estimate))
            loop->nb_iterations_estimate -= exit_mod;
          else
            loop->any_estimate = false;
          if (loop->any_likely_upper_bound
              && wi::leu_p (exit_mod, loop->nb_iterations_likely_upper_bound))
            loop->nb_iterations_likely_upper_bound -= exit_mod;
          else
            loop->any_likely_upper_bound = false;
        }

      bitmap_set_bit (wont_exit, 1);
    }
  else
    {
      /* Leave exit test in last copy, for the same reason as above if
         the loop tests the condition at the end of loop body.  */

      if (dump_file)
        fprintf (dump_file, ";; Condition at end of loop.\n");

      /* We know that niter >= max_unroll + 2; so we do not need to care of
         case when we would exit before reaching the loop.  So just peel
         exit_mod + 1 iterations.  */
      if (exit_mod != max_unroll
          || desc->noloop_assumptions)
        {
          bitmap_clear_bit (wont_exit, 0);
          if (desc->noloop_assumptions)
            bitmap_clear_bit (wont_exit, 1);

          opt_info_start_duplication (opt_info);
          ok = duplicate_loop_body_to_header_edge (
            loop, loop_preheader_edge (loop), exit_mod + 1, wont_exit,
            desc->out_edge, &remove_edges,
            DLTHE_FLAG_UPDATE_FREQ
              | (opt_info && exit_mod > 0 ? DLTHE_RECORD_COPY_NUMBER : 0));
          gcc_assert (ok);

          if (opt_info && exit_mod > 0)
            apply_opt_in_copies (opt_info, exit_mod + 1, false, false);

          desc->niter -= exit_mod + 1;
          loop->nb_iterations_upper_bound -= exit_mod + 1;
          if (loop->any_estimate
              && wi::leu_p (exit_mod + 1, loop->nb_iterations_estimate))
            loop->nb_iterations_estimate -= exit_mod + 1;
          else
            loop->any_estimate = false;
          if (loop->any_likely_upper_bound
              && wi::leu_p (exit_mod + 1, loop->nb_iterations_likely_upper_bound))
            loop->nb_iterations_likely_upper_bound -= exit_mod + 1;
          else
            loop->any_likely_upper_bound = false;
          desc->noloop_assumptions = NULL_RTX;

          bitmap_set_bit (wont_exit, 0);
          bitmap_set_bit (wont_exit, 1);
        }

      bitmap_clear_bit (wont_exit, max_unroll);
    }

  /* Now unroll the loop.  */
  opt_info_start_duplication (opt_info);
  ok = duplicate_loop_body_to_header_edge (
    loop, loop_latch_edge (loop), max_unroll, wont_exit, desc->out_edge,
    &remove_edges,
    DLTHE_FLAG_UPDATE_FREQ | (opt_info ? DLTHE_RECORD_COPY_NUMBER : 0)
      | (flat ? DLTHE_FLAG_FLAT_PROFILE : 0));
  gcc_assert (ok);

  edge exit = update_loop_exit_probability_scale_dom_bbs
                (loop, desc->out_edge, orig_exit_count);
  if (exit)
    update_br_prob_note (exit->src);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, max_unroll, true, true);
      free_opt_info (opt_info);
    }

  if (exit_at_end)
    {
      basic_block exit_block = get_bb_copy (desc->in_edge->src);
      /* Find a new in and out edge; they are in the last copy we have made.  */

      if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
        {
          desc->out_edge = EDGE_SUCC (exit_block, 0);
          desc->in_edge = EDGE_SUCC (exit_block, 1);
        }
      else
        {
          desc->out_edge = EDGE_SUCC (exit_block, 1);
          desc->in_edge = EDGE_SUCC (exit_block, 0);
        }
    }

  desc->niter /= max_unroll + 1;
  loop->nb_iterations_upper_bound
    = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
  if (loop->any_likely_upper_bound)
    loop->nb_iterations_likely_upper_bound
      = wi::udiv_trunc (loop->nb_iterations_likely_upper_bound, max_unroll + 1);
  desc->niter_expr = gen_int_mode (desc->niter, desc->mode);

  /* Remove the edges.  */
  FOR_EACH_VEC_ELT (remove_edges, i, e)
    remove_path (e);

  if (dump_file)
    fprintf (dump_file,
             ";; Unrolled loop %d times, constant # of iterations %i insns\n",
             max_unroll, num_loop_insns (loop));
}
/* Decide whether to unroll LOOP iterating a runtime-computable number of
   times and how much.  */

static void
decide_unroll_runtime_iterations (class loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, i;
  class niter_desc *desc;
  widest_int iterations;

  /* If we were not asked to unroll this loop, just return silently.  */
  if (!(flags & UAP_UNROLL) && !loop->unroll)
    return;

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
                 "considering unrolling loop with runtime-"
                 "computable number of iterations\n");

  /* nunroll = total number of copies of the original loop body in
     unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
  nunroll = param_max_unrolled_insns / loop->ninsns;
  nunroll_by_av = param_max_average_unrolled_insns / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) param_max_unroll_times)
    nunroll = param_max_unroll_times;

  if (targetm.loop_unroll_adjust)
    nunroll = targetm.loop_unroll_adjust (nunroll, loop);

  if (loop->unroll > 0 && loop->unroll < USHRT_MAX)
    nunroll = loop->unroll;

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check simpleness.  */
  if (!desc->simple_p || desc->assumptions)
    {
      if (dump_file)
        fprintf (dump_file,
                 ";; Unable to prove that the number of iterations "
                 "can be counted in runtime\n");
      return;
    }

  if (desc->const_iter)
    {
      if (dump_file)
        fprintf (dump_file, ";; Loop iterates constant times\n");
      return;
    }

  /* Check whether the loop rolls.  */
  if ((get_estimated_loop_iterations (loop, &iterations)
       || get_likely_max_loop_iterations (loop, &iterations))
      && wi::ltu_p (iterations, 2 * nunroll))
    {
      if (dump_file)
        fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success; now force nunroll to be a power of 2, as the code generation
     requires it; we are unable to cope with overflows in the computation
     of the number of iterations otherwise.  */
  for (i = 1; 2 * i <= nunroll; i *= 2)
    continue;

  loop->lpt_decision.decision = LPT_UNROLL_RUNTIME;
  loop->lpt_decision.times = i - 1;
}
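/* Illustration of the power-of-two clamp above (numbers assumed): with
   nunroll == 7 the loop terminates with i == 4, so lpt_decision.times
   becomes 3 and four copies of the body are emitted; the runtime iteration
   count can then be reduced modulo 4 with a single AND against 3.  */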
/* Splits edge E and inserts the sequence of instructions INSNS on it, and
   returns the newly created block.  If INSNS is NULL_RTX, nothing is changed
   and NULL is returned instead.  */

basic_block
split_edge_and_insert (edge e, rtx_insn *insns)
{
  basic_block bb;

  if (!insns)
    return NULL;
  bb = split_edge (e);
  emit_insn_after (insns, BB_END (bb));

  /* ??? We used to assume that INSNS can contain control flow insns, and
     that we had to try to find sub basic blocks in BB to maintain a valid
     CFG.  For this purpose we used to set the BB_SUPERBLOCK flag on BB
     and call break_superblocks when going out of cfglayout mode.  But it
     turns out that this never happens; and that if it does ever happen,
     the verify_flow_info at the end of the RTL loop passes would fail.

     There are two reasons why we expected we could have control flow insns
     in INSNS.  The first is when a comparison has to be done in parts, and
     the second is when the number of iterations is computed for loops with
     the number of iterations known at runtime.  In both cases, test cases
     to get control flow in INSNS appear to be impossible to construct:

      * If do_compare_rtx_and_jump needs several branches to do comparison
        in a mode that needs comparison by parts, we cannot analyze the
        number of iterations of the loop, and we never get to unrolling it.

      * The code in expand_divmod that was suspected to cause creation of
        branching code seems to be only accessed for signed division.  The
        divisions used by # of iterations analysis are always unsigned.
        Problems might arise on architectures that emit branching code
        for some operations that may appear in the unroller (especially
        for division), but we have no such architectures.

     Considering all this, it was decided that we should for now assume
     that INSNS can in theory contain control flow insns, but in practice
     it never does.  So we don't handle the theoretical case, and should
     a real failure ever show up, we have a pretty good clue for how to
     fix it.  */

  return bb;
}
/* Prepare a sequence comparing OP0 with OP1 using COMP and jumping to LABEL if
   true, with probability PROB.  If CINSN is not NULL, it is the insn to copy
   in order to create a jump.  */

static rtx_insn *
compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp,
                      rtx_code_label *label, profile_probability prob,
                      rtx_insn *cinsn)
{
  rtx_insn *seq;
  rtx_jump_insn *jump;
  rtx cond;
  machine_mode mode;

  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  start_sequence ();
  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      /* A hack -- there seems to be no easy generic way to make a
         conditional jump from a ccmode comparison.  */
      gcc_assert (cinsn);
      cond = XEXP (SET_SRC (pc_set (cinsn)), 0);
      gcc_assert (GET_CODE (cond) == comp);
      gcc_assert (rtx_equal_p (op0, XEXP (cond, 0)));
      gcc_assert (rtx_equal_p (op1, XEXP (cond, 1)));
      emit_jump_insn (copy_insn (PATTERN (cinsn)));
      jump = as_a <rtx_jump_insn *> (get_last_insn ());
      JUMP_LABEL (jump) = JUMP_LABEL (cinsn);
      LABEL_NUSES (JUMP_LABEL (jump))++;
      redirect_jump (jump, label, 0);
    }
  else
    {
      gcc_assert (!cinsn);

      op0 = force_operand (op0, NULL_RTX);
      op1 = force_operand (op1, NULL_RTX);
      do_compare_rtx_and_jump (op0, op1, comp, 0,
                               mode, NULL_RTX, NULL, label,
                               profile_probability::uninitialized ());
      jump = as_a <rtx_jump_insn *> (get_last_insn ());
      jump->set_jump_target (label);
      LABEL_NUSES (label)++;
    }
  if (prob.initialized_p ())
    add_reg_br_prob_note (jump, prob);

  seq = get_insns ();
  end_sequence ();

  return seq;
}
/* Unroll LOOP for which we are able to count number of iterations in
   runtime LOOP->LPT_DECISION.TIMES times.  The times value must be a
   power of two.  The transformation does this (with some extra care
   for the case n < 0):

   for (i = 0; i < n; i++)
     body;

   ==> (LOOP->LPT_DECISION.TIMES == 3)  */
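/* Sketch of the transformation above (illustration only; the exact CFG,
   including the extra zero check and profile updates, is built by the code
   below, and the peeled copies are actually selected by a chain of equality
   compares rather than the >= tests shown).  Assuming TIMES == 3, i.e. four
   copies of the body and a runtime trip count n:

     m = n % 4;
     if (m >= 1) { body; i++; }
     if (m >= 2) { body; i++; }
     if (m >= 3) { body; i++; }
     while (i < n)
       {
         body; i++;
         body; i++;
         body; i++;
         body; i++;
       }
  */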
static void
unroll_loop_runtime_iterations (class loop *loop)
{
  rtx old_niter, niter, tmp;
  rtx_insn *init_code, *branch_code;
  unsigned i;
  unsigned may_exit_copy;
  unsigned n_peel;
  edge e;
  profile_probability p;
  basic_block preheader, *body, swtch, ezc_swtch = NULL;
  profile_count iter_count, new_count;
  bool ok;
  bool extra_zero_check, last_may_exit;
  unsigned max_unroll = loop->lpt_decision.times;
  class niter_desc *desc = get_simple_loop_desc (loop);
  bool exit_at_end = loop_exit_at_end_p (loop);
  struct opt_info *opt_info = NULL;

  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  /* Remember blocks whose dominators will have to be updated.  */
  auto_vec<basic_block> dom_bbs;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (basic_block bb : get_dominated_by (CDI_DOMINATORS, body[i]))
        if (!flow_bb_inside_loop_p (loop, bb))
          dom_bbs.safe_push (bb);
    }
  free (body);

  if (!exit_at_end)
    {
      /* Leave exit in first copy (for explanation why see comment in
         unroll_loop_constant_iterations).  */
      may_exit_copy = 0;
      n_peel = max_unroll - 1;
      extra_zero_check = true;
      last_may_exit = false;
    }
  else
    {
      /* Leave exit in last copy (for explanation why see comment in
         unroll_loop_constant_iterations).  */
      may_exit_copy = max_unroll;
      n_peel = max_unroll;
      extra_zero_check = false;
      last_may_exit = true;
    }

  /* Get expression for number of iterations.  */
  start_sequence ();
  old_niter = niter = gen_reg_rtx (desc->mode);
  tmp = force_operand (copy_rtx (desc->niter_expr), niter);
  if (tmp != niter)
    emit_move_insn (niter, tmp);

  /* For loops that exit at end and whose number of iterations is reliable,
     add one to niter to account for first pass through loop body before
     reaching exit test.  */
  if (exit_at_end && !desc->noloop_assumptions)
    {
      niter = expand_simple_binop (desc->mode, PLUS,
                                   niter, const1_rtx,
                                   NULL_RTX, 0, OPTAB_LIB_WIDEN);
      old_niter = niter;
    }

  /* Count modulo by ANDing it with max_unroll; we use the fact that
     the number of unrollings is a power of two, and thus this is correct
     even if there is overflow in the computation.  */
  niter = expand_simple_binop (desc->mode, AND,
                               niter, gen_int_mode (max_unroll, desc->mode),
                               NULL_RTX, 0, OPTAB_LIB_WIDEN);

  init_code = get_insns ();
  end_sequence ();
  unshare_all_rtl_in_chain (init_code);
  /* Precondition the loop.  */
  split_edge_and_insert (loop_preheader_edge (loop), init_code);

  auto_vec<edge> remove_edges;

  auto_sbitmap wont_exit (max_unroll + 2);

  if (extra_zero_check || desc->noloop_assumptions)
    {
      /* Peel the first copy of loop body.  Leave the exit test if the number
         of iterations is not reliable.  Also record the place of the extra
         zero check.  */
      bitmap_clear (wont_exit);
      if (!desc->noloop_assumptions)
        bitmap_set_bit (wont_exit, 1);
      ezc_swtch = loop_preheader_edge (loop)->src;
      ok = duplicate_loop_body_to_header_edge (loop, loop_preheader_edge (loop),
                                               1, wont_exit, desc->out_edge,
                                               &remove_edges,
                                               DLTHE_FLAG_UPDATE_FREQ);
      gcc_assert (ok);
    }

  /* Record the place where switch will be built for preconditioning.  */
  swtch = split_edge (loop_preheader_edge (loop));

  /* Compute count increments for each switch block and initialize
     innermost switch block.  Switch blocks and peeled loop copies are built
     from innermost outward.  */
  iter_count = new_count = swtch->count / (max_unroll + 1);
  swtch->count = new_count;

  for (i = 0; i < n_peel; i++)
    {
      /* Peel the copy.  */
      bitmap_clear (wont_exit);
      if (i != n_peel - 1 || !last_may_exit)
        bitmap_set_bit (wont_exit, 1);
      ok = duplicate_loop_body_to_header_edge (loop, loop_preheader_edge (loop),
                                               1, wont_exit, desc->out_edge,
                                               &remove_edges,
                                               DLTHE_FLAG_UPDATE_FREQ);
      gcc_assert (ok);

      /* Create item for switch.  */
      unsigned j = n_peel - i - (extra_zero_check ? 0 : 1);
      p = profile_probability::always () / (i + 2);

      preheader = split_edge (loop_preheader_edge (loop));
      /* Add in count of edge from switch block.  */
      preheader->count += iter_count;
      branch_code = compare_and_jump_seq (copy_rtx (niter),
                                          gen_int_mode (j, desc->mode), EQ,
                                          block_label (preheader), p, NULL);

      /* We rely on the fact that the compare and jump cannot be optimized out,
         and hence the cfg we create is correct.  */
      gcc_assert (branch_code != NULL_RTX);

      swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code);
      set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
      single_succ_edge (swtch)->probability = p.invert ();
      new_count += iter_count;
      swtch->count = new_count;
      e = make_edge (swtch, preheader,
                     single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
      e->probability = p;
    }

  if (extra_zero_check)
    {
      /* Add branch for zero iterations.  */
      p = profile_probability::always () / (max_unroll + 1);
      swtch = ezc_swtch;
      preheader = split_edge (loop_preheader_edge (loop));
      /* Recompute count adjustments since initial peel copy may
         have exited and reduced those values that were computed above.  */
      iter_count = swtch->count / (max_unroll + 1);
      /* Add in count of edge from switch block.  */
      preheader->count += iter_count;
      branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
                                          block_label (preheader), p,
                                          NULL);
      gcc_assert (branch_code != NULL_RTX);

      swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code);
      set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
      single_succ_edge (swtch)->probability = p.invert ();
      e = make_edge (swtch, preheader,
                     single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
      e->probability = p;
    }
  /* Recount dominators for outer blocks.  */
  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);

  /* And unroll loop.  */
  bitmap_ones (wont_exit);
  bitmap_clear_bit (wont_exit, may_exit_copy);
  opt_info_start_duplication (opt_info);

  ok = duplicate_loop_body_to_header_edge (
    loop, loop_latch_edge (loop), max_unroll, wont_exit, desc->out_edge,
    &remove_edges,
    DLTHE_FLAG_UPDATE_FREQ | (opt_info ? DLTHE_RECORD_COPY_NUMBER : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, max_unroll, true, true);
      free_opt_info (opt_info);
    }

  if (exit_at_end)
    {
      basic_block exit_block = get_bb_copy (desc->in_edge->src);
      /* Find a new in and out edge; they are in the last copy we have
         made.  */

      if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
        {
          desc->out_edge = EDGE_SUCC (exit_block, 0);
          desc->in_edge = EDGE_SUCC (exit_block, 1);
        }
      else
        {
          desc->out_edge = EDGE_SUCC (exit_block, 1);
          desc->in_edge = EDGE_SUCC (exit_block, 0);
        }
    }

  /* Remove the edges.  */
  FOR_EACH_VEC_ELT (remove_edges, i, e)
    remove_path (e);

  /* We must be careful when updating the number of iterations due to
     preconditioning and the fact that the value must be valid at entry
     of the loop.  After passing through the above code, we see that
     the correct new number of iterations is this:  */
  gcc_assert (!desc->const_iter);
  desc->niter_expr =
    simplify_gen_binary (UDIV, desc->mode, old_niter,
                         gen_int_mode (max_unroll + 1, desc->mode));
  loop->nb_iterations_upper_bound
    = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
  if (loop->any_likely_upper_bound)
    loop->nb_iterations_likely_upper_bound
      = wi::udiv_trunc (loop->nb_iterations_likely_upper_bound, max_unroll + 1);
  if (exit_at_end)
    {
      desc->niter_expr =
        simplify_gen_binary (MINUS, desc->mode, desc->niter_expr, const1_rtx);
      desc->noloop_assumptions = NULL_RTX;
      --loop->nb_iterations_upper_bound;
      if (loop->any_estimate
          && loop->nb_iterations_estimate != 0)
        --loop->nb_iterations_estimate;
      else
        loop->any_estimate = false;
      if (loop->any_likely_upper_bound
          && loop->nb_iterations_likely_upper_bound != 0)
        --loop->nb_iterations_likely_upper_bound;
      else
        loop->any_likely_upper_bound = false;
    }

  if (dump_file)
    fprintf (dump_file,
             ";; Unrolled loop %d times, counting # of iterations "
             "in runtime, %i insns\n",
             max_unroll, num_loop_insns (loop));
}
/* Decide whether to unroll LOOP stupidly and how much.  */

static void
decide_unroll_stupid (class loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, i;
  class niter_desc *desc;
  widest_int iterations;

  /* If we were not asked to unroll this loop, just return silently.  */
  if (!(flags & UAP_UNROLL_ALL) && !loop->unroll)
    return;

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE, "considering unrolling loop stupidly\n");

  /* nunroll = total number of copies of the original loop body in
     unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
  nunroll = param_max_unrolled_insns / loop->ninsns;
  nunroll_by_av
    = param_max_average_unrolled_insns / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) param_max_unroll_times)
    nunroll = param_max_unroll_times;

  if (targetm.loop_unroll_adjust)
    nunroll = targetm.loop_unroll_adjust (nunroll, loop);

  if (loop->unroll > 0 && loop->unroll < USHRT_MAX)
    nunroll = loop->unroll;

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check simpleness.  */
  if (desc->simple_p && !desc->assumptions)
    {
      if (dump_file)
        fprintf (dump_file, ";; Loop is simple\n");
      return;
    }

  /* Do not unroll loops with branches inside -- it increases the number
     of mispredicts.
     TODO: this heuristic needs tuning; a call inside the loop body
     is also a relatively good reason not to unroll.  */
  if (num_loop_branches (loop) > 1)
    {
      if (dump_file)
        fprintf (dump_file, ";; Not unrolling, contains branches\n");
      return;
    }

  /* Check whether the loop rolls.  */
  if ((get_estimated_loop_iterations (loop, &iterations)
       || get_likely_max_loop_iterations (loop, &iterations))
      && wi::ltu_p (iterations, 2 * nunroll))
    {
      if (dump_file)
        fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success.  Now force nunroll to be a power of 2, as it seems that this
     improves results (partially because of better alignments, partially
     because of some dark magic).  */
  for (i = 1; 2 * i <= nunroll; i *= 2)
    continue;

  loop->lpt_decision.decision = LPT_UNROLL_STUPID;
  loop->lpt_decision.times = i - 1;
}
/* Unroll a LOOP LOOP->LPT_DECISION.TIMES times.  The transformation does this:

   while (cond)
     body;

   ==> (LOOP->LPT_DECISION.TIMES == 3)

   while (cond)
     {
       body;
       if (!cond) break;
       body;
       if (!cond) break;
       body;
       if (!cond) break;
       body;
     }
  */
static void
unroll_loop_stupid (class loop *loop)
{
  unsigned nunroll = loop->lpt_decision.times;
  class niter_desc *desc = get_simple_loop_desc (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  auto_sbitmap wont_exit (nunroll + 1);
  bitmap_clear (wont_exit);
  opt_info_start_duplication (opt_info);

  ok = duplicate_loop_body_to_header_edge (
    loop, loop_latch_edge (loop), nunroll, wont_exit, NULL, NULL,
    DLTHE_FLAG_UPDATE_FREQ | (opt_info ? DLTHE_RECORD_COPY_NUMBER : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, nunroll, true, true);
      free_opt_info (opt_info);
    }

  if (desc->simple_p)
    {
      /* We indeed may get here provided that there are nontrivial assumptions
         for a loop to be really simple.  We could update the counts, but the
         problem is that we are unable to decide which exit will be taken
         (not really true in case the number of iterations is constant,
         but no one will do anything with this information, so we do not
         worry about it).  */
      desc->simple_p = false;
    }

  if (dump_file)
    fprintf (dump_file, ";; Unrolled loop %d times, %i insns\n",
             nunroll, num_loop_insns (loop));
}
/* Returns true if REG is referenced in one nondebug insn in LOOP.
   Set *DEBUG_USES to the number of debug insns that reference the
   register.  */

static bool
referenced_in_one_insn_in_loop_p (class loop *loop, rtx reg,
                                  int *debug_uses)
{
  basic_block *body, bb;
  unsigned i;
  int count_ref = 0;
  rtx_insn *insn;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      FOR_BB_INSNS (bb, insn)
        if (!rtx_referenced_p (reg, insn))
          continue;
        else if (DEBUG_INSN_P (insn))
          (*debug_uses)++;
        else if (++count_ref > 1)
          break;
    }
  free (body);
  return (count_ref == 1);
}
/* Reset the DEBUG_USES debug insns in LOOP that reference REG.  */

static void
reset_debug_uses_in_loop (class loop *loop, rtx reg, int debug_uses)
{
  basic_block *body, bb;
  unsigned i;
  rtx_insn *insn;

  body = get_loop_body (loop);
  for (i = 0; debug_uses && i < loop->num_nodes; i++)
    {
      bb = body[i];

      FOR_BB_INSNS (bb, insn)
        if (!DEBUG_INSN_P (insn) || !rtx_referenced_p (reg, insn))
          continue;
        else
          {
            validate_change (insn, &INSN_VAR_LOCATION_LOC (insn),
                             gen_rtx_UNKNOWN_VAR_LOC (), 0);
            if (!--debug_uses)
              break;
          }
    }
  free (body);
}
/* Determine whether INSN contains an accumulator
   which can be expanded into separate copies,
   one for each copy of the LOOP body.

   for (i = 0 ; i < n; i++)
     sum += something;

   Return NULL if INSN contains no opportunity for expansion of accumulator.
   Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant
   information and return a pointer to it.  */
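/* A sketch of the effect (illustration only, assuming an unroll factor of
   three with flag_variable_expansion_in_unroller): the single accumulator is
   given one fresh register per copy, so the copies no longer form a serial
   dependence chain:

     sum += a[i];            sum  += a[i];
     sum += a[i+1];   ==>    sum1 += a[i+1];
     sum += a[i+2];          sum2 += a[i+2];

   sum1 and sum2 are initialized in the preheader and folded back into sum in
   the loop exit block; see insert_var_expansion_initialization and
   combine_var_copies_in_loop_exit below.  */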
static struct var_to_expand *
analyze_insn_to_expand_var (class loop *loop, rtx_insn *insn)
{
  rtx set, dest, src;
  struct var_to_expand *ves;
  unsigned accum_pos;
  enum rtx_code code;
  int debug_uses = 0;

  set = single_set (insn);
  if (!set)
    return NULL;

  dest = SET_DEST (set);
  src = SET_SRC (set);
  code = GET_CODE (src);

  if (code != PLUS && code != MINUS && code != MULT && code != FMA)
    return NULL;

  if (FLOAT_MODE_P (GET_MODE (dest)))
    {
      if (!flag_associative_math)
        return NULL;
      /* In the case of FMA, we're also changing the rounding.  */
      if (code == FMA && !flag_unsafe_math_optimizations)
        return NULL;
    }

  /* Hmm, this is a bit paradoxical.  We know that INSN is a valid insn
     in MD.  But if there is no optab to generate the insn, we cannot
     perform the variable expansion.  This can happen if an MD provides
     an insn but not a named pattern to generate it, for example to avoid
     producing code that needs additional mode switches like for x87/mmx.

     So we check have_insn_for which looks for an optab for the operation
     in SRC.  If it doesn't exist, we can't perform the expansion even
     though INSN is valid.  */
  if (!have_insn_for (code, GET_MODE (src)))
    return NULL;

  if (!REG_P (dest)
      && !(GET_CODE (dest) == SUBREG
           && REG_P (SUBREG_REG (dest))))
    return NULL;

  /* Find the accumulator use within the operation.  */
  if (code == FMA)
    {
      /* We only support accumulation via FMA in the ADD position.  */
      if (!rtx_equal_p (dest, XEXP (src, 2)))
        return NULL;
      accum_pos = 2;
    }
  else if (rtx_equal_p (dest, XEXP (src, 0)))
    accum_pos = 0;
  else if (rtx_equal_p (dest, XEXP (src, 1)))
    {
      /* The method of expansion that we are using, which includes the
         initialization of the expansions with zero and the summation of
         the expansions at the end of the computation, will yield wrong
         results for (x = something - x); thus avoid using it in that case.  */
      if (code == MINUS)
        return NULL;
      accum_pos = 1;
    }
  else
    return NULL;

  /* It must not otherwise be used.  */
  if (code == FMA)
    {
      if (rtx_referenced_p (dest, XEXP (src, 0))
          || rtx_referenced_p (dest, XEXP (src, 1)))
        return NULL;
    }
  else if (rtx_referenced_p (dest, XEXP (src, 1 - accum_pos)))
    return NULL;

  /* It must be used in exactly one insn.  */
  if (!referenced_in_one_insn_in_loop_p (loop, dest, &debug_uses))
    return NULL;

  if (dump_file)
    {
      fprintf (dump_file, "\n;; Expanding Accumulator ");
      print_rtl (dump_file, dest);
      fprintf (dump_file, "\n");
    }

  if (debug_uses)
    /* Instead of resetting the debug insns, we could replace each
       debug use in the loop with the sum or product of all expanded
       accumulators.  Since we'll only know of all expansions at the
       end, we'd have to keep track of which vars_to_expand a debug
       insn in the loop references, take note of each copy of the
       debug insn during unrolling, and when it's all done, compute
       the sum or product of each variable and adjust the original
       debug insn and each copy thereof.  What a pain!  */
    reset_debug_uses_in_loop (loop, dest, debug_uses);

  /* Record the accumulator to expand.  */
  ves = XNEW (struct var_to_expand);
  ves->insn = insn;
  ves->reg = copy_rtx (dest);
  ves->var_expansions.create (1);
  ves->next = NULL;
  ves->op = GET_CODE (src);
  ves->expansion_count = 0;
  ves->reuse_expansion = 0;
  return ves;
}
/* Determine whether there is an induction variable in INSN that
   we would like to split during unrolling.

   Return NULL if INSN contains no interesting IVs.  Otherwise, allocate
   an IV_TO_SPLIT structure, fill it with the relevant information and return a
   pointer to it.  */
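/* A sketch of the effect (illustration only): for a biv i with step STEP
   that is split, copy number K of the unrolled body uses i0 + K * STEP
   instead of the value produced by the previous copy, where i0 is a fresh
   base register initialized from the original computation in copy 0; see
   allocate_basic_variable, insert_base_initialization and split_iv below.  */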
static struct iv_to_split *
analyze_iv_to_split_insn (rtx_insn *insn)
{
  rtx set, dest;
  class rtx_iv iv;
  struct iv_to_split *ivts;
  scalar_int_mode mode;
  bool ok;

  /* For now we just split the basic induction variables.  Later this may be
     extended for example by selecting also addresses of memory references.  */
  set = single_set (insn);
  if (!set)
    return NULL;

  dest = SET_DEST (set);
  if (!REG_P (dest) || !is_a <scalar_int_mode> (GET_MODE (dest), &mode))
    return NULL;

  if (!biv_p (insn, mode, dest))
    return NULL;

  ok = iv_analyze_result (insn, dest, &iv);

  /* This used to be an assert under the assumption that if biv_p returns
     true that iv_analyze_result must also return true.  However, that
     assumption is not strictly correct as evidenced by pr25569.

     Returning NULL when iv_analyze_result returns false is safe and
     avoids the problems in pr25569 until the iv_analyze_* routines
     can be fixed, which is apparently hard and time consuming
     according to their author.  */
  if (!ok)
    return NULL;

  if (iv.step == const0_rtx
      || iv.mode != iv.extend_mode)
    return NULL;

  /* Record the insn to split.  */
  ivts = XNEW (struct iv_to_split);
  ivts->insn = insn;
  ivts->orig_var = dest;
  ivts->base_var = NULL_RTX;
  ivts->step = iv.step;
  ivts->next = NULL;

  return ivts;
}
/* Determines which of the insns in LOOP can be optimized.
   Return an OPT_INFO struct with the relevant hash tables filled
   with all insns to be optimized.  The FIRST_NEW_BLOCK field
   is undefined for the return value.  */

static struct opt_info *
analyze_insns_in_loop (class loop *loop)
{
  basic_block *body, bb;
  unsigned i;
  struct opt_info *opt_info = XCNEW (struct opt_info);
  rtx_insn *insn;
  struct iv_to_split *ivts = NULL;
  struct var_to_expand *ves = NULL;
  iv_to_split **slot1;
  var_to_expand **slot2;
  auto_vec<edge> edges = get_loop_exit_edges (loop);
  edge exit;
  bool can_apply = false;

  iv_analysis_loop_init (loop);

  body = get_loop_body (loop);

  if (flag_split_ivs_in_unroller)
    {
      opt_info->insns_to_split
        = new hash_table<iv_split_hasher> (5 * loop->num_nodes);
      opt_info->iv_to_split_head = NULL;
      opt_info->iv_to_split_tail = &opt_info->iv_to_split_head;
    }

  /* Record the loop exit bb and loop preheader before the unrolling.  */
  opt_info->loop_preheader = loop_preheader_edge (loop)->src;

  if (edges.length () == 1)
    {
      exit = edges[0];
      if (!(exit->flags & EDGE_COMPLEX))
        {
          opt_info->loop_exit = split_edge (exit);
          can_apply = true;
        }
    }

  if (flag_variable_expansion_in_unroller
      && can_apply)
    {
      opt_info->insns_with_var_to_expand
        = new hash_table<var_expand_hasher> (5 * loop->num_nodes);
      opt_info->var_to_expand_head = NULL;
      opt_info->var_to_expand_tail = &opt_info->var_to_expand_head;
    }

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
        continue;

      FOR_BB_INSNS (bb, insn)
        {
          if (!INSN_P (insn))
            continue;

          if (opt_info->insns_to_split)
            ivts = analyze_iv_to_split_insn (insn);

          if (ivts)
            {
              slot1 = opt_info->insns_to_split->find_slot (ivts, INSERT);
              gcc_assert (*slot1 == NULL);
              *slot1 = ivts;
              *opt_info->iv_to_split_tail = ivts;
              opt_info->iv_to_split_tail = &ivts->next;
              continue;
            }

          if (opt_info->insns_with_var_to_expand)
            ves = analyze_insn_to_expand_var (loop, insn);

          if (ves)
            {
              slot2 = opt_info->insns_with_var_to_expand->find_slot (ves, INSERT);
              gcc_assert (*slot2 == NULL);
              *slot2 = ves;
              *opt_info->var_to_expand_tail = ves;
              opt_info->var_to_expand_tail = &ves->next;
            }
        }
    }

  free (body);
  return opt_info;
}

/* Called just before loop duplication.  Records start of duplicated area
   to OPT_INFO.  */

static void
opt_info_start_duplication (struct opt_info *opt_info)
{
  if (opt_info)
    opt_info->first_new_block = last_basic_block_for_fn (cfun);
}

/* Determine the number of iterations between initialization of the base
   variable and the current copy (N_COPY).  N_COPIES is the total number
   of newly created copies.  UNROLLING is true if we are unrolling
   (not peeling) the loop.  */

static unsigned
determine_split_iv_delta (unsigned n_copy, unsigned n_copies, bool unrolling)
{
  if (unrolling)
    {
      /* If we are unrolling, initialization is done in the original loop
         body (number 0).  */
      return n_copy;
    }
  else
    {
      /* If we are peeling, the copy in which the initialization occurs has
         number 1.  The original loop (number 0) is the last.  */
      if (n_copy)
        return n_copy - 1;
      else
        return n_copies;
    }
}

/* Allocate basic variable for the induction variable chain.  */

static void
allocate_basic_variable (struct iv_to_split *ivts)
{
  rtx expr = SET_SRC (single_set (ivts->insn));

  ivts->base_var = gen_reg_rtx (GET_MODE (expr));
}

/* Insert initialization of basic variable of IVTS before INSN, taking
   the initial value from INSN.  */

static void
insert_base_initialization (struct iv_to_split *ivts, rtx_insn *insn)
{
  rtx expr = copy_rtx (SET_SRC (single_set (insn)));
  rtx_insn *seq;

  start_sequence ();
  expr = force_operand (expr, ivts->base_var);
  if (expr != ivts->base_var)
    emit_move_insn (ivts->base_var, expr);
  seq = get_insns ();
  end_sequence ();

  emit_insn_before (seq, insn);
}

/* Replace the use of induction variable described in IVTS in INSN
   by base variable + DELTA * step.  */

static void
split_iv (struct iv_to_split *ivts, rtx_insn *insn, unsigned delta)
{
  rtx expr, *loc, incr, var;
  rtx_insn *seq;
  machine_mode mode = GET_MODE (ivts->base_var);
  rtx src, dest, set;

  /* Construct base + DELTA * step.  */
  if (!delta)
    expr = ivts->base_var;
  else
    {
      incr = simplify_gen_binary (MULT, mode,
                                  copy_rtx (ivts->step),
                                  gen_int_mode (delta, mode));
      expr = simplify_gen_binary (PLUS, GET_MODE (ivts->base_var),
                                  ivts->base_var, incr);
    }

  /* Figure out where to do the replacement.  */
  loc = &SET_SRC (single_set (insn));

  /* If we can make the replacement right away, we're done.  */
  if (validate_change (insn, loc, expr, 0))
    return;

  /* Otherwise, force EXPR into a register and try again.  */
  start_sequence ();
  var = gen_reg_rtx (mode);
  expr = force_operand (expr, var);
  if (expr != var)
    emit_move_insn (var, expr);
  seq = get_insns ();
  end_sequence ();
  emit_insn_before (seq, insn);

  if (validate_change (insn, loc, var, 0))
    return;

  /* The last chance.  Try recreating the assignment in insn
     completely from scratch.  */
  set = single_set (insn);
  gcc_assert (set);

  start_sequence ();
  *loc = var;
  src = copy_rtx (SET_SRC (set));
  dest = copy_rtx (SET_DEST (set));
  src = force_operand (src, dest);
  if (src != dest)
    emit_move_insn (dest, src);
  seq = get_insns ();
  end_sequence ();

  emit_insn_before (seq, insn);
  delete_insn (insn);
}

/* Return one expansion of the accumulator recorded in struct VE.  */

static rtx
get_expansion (struct var_to_expand *ve)
{
  rtx reg;

  if (ve->reuse_expansion == 0)
    reg = ve->reg;
  else
    reg = ve->var_expansions[ve->reuse_expansion - 1];

  if (ve->var_expansions.length () == (unsigned) ve->reuse_expansion)
    ve->reuse_expansion = 0;
  else
    ve->reuse_expansion++;

  return reg;
}

/* Given INSN replace the uses of the accumulator recorded in VE
   with a new register.  */

static void
expand_var_during_unrolling (struct var_to_expand *ve, rtx_insn *insn)
{
  rtx new_reg, set;
  bool really_new_expansion = false;

  set = single_set (insn);
  gcc_assert (set);

  /* Generate a new register only if the expansion limit has not been
     reached.  Else reuse an already existing expansion.  */
  if (param_max_variable_expansions > ve->expansion_count)
    {
      really_new_expansion = true;
      new_reg = gen_reg_rtx (GET_MODE (ve->reg));
    }
  else
    new_reg = get_expansion (ve);

  validate_replace_rtx_group (SET_DEST (set), new_reg, insn);
  if (apply_change_group ())
    if (really_new_expansion)
      {
        ve->var_expansions.safe_push (new_reg);
        ve->expansion_count++;
      }
}
/* Initialize the variable expansions in loop preheader.  PLACE is the
   loop-preheader basic block where the initialization of the
   expansions should take place.  The expansions are initialized with
   (-0) when the operation is plus or minus to honor sign zero.  This
   way we can prevent cases where the sign of the final result is
   affected by the sign of the expansion.  Here is an example to
   demonstrate this:

   for (i = 0 ; i < n; i++)
     sum += something;

   ==>

   sum += something
   ....
   i = i+1;
   sum1 += something
   ....
   i = i+1
   sum2 += something;
   ....

   When SUM is initialized with -zero and SOMETHING is also -zero; the
   final result of sum should be -zero thus the expansions sum1 and sum2
   should be initialized with -zero as well (otherwise we will get +zero
   as the final result).  */
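/* Concretely (IEEE 754 behavior, stated here for context): (-0.0) + (-0.0)
   yields -0.0, while (+0.0) + (-0.0) yields +0.0 under round-to-nearest.  So
   if the extra accumulators were initialized with +0.0, combining them at
   the loop exit could turn an all-negative-zero sum into +0.0, which is why
   the code below uses a negated zero when HONOR_SIGNED_ZEROS (mode).  */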
static void
insert_var_expansion_initialization (struct var_to_expand *ve,
                                     basic_block place)
{
  rtx_insn *seq;
  rtx var, zero_init;
  unsigned i;
  machine_mode mode = GET_MODE (ve->reg);
  bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode);

  if (ve->var_expansions.length () == 0)
    return;

  start_sequence ();
  switch (ve->op)
    {
    case FMA:
      /* Note that we only accumulate FMA via the ADD operand.  */
    case PLUS:
    case MINUS:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
        {
          if (honor_signed_zero_p)
            zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
          else
            zero_init = CONST0_RTX (mode);
          emit_move_insn (var, zero_init);
        }
      break;

    case MULT:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
        {
          zero_init = CONST1_RTX (GET_MODE (var));
          emit_move_insn (var, zero_init);
        }
      break;

    default:
      gcc_unreachable ();
    }

  seq = get_insns ();
  end_sequence ();

  emit_insn_after (seq, BB_END (place));
}

/* Combine the variable expansions at the loop exit.  PLACE is the
   loop exit basic block where the summation of the expansions should
   take place.  */

static void
combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
{
  rtx sum = ve->reg;
  rtx expr, var;
  rtx_insn *seq, *insn;
  unsigned i;

  if (ve->var_expansions.length () == 0)
    return;

  /* ve->reg might be SUBREG or some other non-shareable RTL, and we use
     it both here and as the destination of the assignment.  */
  sum = copy_rtx (sum);
  start_sequence ();
  switch (ve->op)
    {
    case FMA:
      /* Note that we only accumulate FMA via the ADD operand.  */
    case PLUS:
    case MINUS:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
        sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), var, sum);
      break;

    case MULT:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
        sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum);
      break;

    default:
      gcc_unreachable ();
    }

  expr = force_operand (sum, ve->reg);
  if (expr != ve->reg)
    emit_move_insn (ve->reg, expr);
  seq = get_insns ();
  end_sequence ();

  insn = BB_HEAD (place);
  while (!NOTE_INSN_BASIC_BLOCK_P (insn))
    insn = NEXT_INSN (insn);

  emit_insn_after (seq, insn);
}

/* Strip away REG_EQUAL notes for IVs we're splitting.

   Updating REG_EQUAL notes for IVs we split is tricky: We
   cannot tell until after unrolling, DF-rescanning, and liveness
   updating, whether an EQ_USE is reached by the split IV while
   the IV reg is still live.  See PR55006.

   ??? We cannot use remove_reg_equal_equiv_notes_for_regno,
   because RTL loop-iv requires us to defer rescanning insns and
   any notes attached to them.  So resort to old techniques...  */

static void
maybe_strip_eq_note_for_split_iv (struct opt_info *opt_info, rtx_insn *insn)
{
  struct iv_to_split *ivts;
  rtx note = find_reg_equal_equiv_note (insn);
  if (!note)
    return;
  for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
    if (reg_mentioned_p (ivts->orig_var, note))
      {
        remove_note (insn, note);
        return;
      }
}
/* Apply loop optimizations in loop copies using the
   data gathered during the unrolling.  Structure
   OPT_INFO records that data.

   UNROLLING is true if we unrolled (not peeled) the loop.
   REWRITE_ORIGINAL_BODY is true if we should also rewrite the original body of
   the loop (as it should happen in complete unrolling, but not in ordinary
   peeling of the loop).  */

static void
apply_opt_in_copies (struct opt_info *opt_info,
                     unsigned n_copies, bool unrolling,
                     bool rewrite_original_loop)
{
  unsigned i, delta;
  basic_block bb, orig_bb;
  rtx_insn *insn, *orig_insn, *next;
  struct iv_to_split ivts_templ, *ivts;
  struct var_to_expand ve_templ, *ves;

  /* Sanity check -- we need to put initialization in the original loop
     body.  */
  gcc_assert (!unrolling || rewrite_original_loop);

  /* Allocate the basic variables (i0).  */
  if (opt_info->insns_to_split)
    for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
      allocate_basic_variable (ivts);

  for (i = opt_info->first_new_block;
       i < (unsigned) last_basic_block_for_fn (cfun);
       i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      orig_bb = get_bb_original (bb);

      /* bb->aux holds position in copy sequence initialized by
         duplicate_loop_body_to_header_edge.  */
      delta = determine_split_iv_delta ((size_t)bb->aux, n_copies,
                                        unrolling);
      orig_insn = BB_HEAD (orig_bb);
      FOR_BB_INSNS_SAFE (bb, insn, next)
        {
          if (!INSN_P (insn)
              || (DEBUG_BIND_INSN_P (insn)
                  && INSN_VAR_LOCATION_DECL (insn)
                  && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL))
            continue;

          while (!INSN_P (orig_insn)
                 || (DEBUG_BIND_INSN_P (orig_insn)
                     && INSN_VAR_LOCATION_DECL (orig_insn)
                     && (TREE_CODE (INSN_VAR_LOCATION_DECL (orig_insn))
                         == LABEL_DECL)))
            orig_insn = NEXT_INSN (orig_insn);

          ivts_templ.insn = orig_insn;
          ve_templ.insn = orig_insn;

          /* Apply splitting iv optimization.  */
          if (opt_info->insns_to_split)
            {
              maybe_strip_eq_note_for_split_iv (opt_info, insn);

              ivts = opt_info->insns_to_split->find (&ivts_templ);

              if (ivts)
                {
                  gcc_assert (GET_CODE (PATTERN (insn))
                              == GET_CODE (PATTERN (orig_insn)));

                  if (!delta)
                    insert_base_initialization (ivts, insn);
                  split_iv (ivts, insn, delta);
                }
            }
          /* Apply variable expansion optimization.  */
          if (unrolling && opt_info->insns_with_var_to_expand)
            {
              ves = (struct var_to_expand *)
                opt_info->insns_with_var_to_expand->find (&ve_templ);
              if (ves)
                {
                  gcc_assert (GET_CODE (PATTERN (insn))
                              == GET_CODE (PATTERN (orig_insn)));
                  expand_var_during_unrolling (ves, insn);
                }
            }
          orig_insn = NEXT_INSN (orig_insn);
        }
    }

  if (!rewrite_original_loop)
    return;

  /* Initialize the variable expansions in the loop preheader
     and take care of combining them at the loop exit.  */
  if (opt_info->insns_with_var_to_expand)
    {
      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
        insert_var_expansion_initialization (ves, opt_info->loop_preheader);
      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
        combine_var_copies_in_loop_exit (ves, opt_info->loop_exit);
    }

  /* Rewrite also the original loop body.  Find them as originals of the blocks
     in the last copied iteration, i.e. those that have
     get_bb_copy (get_bb_original (bb)) == bb.  */
  for (i = opt_info->first_new_block;
       i < (unsigned) last_basic_block_for_fn (cfun);
       i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      orig_bb = get_bb_original (bb);
      if (get_bb_copy (orig_bb) != bb)
        continue;

      delta = determine_split_iv_delta (0, n_copies, unrolling);
      for (orig_insn = BB_HEAD (orig_bb);
           orig_insn != NEXT_INSN (BB_END (bb));
           orig_insn = next)
        {
          next = NEXT_INSN (orig_insn);

          if (!INSN_P (orig_insn))
            continue;

          ivts_templ.insn = orig_insn;
          if (opt_info->insns_to_split)
            {
              maybe_strip_eq_note_for_split_iv (opt_info, orig_insn);

              ivts = (struct iv_to_split *)
                opt_info->insns_to_split->find (&ivts_templ);
              if (ivts)
                {
                  if (!delta)
                    insert_base_initialization (ivts, orig_insn);
                  split_iv (ivts, orig_insn, delta);
                  continue;
                }
            }
        }
    }
}

/* Release OPT_INFO.  */

static void
free_opt_info (struct opt_info *opt_info)
{
  delete opt_info->insns_to_split;
  opt_info->insns_to_split = NULL;
  if (opt_info->insns_with_var_to_expand)
    {
      struct var_to_expand *ves;

      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
        ves->var_expansions.release ();
      delete opt_info->insns_with_var_to_expand;
      opt_info->insns_with_var_to_expand = NULL;
    }
  free (opt_info);
}