/* Loop unrolling.
   Copyright (C) 2002-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
/* This pass performs loop unrolling.  We only perform this
   optimization on innermost loops (with a single exception) because
   the impact on performance is greatest here, and we want to avoid
   unnecessary code size growth.  The gain comes from greater sequentiality
   of code, better code for further optimization passes, and in some cases
   from fewer tests of exit conditions.  The main problem is code growth,
   which impacts performance negatively due to cache effects.

   What we do:

   -- unrolling of loops that roll a constant number of times; this is
      almost always a win, as we get rid of exit condition tests.
   -- unrolling of loops that roll a number of times that we can compute
      at runtime; we also get rid of exit condition tests here, but there
      is the extra expense of calculating the number of iterations.
   -- simple unrolling of remaining loops; this is performed only if we
      are asked to, as the gain is questionable in this case and often
      it may even slow down the code.

   For more detailed descriptions of each of those, see comments at
   the appropriate functions below.

   There are a number of parameters (defined and described in params.def)
   that control how much we unroll.

   ??? A great problem is that we don't have a good way to determine
   how many times we should unroll a loop; the experiments I have made
   showed that this choice may affect performance on the order of
   several percent.  */
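/* For illustration only (a sketch added by the editor, not from the
   original sources), the three strategies above on small loops, assuming
   a hypothetical 4x unroll factor:

     for (i = 0; i < 100; i++)   constant niter: peel 100 % 4 iterations,
       body;                     then run the body copied four times,
                                 keeping the exit test in a single copy;

     for (i = 0; i < n; i++)     runtime niter: emit code computing n % 4,
       body;                     peel that many iterations through a
                                 switch, then run the 4x body;

     while (cond)                "stupid" unrolling: copy the body four
       body;                     times but keep the exit test in every
                                 copy, gaining only scheduling freedom.  */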
/* Information about induction variables to split.  */

struct iv_to_split
{
  rtx_insn *insn;	/* The insn in which the induction variable occurs.  */
  rtx orig_var;		/* The variable (register) for the IV before split.  */
  rtx base_var;		/* The variable on which the values in the further
			   iterations are based.  */
  rtx step;		/* Step of the induction variable.  */
  struct iv_to_split *next; /* Next entry in walking order.  */
};
/* Information about accumulators to expand.  */

struct var_to_expand
{
  rtx_insn *insn;	/* The insn in which the variable expansion occurs.  */
  rtx reg;		/* The accumulator which is expanded.  */
  vec<rtx> var_expansions; /* The copies of the accumulator which is expanded.  */
  struct var_to_expand *next; /* Next entry in walking order.  */
  enum rtx_code op;	/* The type of the accumulation - addition, subtraction
			   or multiplication.  */
  int expansion_count;	/* Count the number of expansions generated so far.  */
  int reuse_expansion;	/* The expansion we intend to reuse to expand
			   the accumulator.  If REUSE_EXPANSION is 0 reuse
			   the original accumulator.  Else use
			   var_expansions[REUSE_EXPANSION - 1].  */
};
/* Hashtable helper for iv_to_split.  */

struct iv_split_hasher : free_ptr_hash <iv_to_split>
{
  static inline hashval_t hash (const iv_to_split *);
  static inline bool equal (const iv_to_split *, const iv_to_split *);
};
/* A hash function for information about insns to split.  */

inline hashval_t
iv_split_hasher::hash (const iv_to_split *ivts)
{
  return (hashval_t) INSN_UID (ivts->insn);
}
/* An equality function for information about insns to split.  */

inline bool
iv_split_hasher::equal (const iv_to_split *i1, const iv_to_split *i2)
{
  return i1->insn == i2->insn;
}
/* Hashtable helper for var_to_expand.  */

struct var_expand_hasher : free_ptr_hash <var_to_expand>
{
  static inline hashval_t hash (const var_to_expand *);
  static inline bool equal (const var_to_expand *, const var_to_expand *);
};
/* Return a hash for VES.  */

inline hashval_t
var_expand_hasher::hash (const var_to_expand *ves)
{
  return (hashval_t) INSN_UID (ves->insn);
}
/* Return true if I1 and I2 refer to the same instruction.  */

inline bool
var_expand_hasher::equal (const var_to_expand *i1, const var_to_expand *i2)
{
  return i1->insn == i2->insn;
}
/* Information about optimization applied in
   the unrolled loop.  */

struct opt_info
{
  hash_table<iv_split_hasher> *insns_to_split; /* A hashtable of insns to
						  split.  */
  struct iv_to_split *iv_to_split_head; /* The first iv to split.  */
  struct iv_to_split **iv_to_split_tail; /* Pointer to the tail of the list.  */
  hash_table<var_expand_hasher> *insns_with_var_to_expand; /* A hashtable of
					insns with accumulators to expand.  */
  struct var_to_expand *var_to_expand_head; /* The first var to expand.  */
  struct var_to_expand **var_to_expand_tail; /* Pointer to the tail of the list.  */
  unsigned first_new_block;	/* The first basic block that was
				   duplicated.  */
  basic_block loop_exit;	/* The loop exit basic block.  */
  basic_block loop_preheader;	/* The loop preheader basic block.  */
};
static void decide_unroll_stupid (struct loop *, int);
static void decide_unroll_constant_iterations (struct loop *, int);
static void decide_unroll_runtime_iterations (struct loop *, int);
static void unroll_loop_stupid (struct loop *);
static void decide_unrolling (int);
static void unroll_loop_constant_iterations (struct loop *);
static void unroll_loop_runtime_iterations (struct loop *);
static struct opt_info *analyze_insns_in_loop (struct loop *);
static void opt_info_start_duplication (struct opt_info *);
static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
static void free_opt_info (struct opt_info *);
static struct var_to_expand *analyze_insn_to_expand_var (struct loop *, rtx_insn *);
static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx, int *);
static struct iv_to_split *analyze_iv_to_split_insn (rtx_insn *);
static void expand_var_during_unrolling (struct var_to_expand *, rtx_insn *);
static void insert_var_expansion_initialization (struct var_to_expand *,
						 basic_block);
static void combine_var_copies_in_loop_exit (struct var_to_expand *,
					     basic_block);
static rtx get_expansion (struct var_to_expand *);
/* Emit a message summarizing the unroll that will be
   performed for LOOP, along with the loop's location LOCUS, if
   appropriate given the dump or -fopt-info settings.  */

static void
report_unroll (struct loop *loop, location_t locus)
{
  int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS;

  if (loop->lpt_decision.decision == LPT_NONE)
    return;

  if (!dump_enabled_p ())
    return;

  dump_printf_loc (report_flags, locus,
                   "loop unrolled %d times",
                   loop->lpt_decision.times);
  if (profile_info)
    dump_printf (report_flags,
                 " (header execution count %d)",
                 (int)loop->header->count);

  dump_printf (report_flags, "\n");
}
/* Decide whether to unroll loops and how much.  */

static void
decide_unrolling (int flags)
{
  struct loop *loop;

  /* Scan the loops, inner ones first.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      loop->lpt_decision.decision = LPT_NONE;
      location_t locus = get_loop_location (loop);

      if (dump_enabled_p ())
	dump_printf_loc (TDF_RTL, locus,
			 ";; *** Considering loop %d at BB %d for "
			 "unrolling ***\n",
			 loop->num, loop->header->index);

      /* Do not peel cold areas.  */
      if (optimize_loop_for_size_p (loop))
	{
	  if (dump_file)
	    fprintf (dump_file, ";; Not considering loop, cold area\n");
	  continue;
	}

      /* Can the loop be manipulated?  */
      if (!can_duplicate_loop_p (loop))
	{
	  if (dump_file)
	    fprintf (dump_file,
		     ";; Not considering loop, cannot duplicate\n");
	  continue;
	}

      /* Skip non-innermost loops.  */
      if (loop->inner)
	{
	  if (dump_file)
	    fprintf (dump_file, ";; Not considering loop, is not innermost\n");
	  continue;
	}

      loop->ninsns = num_loop_insns (loop);
      loop->av_ninsns = average_num_loop_insns (loop);

      /* Try transformations one by one in decreasing order of
	 priority.  */

      decide_unroll_constant_iterations (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
	decide_unroll_runtime_iterations (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
	decide_unroll_stupid (loop, flags);

      report_unroll (loop, locus);
    }
}
/* Unroll LOOPS.  */

void
unroll_loops (int flags)
{
  struct loop *loop;
  bool changed = false;

  /* Now decide rest of unrolling.  */
  decide_unrolling (flags);

  /* Scan the loops, inner ones first.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* And perform the appropriate transformations.  */
      switch (loop->lpt_decision.decision)
	{
	case LPT_UNROLL_CONSTANT:
	  unroll_loop_constant_iterations (loop);
	  changed = true;
	  break;
	case LPT_UNROLL_RUNTIME:
	  unroll_loop_runtime_iterations (loop);
	  changed = true;
	  break;
	case LPT_UNROLL_STUPID:
	  unroll_loop_stupid (loop);
	  changed = true;
	  break;
	case LPT_NONE:
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  if (changed)
    {
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
    }
}
/* Check whether exit of the LOOP is at the end of loop body.  */

static bool
loop_exit_at_end_p (struct loop *loop)
{
  struct niter_desc *desc = get_simple_loop_desc (loop);
  rtx_insn *insn;

  /* We should never have a conditional in the latch block.  */
  gcc_assert (desc->in_edge->dest != loop->header);

  if (desc->in_edge->dest != loop->latch)
    return false;

  /* Check that the latch is empty.  */
  FOR_BB_INSNS (loop->latch, insn)
    if (INSN_P (insn) && active_insn_p (insn))
      return false;

  return true;
}
/* Decide whether to unroll LOOP iterating constant number of times
   and how much.  */

static void
decide_unroll_constant_iterations (struct loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
  struct niter_desc *desc;
  widest_int iterations;

  if (!(flags & UAP_UNROLL))
    {
      /* We were not asked to, just return back silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file,
	     "\n;; Considering unrolling loop with constant "
	     "number of iterations\n");

  /* nunroll = total number of copies of the original loop body in
     unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
  nunroll_by_av
    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);

  if (targetm.loop_unroll_adjust)
    nunroll = targetm.loop_unroll_adjust (nunroll, loop);
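  /* For illustration only (hypothetical parameter values, added by the
     editor): with PARAM_MAX_UNROLLED_INSNS == 200,
     PARAM_MAX_AVERAGE_UNROLLED_INSNS == 80 and PARAM_MAX_UNROLL_TIMES == 8,
     a loop with ninsns == 20 and av_ninsns == 16 gives
     nunroll == min (200/20, 80/16, 8) == 5, i.e. at most five copies of
     the body in the unrolled loop.  */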
  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
	fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check number of iterations.  */
  if (!desc->simple_p || !desc->const_iter || desc->assumptions)
    {
      if (dump_file)
	fprintf (dump_file,
		 ";; Unable to prove that the loop iterates constant times\n");
      return;
    }

  /* Check whether the loop rolls enough to consider.
     Consult also loop bounds and profile; in the case the loop has more
     than one exit it may well loop less than determined maximal number
     of iterations.  */
  if (desc->niter < 2 * nunroll
      || ((get_estimated_loop_iterations (loop, &iterations)
	   || get_max_loop_iterations (loop, &iterations))
	  && wi::ltu_p (iterations, 2 * nunroll)))
    {
      if (dump_file)
	fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success; now compute number of iterations to unroll.  We alter
     nunroll so that as few as possible copies of loop body are
     necessary, while still not decreasing the number of unrollings
     too much (at most by 1).  */
  best_copies = 2 * nunroll + 10;

  i = 2 * nunroll + 2;
  if (i - 1 >= desc->niter)
    i = desc->niter - 2;

  for (; i >= nunroll - 1; i--)
    {
      unsigned exit_mod = desc->niter % (i + 1);

      if (!loop_exit_at_end_p (loop))
	n_copies = exit_mod + i + 1;
      else if (exit_mod != (unsigned) i
	       || desc->noloop_assumptions != NULL_RTX)
	n_copies = exit_mod + i + 2;
      else
	n_copies = i + 1;

      if (n_copies < best_copies)
	{
	  best_copies = n_copies;
	  best_unroll = i;
	}
    }

  loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
  loop->lpt_decision.times = best_unroll;
}
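/* For illustration only (hypothetical numbers, added by the editor): with
   desc->niter == 102, nunroll == 8 and the exit not at the end, the search
   above tries i from 18 down to 7.  At i == 9 we get
   exit_mod == 102 % 10 == 2 and n_copies == 2 + 9 + 1 == 12, the minimum,
   so best_unroll == 9: two iterations are peeled off and the remaining 100
   run through a body containing ten copies.  */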
/* Unroll LOOP with constant number of iterations LOOP->LPT_DECISION.TIMES times.
   The transformation does this:

   for (i = 0; i < 102; i++)
     body;

   ==> (LOOP->LPT_DECISION.TIMES == 3)

   i = 0;
   body; i++;
   body; i++;
   while (i < 102)
     {
       body; i++;
       body; i++;
       body; i++;
       body; i++;
     }
  */
static void
unroll_loop_constant_iterations (struct loop *loop)
{
  unsigned HOST_WIDE_INT niter;
  unsigned exit_mod;
  sbitmap wont_exit;
  unsigned i;
  edge e;
  unsigned max_unroll = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  bool exit_at_end = loop_exit_at_end_p (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  niter = desc->niter;

  /* Should not get here (such loop should be peeled instead).  */
  gcc_assert (niter > max_unroll + 1);
  exit_mod = niter % (max_unroll + 1);

  wont_exit = sbitmap_alloc (max_unroll + 1);
  bitmap_ones (wont_exit);

  auto_vec<edge> remove_edges;
  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);
  if (!exit_at_end)
    {
      /* The exit is not at the end of the loop; leave exit test
	 in the first copy, so that the loops that start with test
	 of exit condition have continuous body after unrolling.  */

      if (dump_file)
	fprintf (dump_file, ";; Condition at beginning of loop.\n");

      /* Peel exit_mod iterations.  */
      bitmap_clear_bit (wont_exit, 0);
      if (desc->noloop_assumptions)
	bitmap_clear_bit (wont_exit, 1);

      if (exit_mod)
	{
	  opt_info_start_duplication (opt_info);
	  ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					      exit_mod,
					      wont_exit, desc->out_edge,
					      &remove_edges,
					      DLTHE_FLAG_UPDATE_FREQ
					      | (opt_info && exit_mod > 1
						 ? DLTHE_RECORD_COPY_NUMBER
						 : 0));
	  gcc_assert (ok);

	  if (opt_info && exit_mod > 1)
	    apply_opt_in_copies (opt_info, exit_mod, false, false);

	  desc->noloop_assumptions = NULL_RTX;
	  desc->niter -= exit_mod;
	  loop->nb_iterations_upper_bound -= exit_mod;
	  if (loop->any_estimate
	      && wi::leu_p (exit_mod, loop->nb_iterations_estimate))
	    loop->nb_iterations_estimate -= exit_mod;
	  else
	    loop->any_estimate = false;
	}

      bitmap_set_bit (wont_exit, 1);
    }
  else
    {
      /* Leave exit test in last copy, for the same reason as above if
	 the loop tests the condition at the end of loop body.  */

      if (dump_file)
	fprintf (dump_file, ";; Condition at end of loop.\n");

      /* We know that niter >= max_unroll + 2; so we do not need to care of
	 case when we would exit before reaching the loop.  So just peel
	 exit_mod + 1 iterations.  */
      if (exit_mod != max_unroll
	  || desc->noloop_assumptions)
	{
	  bitmap_clear_bit (wont_exit, 0);
	  if (desc->noloop_assumptions)
	    bitmap_clear_bit (wont_exit, 1);

	  opt_info_start_duplication (opt_info);
	  ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					      exit_mod + 1,
					      wont_exit, desc->out_edge,
					      &remove_edges,
					      DLTHE_FLAG_UPDATE_FREQ
					      | (opt_info && exit_mod > 0
						 ? DLTHE_RECORD_COPY_NUMBER
						 : 0));
	  gcc_assert (ok);

	  if (opt_info && exit_mod > 0)
	    apply_opt_in_copies (opt_info, exit_mod + 1, false, false);

	  desc->niter -= exit_mod + 1;
	  loop->nb_iterations_upper_bound -= exit_mod + 1;
	  if (loop->any_estimate
	      && wi::leu_p (exit_mod + 1, loop->nb_iterations_estimate))
	    loop->nb_iterations_estimate -= exit_mod + 1;
	  else
	    loop->any_estimate = false;
	  desc->noloop_assumptions = NULL_RTX;

	  bitmap_set_bit (wont_exit, 0);
	  bitmap_set_bit (wont_exit, 1);
	}

      bitmap_clear_bit (wont_exit, max_unroll);
    }
  /* Now unroll the loop.  */

  opt_info_start_duplication (opt_info);
  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
				      max_unroll,
				      wont_exit, desc->out_edge,
				      &remove_edges,
				      DLTHE_FLAG_UPDATE_FREQ
				      | (opt_info
					 ? DLTHE_RECORD_COPY_NUMBER
					 : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, max_unroll, true, true);
      free_opt_info (opt_info);
    }

  free (wont_exit);
  if (exit_at_end)
    {
      basic_block exit_block = get_bb_copy (desc->in_edge->src);
      /* Find a new in and out edge; they are in the last copy we have made.  */

      if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
	{
	  desc->out_edge = EDGE_SUCC (exit_block, 0);
	  desc->in_edge = EDGE_SUCC (exit_block, 1);
	}
      else
	{
	  desc->out_edge = EDGE_SUCC (exit_block, 1);
	  desc->in_edge = EDGE_SUCC (exit_block, 0);
	}
    }
  desc->niter /= max_unroll + 1;
  loop->nb_iterations_upper_bound
    = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
  desc->niter_expr = GEN_INT (desc->niter);

  /* Remove the edges.  */
  FOR_EACH_VEC_ELT (remove_edges, i, e)
    remove_path (e);

  if (dump_file)
    fprintf (dump_file,
	     ";; Unrolled loop %d times, constant # of iterations %i insns\n",
	     max_unroll, num_loop_insns (loop));
}
/* Decide whether to unroll LOOP iterating runtime computable number of times
   and how much.  */

static void
decide_unroll_runtime_iterations (struct loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, i;
  struct niter_desc *desc;
  widest_int iterations;

  if (!(flags & UAP_UNROLL))
    {
      /* We were not asked to, just return back silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file,
	     "\n;; Considering unrolling loop with runtime "
	     "computable number of iterations\n");

  /* nunroll = total number of copies of the original loop body in
     unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
  nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);

  if (targetm.loop_unroll_adjust)
    nunroll = targetm.loop_unroll_adjust (nunroll, loop);

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
	fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check simpleness.  */
  if (!desc->simple_p || desc->assumptions)
    {
      if (dump_file)
	fprintf (dump_file,
		 ";; Unable to prove that the number of iterations "
		 "can be counted in runtime\n");
      return;
    }

  if (desc->const_iter)
    {
      if (dump_file)
	fprintf (dump_file, ";; Loop iterates constant times\n");
      return;
    }

  /* Check whether the loop rolls.  */
  if ((get_estimated_loop_iterations (loop, &iterations)
       || get_max_loop_iterations (loop, &iterations))
      && wi::ltu_p (iterations, 2 * nunroll))
    {
      if (dump_file)
	fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success; now force nunroll to be power of 2, as we are unable to
     cope with overflows in computation of number of iterations.  */
  for (i = 1; 2 * i <= nunroll; i *= 2)
    continue;

  loop->lpt_decision.decision = LPT_UNROLL_RUNTIME;
  loop->lpt_decision.times = i - 1;
}
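/* For illustration only (hypothetical numbers, added by the editor): with
   nunroll == 7 the loop above leaves i == 4, the largest power of two not
   exceeding 7, so lpt_decision.times == 3 and the unrolled loop contains
   four body copies; n % 4 can then be computed by ANDing with 3 even if
   the niter expression overflows.  */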
/* Splits edge E and inserts the sequence of instructions INSNS on it, and
   returns the newly created block.  If INSNS is NULL_RTX, nothing is changed
   and NULL is returned instead.  */

basic_block
split_edge_and_insert (edge e, rtx_insn *insns)
{
  basic_block bb;

  if (!insns)
    return NULL;
  bb = split_edge (e);
  emit_insn_after (insns, BB_END (bb));

  /* ??? We used to assume that INSNS can contain control flow insns, and
     that we had to try to find sub basic blocks in BB to maintain a valid
     CFG.  For this purpose we used to set the BB_SUPERBLOCK flag on BB
     and call break_superblocks when going out of cfglayout mode.  But it
     turns out that this never happens; and that if it does ever happen,
     the verify_flow_info at the end of the RTL loop passes would fail.

     There are two reasons why we expected we could have control flow insns
     in INSNS.  The first is when a comparison has to be done in parts, and
     the second is when the number of iterations is computed for loops with
     the number of iterations known at runtime.  In both cases, test cases
     to get control flow in INSNS appear to be impossible to construct:

     * If do_compare_rtx_and_jump needs several branches to do comparison
       in a mode that needs comparison by parts, we cannot analyze the
       number of iterations of the loop, and we never get to unrolling it.

     * The code in expand_divmod that was suspected to cause creation of
       branching code seems to be only accessed for signed division.  The
       divisions used by # of iterations analysis are always unsigned.
       Problems might arise on architectures that emit branching code
       for some operations that may appear in the unroller (especially
       for division), but we have no such architectures.

     Considering all this, it was decided that we should for now assume
     that INSNS can in theory contain control flow insns, but in practice
     it never does.  So we don't handle the theoretical case, and should
     a real failure ever show up, we have a pretty good clue for how to
     fix it.  */

  return bb;
}
/* Prepare a sequence comparing OP0 with OP1 using COMP and jumping to LABEL if
   true, with probability PROB.  If CINSN is not NULL, it is the insn to copy
   in order to create a jump.  */

static rtx_insn *
compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp,
		      rtx_code_label *label, int prob, rtx_insn *cinsn)
{
  rtx_insn *seq;
  rtx_jump_insn *jump;
  rtx cond;
  machine_mode mode;

  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  start_sequence ();
  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      /* A hack -- there seems to be no easy generic way how to make a
	 conditional jump from a ccmode comparison.  */
      gcc_assert (cinsn);
      cond = XEXP (SET_SRC (pc_set (cinsn)), 0);
      gcc_assert (GET_CODE (cond) == comp);
      gcc_assert (rtx_equal_p (op0, XEXP (cond, 0)));
      gcc_assert (rtx_equal_p (op1, XEXP (cond, 1)));
      emit_jump_insn (copy_insn (PATTERN (cinsn)));
      jump = as_a <rtx_jump_insn *> (get_last_insn ());
      JUMP_LABEL (jump) = JUMP_LABEL (cinsn);
      LABEL_NUSES (JUMP_LABEL (jump))++;
      redirect_jump (jump, label, 0);
    }
  else
    {
      gcc_assert (!cinsn);

      op0 = force_operand (op0, NULL_RTX);
      op1 = force_operand (op1, NULL_RTX);
      do_compare_rtx_and_jump (op0, op1, comp, 0,
			       mode, NULL_RTX, NULL, label, -1);
      jump = as_a <rtx_jump_insn *> (get_last_insn ());
      jump->set_jump_target (label);
      LABEL_NUSES (label)++;
    }
  add_int_reg_note (jump, REG_BR_PROB, prob);

  seq = get_insns ();
  end_sequence ();

  return seq;
}
/* Unroll LOOP for which we are able to count number of iterations in runtime
   LOOP->LPT_DECISION.TIMES times.  The transformation does this (with some
   extra care for case n < 0):

   for (i = 0; i < n; i++)
     body;

   ==> (LOOP->LPT_DECISION.TIMES == 3)

   i = 0;
   mod = n % 4;

   switch (mod)
     {
       case 3:
         body; i++;
       case 2:
         body; i++;
       case 1:
         body; i++;
       case 0: ;
     }

   while (i < n)
     {
       body; i++;
       body; i++;
       body; i++;
       body; i++;
     }
   */
static void
unroll_loop_runtime_iterations (struct loop *loop)
{
  rtx old_niter, niter, tmp;
  rtx_insn *init_code, *branch_code;
  unsigned i, j, p;
  basic_block preheader, *body, swtch, ezc_swtch;
  sbitmap wont_exit;
  int may_exit_copy;
  unsigned n_peel;
  edge e;
  bool extra_zero_check, last_may_exit;
  unsigned max_unroll = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  bool exit_at_end = loop_exit_at_end_p (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);
  /* Remember blocks whose dominators will have to be updated.  */
  auto_vec<basic_block> dom_bbs;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      vec<basic_block> ldom;
      basic_block bb;

      ldom = get_dominated_by (CDI_DOMINATORS, body[i]);
      FOR_EACH_VEC_ELT (ldom, j, bb)
	if (!flow_bb_inside_loop_p (loop, bb))
	  dom_bbs.safe_push (bb);

      ldom.release ();
    }
  free (body);
  if (!exit_at_end)
    {
      /* Leave exit in first copy (for explanation why see comment in
	 unroll_loop_constant_iterations).  */
      may_exit_copy = 0;
      n_peel = max_unroll - 1;
      extra_zero_check = true;
      last_may_exit = false;
    }
  else
    {
      /* Leave exit in last copy (for explanation why see comment in
	 unroll_loop_constant_iterations).  */
      may_exit_copy = max_unroll;
      n_peel = max_unroll;
      extra_zero_check = false;
      last_may_exit = true;
    }
  /* Get expression for number of iterations.  */
  start_sequence ();
  old_niter = niter = gen_reg_rtx (desc->mode);
  tmp = force_operand (copy_rtx (desc->niter_expr), niter);
  if (tmp != niter)
    emit_move_insn (niter, tmp);

  /* Count modulo by ANDing it with max_unroll; we use the fact that
     the number of unrollings is a power of two, and thus this is correct
     even if there is overflow in the computation.  */
  niter = expand_simple_binop (desc->mode, AND,
			       niter, gen_int_mode (max_unroll, desc->mode),
			       NULL_RTX, 0, OPTAB_LIB_WIDEN);

  init_code = get_insns ();
  end_sequence ();
  unshare_all_rtl_in_chain (init_code);
  /* Precondition the loop.  */
  split_edge_and_insert (loop_preheader_edge (loop), init_code);

  auto_vec<edge> remove_edges;

  wont_exit = sbitmap_alloc (max_unroll + 2);

  /* Peel the first copy of loop body (almost always we must leave exit test
     here; the only exception is when we have extra zero check and the number
     of iterations is reliable.  Also record the place of (possible) extra
     zero check.  */
  bitmap_clear (wont_exit);
  if (extra_zero_check
      && !desc->noloop_assumptions)
    bitmap_set_bit (wont_exit, 1);
  ezc_swtch = loop_preheader_edge (loop)->src;
  ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
				      1, wont_exit, desc->out_edge,
				      &remove_edges,
				      DLTHE_FLAG_UPDATE_FREQ);
  gcc_assert (ok);

  /* Record the place where switch will be built for preconditioning.  */
  swtch = split_edge (loop_preheader_edge (loop));
  for (i = 0; i < n_peel; i++)
    {
      /* Peel the copy.  */
      bitmap_clear (wont_exit);
      if (i != n_peel - 1 || !last_may_exit)
	bitmap_set_bit (wont_exit, 1);
      ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					  1, wont_exit, desc->out_edge,
					  &remove_edges,
					  DLTHE_FLAG_UPDATE_FREQ);
      gcc_assert (ok);

      /* Create item for switch.  */
      j = n_peel - i - (extra_zero_check ? 0 : 1);
      p = REG_BR_PROB_BASE / (i + 2);

      preheader = split_edge (loop_preheader_edge (loop));
      branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
					  block_label (preheader), p,
					  NULL);

      /* We rely on the fact that the compare and jump cannot be optimized out,
	 and hence the cfg we create is correct.  */
      gcc_assert (branch_code != NULL_RTX);

      swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code);
      set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
      single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p;
      e = make_edge (swtch, preheader,
		     single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
      e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
      e->probability = p;
    }
  if (extra_zero_check)
    {
      /* Add branch for zero iterations.  */
      p = REG_BR_PROB_BASE / (max_unroll + 1);
      swtch = ezc_swtch;
      preheader = split_edge (loop_preheader_edge (loop));
      branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
					  block_label (preheader), p,
					  NULL);
      gcc_assert (branch_code != NULL_RTX);

      swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code);
      set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
      single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p;
      e = make_edge (swtch, preheader,
		     single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
      e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
      e->probability = p;
    }
  /* Recount dominators for outer blocks.  */
  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);

  /* And unroll loop.  */

  bitmap_ones (wont_exit);
  bitmap_clear_bit (wont_exit, may_exit_copy);
  opt_info_start_duplication (opt_info);

  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
				      max_unroll,
				      wont_exit, desc->out_edge,
				      &remove_edges,
				      DLTHE_FLAG_UPDATE_FREQ
				      | (opt_info
					 ? DLTHE_RECORD_COPY_NUMBER
					 : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, max_unroll, true, true);
      free_opt_info (opt_info);
    }

  free (wont_exit);
  if (exit_at_end)
    {
      basic_block exit_block = get_bb_copy (desc->in_edge->src);
      /* Find a new in and out edge; they are in the last copy we have
	 made.  */

      if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
	{
	  desc->out_edge = EDGE_SUCC (exit_block, 0);
	  desc->in_edge = EDGE_SUCC (exit_block, 1);
	}
      else
	{
	  desc->out_edge = EDGE_SUCC (exit_block, 1);
	  desc->in_edge = EDGE_SUCC (exit_block, 0);
	}
    }

  /* Remove the edges.  */
  FOR_EACH_VEC_ELT (remove_edges, i, e)
    remove_path (e);
  /* We must be careful when updating the number of iterations due to
     preconditioning and the fact that the value must be valid at entry
     of the loop.  After passing through the above code, we see that
     the correct new number of iterations is this: */
  gcc_assert (!desc->const_iter);
  desc->niter_expr =
    simplify_gen_binary (UDIV, desc->mode, old_niter,
			 gen_int_mode (max_unroll + 1, desc->mode));
  loop->nb_iterations_upper_bound
    = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
  if (exit_at_end)
    {
      desc->niter_expr =
	simplify_gen_binary (MINUS, desc->mode, desc->niter_expr, const1_rtx);
      desc->noloop_assumptions = NULL_RTX;
      --loop->nb_iterations_upper_bound;
      if (loop->any_estimate
	  && loop->nb_iterations_estimate != 0)
	--loop->nb_iterations_estimate;
      else
	loop->any_estimate = false;
    }

  if (dump_file)
    fprintf (dump_file,
	     ";; Unrolled loop %d times, counting # of iterations "
	     "in runtime, %i insns\n",
	     max_unroll, num_loop_insns (loop));
}
/* Decide whether to unroll LOOP stupidly and how much.  */

static void
decide_unroll_stupid (struct loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, i;
  struct niter_desc *desc;
  widest_int iterations;

  if (!(flags & UAP_UNROLL_ALL))
    {
      /* We were not asked to, just return back silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file, "\n;; Considering unrolling loop stupidly\n");

  /* nunroll = total number of copies of the original loop body in
     unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
  nunroll_by_av
    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);

  if (targetm.loop_unroll_adjust)
    nunroll = targetm.loop_unroll_adjust (nunroll, loop);

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
	fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check simpleness.  */
  if (desc->simple_p && !desc->assumptions)
    {
      if (dump_file)
	fprintf (dump_file, ";; The loop is simple\n");
      return;
    }

  /* Do not unroll loops with branches inside -- it increases number
     of mispredicts.
     TODO: this heuristic needs tuning; a call inside the loop body
     is also a relatively good reason not to unroll.  */
  if (num_loop_branches (loop) > 1)
    {
      if (dump_file)
	fprintf (dump_file, ";; Not unrolling, contains branches\n");
      return;
    }

  /* Check whether the loop rolls.  */
  if ((get_estimated_loop_iterations (loop, &iterations)
       || get_max_loop_iterations (loop, &iterations))
      && wi::ltu_p (iterations, 2 * nunroll))
    {
      if (dump_file)
	fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success.  Now force nunroll to be power of 2, as it seems that this
     improves results (partially because of better alignments, partially
     because of some dark magic).  */
  for (i = 1; 2 * i <= nunroll; i *= 2)
    continue;

  loop->lpt_decision.decision = LPT_UNROLL_STUPID;
  loop->lpt_decision.times = i - 1;
}
/* Unroll a LOOP LOOP->LPT_DECISION.TIMES times.  The transformation does this:

   while (cond)
     body;

   ==> (LOOP->LPT_DECISION.TIMES == 3)

   while (cond)
     {
       body;
       if (!cond) break;
       body;
       if (!cond) break;
       body;
       if (!cond) break;
       body;
     }
   */
static void
unroll_loop_stupid (struct loop *loop)
{
  sbitmap wont_exit;
  unsigned nunroll = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  auto_vec<edge> remove_edges;

  wont_exit = sbitmap_alloc (nunroll + 1);
  bitmap_clear (wont_exit);
  opt_info_start_duplication (opt_info);

  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
				      nunroll, wont_exit,
				      NULL, &remove_edges,
				      DLTHE_FLAG_UPDATE_FREQ
				      | (opt_info
					 ? DLTHE_RECORD_COPY_NUMBER
					 : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, nunroll, true, true);
      free_opt_info (opt_info);
    }

  free (wont_exit);

  if (desc->simple_p)
    {
      /* We indeed may get here provided that there are nontrivial assumptions
	 for a loop to be really simple.  We could update the counts, but the
	 problem is that we are unable to decide which exit will be taken
	 (not really true in case the number of iterations is constant,
	 but no one will do anything with this information, so we do not
	 worry about it).  */
      desc->simple_p = false;
    }

  if (dump_file)
    fprintf (dump_file, ";; Unrolled loop %d times, %i insns\n",
	     nunroll, num_loop_insns (loop));
}
/* Returns true if REG is referenced in one nondebug insn in LOOP.
   Set *DEBUG_USES to the number of debug insns that reference the
   variable.  */

static bool
referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg,
				  int *debug_uses)
{
  basic_block *body, bb;
  unsigned i;
  int count_ref = 0;
  rtx_insn *insn;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      FOR_BB_INSNS (bb, insn)
	if (!rtx_referenced_p (reg, insn))
	  continue;
	else if (DEBUG_INSN_P (insn))
	  (*debug_uses)++;
	else if (++count_ref > 1)
	  break;
    }
  free (body);
  return (count_ref == 1);
}
/* Reset the DEBUG_USES debug insns in LOOP that reference REG.  */

static void
reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses)
{
  basic_block *body, bb;
  unsigned i;
  rtx_insn *insn;

  body = get_loop_body (loop);
  for (i = 0; debug_uses && i < loop->num_nodes; i++)
    {
      bb = body[i];

      FOR_BB_INSNS (bb, insn)
	if (!DEBUG_INSN_P (insn) || !rtx_referenced_p (reg, insn))
	  continue;
	else
	  {
	    validate_change (insn, &INSN_VAR_LOCATION_LOC (insn),
			     gen_rtx_UNKNOWN_VAR_LOC (), 0);
	    debug_uses--;
	  }
    }
  free (body);
}
/* Determine whether INSN contains an accumulator
   which can be expanded into separate copies,
   one for each copy of the LOOP body.

   for (i = 0 ; i < n; i++)
     sum += a[i];

   ==>

   sum += a[i]
   ....
   i = i+1;
   sum1 += a[i]
   ....
   i = i+1
   sum2 += a[i];
   ....

   Return NULL if INSN contains no opportunity for expansion of accumulator.
   Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant
   information and return a pointer to it.  */

static struct var_to_expand *
analyze_insn_to_expand_var (struct loop *loop, rtx_insn *insn)
{
  rtx set, dest, src;
  struct var_to_expand *ves;
  unsigned accum_pos;
  enum rtx_code code;
  int debug_uses = 0;

  set = single_set (insn);
  if (!set)
    return NULL;

  dest = SET_DEST (set);
  src = SET_SRC (set);
  code = GET_CODE (src);

  if (code != PLUS && code != MINUS && code != MULT && code != FMA)
    return NULL;

  if (FLOAT_MODE_P (GET_MODE (dest)))
    {
      if (!flag_associative_math)
	return NULL;
      /* In the case of FMA, we're also changing the rounding.  */
      if (code == FMA && !flag_unsafe_math_optimizations)
	return NULL;
    }

  /* Hmm, this is a bit paradoxical.  We know that INSN is a valid insn
     in MD.  But if there is no optab to generate the insn, we can not
     perform the variable expansion.  This can happen if an MD provides
     an insn but not a named pattern to generate it, for example to avoid
     producing code that needs additional mode switches like for x87/mmx.

     So we check have_insn_for which looks for an optab for the operation
     in SRC.  If it doesn't exist, we can't perform the expansion even
     though INSN is valid.  */
  if (!have_insn_for (code, GET_MODE (src)))
    return NULL;

  if (!REG_P (dest)
      && !(GET_CODE (dest) == SUBREG
           && REG_P (SUBREG_REG (dest))))
    return NULL;

  /* Find the accumulator use within the operation.  */
  if (code == FMA)
    {
      /* We only support accumulation via FMA in the ADD position.  */
      if (!rtx_equal_p (dest, XEXP (src, 2)))
	return NULL;
      accum_pos = 2;
    }
  else if (rtx_equal_p (dest, XEXP (src, 0)))
    accum_pos = 0;
  else if (rtx_equal_p (dest, XEXP (src, 1)))
    {
      /* The method of expansion that we are using; which includes the
	 initialization of the expansions with zero and the summation of
	 the expansions at the end of the computation will yield wrong
	 results for (x = something - x) thus avoid using it in that case.  */
      if (code == MINUS)
	return NULL;
      accum_pos = 1;
    }
  else
    return NULL;

  /* It must not otherwise be used.  */
  if (code == FMA)
    {
      if (rtx_referenced_p (dest, XEXP (src, 0))
	  || rtx_referenced_p (dest, XEXP (src, 1)))
	return NULL;
    }
  else if (rtx_referenced_p (dest, XEXP (src, 1 - accum_pos)))
    return NULL;

  /* It must be used in exactly one insn.  */
  if (!referenced_in_one_insn_in_loop_p (loop, dest, &debug_uses))
    return NULL;

  if (dump_file)
    {
      fprintf (dump_file, "\n;; Expanding Accumulator ");
      print_rtl (dump_file, dest);
      fprintf (dump_file, "\n");
    }

  if (debug_uses)
    /* Instead of resetting the debug insns, we could replace each
       debug use in the loop with the sum or product of all expanded
       accumulators.  Since we'll only know of all expansions at the
       end, we'd have to keep track of which vars_to_expand a debug
       insn in the loop references, take note of each copy of the
       debug insn during unrolling, and when it's all done, compute
       the sum or product of each variable and adjust the original
       debug insn and each copy thereof.  What a pain!  */
    reset_debug_uses_in_loop (loop, dest, debug_uses);

  /* Record the accumulator to expand.  */
  ves = XNEW (struct var_to_expand);
  ves->insn = insn;
  ves->reg = copy_rtx (dest);
  ves->var_expansions.create (1);
  ves->next = NULL;
  ves->op = GET_CODE (src);
  ves->expansion_count = 0;
  ves->reuse_expansion = 0;

  return ves;
}
/* Determine whether there is an induction variable in INSN that
   we would like to split during unrolling.

   I.e. replace

   i = i + 1;
   ...
   i = i + 1;
   ...
   i = i + 1;
   ...

   type chains by

   i0 = i + 1
   ...
   i = i0 + 1
   ...
   i = i0 + 2
   ...

   Return NULL if INSN contains no interesting IVs.  Otherwise, allocate
   an IV_TO_SPLIT structure, fill it with the relevant information and return a
   pointer to it.  */

static struct iv_to_split *
analyze_iv_to_split_insn (rtx_insn *insn)
{
  rtx set, dest;
  struct rtx_iv iv;
  struct iv_to_split *ivts;
  bool ok;

  /* For now we just split the basic induction variables.  Later this may be
     extended for example by selecting also addresses of memory references.  */
  set = single_set (insn);
  if (!set)
    return NULL;

  dest = SET_DEST (set);
  if (!REG_P (dest))
    return NULL;

  if (!biv_p (insn, dest))
    return NULL;

  ok = iv_analyze_result (insn, dest, &iv);

  /* This used to be an assert under the assumption that if biv_p returns
     true that iv_analyze_result must also return true.  However, that
     assumption is not strictly correct as evidenced by pr25569.

     Returning NULL when iv_analyze_result returns false is safe and
     avoids the problems in pr25569 until the iv_analyze_* routines
     can be fixed, which is apparently hard and time consuming
     according to their author.  */
  if (!ok)
    return NULL;

  if (iv.step == const0_rtx
      || iv.mode != iv.extend_mode)
    return NULL;

  /* Record the insn to split.  */
  ivts = XNEW (struct iv_to_split);
  ivts->insn = insn;
  ivts->orig_var = dest;
  ivts->base_var = NULL_RTX;
  ivts->step = iv.step;
  ivts->next = NULL;

  return ivts;
}
/* Determines which of insns in LOOP can be optimized.
   Return a OPT_INFO struct with the relevant hash tables filled
   with all insns to be optimized.  The FIRST_NEW_BLOCK field
   is undefined for the return value.  */

static struct opt_info *
analyze_insns_in_loop (struct loop *loop)
{
  basic_block *body, bb;
  unsigned i;
  struct opt_info *opt_info = XCNEW (struct opt_info);
  rtx_insn *insn;
  struct iv_to_split *ivts = NULL;
  struct var_to_expand *ves = NULL;
  iv_to_split **slot1;
  var_to_expand **slot2;
  vec<edge> edges = get_loop_exit_edges (loop);
  edge exit;
  bool can_apply = false;

  iv_analysis_loop_init (loop);

  body = get_loop_body (loop);

  if (flag_split_ivs_in_unroller)
    {
      opt_info->insns_to_split
	= new hash_table<iv_split_hasher> (5 * loop->num_nodes);
      opt_info->iv_to_split_head = NULL;
      opt_info->iv_to_split_tail = &opt_info->iv_to_split_head;
    }

  /* Record the loop exit bb and loop preheader before the unrolling.  */
  opt_info->loop_preheader = loop_preheader_edge (loop)->src;

  if (edges.length () == 1)
    {
      exit = edges[0];
      if (!(exit->flags & EDGE_COMPLEX))
	{
	  opt_info->loop_exit = split_edge (exit);
	  can_apply = true;
	}
    }

  if (flag_variable_expansion_in_unroller
      && can_apply)
    {
      opt_info->insns_with_var_to_expand
	= new hash_table<var_expand_hasher> (5 * loop->num_nodes);
      opt_info->var_to_expand_head = NULL;
      opt_info->var_to_expand_tail = &opt_info->var_to_expand_head;
    }

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	continue;

      FOR_BB_INSNS (bb, insn)
	{
	  if (!INSN_P (insn))
	    continue;

	  if (opt_info->insns_to_split)
	    ivts = analyze_iv_to_split_insn (insn);

	  if (ivts)
	    {
	      slot1 = opt_info->insns_to_split->find_slot (ivts, INSERT);
	      gcc_assert (*slot1 == NULL);
	      *slot1 = ivts;
	      *opt_info->iv_to_split_tail = ivts;
	      opt_info->iv_to_split_tail = &ivts->next;
	      continue;
	    }

	  if (opt_info->insns_with_var_to_expand)
	    ves = analyze_insn_to_expand_var (loop, insn);

	  if (ves)
	    {
	      slot2 = opt_info->insns_with_var_to_expand->find_slot (ves, INSERT);
	      gcc_assert (*slot2 == NULL);
	      *slot2 = ves;
	      *opt_info->var_to_expand_tail = ves;
	      opt_info->var_to_expand_tail = &ves->next;
	    }
	}
    }

  edges.release ();
  free (body);
  return opt_info;
}
/* Called just before loop duplication.  Records start of duplicated area
   to OPT_INFO.  */

static void
opt_info_start_duplication (struct opt_info *opt_info)
{
  if (opt_info)
    opt_info->first_new_block = last_basic_block_for_fn (cfun);
}
/* Determine the number of iterations between initialization of the base
   variable and the current copy (N_COPY).  N_COPIES is the total number
   of newly created copies.  UNROLLING is true if we are unrolling
   (not peeling) the loop.  */

static unsigned
determine_split_iv_delta (unsigned n_copy, unsigned n_copies, bool unrolling)
{
  if (unrolling)
    {
      /* If we are unrolling, initialization is done in the original loop
	 body (number 0).  */
      return n_copy;
    }
  else
    {
      /* If we are peeling, the copy in which the initialization occurs has
	 number 1.  The original loop (number 0) is the last.  */
      if (n_copy)
	return n_copy - 1;
      else
	return n_copies;
    }
}
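/* For illustration only (hypothetical numbers, added by the editor): when
   unrolling with n_copies == 3, copies are numbered 0 (the original body,
   where the base variable is initialized) through 3 and the delta equals
   the copy number.  When peeling, the initialization lands in copy 1, so
   copies 1, 2, 3 get deltas 0, 1, 2, and the original body (number 0),
   executed last, gets delta n_copies == 3.  */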
/* Allocate basic variable for the induction variable chain.  */

static void
allocate_basic_variable (struct iv_to_split *ivts)
{
  rtx expr = SET_SRC (single_set (ivts->insn));

  ivts->base_var = gen_reg_rtx (GET_MODE (expr));
}
/* Insert initialization of basic variable of IVTS before INSN, taking
   the initial value from INSN.  */

static void
insert_base_initialization (struct iv_to_split *ivts, rtx_insn *insn)
{
  rtx expr = copy_rtx (SET_SRC (single_set (insn)));
  rtx_insn *seq;

  start_sequence ();
  expr = force_operand (expr, ivts->base_var);
  if (expr != ivts->base_var)
    emit_move_insn (ivts->base_var, expr);
  seq = get_insns ();
  end_sequence ();

  emit_insn_before (seq, insn);
}
/* Replace the use of induction variable described in IVTS in INSN
   by base variable + DELTA * step.  */

static void
split_iv (struct iv_to_split *ivts, rtx_insn *insn, unsigned delta)
{
  rtx expr, *loc, incr, var;
  rtx_insn *seq;
  machine_mode mode = GET_MODE (ivts->base_var);
  rtx src, dest, set;

  /* Construct base + DELTA * step.  */
  if (!delta)
    expr = ivts->base_var;
  else
    {
      incr = simplify_gen_binary (MULT, mode,
				  ivts->step, gen_int_mode (delta, mode));
      expr = simplify_gen_binary (PLUS, GET_MODE (ivts->base_var),
				  ivts->base_var, incr);
    }

  /* Figure out where to do the replacement.  */
  loc = &SET_SRC (single_set (insn));

  /* If we can make the replacement right away, we're done.  */
  if (validate_change (insn, loc, expr, 0))
    return;

  /* Otherwise, force EXPR into a register and try again.  */
  start_sequence ();
  var = gen_reg_rtx (mode);
  expr = force_operand (expr, var);
  if (expr != var)
    emit_move_insn (var, expr);
  seq = get_insns ();
  end_sequence ();
  emit_insn_before (seq, insn);

  if (validate_change (insn, loc, var, 0))
    return;

  /* The last chance.  Try recreating the assignment in insn
     completely from scratch.  */
  set = single_set (insn);
  gcc_assert (set);

  start_sequence ();
  *loc = var;
  src = copy_rtx (SET_SRC (set));
  dest = copy_rtx (SET_DEST (set));
  src = force_operand (src, dest);
  if (src != dest)
    emit_move_insn (dest, src);

  seq = get_insns ();
  end_sequence ();

  emit_insn_before (seq, insn);
  delete_insn (insn);
}
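/* For illustration only (hypothetical values, added by the editor): if the
   loop increment is i = i + 1 with base variable i0, the copy with delta 2
   has its use of i rewritten to i0 + 2, so every copy depends only on i0
   and the copies can be scheduled in parallel instead of forming a serial
   i -> i -> i dependence chain.  */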
/* Return one expansion of the accumulator recorded in struct VE.  */

static rtx
get_expansion (struct var_to_expand *ve)
{
  rtx reg;

  if (ve->reuse_expansion == 0)
    reg = ve->reg;
  else
    reg = ve->var_expansions[ve->reuse_expansion - 1];

  if (ve->var_expansions.length () == (unsigned) ve->reuse_expansion)
    ve->reuse_expansion = 0;
  else
    ve->reuse_expansion++;

  return reg;
}
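/* For illustration only (hypothetical state, added by the editor): with
   two recorded expansions e1 and e2, successive calls cycle through the
   original accumulator and its copies: reg, e1, e2, reg, e1, ...  */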
/* Given INSN replace the uses of the accumulator recorded in VE
   with a new register.  */

static void
expand_var_during_unrolling (struct var_to_expand *ve, rtx_insn *insn)
{
  rtx new_reg, set;
  bool really_new_expansion = false;

  set = single_set (insn);
  gcc_assert (set);

  /* Generate a new register only if the expansion limit has not been
     reached.  Else reuse an already existing expansion.  */
  if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
    {
      really_new_expansion = true;
      new_reg = gen_reg_rtx (GET_MODE (ve->reg));
    }
  else
    new_reg = get_expansion (ve);

  validate_replace_rtx_group (SET_DEST (set), new_reg, insn);
  if (apply_change_group ())
    if (really_new_expansion)
      {
	ve->var_expansions.safe_push (new_reg);
	ve->expansion_count++;
      }
}
/* Initialize the variable expansions in loop preheader.  PLACE is the
   loop-preheader basic block where the initialization of the
   expansions should take place.  The expansions are initialized with
   (-0) when the operation is plus or minus to honor sign zero.  This
   way we can prevent cases where the sign of the final result is
   affected by the sign of the expansion.  Here is an example to
   demonstrate this:

   for (i = 0 ; i < n; i++)
     sum += something;

   ==>

   sum += something
   ....
   i = i+1;
   sum1 += something
   ....
   i = i+1
   sum2 += something;
   ....

   When SUM is initialized with -zero and SOMETHING is also -zero; the
   final result of sum should be -zero thus the expansions sum1 and sum2
   should be initialized with -zero as well (otherwise we will get +zero
   as the final result).  */

static void
insert_var_expansion_initialization (struct var_to_expand *ve,
				     basic_block place)
{
  rtx_insn *seq;
  rtx var, zero_init;
  unsigned i;
  machine_mode mode = GET_MODE (ve->reg);
  bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode);

  if (ve->var_expansions.length () == 0)
    return;

  start_sequence ();
  switch (ve->op)
    {
    case FMA:
      /* Note that we only accumulate FMA via the ADD operand.  */
    case PLUS:
    case MINUS:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
	{
	  if (honor_signed_zero_p)
	    zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
	  else
	    zero_init = CONST0_RTX (mode);
	  emit_move_insn (var, zero_init);
	}
      break;

    case MULT:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
	{
	  zero_init = CONST1_RTX (GET_MODE (var));
	  emit_move_insn (var, zero_init);
	}
      break;

    default:
      gcc_unreachable ();
    }

  seq = get_insns ();
  end_sequence ();

  emit_insn_after (seq, BB_END (place));
}
/* Combine the variable expansions at the loop exit.  PLACE is the
   loop exit basic block where the summation of the expansions should
   take place.  */

static void
combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
{
  rtx sum = ve->reg;
  rtx expr, var;
  rtx_insn *seq, *insn;
  unsigned i;

  if (ve->var_expansions.length () == 0)
    return;

  start_sequence ();
  switch (ve->op)
    {
    case FMA:
      /* Note that we only accumulate FMA via the ADD operand.  */
    case PLUS:
    case MINUS:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
	sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), var, sum);
      break;

    case MULT:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
	sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum);
      break;

    default:
      gcc_unreachable ();
    }

  expr = force_operand (sum, ve->reg);
  if (expr != ve->reg)
    emit_move_insn (ve->reg, expr);
  seq = get_insns ();
  end_sequence ();

  insn = BB_HEAD (place);
  while (!NOTE_INSN_BASIC_BLOCK_P (insn))
    insn = NEXT_INSN (insn);

  emit_insn_after (seq, insn);
}
/* Strip away REG_EQUAL notes for IVs we're splitting.

   Updating REG_EQUAL notes for IVs we split is tricky: We
   cannot tell until after unrolling, DF-rescanning, and liveness
   updating, whether an EQ_USE is reached by the split IV while
   the IV reg is still live.  See PR55006.

   ??? We cannot use remove_reg_equal_equiv_notes_for_regno,
   because RTL loop-iv requires us to defer rescanning insns and
   any notes attached to them.  So resort to old techniques...  */

static void
maybe_strip_eq_note_for_split_iv (struct opt_info *opt_info, rtx_insn *insn)
{
  struct iv_to_split *ivts;
  rtx note = find_reg_equal_equiv_note (insn);
  if (!note)
    return;
  for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
    if (reg_mentioned_p (ivts->orig_var, note))
      {
	remove_note (insn, note);
	return;
      }
}
/* Apply loop optimizations in loop copies using the
   data gathered during the unrolling.  Structure
   OPT_INFO records that data.

   UNROLLING is true if we unrolled (not peeled) the loop.
   REWRITE_ORIGINAL_BODY is true if we should also rewrite the original body of
   the loop (as it should happen in complete unrolling, but not in ordinary
   peeling of the loop).  */

static void
apply_opt_in_copies (struct opt_info *opt_info,
		     unsigned n_copies, bool unrolling,
		     bool rewrite_original_loop)
{
  unsigned i, delta;
  basic_block bb, orig_bb;
  rtx_insn *insn, *orig_insn, *next;
  struct iv_to_split ivts_templ, *ivts;
  struct var_to_expand ve_templ, *ves;

  /* Sanity check -- we need to put initialization in the original loop
     body.  */
  gcc_assert (!unrolling || rewrite_original_loop);

  /* Allocate the basic variables (i0).  */
  if (opt_info->insns_to_split)
    for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
      allocate_basic_variable (ivts);
  for (i = opt_info->first_new_block;
       i < (unsigned) last_basic_block_for_fn (cfun);
       i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      orig_bb = get_bb_original (bb);

      /* bb->aux holds position in copy sequence initialized by
	 duplicate_loop_to_header_edge.  */
      delta = determine_split_iv_delta ((size_t)bb->aux, n_copies,
					unrolling);
      bb->aux = 0;
      orig_insn = BB_HEAD (orig_bb);
      FOR_BB_INSNS_SAFE (bb, insn, next)
	{
	  if (!INSN_P (insn)
	      || (DEBUG_INSN_P (insn)
		  && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL))
	    continue;

	  while (!INSN_P (orig_insn)
		 || (DEBUG_INSN_P (orig_insn)
		     && (TREE_CODE (INSN_VAR_LOCATION_DECL (orig_insn))
			 == LABEL_DECL)))
	    orig_insn = NEXT_INSN (orig_insn);

	  ivts_templ.insn = orig_insn;
	  ve_templ.insn = orig_insn;

	  /* Apply splitting iv optimization.  */
	  if (opt_info->insns_to_split)
	    {
	      maybe_strip_eq_note_for_split_iv (opt_info, insn);

	      ivts = opt_info->insns_to_split->find (&ivts_templ);

	      if (ivts)
		{
		  gcc_assert (GET_CODE (PATTERN (insn))
			      == GET_CODE (PATTERN (orig_insn)));

		  if (!delta)
		    insert_base_initialization (ivts, insn);
		  split_iv (ivts, insn, delta);
		}
	    }
	  /* Apply variable expansion optimization.  */
	  if (unrolling && opt_info->insns_with_var_to_expand)
	    {
	      ves = (struct var_to_expand *)
		opt_info->insns_with_var_to_expand->find (&ve_templ);
	      if (ves)
		{
		  gcc_assert (GET_CODE (PATTERN (insn))
			      == GET_CODE (PATTERN (orig_insn)));
		  expand_var_during_unrolling (ves, insn);
		}
	    }
	  orig_insn = NEXT_INSN (orig_insn);
	}
    }
  if (!rewrite_original_loop)
    return;

  /* Initialize the variable expansions in the loop preheader
     and take care of combining them at the loop exit.  */
  if (opt_info->insns_with_var_to_expand)
    {
      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
	insert_var_expansion_initialization (ves, opt_info->loop_preheader);
      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
	combine_var_copies_in_loop_exit (ves, opt_info->loop_exit);
    }
  /* Rewrite also the original loop body.  Find them as originals of the blocks
     in the last copied iteration, i.e. those that have
     get_bb_copy (get_bb_original (bb)) == bb.  */
  for (i = opt_info->first_new_block;
       i < (unsigned) last_basic_block_for_fn (cfun);
       i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      orig_bb = get_bb_original (bb);
      if (get_bb_copy (orig_bb) != bb)
	continue;

      delta = determine_split_iv_delta (0, n_copies, unrolling);
      for (orig_insn = BB_HEAD (orig_bb);
	   orig_insn != NEXT_INSN (BB_END (bb));
	   orig_insn = next)
	{
	  next = NEXT_INSN (orig_insn);

	  if (!INSN_P (orig_insn))
	    continue;

	  ivts_templ.insn = orig_insn;
	  if (opt_info->insns_to_split)
	    {
	      maybe_strip_eq_note_for_split_iv (opt_info, orig_insn);

	      ivts = (struct iv_to_split *)
		opt_info->insns_to_split->find (&ivts_templ);
	      if (ivts)
		{
		  if (!delta)
		    insert_base_initialization (ivts, orig_insn);
		  split_iv (ivts, orig_insn, delta);
		}
	    }
	}
    }
}
/* Release OPT_INFO.  */

static void
free_opt_info (struct opt_info *opt_info)
{
  delete opt_info->insns_to_split;
  opt_info->insns_to_split = NULL;
  if (opt_info->insns_with_var_to_expand)
    {
      struct var_to_expand *ves;

      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
	ves->var_expansions.release ();
      delete opt_info->insns_with_var_to_expand;
      opt_info->insns_with_var_to_expand = NULL;
    }
  free (opt_info);
}