/* Loop unrolling.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "tree.h"
#include "hard-reg-set.h"
#include "obstack.h"
#include "profile.h"
#include "predict.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "params.h"
#include "insn-codes.h"
#include "optabs.h"
#include "flags.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "recog.h"
#include "target.h"
#include "dumpfile.h"
/* This pass performs loop unrolling.  We only perform this
   optimization on innermost loops (with a single exception) because
   the impact on performance is greatest here, and we want to avoid
   unnecessary code size growth.  The gain comes from greater sequentiality
   of the code, better code for further passes to optimize and, in some
   cases, fewer tests of exit conditions.  The main problem is code growth,
   which impacts performance negatively due to its effect on caches.

   What we do:

   -- unrolling of loops that roll a constant number of times; this is
      almost always a win, as we get rid of the exit condition tests.
   -- unrolling of loops that roll a number of times that we can compute
      at runtime; we also get rid of exit condition tests here, but there
      is the extra expense of calculating the number of iterations
   -- simple unrolling of remaining loops; this is performed only if we
      are asked to, as the gain is questionable in this case and often
      it may even slow down the code
   For more detailed descriptions of each of those, see the comments at
   the appropriate functions below.

   There are many parameters (defined and described in params.def) that
   control how much we unroll.

   ??? A great problem is that we don't have a good way to determine
   how many times we should unroll the loop; the experiments I have made
   showed that this choice may affect performance on the order of
   several %.  */
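
/* A condensed sketch of the three strategies on the same loop (added here
   for orientation; the detailed examples appear in the comments before the
   respective unroll_loop_* functions below):

     for (i = 0; i < n; i++)
       body;

   -- constant iterations (n known at compile time): peel n % (times + 1)
      copies, then loop over blocks of times + 1 body copies with a single
      exit test per block.
   -- runtime iterations: emit code computing n % (times + 1), branch into
      a switch-like preconditioning sequence, then loop over blocks of
      times + 1 body copies.
   -- stupid unrolling: simply copy the body times + 1 times, keeping the
      exit test in every copy.  */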
/* Information about induction variables to split.  */

struct iv_to_split
{
  rtx_insn *insn;            /* The insn in which the induction variable occurs.  */
  rtx orig_var;              /* The variable (register) for the IV before split.  */
  rtx base_var;              /* The variable on which the values in the further
                                iterations are based.  */
  rtx step;                  /* Step of the induction variable.  */
  struct iv_to_split *next;  /* Next entry in walking order.  */
};

/* Information about accumulators to expand.  */

struct var_to_expand
{
  rtx_insn *insn;            /* The insn in which the variable expansion occurs.  */
  rtx reg;                   /* The accumulator which is expanded.  */
  vec<rtx> var_expansions;   /* The copies of the accumulator which is expanded.  */
  struct var_to_expand *next;/* Next entry in walking order.  */
  enum rtx_code op;          /* The type of the accumulation - addition, subtraction
                                or multiplication.  */
  int expansion_count;       /* Count the number of expansions generated so far.  */
  int reuse_expansion;       /* The expansion we intend to reuse to expand
                                the accumulator.  If REUSE_EXPANSION is 0 reuse
                                the original accumulator.  Else use
                                var_expansions[REUSE_EXPANSION - 1].  */
};
/* Hashtable helper for iv_to_split.  */

struct iv_split_hasher : typed_free_remove <iv_to_split>
{
  typedef iv_to_split *value_type;
  typedef iv_to_split *compare_type;
  static inline hashval_t hash (const iv_to_split *);
  static inline bool equal (const iv_to_split *, const iv_to_split *);
};

/* A hash function for information about insns to split.  */

inline hashval_t
iv_split_hasher::hash (const iv_to_split *ivts)
{
  return (hashval_t) INSN_UID (ivts->insn);
}

/* An equality function for information about insns to split.  */

inline bool
iv_split_hasher::equal (const iv_to_split *i1, const iv_to_split *i2)
{
  return i1->insn == i2->insn;
}

/* Hashtable helper for var_to_expand.  */

struct var_expand_hasher : typed_free_remove <var_to_expand>
{
  typedef var_to_expand *value_type;
  typedef var_to_expand *compare_type;
  static inline hashval_t hash (const var_to_expand *);
  static inline bool equal (const var_to_expand *, const var_to_expand *);
};

/* Return a hash for VES.  */

inline hashval_t
var_expand_hasher::hash (const var_to_expand *ves)
{
  return (hashval_t) INSN_UID (ves->insn);
}

/* Return true if I1 and I2 refer to the same instruction.  */

inline bool
var_expand_hasher::equal (const var_to_expand *i1, const var_to_expand *i2)
{
  return i1->insn == i2->insn;
}
/* Information about optimization applied in
   the unrolled loop.  */

struct opt_info
{
  hash_table<iv_split_hasher> *insns_to_split; /* A hashtable of insns to
                                                  split.  */
  struct iv_to_split *iv_to_split_head;  /* The first iv to split.  */
  struct iv_to_split **iv_to_split_tail; /* Pointer to the tail of the list.  */
  hash_table<var_expand_hasher> *insns_with_var_to_expand; /* A hashtable of
                                          insns with accumulators to expand.  */
  struct var_to_expand *var_to_expand_head;  /* The first var to expand.  */
  struct var_to_expand **var_to_expand_tail; /* Pointer to the tail of the list.  */
  unsigned first_new_block;      /* The first basic block that was
                                    duplicated.  */
  basic_block loop_exit;         /* The loop exit basic block.  */
  basic_block loop_preheader;    /* The loop preheader basic block.  */
};
static void decide_unroll_stupid (struct loop *, int);
static void decide_unroll_constant_iterations (struct loop *, int);
static void decide_unroll_runtime_iterations (struct loop *, int);
static void unroll_loop_stupid (struct loop *);
static void decide_unrolling (int);
static void unroll_loop_constant_iterations (struct loop *);
static void unroll_loop_runtime_iterations (struct loop *);
static struct opt_info *analyze_insns_in_loop (struct loop *);
static void opt_info_start_duplication (struct opt_info *);
static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
static void free_opt_info (struct opt_info *);
static struct var_to_expand *analyze_insn_to_expand_var (struct loop*, rtx_insn *);
static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx, int *);
static struct iv_to_split *analyze_iv_to_split_insn (rtx_insn *);
static void expand_var_during_unrolling (struct var_to_expand *, rtx_insn *);
static void insert_var_expansion_initialization (struct var_to_expand *,
						 basic_block);
static void combine_var_copies_in_loop_exit (struct var_to_expand *,
					     basic_block);
static rtx get_expansion (struct var_to_expand *);
/* Emit a message summarizing the unroll that will be
   performed for LOOP, along with the loop's location LOCUS, if
   appropriate given the dump or -fopt-info settings.  */

static void
report_unroll (struct loop *loop, location_t locus)
{
  int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS;

  if (loop->lpt_decision.decision == LPT_NONE)
    return;

  if (!dump_enabled_p ())
    return;

  dump_printf_loc (report_flags, locus,
                   "loop unrolled %d times",
                   loop->lpt_decision.times);
  if (profile_info)
    dump_printf (report_flags,
                 " (header execution count %d)",
                 (int)loop->header->count);

  dump_printf (report_flags, "\n");
}
/* Decide whether to unroll loops and how much.  */
static void
decide_unrolling (int flags)
{
  struct loop *loop;

  /* Scan the loops, inner ones first.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      loop->lpt_decision.decision = LPT_NONE;
      location_t locus = get_loop_location (loop);

      if (dump_enabled_p ())
	dump_printf_loc (TDF_RTL, locus,
			 ";; *** Considering loop %d at BB %d for "
			 "unrolling ***\n",
			 loop->num, loop->header->index);

      /* Do not peel cold areas.  */
      if (optimize_loop_for_size_p (loop))
	{
	  if (dump_file)
	    fprintf (dump_file, ";; Not considering loop, cold area\n");
	  continue;
	}

      /* Can the loop be manipulated?  */
      if (!can_duplicate_loop_p (loop))
	{
	  if (dump_file)
	    fprintf (dump_file,
		     ";; Not considering loop, cannot duplicate\n");
	  continue;
	}

      /* Skip non-innermost loops.  */
      if (loop->inner)
	{
	  if (dump_file)
	    fprintf (dump_file, ";; Not considering loop, is not innermost\n");
	  continue;
	}

      loop->ninsns = num_loop_insns (loop);
      loop->av_ninsns = average_num_loop_insns (loop);

      /* Try transformations one by one in decreasing order of
	 priority.  */

      decide_unroll_constant_iterations (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
	decide_unroll_runtime_iterations (loop, flags);
      if (loop->lpt_decision.decision == LPT_NONE)
	decide_unroll_stupid (loop, flags);

      report_unroll (loop, locus);
    }
}
/* Unroll LOOPS.  */
void
unroll_loops (int flags)
{
  struct loop *loop;
  bool changed = false;

  /* Now decide rest of unrolling.  */
  decide_unrolling (flags);

  /* Scan the loops, inner ones first.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* And perform the appropriate transformations.  */
      switch (loop->lpt_decision.decision)
	{
	case LPT_UNROLL_CONSTANT:
	  unroll_loop_constant_iterations (loop);
	  changed = true;
	  break;
	case LPT_UNROLL_RUNTIME:
	  unroll_loop_runtime_iterations (loop);
	  changed = true;
	  break;
	case LPT_UNROLL_STUPID:
	  unroll_loop_stupid (loop);
	  changed = true;
	  break;
	case LPT_NONE:
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  if (changed)
    {
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
    }

  iv_analysis_done ();
}
/* Check whether the exit of the LOOP is at the end of the loop body.  */

static bool
loop_exit_at_end_p (struct loop *loop)
{
  struct niter_desc *desc = get_simple_loop_desc (loop);
  rtx_insn *insn;

  /* We should never have a conditional in the latch block.  */
  gcc_assert (desc->in_edge->dest != loop->header);

  if (desc->in_edge->dest != loop->latch)
    return false;

  /* Check that the latch is empty.  */
  FOR_BB_INSNS (loop->latch, insn)
    {
      if (INSN_P (insn) && active_insn_p (insn))
	return false;
    }

  return true;
}
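
/* An illustrative note (not in the original sources): a test-at-the-top
   loop such as "while (i < n) { body; i++; }" exits from the header, so
   loop_exit_at_end_p returns false and unrolling keeps the exit test in
   the first copy; a do-while loop such as "do { body; i++; } while (i < n)"
   exits just before the latch, loop_exit_at_end_p returns true, and the
   exit test is kept in the last copy instead.  */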
/* Decide whether to unroll LOOP iterating a constant number of times
   and how much.  */

static void
decide_unroll_constant_iterations (struct loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
  struct niter_desc *desc;
  widest_int iterations;

  if (!(flags & UAP_UNROLL))
    {
      /* We were not asked to, just return back silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file,
	     "\n;; Considering unrolling loop with constant "
	     "number of iterations\n");

  /* nunroll = total number of copies of the original loop body in
     unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
  nunroll_by_av
    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);

  if (targetm.loop_unroll_adjust)
    nunroll = targetm.loop_unroll_adjust (nunroll, loop);

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
	fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check number of iterations.  */
  if (!desc->simple_p || !desc->const_iter || desc->assumptions)
    {
      if (dump_file)
	fprintf (dump_file,
		 ";; Unable to prove that the loop iterates constant times\n");
      return;
    }

  /* Check whether the loop rolls enough to consider.
     Consult also loop bounds and profile; in case the loop has more
     than one exit it may well loop less than the determined maximal
     number of iterations.  */
  if (desc->niter < 2 * nunroll
      || ((get_estimated_loop_iterations (loop, &iterations)
	   || get_max_loop_iterations (loop, &iterations))
	  && wi::ltu_p (iterations, 2 * nunroll)))
    {
      if (dump_file)
	fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success; now compute the number of iterations to unroll.  We alter
     nunroll so that as few copies of the loop body as possible are
     necessary, while still not decreasing the number of unrollings
     too much (at most by 1).  */
  best_copies = 2 * nunroll + 10;

  i = 2 * nunroll + 2;
  if (i - 1 >= desc->niter)
    i = desc->niter - 2;

  for (; i >= nunroll - 1; i--)
    {
      unsigned exit_mod = desc->niter % (i + 1);

      if (!loop_exit_at_end_p (loop))
	n_copies = exit_mod + i + 1;
      else if (exit_mod != (unsigned) i
	       || desc->noloop_assumptions != NULL_RTX)
	n_copies = exit_mod + i + 2;
      else
	n_copies = i + 1;

      if (n_copies < best_copies)
	{
	  best_copies = n_copies;
	  best_unroll = i;
	}
    }

  loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
  loop->lpt_decision.times = best_unroll;
}
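
/* A worked example of the selection loop above (illustrative numbers, not
   taken from any particular target): suppose desc->niter == 102, the exit
   test is at the beginning, and the size limits cap nunroll at 8.  The
   loop then tries i from 18 down to 7, computing
   n_copies = 102 % (i + 1) + i + 1 for each candidate.  The minimum, 12
   copies, is reached at i == 9: two peeled iterations plus an unrolled
   body containing ten copies (102 == 2 + 10 * 10), so lpt_decision.times
   becomes 9 even though up to 18 was allowed.  */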
/* Unroll LOOP with constant number of iterations LOOP->LPT_DECISION.TIMES times.
   The transformation does this:

   for (i = 0; i < 102; i++)
     body;

   ==> (LOOP->LPT_DECISION.TIMES == 3)

   i = 0;
   body; i++;
   body; i++;
   while (i < 102)
     {
       body; i++;
       body; i++;
       body; i++;
       body; i++;
     }
  */
static void
unroll_loop_constant_iterations (struct loop *loop)
{
  unsigned HOST_WIDE_INT niter;
  unsigned exit_mod;
  sbitmap wont_exit;
  unsigned i;
  edge e;
  unsigned max_unroll = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  bool exit_at_end = loop_exit_at_end_p (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  niter = desc->niter;

  /* Should not get here (such loop should be peeled instead).  */
  gcc_assert (niter > max_unroll + 1);

  exit_mod = niter % (max_unroll + 1);

  wont_exit = sbitmap_alloc (max_unroll + 1);
  bitmap_ones (wont_exit);

  auto_vec<edge> remove_edges;
  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  if (!exit_at_end)
    {
      /* The exit is not at the end of the loop; leave the exit test
	 in the first copy, so that loops that start with a test
	 of the exit condition have a continuous body after unrolling.  */

      if (dump_file)
	fprintf (dump_file, ";; Condition at beginning of loop.\n");

      /* Peel exit_mod iterations.  */
      bitmap_clear_bit (wont_exit, 0);
      if (desc->noloop_assumptions)
	bitmap_clear_bit (wont_exit, 1);

      if (exit_mod)
	{
	  opt_info_start_duplication (opt_info);
	  ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					      exit_mod,
					      wont_exit, desc->out_edge,
					      &remove_edges,
					      DLTHE_FLAG_UPDATE_FREQ
					      | (opt_info && exit_mod > 1
						 ? DLTHE_RECORD_COPY_NUMBER
						 : 0));
	  gcc_assert (ok);

	  if (opt_info && exit_mod > 1)
	    apply_opt_in_copies (opt_info, exit_mod, false, false);

	  desc->noloop_assumptions = NULL_RTX;
	  desc->niter -= exit_mod;
	  loop->nb_iterations_upper_bound -= exit_mod;
	  if (loop->any_estimate
	      && wi::leu_p (exit_mod, loop->nb_iterations_estimate))
	    loop->nb_iterations_estimate -= exit_mod;
	  else
	    loop->any_estimate = false;
	}

      bitmap_set_bit (wont_exit, 1);
    }
  else
    {
      /* Leave the exit test in the last copy, for the same reason as
	 above, if the loop tests the condition at the end of the loop
	 body.  */

      if (dump_file)
	fprintf (dump_file, ";; Condition at end of loop.\n");

      /* We know that niter >= max_unroll + 2; so we do not need to care about
	 the case when we would exit before reaching the loop.  So just peel
	 exit_mod + 1 iterations.  */
      if (exit_mod != max_unroll
	  || desc->noloop_assumptions)
	{
	  bitmap_clear_bit (wont_exit, 0);
	  if (desc->noloop_assumptions)
	    bitmap_clear_bit (wont_exit, 1);

	  opt_info_start_duplication (opt_info);
	  ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					      exit_mod + 1,
					      wont_exit, desc->out_edge,
					      &remove_edges,
					      DLTHE_FLAG_UPDATE_FREQ
					      | (opt_info && exit_mod > 0
						 ? DLTHE_RECORD_COPY_NUMBER
						 : 0));
	  gcc_assert (ok);

	  if (opt_info && exit_mod > 0)
	    apply_opt_in_copies (opt_info, exit_mod + 1, false, false);

	  desc->niter -= exit_mod + 1;
	  loop->nb_iterations_upper_bound -= exit_mod + 1;
	  if (loop->any_estimate
	      && wi::leu_p (exit_mod + 1, loop->nb_iterations_estimate))
	    loop->nb_iterations_estimate -= exit_mod + 1;
	  else
	    loop->any_estimate = false;
	  desc->noloop_assumptions = NULL_RTX;

	  bitmap_set_bit (wont_exit, 0);
	  bitmap_set_bit (wont_exit, 1);
	}

      bitmap_clear_bit (wont_exit, max_unroll);
    }

  /* Now unroll the loop.  */

  opt_info_start_duplication (opt_info);
  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
				      max_unroll,
				      wont_exit, desc->out_edge,
				      &remove_edges,
				      DLTHE_FLAG_UPDATE_FREQ
				      | (opt_info
					 ? DLTHE_RECORD_COPY_NUMBER
					 : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, max_unroll, true, true);
      free_opt_info (opt_info);
    }

  free (wont_exit);

  if (exit_at_end)
    {
      basic_block exit_block = get_bb_copy (desc->in_edge->src);
      /* Find a new in and out edge; they are in the last copy we have made.  */

      if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
	{
	  desc->out_edge = EDGE_SUCC (exit_block, 0);
	  desc->in_edge = EDGE_SUCC (exit_block, 1);
	}
      else
	{
	  desc->out_edge = EDGE_SUCC (exit_block, 1);
	  desc->in_edge = EDGE_SUCC (exit_block, 0);
	}
    }

  desc->niter /= max_unroll + 1;
  loop->nb_iterations_upper_bound
    = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
  desc->niter_expr = GEN_INT (desc->niter);

  /* Remove the edges.  */
  FOR_EACH_VEC_ELT (remove_edges, i, e)
    remove_path (e);

  if (dump_file)
    fprintf (dump_file,
	     ";; Unrolled loop %d times, constant # of iterations %i insns\n",
	     max_unroll, num_loop_insns (loop));
}
/* Decide whether to unroll LOOP iterating a runtime-computable number of
   times and how much.  */
static void
decide_unroll_runtime_iterations (struct loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, i;
  struct niter_desc *desc;
  widest_int iterations;

  if (!(flags & UAP_UNROLL))
    {
      /* We were not asked to, just return back silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file,
	     "\n;; Considering unrolling loop with runtime "
	     "computable number of iterations\n");

  /* nunroll = total number of copies of the original loop body in
     unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
  nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);

  if (targetm.loop_unroll_adjust)
    nunroll = targetm.loop_unroll_adjust (nunroll, loop);

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
	fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check simpleness.  */
  if (!desc->simple_p || desc->assumptions)
    {
      if (dump_file)
	fprintf (dump_file,
		 ";; Unable to prove that the number of iterations "
		 "can be counted at runtime\n");
      return;
    }

  if (desc->const_iter)
    {
      if (dump_file)
	fprintf (dump_file, ";; Loop iterates constant times\n");
      return;
    }

  /* Check whether the loop rolls.  */
  if ((get_estimated_loop_iterations (loop, &iterations)
       || get_max_loop_iterations (loop, &iterations))
      && wi::ltu_p (iterations, 2 * nunroll))
    {
      if (dump_file)
	fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success; now force nunroll to be a power of 2, as we are unable to
     cope with overflows in the computation of the number of iterations.  */
  for (i = 1; 2 * i <= nunroll; i *= 2)
    continue;

  loop->lpt_decision.decision = LPT_UNROLL_RUNTIME;
  loop->lpt_decision.times = i - 1;
}
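
/* For instance (illustrative values): with nunroll == 7 the loop above
   leaves i == 4, the largest power of two not exceeding 7, so
   lpt_decision.times becomes 3 and the body will appear 4 times in the
   unrolled loop.  The preconditioning code emitted later can then reduce
   the iteration count modulo 4 with a plain bitwise AND.  */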
/* Splits edge E and inserts the sequence of instructions INSNS on it, and
   returns the newly created block.  If INSNS is NULL_RTX, nothing is changed
   and NULL is returned instead.  */

basic_block
split_edge_and_insert (edge e, rtx_insn *insns)
{
  basic_block bb;

  if (!insns)
    return NULL;
  bb = split_edge (e);
  emit_insn_after (insns, BB_END (bb));

  /* ??? We used to assume that INSNS can contain control flow insns, and
     that we had to try to find sub basic blocks in BB to maintain a valid
     CFG.  For this purpose we used to set the BB_SUPERBLOCK flag on BB
     and call break_superblocks when going out of cfglayout mode.  But it
     turns out that this never happens; and that if it does ever happen,
     the verify_flow_info at the end of the RTL loop passes would fail.

     There are two reasons why we expected we could have control flow insns
     in INSNS.  The first is when a comparison has to be done in parts, and
     the second is when the number of iterations is computed for loops with
     the number of iterations known at runtime.  In both cases, test cases
     to get control flow in INSNS appear to be impossible to construct:

     * If do_compare_rtx_and_jump needs several branches to do comparison
       in a mode that needs comparison by parts, we cannot analyze the
       number of iterations of the loop, and we never get to unrolling it.

     * The code in expand_divmod that was suspected to cause creation of
       branching code seems to be only accessed for signed division.  The
       divisions used by # of iterations analysis are always unsigned.
       Problems might arise on architectures that emit branching code
       for some operations that may appear in the unroller (especially
       for division), but we have no such architectures.

     Considering all this, it was decided that we should for now assume
     that INSNS can in theory contain control flow insns, but in practice
     it never does.  So we don't handle the theoretical case, and should
     a real failure ever show up, we have a pretty good clue for how to
     fix it.  */

  return bb;
}
/* Prepare a sequence comparing OP0 with OP1 using COMP and jumping to LABEL if
   true, with probability PROB.  If CINSN is not NULL, it is the insn to copy
   in order to create a jump.  */

static rtx_insn *
compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp,
		      rtx_code_label *label, int prob, rtx_insn *cinsn)
{
  rtx_insn *seq;
  rtx_jump_insn *jump;
  rtx cond;
  machine_mode mode;

  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  start_sequence ();
  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      /* A hack -- there seems to be no easy generic way to make a
	 conditional jump from a ccmode comparison.  */
      gcc_assert (cinsn);
      cond = XEXP (SET_SRC (pc_set (cinsn)), 0);
      gcc_assert (GET_CODE (cond) == comp);
      gcc_assert (rtx_equal_p (op0, XEXP (cond, 0)));
      gcc_assert (rtx_equal_p (op1, XEXP (cond, 1)));
      emit_jump_insn (copy_insn (PATTERN (cinsn)));
      jump = as_a <rtx_jump_insn *> (get_last_insn ());
      JUMP_LABEL (jump) = JUMP_LABEL (cinsn);
      LABEL_NUSES (JUMP_LABEL (jump))++;
      redirect_jump (jump, label, 0);
    }
  else
    {
      gcc_assert (!cinsn);

      op0 = force_operand (op0, NULL_RTX);
      op1 = force_operand (op1, NULL_RTX);
      do_compare_rtx_and_jump (op0, op1, comp, 0,
			       mode, NULL_RTX, NULL, label, -1);
      jump = as_a <rtx_jump_insn *> (get_last_insn ());
      jump->set_jump_target (label);
      LABEL_NUSES (label)++;
    }
  add_int_reg_note (jump, REG_BR_PROB, prob);

  seq = get_insns ();
  end_sequence ();

  return seq;
}
/* Unroll LOOP, for which we are able to count the number of iterations at
   runtime, LOOP->LPT_DECISION.TIMES times.  The transformation does this
   (with some extra care for the case n < 0):

   for (i = 0; i < n; i++)
     body;

   ==> (LOOP->LPT_DECISION.TIMES == 3)

   i = 0;
   mod = n % 4;

   switch (mod)
     {
       case 3:
	 body; i++;
       case 2:
	 body; i++;
       case 1:
	 body; i++;
       case 0: ;
     }

   while (i < n)
     {
       body; i++;
       body; i++;
       body; i++;
       body; i++;
     }
  */
static void
unroll_loop_runtime_iterations (struct loop *loop)
{
  rtx old_niter, niter, tmp;
  rtx_insn *init_code, *branch_code;
  unsigned i, j, p;
  basic_block preheader, *body, swtch, ezc_swtch;
  sbitmap wont_exit;
  int may_exit_copy;
  unsigned n_peel;
  edge e;
  bool extra_zero_check, last_may_exit;
  unsigned max_unroll = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  bool exit_at_end = loop_exit_at_end_p (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  /* Remember blocks whose dominators will have to be updated.  */
  auto_vec<basic_block> dom_bbs;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      vec<basic_block> ldom;
      basic_block bb;

      ldom = get_dominated_by (CDI_DOMINATORS, body[i]);
      FOR_EACH_VEC_ELT (ldom, j, bb)
	if (!flow_bb_inside_loop_p (loop, bb))
	  dom_bbs.safe_push (bb);

      ldom.release ();
    }
  free (body);

  if (!exit_at_end)
    {
      /* Leave exit in first copy (for explanation why see comment in
	 unroll_loop_constant_iterations).  */
      may_exit_copy = 0;
      n_peel = max_unroll - 1;
      extra_zero_check = true;
      last_may_exit = false;
    }
  else
    {
      /* Leave exit in last copy (for explanation why see comment in
	 unroll_loop_constant_iterations).  */
      may_exit_copy = max_unroll;
      n_peel = max_unroll;
      extra_zero_check = false;
      last_may_exit = true;
    }

  /* Get expression for number of iterations.  */
  start_sequence ();
  old_niter = niter = gen_reg_rtx (desc->mode);
  tmp = force_operand (copy_rtx (desc->niter_expr), niter);
  if (tmp != niter)
    emit_move_insn (niter, tmp);

  /* Count modulo by ANDing it with max_unroll; we use the fact that
     the number of unrollings is a power of two, and thus this is correct
     even if there is overflow in the computation.  */
  niter = expand_simple_binop (desc->mode, AND,
			       niter, gen_int_mode (max_unroll, desc->mode),
			       NULL_RTX, 0, OPTAB_LIB_WIDEN);
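
  /* Illustrative note (not in the original sources): because
     max_unroll + 1 is a power of two, niter & max_unroll equals
     niter % (max_unroll + 1) for unsigned niter.  E.g. with
     LPT_DECISION.TIMES == 3 this computes mod = n & 3, matching the
     "mod = n % 4" in the example above, and it remains correct even if
     the expression computing n wrapped around.  */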

  init_code = get_insns ();
  end_sequence ();
  unshare_all_rtl_in_chain (init_code);

  /* Precondition the loop.  */
  split_edge_and_insert (loop_preheader_edge (loop), init_code);

  auto_vec<edge> remove_edges;

  wont_exit = sbitmap_alloc (max_unroll + 2);

  /* Peel the first copy of loop body (almost always we must leave the exit
     test here; the only exception is when we have an extra zero check and
     the number of iterations is reliable).  Also record the place of the
     (possible) extra zero check.  */
  bitmap_clear (wont_exit);
  if (extra_zero_check
      && !desc->noloop_assumptions)
    bitmap_set_bit (wont_exit, 1);
  ezc_swtch = loop_preheader_edge (loop)->src;
  ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
				      1, wont_exit, desc->out_edge,
				      &remove_edges,
				      DLTHE_FLAG_UPDATE_FREQ);
  gcc_assert (ok);

  /* Record the place where switch will be built for preconditioning.  */
  swtch = split_edge (loop_preheader_edge (loop));

  for (i = 0; i < n_peel; i++)
    {
      /* Peel the copy.  */
      bitmap_clear (wont_exit);
      if (i != n_peel - 1 || !last_may_exit)
	bitmap_set_bit (wont_exit, 1);
      ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					  1, wont_exit, desc->out_edge,
					  &remove_edges,
					  DLTHE_FLAG_UPDATE_FREQ);
      gcc_assert (ok);

      /* Create item for switch.  */
      j = n_peel - i - (extra_zero_check ? 0 : 1);
      p = REG_BR_PROB_BASE / (i + 2);

      preheader = split_edge (loop_preheader_edge (loop));
      branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
					  block_label (preheader), p,
					  NULL);

      /* We rely on the fact that the compare and jump cannot be optimized out,
	 and hence the cfg we create is correct.  */
      gcc_assert (branch_code != NULL_RTX);

      swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code);
      set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
      single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p;
      e = make_edge (swtch, preheader,
		     single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
      e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
      e->probability = p;
    }

  if (extra_zero_check)
    {
      /* Add branch for zero iterations.  */
      p = REG_BR_PROB_BASE / (max_unroll + 1);
      swtch = ezc_swtch;
      preheader = split_edge (loop_preheader_edge (loop));
      branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
					  block_label (preheader), p,
					  NULL);
      gcc_assert (branch_code != NULL_RTX);

      swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code);
      set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
      single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p;
      e = make_edge (swtch, preheader,
		     single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
      e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
      e->probability = p;
    }

  /* Recount dominators for outer blocks.  */
  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);

  /* And unroll loop.  */

  bitmap_ones (wont_exit);
  bitmap_clear_bit (wont_exit, may_exit_copy);
  opt_info_start_duplication (opt_info);

  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
				      max_unroll,
				      wont_exit, desc->out_edge,
				      &remove_edges,
				      DLTHE_FLAG_UPDATE_FREQ
				      | (opt_info
					 ? DLTHE_RECORD_COPY_NUMBER
					 : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, max_unroll, true, true);
      free_opt_info (opt_info);
    }

  free (wont_exit);

  if (exit_at_end)
    {
      basic_block exit_block = get_bb_copy (desc->in_edge->src);
      /* Find a new in and out edge; they are in the last copy we have
	 made.  */

      if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
	{
	  desc->out_edge = EDGE_SUCC (exit_block, 0);
	  desc->in_edge = EDGE_SUCC (exit_block, 1);
	}
      else
	{
	  desc->out_edge = EDGE_SUCC (exit_block, 1);
	  desc->in_edge = EDGE_SUCC (exit_block, 0);
	}
    }

  /* Remove the edges.  */
  FOR_EACH_VEC_ELT (remove_edges, i, e)
    remove_path (e);

  /* We must be careful when updating the number of iterations due to
     preconditioning and the fact that the value must be valid at entry
     of the loop.  After passing through the above code, we see that
     the correct new number of iterations is this:  */
  gcc_assert (!desc->const_iter);
  desc->niter_expr =
    simplify_gen_binary (UDIV, desc->mode, old_niter,
			 gen_int_mode (max_unroll + 1, desc->mode));
  loop->nb_iterations_upper_bound
    = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
  if (exit_at_end)
    {
      desc->niter_expr =
	simplify_gen_binary (MINUS, desc->mode, desc->niter_expr, const1_rtx);
      desc->noloop_assumptions = NULL_RTX;
      --loop->nb_iterations_upper_bound;
      if (loop->any_estimate
	  && loop->nb_iterations_estimate != 0)
	--loop->nb_iterations_estimate;
      else
	loop->any_estimate = false;
    }

  if (dump_file)
    fprintf (dump_file,
	     ";; Unrolled loop %d times, counting # of iterations "
	     "in runtime, %i insns\n",
	     max_unroll, num_loop_insns (loop));
}
/* Decide whether to unroll LOOP stupidly and how much.  */
static void
decide_unroll_stupid (struct loop *loop, int flags)
{
  unsigned nunroll, nunroll_by_av, i;
  struct niter_desc *desc;
  widest_int iterations;

  if (!(flags & UAP_UNROLL_ALL))
    {
      /* We were not asked to, just return back silently.  */
      return;
    }

  if (dump_file)
    fprintf (dump_file, "\n;; Considering unrolling loop stupidly\n");

  /* nunroll = total number of copies of the original loop body in
     unrolled loop (i.e. if it is 2, we have to duplicate loop body once).  */
  nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
  nunroll_by_av
    = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
  if (nunroll > nunroll_by_av)
    nunroll = nunroll_by_av;
  if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
    nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);

  if (targetm.loop_unroll_adjust)
    nunroll = targetm.loop_unroll_adjust (nunroll, loop);

  /* Skip big loops.  */
  if (nunroll <= 1)
    {
      if (dump_file)
	fprintf (dump_file, ";; Not considering loop, is too big\n");
      return;
    }

  /* Check for simple loops.  */
  desc = get_simple_loop_desc (loop);

  /* Check simpleness.  */
  if (desc->simple_p && !desc->assumptions)
    {
      if (dump_file)
	fprintf (dump_file, ";; The loop is simple\n");
      return;
    }

  /* Do not unroll loops with branches inside -- it increases the number
     of mispredicts.
     TODO: this heuristic needs tuning; a call inside the loop body
     is also a relatively good reason not to unroll.  */
  if (num_loop_branches (loop) > 1)
    {
      if (dump_file)
	fprintf (dump_file, ";; Not unrolling, contains branches\n");
      return;
    }

  /* Check whether the loop rolls.  */
  if ((get_estimated_loop_iterations (loop, &iterations)
       || get_max_loop_iterations (loop, &iterations))
      && wi::ltu_p (iterations, 2 * nunroll))
    {
      if (dump_file)
	fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
      return;
    }

  /* Success.  Now force nunroll to be a power of 2, as it seems that this
     improves results (partially because of better alignments, partially
     because of some dark magic).  */
  for (i = 1; 2 * i <= nunroll; i *= 2)
    continue;

  loop->lpt_decision.decision = LPT_UNROLL_STUPID;
  loop->lpt_decision.times = i - 1;
}
/* Unroll a LOOP LOOP->LPT_DECISION.TIMES times.  The transformation does this:

   while (cond)
     body;

   ==> (LOOP->LPT_DECISION.TIMES == 3)

   while (cond)
     {
       body;
       if (!cond) break;
       body;
       if (!cond) break;
       body;
       if (!cond) break;
       body;
     }
  */
static void
unroll_loop_stupid (struct loop *loop)
{
  sbitmap wont_exit;
  unsigned nunroll = loop->lpt_decision.times;
  struct niter_desc *desc = get_simple_loop_desc (loop);
  struct opt_info *opt_info = NULL;
  bool ok;

  if (flag_split_ivs_in_unroller
      || flag_variable_expansion_in_unroller)
    opt_info = analyze_insns_in_loop (loop);

  wont_exit = sbitmap_alloc (nunroll + 1);
  bitmap_clear (wont_exit);
  opt_info_start_duplication (opt_info);

  ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
				      nunroll, wont_exit,
				      NULL, NULL,
				      DLTHE_FLAG_UPDATE_FREQ
				      | (opt_info
					 ? DLTHE_RECORD_COPY_NUMBER
					 : 0));
  gcc_assert (ok);

  if (opt_info)
    {
      apply_opt_in_copies (opt_info, nunroll, true, true);
      free_opt_info (opt_info);
    }

  free (wont_exit);

  if (desc->simple_p)
    {
      /* We indeed may get here provided that there are nontrivial assumptions
	 for a loop to be really simple.  We could update the counts, but the
	 problem is that we are unable to decide which exit will be taken
	 (not really true in case the number of iterations is constant,
	 but no one will do anything with this information, so we do not
	 worry about it).  */
      desc->simple_p = false;
    }

  if (dump_file)
    fprintf (dump_file, ";; Unrolled loop %d times, %i insns\n",
	     nunroll, num_loop_insns (loop));
}
/* Returns true if REG is referenced in one nondebug insn in LOOP.
   Set *DEBUG_USES to the number of debug insns that reference the
   variable.  */

static bool
referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg,
				  int *debug_uses)
{
  basic_block *body, bb;
  unsigned i;
  int count_ref = 0;
  rtx_insn *insn;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      FOR_BB_INSNS (bb, insn)
	if (!rtx_referenced_p (reg, insn))
	  continue;
	else if (DEBUG_INSN_P (insn))
	  ++*debug_uses;
	else if (++count_ref > 1)
	  break;
    }
  free (body);
  return (count_ref == 1);
}
/* Reset the DEBUG_USES debug insns in LOOP that reference REG.  */

static void
reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses)
{
  basic_block *body, bb;
  unsigned i;
  rtx_insn *insn;

  body = get_loop_body (loop);
  for (i = 0; debug_uses && i < loop->num_nodes; i++)
    {
      bb = body[i];

      FOR_BB_INSNS (bb, insn)
	if (!DEBUG_INSN_P (insn) || !rtx_referenced_p (reg, insn))
	  continue;
	else
	  {
	    validate_change (insn, &INSN_VAR_LOCATION_LOC (insn),
			     gen_rtx_UNKNOWN_VAR_LOC (), 0);
	    if (!--debug_uses)
	      break;
	  }
    }
  free (body);
}
/* Determine whether INSN contains an accumulator
   which can be expanded into separate copies,
   one for each copy of the LOOP body.

   for (i = 0 ; i < n; i++)
     sum += a[i];

   ==>

   sum += a[i]
   ....
   i = i+1;
   sum1 += a[i]
   ....
   i = i+1
   sum2 += a[i];
   ....

   Return NULL if INSN contains no opportunity for expansion of the
   accumulator.  Otherwise, allocate a VAR_TO_EXPAND structure, fill it
   with the relevant information and return a pointer to it.  */

static struct var_to_expand *
analyze_insn_to_expand_var (struct loop *loop, rtx_insn *insn)
{
  rtx set, dest, src;
  struct var_to_expand *ves;
  unsigned accum_pos;
  enum rtx_code code;
  int debug_uses = 0;

  set = single_set (insn);
  if (!set)
    return NULL;

  dest = SET_DEST (set);
  src = SET_SRC (set);
  code = GET_CODE (src);

  if (code != PLUS && code != MINUS && code != MULT && code != FMA)
    return NULL;

  if (FLOAT_MODE_P (GET_MODE (dest)))
    {
      if (!flag_associative_math)
	return NULL;
      /* In the case of FMA, we're also changing the rounding.  */
      if (code == FMA && !flag_unsafe_math_optimizations)
	return NULL;
    }

  /* Hmm, this is a bit paradoxical.  We know that INSN is a valid insn
     in MD.  But if there is no optab to generate the insn, we cannot
     perform the variable expansion.  This can happen if an MD provides
     an insn but not a named pattern to generate it, for example to avoid
     producing code that needs additional mode switches like for x87/mmx.

     So we check have_insn_for which looks for an optab for the operation
     in SRC.  If it doesn't exist, we can't perform the expansion even
     though INSN is valid.  */
  if (!have_insn_for (code, GET_MODE (src)))
    return NULL;

  if (!REG_P (dest)
      && !(GET_CODE (dest) == SUBREG
	   && REG_P (SUBREG_REG (dest))))
    return NULL;

  /* Find the accumulator use within the operation.  */
  if (code == FMA)
    {
      /* We only support accumulation via FMA in the ADD position.  */
      if (!rtx_equal_p (dest, XEXP (src, 2)))
	return NULL;
      accum_pos = 2;
    }
  else if (rtx_equal_p (dest, XEXP (src, 0)))
    accum_pos = 0;
  else if (rtx_equal_p (dest, XEXP (src, 1)))
    {
      /* The method of expansion that we are using (initializing the
	 expansions with zero and summing them at the end of the
	 computation) will yield wrong results for (x = something - x),
	 thus avoid using it in that case.  */
      if (code == MINUS)
	return NULL;
      accum_pos = 1;
    }
  else
    return NULL;

  /* It must not otherwise be used.  */
  if (code == FMA)
    {
      if (rtx_referenced_p (dest, XEXP (src, 0))
	  || rtx_referenced_p (dest, XEXP (src, 1)))
	return NULL;
    }
  else if (rtx_referenced_p (dest, XEXP (src, 1 - accum_pos)))
    return NULL;

  /* It must be used in exactly one insn.  */
  if (!referenced_in_one_insn_in_loop_p (loop, dest, &debug_uses))
    return NULL;

  if (dump_file)
    {
      fprintf (dump_file, "\n;; Expanding Accumulator ");
      print_rtl (dump_file, dest);
      fprintf (dump_file, "\n");
    }

  if (debug_uses)
    /* Instead of resetting the debug insns, we could replace each
       debug use in the loop with the sum or product of all expanded
       accumulators.  Since we'll only know of all expansions at the
       end, we'd have to keep track of which vars_to_expand a debug
       insn in the loop references, take note of each copy of the
       debug insn during unrolling, and when it's all done, compute
       the sum or product of each variable and adjust the original
       debug insn and each copy thereof.  What a pain!  */
    reset_debug_uses_in_loop (loop, dest, debug_uses);

  /* Record the accumulator to expand.  */
  ves = XNEW (struct var_to_expand);
  ves->insn = insn;
  ves->reg = copy_rtx (dest);
  ves->var_expansions.create (1);
  ves->next = NULL;
  ves->op = GET_CODE (src);
  ves->expansion_count = 0;
  ves->reuse_expansion = 0;
  return ves;
}
/* Determine whether there is an induction variable in INSN that
   we would like to split during unrolling.

   I.e. replace

   i = i + 1;
   ...
   i = i + 1;
   ...
   i = i + 1;
   ...

   type chains by

   i0 = i + 1
   ...
   i = i0 + 1
   ...
   i = i0 + 2
   ...

   Return NULL if INSN contains no interesting IVs.  Otherwise, allocate
   an IV_TO_SPLIT structure, fill it with the relevant information and return a
   pointer to it.  */

static struct iv_to_split *
analyze_iv_to_split_insn (rtx_insn *insn)
{
  rtx set, dest;
  struct rtx_iv iv;
  struct iv_to_split *ivts;
  bool ok;

  /* For now we just split the basic induction variables.  Later this may be
     extended for example by selecting also addresses of memory references.  */
  set = single_set (insn);
  if (!set)
    return NULL;

  dest = SET_DEST (set);
  if (!REG_P (dest))
    return NULL;

  if (!biv_p (insn, dest))
    return NULL;

  ok = iv_analyze_result (insn, dest, &iv);

  /* This used to be an assert under the assumption that if biv_p returns
     true that iv_analyze_result must also return true.  However, that
     assumption is not strictly correct as evidenced by pr25569.

     Returning NULL when iv_analyze_result returns false is safe and
     avoids the problems in pr25569 until the iv_analyze_* routines
     can be fixed, which is apparently hard and time consuming
     according to their author.  */
  if (! ok)
    return NULL;

  if (iv.step == const0_rtx
      || iv.mode != iv.extend_mode)
    return NULL;

  /* Record the insn to split.  */
  ivts = XNEW (struct iv_to_split);
  ivts->insn = insn;
  ivts->orig_var = dest;
  ivts->base_var = NULL_RTX;
  ivts->step = iv.step;
  ivts->next = NULL;

  return ivts;
}
/* Determine which of the insns in LOOP can be optimized.
   Return an OPT_INFO struct with the relevant hash tables filled
   with all insns to be optimized.  The FIRST_NEW_BLOCK field
   is undefined for the return value.  */

static struct opt_info *
analyze_insns_in_loop (struct loop *loop)
{
  basic_block *body, bb;
  unsigned i;
  struct opt_info *opt_info = XCNEW (struct opt_info);
  rtx_insn *insn;
  struct iv_to_split *ivts = NULL;
  struct var_to_expand *ves = NULL;
  iv_to_split **slot1;
  var_to_expand **slot2;
  vec<edge> edges = get_loop_exit_edges (loop);
  edge exit;
  bool can_apply = false;

  iv_analysis_loop_init (loop);

  body = get_loop_body (loop);

  if (flag_split_ivs_in_unroller)
    {
      opt_info->insns_to_split
	= new hash_table<iv_split_hasher> (5 * loop->num_nodes);
      opt_info->iv_to_split_head = NULL;
      opt_info->iv_to_split_tail = &opt_info->iv_to_split_head;
    }

  /* Record the loop exit bb and loop preheader before the unrolling.  */
  opt_info->loop_preheader = loop_preheader_edge (loop)->src;

  if (edges.length () == 1)
    {
      exit = edges[0];
      if (!(exit->flags & EDGE_COMPLEX))
	{
	  opt_info->loop_exit = split_edge (exit);
	  can_apply = true;
	}
    }

  if (flag_variable_expansion_in_unroller
      && can_apply)
    {
      opt_info->insns_with_var_to_expand
	= new hash_table<var_expand_hasher> (5 * loop->num_nodes);
      opt_info->var_to_expand_head = NULL;
      opt_info->var_to_expand_tail = &opt_info->var_to_expand_head;
    }

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	continue;

      FOR_BB_INSNS (bb, insn)
	{
	  if (!INSN_P (insn))
	    continue;

	  if (opt_info->insns_to_split)
	    ivts = analyze_iv_to_split_insn (insn);

	  if (ivts)
	    {
	      slot1 = opt_info->insns_to_split->find_slot (ivts, INSERT);
	      gcc_assert (*slot1 == NULL);
	      *slot1 = ivts;
	      *opt_info->iv_to_split_tail = ivts;
	      opt_info->iv_to_split_tail = &ivts->next;
	      continue;
	    }

	  if (opt_info->insns_with_var_to_expand)
	    ves = analyze_insn_to_expand_var (loop, insn);

	  if (ves)
	    {
	      slot2 = opt_info->insns_with_var_to_expand->find_slot (ves, INSERT);
	      gcc_assert (*slot2 == NULL);
	      *slot2 = ves;
	      *opt_info->var_to_expand_tail = ves;
	      opt_info->var_to_expand_tail = &ves->next;
	    }
	}
    }

  edges.release ();
  free (body);
  return opt_info;
}
/* Called just before loop duplication.  Records start of duplicated area
   to OPT_INFO.  */

static void
opt_info_start_duplication (struct opt_info *opt_info)
{
  if (opt_info)
    opt_info->first_new_block = last_basic_block_for_fn (cfun);
}
/* Determine the number of iterations between initialization of the base
   variable and the current copy (N_COPY).  N_COPIES is the total number
   of newly created copies.  UNROLLING is true if we are unrolling
   (not peeling) the loop.  */

static unsigned
determine_split_iv_delta (unsigned n_copy, unsigned n_copies, bool unrolling)
{
  if (unrolling)
    {
      /* If we are unrolling, initialization is done in the original loop
	 body (number 0).  */
      return n_copy;
    }
  else
    {
      /* If we are peeling, the copy in which the initialization occurs has
	 number 1.  The original loop (number 0) is the last.  */
      if (n_copy)
	return n_copy - 1;
      else
	return n_copies;
    }
}
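
/* An illustrative example (not in the original sources): when unrolling
   with n_copies == 3, the original body is copy 0 and holds the
   initialization, so copies 0, 1, 2, 3 get deltas 0, 1, 2, 3.  When
   peeling the same loop, copy 1 holds the initialization and the original
   body runs last, so copies 1, 2, 3 get deltas 0, 1, 2 and the original
   body (n_copy == 0) gets delta n_copies == 3.  */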
/* Allocate basic variable for the induction variable chain.  */

static void
allocate_basic_variable (struct iv_to_split *ivts)
{
  rtx expr = SET_SRC (single_set (ivts->insn));

  ivts->base_var = gen_reg_rtx (GET_MODE (expr));
}
/* Insert initialization of basic variable of IVTS before INSN, taking
   the initial value from INSN.  */

static void
insert_base_initialization (struct iv_to_split *ivts, rtx_insn *insn)
{
  rtx expr = copy_rtx (SET_SRC (single_set (insn)));
  rtx_insn *seq;

  start_sequence ();
  expr = force_operand (expr, ivts->base_var);
  if (expr != ivts->base_var)
    emit_move_insn (ivts->base_var, expr);
  seq = get_insns ();
  end_sequence ();

  emit_insn_before (seq, insn);
}
/* Replace the use of the induction variable described in IVTS in INSN
   by base variable + DELTA * step.  */

static void
split_iv (struct iv_to_split *ivts, rtx_insn *insn, unsigned delta)
{
  rtx expr, *loc, incr, var;
  rtx_insn *seq;
  machine_mode mode = GET_MODE (ivts->base_var);
  rtx src, dest, set;

  /* Construct base + DELTA * step.  */
  if (!delta)
    expr = ivts->base_var;
  else
    {
      incr = simplify_gen_binary (MULT, mode,
				  ivts->step, gen_int_mode (delta, mode));
      expr = simplify_gen_binary (PLUS, GET_MODE (ivts->base_var),
				  ivts->base_var, incr);
    }

  /* Figure out where to do the replacement.  */
  loc = &SET_SRC (single_set (insn));

  /* If we can make the replacement right away, we're done.  */
  if (validate_change (insn, loc, expr, 0))
    return;

  /* Otherwise, force EXPR into a register and try again.  */
  start_sequence ();
  var = gen_reg_rtx (mode);
  expr = force_operand (expr, var);
  if (expr != var)
    emit_move_insn (var, expr);
  seq = get_insns ();
  end_sequence ();
  emit_insn_before (seq, insn);

  if (validate_change (insn, loc, var, 0))
    return;

  /* The last chance.  Try recreating the assignment in insn
     completely from scratch.  */
  set = single_set (insn);
  gcc_assert (set);

  start_sequence ();
  *loc = var;
  src = copy_rtx (SET_SRC (set));
  dest = copy_rtx (SET_DEST (set));
  src = force_operand (src, dest);
  if (src != dest)
    emit_move_insn (dest, src);
  seq = get_insns ();
  end_sequence ();

  emit_insn_before (seq, insn);
  delete_insn (insn);
}
/* Return one expansion of the accumulator recorded in struct VE.  */

static rtx
get_expansion (struct var_to_expand *ve)
{
  rtx reg;

  if (ve->reuse_expansion == 0)
    reg = ve->reg;
  else
    reg = ve->var_expansions[ve->reuse_expansion - 1];

  if (ve->var_expansions.length () == (unsigned) ve->reuse_expansion)
    ve->reuse_expansion = 0;
  else
    ve->reuse_expansion++;

  return reg;
}
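
/* An illustrative trace (not in the original sources): with two recorded
   expansions, successive calls return ve->reg, var_expansions[0],
   var_expansions[1], and then wrap back to ve->reg, cycling round-robin
   through the original accumulator and its copies once the
   PARAM_MAX_VARIABLE_EXPANSIONS limit stops new registers from being
   created.  */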
/* Given INSN replace the uses of the accumulator recorded in VE
   with a new register.  */

static void
expand_var_during_unrolling (struct var_to_expand *ve, rtx_insn *insn)
{
  rtx new_reg, set;
  bool really_new_expansion = false;

  set = single_set (insn);
  gcc_assert (set);

  /* Generate a new register only if the expansion limit has not been
     reached.  Else reuse an already existing expansion.  */
  if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
    {
      really_new_expansion = true;
      new_reg = gen_reg_rtx (GET_MODE (ve->reg));
    }
  else
    new_reg = get_expansion (ve);

  validate_replace_rtx_group (SET_DEST (set), new_reg, insn);
  if (apply_change_group ())
    if (really_new_expansion)
      {
	ve->var_expansions.safe_push (new_reg);
	ve->expansion_count++;
      }
}
/* Initialize the variable expansions in loop preheader.  PLACE is the
   loop-preheader basic block where the initialization of the
   expansions should take place.  The expansions are initialized with
   (-0) when the operation is plus or minus to honor signed zero.  This
   way we can prevent cases where the sign of the final result is
   affected by the sign of the expansion.  Here is an example to
   demonstrate this:

   for (i = 0 ; i < n; i++)
     sum += something;

   ==>

   sum += something
   ....
   i = i+1;
   sum1 += something
   ....
   i = i+1
   sum2 += something;
   ....

   When SUM is initialized with -zero and SOMETHING is also -zero, the
   final result of sum should be -zero, thus the expansions sum1 and sum2
   should be initialized with -zero as well (otherwise we will get +zero
   as the final result).  */

static void
insert_var_expansion_initialization (struct var_to_expand *ve,
				     basic_block place)
{
  rtx_insn *seq;
  rtx var, zero_init;
  unsigned i;
  machine_mode mode = GET_MODE (ve->reg);
  bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode);

  if (ve->var_expansions.length () == 0)
    return;

  start_sequence ();
  switch (ve->op)
    {
    case FMA:
      /* Note that we only accumulate FMA via the ADD operand.  */
    case PLUS:
    case MINUS:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
	{
	  if (honor_signed_zero_p)
	    zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
	  else
	    zero_init = CONST0_RTX (mode);
	  emit_move_insn (var, zero_init);
	}
      break;

    case MULT:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
	{
	  zero_init = CONST1_RTX (GET_MODE (var));
	  emit_move_insn (var, zero_init);
	}
      break;

    default:
      gcc_unreachable ();
    }

  seq = get_insns ();
  end_sequence ();

  emit_insn_after (seq, BB_END (place));
}
/* Combine the variable expansions at the loop exit.  PLACE is the
   loop exit basic block where the summation of the expansions should
   take place.  */

static void
combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
{
  rtx sum = ve->reg;
  rtx expr, var;
  rtx_insn *seq, *insn;
  unsigned i;

  if (ve->var_expansions.length () == 0)
    return;

  start_sequence ();
  switch (ve->op)
    {
    case FMA:
      /* Note that we only accumulate FMA via the ADD operand.  */
    case PLUS:
    case MINUS:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
	sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), var, sum);
      break;

    case MULT:
      FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
	sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum);
      break;

    default:
      gcc_unreachable ();
    }

  expr = force_operand (sum, ve->reg);
  if (expr != ve->reg)
    emit_move_insn (ve->reg, expr);
  seq = get_insns ();
  end_sequence ();

  insn = BB_HEAD (place);
  while (!NOTE_INSN_BASIC_BLOCK_P (insn))
    insn = NEXT_INSN (insn);

  emit_insn_after (seq, insn);
}
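
/* An illustrative note (not in the original sources): for the sum example
   above with two expansions, the code emitted at the loop exit computes
   sum = (sum2 + (sum1 + sum)), folding the partial accumulators back into
   the original register; for MULT the same shape is built with
   multiplications, the expansions having been initialized to 1.  */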
/* Strip away REG_EQUAL notes for IVs we're splitting.

   Updating REG_EQUAL notes for IVs we split is tricky: We
   cannot tell until after unrolling, DF-rescanning, and liveness
   updating, whether an EQ_USE is reached by the split IV while
   the IV reg is still live.  See PR55006.

   ??? We cannot use remove_reg_equal_equiv_notes_for_regno,
   because RTL loop-iv requires us to defer rescanning insns and
   any notes attached to them.  So resort to old techniques...  */

static void
maybe_strip_eq_note_for_split_iv (struct opt_info *opt_info, rtx_insn *insn)
{
  struct iv_to_split *ivts;
  rtx note = find_reg_equal_equiv_note (insn);
  if (! note)
    return;
  for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
    if (reg_mentioned_p (ivts->orig_var, note))
      {
	remove_note (insn, note);
	return;
      }
}
/* Apply loop optimizations in loop copies using the
   data which was gathered during the unrolling.  Structure
   OPT_INFO records that data.

   UNROLLING is true if we unrolled (not peeled) the loop.
   REWRITE_ORIGINAL_BODY is true if we should also rewrite the original body of
   the loop (as it should happen in complete unrolling, but not in ordinary
   peeling of the loop).  */

static void
apply_opt_in_copies (struct opt_info *opt_info,
		     unsigned n_copies, bool unrolling,
		     bool rewrite_original_loop)
{
  unsigned i, delta;
  basic_block bb, orig_bb;
  rtx_insn *insn, *orig_insn, *next;
  struct iv_to_split ivts_templ, *ivts;
  struct var_to_expand ve_templ, *ves;

  /* Sanity check -- we need to put initialization in the original loop
     body.  */
  gcc_assert (!unrolling || rewrite_original_loop);

  /* Allocate the basic variables (i0).  */
  if (opt_info->insns_to_split)
    for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
      allocate_basic_variable (ivts);

  for (i = opt_info->first_new_block;
       i < (unsigned) last_basic_block_for_fn (cfun);
       i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      orig_bb = get_bb_original (bb);

      /* bb->aux holds position in copy sequence initialized by
	 duplicate_loop_to_header_edge.  */
      delta = determine_split_iv_delta ((size_t)bb->aux, n_copies,
					unrolling);
      bb->aux = 0;
      orig_insn = BB_HEAD (orig_bb);
      FOR_BB_INSNS_SAFE (bb, insn, next)
	{
	  if (!INSN_P (insn)
	      || (DEBUG_INSN_P (insn)
		  && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL))
	    continue;

	  while (!INSN_P (orig_insn)
		 || (DEBUG_INSN_P (orig_insn)
		     && (TREE_CODE (INSN_VAR_LOCATION_DECL (orig_insn))
			 == LABEL_DECL)))
	    orig_insn = NEXT_INSN (orig_insn);

	  ivts_templ.insn = orig_insn;
	  ve_templ.insn = orig_insn;

	  /* Apply splitting iv optimization.  */
	  if (opt_info->insns_to_split)
	    {
	      maybe_strip_eq_note_for_split_iv (opt_info, insn);

	      ivts = opt_info->insns_to_split->find (&ivts_templ);

	      if (ivts)
		{
		  gcc_assert (GET_CODE (PATTERN (insn))
			      == GET_CODE (PATTERN (orig_insn)));

		  if (!delta)
		    insert_base_initialization (ivts, insn);
		  split_iv (ivts, insn, delta);
		}
	    }
	  /* Apply variable expansion optimization.  */
	  if (unrolling && opt_info->insns_with_var_to_expand)
	    {
	      ves = (struct var_to_expand *)
		opt_info->insns_with_var_to_expand->find (&ve_templ);
	      if (ves)
		{
		  gcc_assert (GET_CODE (PATTERN (insn))
			      == GET_CODE (PATTERN (orig_insn)));
		  expand_var_during_unrolling (ves, insn);
		}
	    }
	  orig_insn = NEXT_INSN (orig_insn);
	}
    }

  if (!rewrite_original_loop)
    return;

  /* Initialize the variable expansions in the loop preheader
     and take care of combining them at the loop exit.  */
  if (opt_info->insns_with_var_to_expand)
    {
      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
	insert_var_expansion_initialization (ves, opt_info->loop_preheader);
      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
	combine_var_copies_in_loop_exit (ves, opt_info->loop_exit);
    }

  /* Rewrite also the original loop body.  Find its blocks as originals of
     the blocks in the last copied iteration, i.e. those that have
     get_bb_copy (get_bb_original (bb)) == bb.  */
  for (i = opt_info->first_new_block;
       i < (unsigned) last_basic_block_for_fn (cfun);
       i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      orig_bb = get_bb_original (bb);
      if (get_bb_copy (orig_bb) != bb)
	continue;

      delta = determine_split_iv_delta (0, n_copies, unrolling);
      for (orig_insn = BB_HEAD (orig_bb);
	   orig_insn != NEXT_INSN (BB_END (bb));
	   orig_insn = next)
	{
	  next = NEXT_INSN (orig_insn);

	  if (!INSN_P (orig_insn))
	    continue;

	  ivts_templ.insn = orig_insn;
	  if (opt_info->insns_to_split)
	    {
	      maybe_strip_eq_note_for_split_iv (opt_info, orig_insn);

	      ivts = (struct iv_to_split *)
		opt_info->insns_to_split->find (&ivts_templ);
	      if (ivts)
		{
		  if (!delta)
		    insert_base_initialization (ivts, orig_insn);
		  split_iv (ivts, orig_insn, delta);
		  continue;
		}
	    }
	}
    }
}
/* Release OPT_INFO.  */

static void
free_opt_info (struct opt_info *opt_info)
{
  delete opt_info->insns_to_split;
  opt_info->insns_to_split = NULL;
  if (opt_info->insns_with_var_to_expand)
    {
      struct var_to_expand *ves;

      for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
	ves->var_expansions.release ();
      delete opt_info->insns_with_var_to_expand;
      opt_info->insns_with_var_to_expand = NULL;
    }
  free (opt_info);
}