/* Perform doloop optimizations
   Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Based on code by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "flags.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "toplev.h"
#include "tm_p.h"
#include "cfgloop.h"
#include "output.h"
#include "params.h"
#include "target.h"
/* This module is used to modify loops with a determinable number of
   iterations to use special low-overhead looping instructions.

   It first validates whether the loop is well behaved and has a
   determinable number of iterations (either at compile or run-time).
   It then modifies the loop to use a low-overhead looping pattern as
   follows:

   1. A pseudo register is allocated as the loop iteration counter.

   2. The number of loop iterations is calculated and is stored
      in the loop counter.

   3. At the end of the loop, the jump insn is replaced by the
      doloop_end pattern.  The compare must remain because it might be
      used elsewhere.  If the loop-variable or condition register are
      used elsewhere, they will be eliminated by flow.

   4. An optional doloop_begin pattern is inserted at the top of the
      loop.
   TODO: The optimization should only be performed when either the biv used
   for the exit condition is not used for anything except the exit test, or
   when we do not have to change its value, since otherwise we have to add a
   new induction variable, which usually will not pay off (unless the cost of
   the doloop pattern is somehow far lower than the cost of compare & jump,
   or unless the bct register cannot be used for anything else but doloop
   -- ??? detect these cases).  */
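
/* For illustration only -- a conceptual sketch, not part of the pass
   itself: the steps above turn a counted loop such as

       for (i = 0; i < n; i++)
         body ();

   into roughly

       count = n;              -- step 2: iterations stored in the counter
       do
         body ();
       while (--count != 0);   -- step 3: decrement-and-branch replaces
                                  the compare and jump

   where the decrement-and-branch is whatever low-overhead looping
   instruction the target's doloop_end pattern provides.  */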
#ifdef HAVE_doloop_end

/* Return the loop termination condition for DOLOOP_PAT or zero
   if it is not a decrement and branch jump insn.  */
rtx
doloop_condition_get (rtx doloop_pat)
{
  rtx cmp;
  rtx inc;
  rtx reg;
  rtx inc_src;
  rtx condition;
  rtx pattern;

  /* The canonical doloop pattern we expect has one of the following
     forms:

     1)  (parallel [(set (pc) (if_then_else (condition)
                                            (label_ref (label))
                                            (pc)))
                    (set (reg) (plus (reg) (const_int -1)))
                    (additional clobbers and uses)])

     The branch must be the first entry of the parallel (also required
     by jump.c), and the second entry of the parallel must be a set of
     the loop counter register.  Some targets (IA-64) wrap the set of
     the loop counter in an if_then_else too.

     2)  (set (reg) (plus (reg) (const_int -1))
         (set (pc) (if_then_else (reg != 0)
                                 (label_ref (label))
                                 (pc))).  */
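
  /* Within this file these patterns are the ones produced by the target's
     doloop_end expander: see the gen_doloop_end calls in doloop_optimize
     below, whose operands are the counter register, the iteration count,
     the maximum iteration count, the loop nesting level and the start
     label.  */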
  pattern = PATTERN (doloop_pat);

  if (GET_CODE (pattern) != PARALLEL)
    {
      rtx cond;

      /* We expect the decrement to immediately precede the branch.  */

      if ((PREV_INSN (doloop_pat) == NULL_RTX)
          || !INSN_P (PREV_INSN (doloop_pat)))
        return 0;

      cmp = pattern;
      inc = PATTERN (PREV_INSN (doloop_pat));
      /* We expect the condition to be of the form (reg != 0)  */
      cond = XEXP (SET_SRC (cmp), 0);
      if (GET_CODE (cond) != NE || XEXP (cond, 1) != const0_rtx)
        return 0;
    }
  else
    {
      cmp = XVECEXP (pattern, 0, 0);
      inc = XVECEXP (pattern, 0, 1);
    }

  /* Check for (set (reg) (something)).  */
  if (GET_CODE (inc) != SET)
    return 0;
  reg = SET_DEST (inc);
  if (! REG_P (reg))
    return 0;

  /* Check if something = (plus (reg) (const_int -1)).
     On IA-64, this decrement is wrapped in an if_then_else.  */
  inc_src = SET_SRC (inc);
  if (GET_CODE (inc_src) == IF_THEN_ELSE)
    inc_src = XEXP (inc_src, 1);
  if (GET_CODE (inc_src) != PLUS
      || XEXP (inc_src, 0) != reg
      || XEXP (inc_src, 1) != constm1_rtx)
    return 0;

  /* Check for (set (pc) (if_then_else (condition)
                                       (label_ref (label))
                                       (pc))).  */
  if (GET_CODE (cmp) != SET
      || SET_DEST (cmp) != pc_rtx
      || GET_CODE (SET_SRC (cmp)) != IF_THEN_ELSE
      || GET_CODE (XEXP (SET_SRC (cmp), 1)) != LABEL_REF
      || XEXP (SET_SRC (cmp), 2) != pc_rtx)
    return 0;

  /* Extract loop termination condition.  */
  condition = XEXP (SET_SRC (cmp), 0);

  /* We expect a GE or NE comparison with 0 or 1.  */
  if ((GET_CODE (condition) != GE
       && GET_CODE (condition) != NE)
      || (XEXP (condition, 1) != const0_rtx
          && XEXP (condition, 1) != const1_rtx))
    return 0;
  if ((XEXP (condition, 0) == reg)
      || (GET_CODE (XEXP (condition, 0)) == PLUS
          && XEXP (XEXP (condition, 0), 0) == reg))
    {
      if (GET_CODE (pattern) != PARALLEL)
        /* The second form we expect:

           (set (reg) (plus (reg) (const_int -1))
           (set (pc) (if_then_else (reg != 0)
                                   (label_ref (label))
                                   (pc))).

           is equivalent to the following:

           (parallel [(set (pc) (if_then_else (reg != 1)
                                              (label_ref (label))
                                              (pc)))
                      (set (reg) (plus (reg) (const_int -1)))
                      (additional clobbers and uses)])

           So we return that form instead.  */
        condition = gen_rtx_fmt_ee (NE, VOIDmode, inc_src, const1_rtx);

      return condition;
    }

  /* ??? If a machine uses a funny comparison, we could return a
     canonicalized form here.  */

  return 0;
}
/* Return nonzero if the loop specified by LOOP is suitable for
   the use of special low-overhead looping instructions.  DESC
   describes the number of iterations of the loop.  */

static bool
doloop_valid_p (struct loop *loop, struct niter_desc *desc)
{
  basic_block *body = get_loop_body (loop), bb;
  rtx insn;
  unsigned i;
  bool result = true;
  /* Check for loops that may not terminate under special conditions.  */
  if (!desc->simple_p
      || desc->assumptions
      || desc->infinite)
    {
      /* There are some cases that would require special attention.
         For example if the comparison is LEU and the comparison value
         is UINT_MAX then the loop will not terminate.  Similarly, if the
         comparison code is GEU and the comparison value is 0, the
         loop will not terminate.

         If the absolute increment is not 1, the loop can be infinite
         even with LTU/GTU, e.g. for (i = 3; i > 0; i -= 2)

         ??? We could compute these conditions at run-time and have an
         additional jump around the loop to ensure an infinite loop.
         However, it is very unlikely that this is the intended
         behavior of the loop and checking for these rare boundary
         conditions would pessimize all other code.

         If the loop is executed only a few times an extra check to
         restart the loop could use up most of the benefits of using a
         count register loop.  Note however, that normally, this
         restart branch would never execute, so it could be predicted
         well by the CPU.  We should generate the pessimistic code by
         default, and have an option, e.g. -funsafe-loops that would
         enable count-register loops in this case.  */
      if (dump_file)
        fprintf (dump_file, "Doloop: Possible infinite iteration case.\n");
      result = false;
      goto cleanup;
    }
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      for (insn = BB_HEAD (bb);
           insn != NEXT_INSN (BB_END (bb));
           insn = NEXT_INSN (insn))
        {
          /* Different targets have different requirements for low-overhead
             looping.  Call the back end for each instruction within the loop
             to let it decide whether the insn prohibits a low-overhead loop.
             It will then return the cause for it to emit to the dump file.  */
          const char *invalid = targetm.invalid_within_doloop (insn);
          if (invalid)
            {
              if (dump_file)
                fprintf (dump_file, "Doloop: %s\n", invalid);
              result = false;
              goto cleanup;
            }
        }
    }
  result = true;

cleanup:
  free (body);

  return result;
}
/* Add a test of COND jumping to DEST on edge *E, and set *E to the new
   fallthru edge.  If the condition is always false, do nothing.  If it is
   always true, redirect *E to DEST and return false.  In all other cases,
   return true.  */
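
/* Within this file, add_test is called only from doloop_modify below, to
   expand the run-time tests of DESC->noloop_assumptions.  */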
static bool
add_test (rtx cond, edge *e, basic_block dest)
{
  rtx seq, jump, label;
  enum machine_mode mode;
  rtx op0 = XEXP (cond, 0), op1 = XEXP (cond, 1);
  enum rtx_code code = GET_CODE (cond);
  basic_block bb;

  mode = GET_MODE (XEXP (cond, 0));
  if (mode == VOIDmode)
    mode = GET_MODE (XEXP (cond, 1));
  start_sequence ();
  op0 = force_operand (op0, NULL_RTX);
  op1 = force_operand (op1, NULL_RTX);
  label = block_label (dest);
  do_compare_rtx_and_jump (op0, op1, code, 0, mode, NULL_RTX, NULL_RTX, label);

  jump = get_last_insn ();
  if (!jump || !JUMP_P (jump))
    {
      /* The condition is always false and the jump was optimized out.  */
      end_sequence ();
      return true;
    }

  seq = get_insns ();
  end_sequence ();

  /* There always is at least the jump insn in the sequence.  */
  gcc_assert (seq != NULL_RTX);

  bb = split_edge_and_insert (*e, seq);
  *e = single_succ_edge (bb);

  if (any_uncondjump_p (jump))
    {
      /* The condition is always true.  */
      delete_insn (jump);
      redirect_edge_and_branch_force (*e, dest);
      return false;
    }

  JUMP_LABEL (jump) = label;

  /* The jump is supposed to handle an unlikely special case.  */
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB,
                         const0_rtx, REG_NOTES (jump));
  LABEL_NUSES (label)++;

  make_edge (bb, dest, (*e)->flags & ~EDGE_FALLTHRU);
  return true;
}
/* Modify the loop to use the low-overhead looping insn where LOOP
   describes the loop, DESC describes the number of iterations of the
   loop, and DOLOOP_SEQ is the low-overhead looping sequence to emit at
   the end of the loop.  CONDITION is the condition separated from the
   DOLOOP_SEQ.  COUNT is the number of iterations of the LOOP.  */
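
/* Overview of the steps performed below: the original jump at the end of
   the loop is deleted, the count register taken from CONDITION is
   initialized in the loop preheader, DESC->noloop_assumptions (if any) are
   expanded into run-time tests that can reset that register, an optional
   doloop_begin is emitted, and DOLOOP_SEQ is emitted in place of the
   deleted jump.  */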
static void
doloop_modify (struct loop *loop, struct niter_desc *desc,
               rtx doloop_seq, rtx condition, rtx count)
{
  rtx counter_reg;
  rtx tmp, noloop = NULL_RTX;
  rtx sequence;
  rtx jump_insn;
  rtx jump_label;
  int nonneg = 0;
  bool increment_count;
  basic_block loop_end = desc->out_edge->src;
  enum machine_mode mode;

  jump_insn = BB_END (loop_end);

  if (dump_file)
    {
      fprintf (dump_file, "Doloop: Inserting doloop pattern (");
      if (desc->const_iter)
        fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter);
      else
        fputs ("runtime", dump_file);
      fputs (" iterations).\n", dump_file);
    }
  /* Discard original jump to continue loop.  The original compare
     result may still be live, so it cannot be discarded explicitly.  */
  delete_insn (jump_insn);

  counter_reg = XEXP (condition, 0);
  if (GET_CODE (counter_reg) == PLUS)
    counter_reg = XEXP (counter_reg, 0);
  mode = GET_MODE (counter_reg);

  increment_count = false;
  switch (GET_CODE (condition))
    {
    case NE:
      /* Currently only NE tests against zero and one are supported.  */
      noloop = XEXP (condition, 1);
      if (noloop != const0_rtx)
        {
          gcc_assert (noloop == const1_rtx);
          increment_count = true;
        }
      break;

    case GE:
      /* Currently only GE tests against zero are supported.  */
      gcc_assert (XEXP (condition, 1) == const0_rtx);

      noloop = constm1_rtx;

      /* The iteration count does not need incrementing for a GE test.  */
      increment_count = false;

      /* Determine if the iteration counter will be non-negative.
         Note that the maximum value loaded is iterations_max - 1.  */
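      /* The check below sets NONNEG only when that maximum value cannot
         have the sign bit of MODE set; NONNEG in turn gates the REG_NONNEG
         note added at the end of this function.  */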
      if (desc->niter_max
          <= ((unsigned HOST_WIDEST_INT) 1
              << (GET_MODE_BITSIZE (mode) - 1)))
        nonneg = 1;
      break;

      /* Abort if an invalid doloop pattern has been generated.  */
    default:
      gcc_unreachable ();
    }
  if (increment_count)
    count = simplify_gen_binary (PLUS, mode, count, const1_rtx);

  /* Insert initialization of the count register into the loop header.  */
  start_sequence ();
  tmp = force_operand (count, counter_reg);
  convert_move (counter_reg, tmp, 1);
  sequence = get_insns ();
  end_sequence ();
  emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));
  if (desc->noloop_assumptions)
    {
      rtx ass = copy_rtx (desc->noloop_assumptions);
      basic_block preheader = loop_preheader_edge (loop)->src;
      basic_block set_zero
        = split_edge (loop_preheader_edge (loop));
      basic_block new_preheader
        = split_edge (loop_preheader_edge (loop));
      edge te;

      /* Expand the condition testing the assumptions and if it does not pass,
         reset the count register to 0.  */
      redirect_edge_and_branch_force (single_succ_edge (preheader), new_preheader);
      set_immediate_dominator (CDI_DOMINATORS, new_preheader, preheader);

      set_zero->count = 0;
      set_zero->frequency = 0;

      te = single_succ_edge (preheader);
      for (; ass; ass = XEXP (ass, 1))
        if (!add_test (XEXP (ass, 0), &te, set_zero))
          break;

      if (ass)
        {
          /* We reached a condition that is always true.  This is very hard to
             reproduce (such a loop does not roll, and thus it would most
             likely get optimized out by some of the preceding optimizations).
             In fact, I do not have any testcase for it.  However, it would
             also be very hard to show that it is impossible, so we must
             handle this case.  */
          set_zero->count = preheader->count;
          set_zero->frequency = preheader->frequency;
        }

      if (EDGE_COUNT (set_zero->preds) == 0)
        {
          /* All the conditions were simplified to false, remove the
             unreachable set_zero block.  */
          delete_basic_block (set_zero);
        }
      else
        {
          /* Reset the counter to zero in the set_zero block.  */
          start_sequence ();
          convert_move (counter_reg, noloop, 0);
          sequence = get_insns ();
          end_sequence ();
          emit_insn_after (sequence, BB_END (set_zero));

          set_immediate_dominator (CDI_DOMINATORS, set_zero,
                                   recompute_dominator (CDI_DOMINATORS,
                                                        set_zero));
        }

      set_immediate_dominator (CDI_DOMINATORS, new_preheader,
                               recompute_dominator (CDI_DOMINATORS,
                                                    new_preheader));
    }
  /* Some targets (eg, C4x) need to initialize special looping
     registers.  */
#ifdef HAVE_doloop_begin
  {
    rtx init;
    unsigned level = get_loop_level (loop) + 1;
    init = gen_doloop_begin (counter_reg,
                             desc->const_iter ? desc->niter_expr : const0_rtx,
                             GEN_INT (desc->niter_max),
                             GEN_INT (level));
    if (init)
      {
        start_sequence ();
        emit_insn (init);
        sequence = get_insns ();
        end_sequence ();
        emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));
      }
  }
#endif
  /* Insert the new low-overhead looping insn.  */
  emit_jump_insn_after (doloop_seq, BB_END (loop_end));
  jump_insn = BB_END (loop_end);
  jump_label = block_label (desc->in_edge->dest);
  JUMP_LABEL (jump_insn) = jump_label;
  LABEL_NUSES (jump_label)++;

  /* Ensure the right fallthru edge is marked, in case we have reversed
     the condition.  */
  desc->in_edge->flags &= ~EDGE_FALLTHRU;
  desc->out_edge->flags |= EDGE_FALLTHRU;

  /* Add a REG_NONNEG note if the actual or estimated maximum number
     of iterations is non-negative.  */
  if (nonneg)
    {
      REG_NOTES (jump_insn)
        = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX, REG_NOTES (jump_insn));
    }
}
/* Process the loop described by LOOP, validating that the loop is suitable
   for conversion to use a low-overhead looping instruction, and replacing
   the jump insn where suitable.  Return true if the loop was successfully
   modified.  */
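
/* The work is split in two: this function checks that the loop qualifies
   (doloop_valid_p), that enough iterations are expected and that the
   iteration count is cheap enough to compute; it then asks the target for
   a doloop_end sequence (retrying in word_mode if the loop's mode is
   refused), validates the result with doloop_condition_get, and hands the
   actual rewriting to doloop_modify.  */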
static bool
doloop_optimize (struct loop *loop)
{
  enum machine_mode mode;
  rtx doloop_seq, doloop_pat, doloop_reg;
  rtx iterations, count;
  rtx iterations_max;
  rtx start_label;
  rtx condition;
  unsigned level, est_niter;
  int max_cost;
  struct niter_desc *desc;
  unsigned word_mode_size;
  unsigned HOST_WIDE_INT word_mode_max;

  if (dump_file)
    fprintf (dump_file, "Doloop: Processing loop %d.\n", loop->num);

  iv_analysis_loop_init (loop);

  /* Find the simple exit of a LOOP.  */
  desc = get_simple_loop_desc (loop);

  /* Check that loop is a candidate for a low-overhead looping insn.  */
  if (!doloop_valid_p (loop, desc))
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: The loop is not suitable.\n");
      return false;
    }
  mode = desc->mode;
  est_niter = 3;
  if (desc->const_iter)
    est_niter = desc->niter;
  /* If the estimate of the number of iterations is reliable (comes from
     profile feedback), use it.  Do not use it normally, since the expected
     number of iterations of an unrolled loop is 2.  */
  if (loop->header->count)
    est_niter = expected_loop_iterations (loop);

  if (est_niter < 3)
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: Too few iterations (%u) to be profitable.\n",
                 est_niter);
      return false;
    }
  max_cost
    = COSTS_N_INSNS (PARAM_VALUE (PARAM_MAX_ITERATIONS_COMPUTATION_COST));
  if (rtx_cost (desc->niter_expr, SET) > max_cost)
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: number of iterations too costly to compute.\n");
      return false;
    }
  count = copy_rtx (desc->niter_expr);
  iterations = desc->const_iter ? desc->niter_expr : const0_rtx;
  iterations_max = GEN_INT (desc->niter_max);
  level = get_loop_level (loop) + 1;

  /* Generate looping insn.  If the pattern FAILs then give up trying
     to modify the loop since there is some aspect the back-end does
     not like.  */
  start_label = block_label (desc->in_edge->dest);
  doloop_reg = gen_reg_rtx (mode);
  doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max,
                               GEN_INT (level), start_label);
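
  /* word_mode_max is the largest value representable in word_mode,
     i.e. 2**GET_MODE_BITSIZE (word_mode) - 1; the shift is done in two
     steps so that it remains well defined even when word_mode is as wide
     as HOST_WIDE_INT.  */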
  word_mode_size = GET_MODE_BITSIZE (word_mode);
  word_mode_max
    = ((unsigned HOST_WIDE_INT) 1 << (word_mode_size - 1) << 1) - 1;
  if (! doloop_seq
      && mode != word_mode
      /* Before trying a mode other than the one in which the number of
         iterations is computed, we must be sure that the number of
         iterations fits into the new mode.  */
      && (word_mode_size >= GET_MODE_BITSIZE (mode)
          || desc->niter_max <= word_mode_max))
    {
      if (word_mode_size > GET_MODE_BITSIZE (mode))
        {
          count = simplify_gen_unary (ZERO_EXTEND, word_mode,
                                      count, mode);
          iterations = simplify_gen_unary (ZERO_EXTEND, word_mode,
                                           iterations, mode);
          iterations_max = simplify_gen_unary (ZERO_EXTEND, word_mode,
                                               iterations_max, mode);
        }
      else
        {
          count = lowpart_subreg (word_mode, count, mode);
          iterations = lowpart_subreg (word_mode, iterations, mode);
          iterations_max = lowpart_subreg (word_mode, iterations_max, mode);
        }
      PUT_MODE (doloop_reg, word_mode);
      doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max,
                                   GEN_INT (level), start_label);
    }
  if (! doloop_seq)
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: Target unwilling to use doloop pattern!\n");
      return false;
    }

  /* If multiple instructions were created, the last must be the
     jump instruction.  Also, a raw define_insn may yield a plain
     pattern.  */
  doloop_pat = doloop_seq;
  if (INSN_P (doloop_pat))
    {
      while (NEXT_INSN (doloop_pat) != NULL_RTX)
        doloop_pat = NEXT_INSN (doloop_pat);
      if (!JUMP_P (doloop_pat))
        doloop_pat = NULL_RTX;
    }

  if (! doloop_pat
      || ! (condition = doloop_condition_get (doloop_pat)))
    {
      if (dump_file)
        fprintf (dump_file, "Doloop: Unrecognizable doloop pattern!\n");
      return false;
    }

  doloop_modify (loop, desc, doloop_seq, condition, count);
  return true;
}
/* This is the main entry point.  Process all loops using doloop_optimize.  */

void
doloop_optimize_loops (void)
{
  loop_iterator li;
  struct loop *loop;

  FOR_EACH_LOOP (li, loop, 0)
    {
      doloop_optimize (loop);
    }

  iv_analysis_done ();

#ifdef ENABLE_CHECKING
  verify_dominators (CDI_DOMINATORS);
  verify_loop_structure ();
#endif
}
#endif /* HAVE_doloop_end */