/* Move constant computations out of loops.
   Copyright (C) 1987, 88, 89, 91-4, 1995 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
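
/* For illustration only (made-up source, not compiler output): given

       for (i = 0; i < n; i++)
	 a[i] = x * y;

   the pass hoists the invariant product x * y into a new register
   before the loop, and strength reduction replaces the recomputed
   address a + i * sizeof (*a) with a pointer incremented on each
   iteration, so the result behaves roughly like

       t = x * y;
       for (p = &a[0]; p < &a[n]; p++)
	 *p = t;

   where t and p stand for compiler-generated pseudo registers.  The
   actual transformations are performed on RTL, not on source.  */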
#include <stdio.h>
#include "config.h"
#include "rtl.h"
#include "obstack.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;
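
/* For example (uids invented): if insns with uids 40, 12 and 27 occur
   in that order -- uids need not be ordered once insns have been
   reordered -- their luids would be 1, 2 and 3.  Comparing luids,
   never uids, therefore tells which of two insns comes first.  */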
/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;

/* Holds the number of loop iterations.  It is zero if the number could
   not be calculated.  Must be unsigned since the number of iterations
   can be as high as 2^wordsize-1.  For loops with a wider iterator,
   this number will be zero if the number of loop iterations is too
   large for an unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;
/* Nonzero if there is a subroutine call in the current loop.
   (unknown_address_altered is also nonzero in this case.)  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* loop_continue is the NOTE_INSN_LOOP_CONT of the current loop.
   A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;
/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static short *n_times_set;
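
/* To restate the encoding above, for a register R while scanning:
     n_times_set[R] > 0	   R is set that many times, not a candidate;
     n_times_set[R] == 0   R is invariant (never set, or already moved);
     n_times_set[R] == -1  R is a candidate not known equal to a constant;
     n_times_set[R] == -2  R is a candidate known equal to a constant.  */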
/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static short *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;
/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 20
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  int regno;			/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};

FILE *loop_dump_stream;
/* Forward declarations.  */

static void find_and_verify_loops ();
static void mark_loop_jump ();
static void prescan_loop ();
static int reg_in_basic_block_p ();
static int consec_sets_invariant_p ();
static rtx libcall_other_reg ();
static int labels_in_range_p ();
static void count_loop_regs_set ();
static void note_addr_stored ();
static int loop_reg_used_before_p ();
static void scan_loop ();
static void replace_call_address ();
static rtx skip_consec_insns ();
static int libcall_benefit ();
static void ignore_some_movables ();
static void force_movables ();
static void combine_movables ();
static int rtx_equal_for_loop_p ();
static void move_movables ();
static void strength_reduce ();
static int valid_initial_value_p ();
static void find_mem_givs ();
static void record_biv ();
static void check_final_value ();
static void record_giv ();
static void update_giv_derive ();
static int basic_induction_var ();
static rtx simplify_giv_expr ();
static int general_induction_var ();
static int consec_sets_giv ();
static int check_dbra_loop ();
static rtx express_from ();
static int combine_givs_p ();
static void combine_givs ();
static int product_cheap_p ();
static int maybe_eliminate_biv ();
static int maybe_eliminate_biv_1 ();
static int last_use_this_basic_block ();
static void record_initial ();
static void update_reg_last_use ();
/* Relative gain of eliminating various kinds of operations.  */
int add_cost;
#if 0
int shift_cost;
int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx (REG, word_mode, 0);

  add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
{
  register rtx insn;
  register int i;
  rtx last_insn;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they need
     not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      last_insn = insn;
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
		 max_reg_num ());

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}
/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
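
/* A made-up example of the idea above: in

       while (--n)
	 *p = k;

   the address in p and the value k are invariant and the store is not
   volatile, so if no read in the loop could reach *p before the store,
   the store could be performed just once.  Nothing here implements
   this; it is only a sketch of the suggestion.  */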
static void
scan_loop (loop_start, end, nregs)
     rtx loop_start, end;
     int nregs;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  n_times_set = (short *) alloca (nregs * sizeof (short));
  n_times_used = (short *) alloca (nregs * sizeof (short));
  may_not_optimize = (char *) alloca (nregs);
  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
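
  /* Concretely (an invented sketch; real RTL differs in detail), source
     like `while (a < b) a++;' is often emitted rotated:

	 NOTE_INSN_LOOP_BEG
		 jump L2		;; entry jumps down to the exit test
	 L1:	 a++
	 L2:	 if (a < b) jump L1	;; exit test at the bottom
	 NOTE_INSN_LOOP_END

     in which case SCAN_START below becomes L2 and LOOP_TOP becomes L1.  */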
  for (p = NEXT_INSN (loop_start);
       p != end
	 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be an unconditional jump (and not a RETURN).  */
      if (simplejump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
	  && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
	  && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
	{
	  loop_top = next_label (scan_start);
	  scan_start = JUMP_LABEL (p);
	}
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (short));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
		       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (short));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop_continue));
    }
  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
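
  /* An invented example of why MAYBE_NEVER matters: in

	 while (n--)
	   if (d != 0)
	     q = x / d;

     the division is invariant if x and d are, but it can trap and is
     reached only when d != 0, i.e. possibly zero times.  Hoisting it in
     front of the loop could introduce a trap the original program never
     executed, so it must not be moved once MAYBE_NEVER is set.  */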
  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
	break;
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == scan_start)
	    break;
	}

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && find_reg_note (p, REG_LIBCALL, NULL_RTX))
	in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	       && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall = 0;

      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! may_not_optimize[REGNO (SET_DEST (set))])
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;

	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }

	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  /* In order to move a register, we need to have one of three cases:
	     (1) it is used only in the same basic block as the set
	     (2) it is not a user variable and it is not used in the
		 exit test (this can cause the variable to be used
		 before it is set just like a user-variable).
	     (3) the set is guaranteed to be executed once the loop starts,
		 and the reg is not used until after that.  */
	  else if (! ((! maybe_never
		       && ! loop_reg_used_before_p (set, p, loop_start,
						    scan_start, end))
		      || (! REG_USERVAR_P (SET_DEST (set))
			  && ! REG_LOOP_TEST_P (SET_DEST (set)))
		      || reg_in_basic_block_p (p, SET_DEST (set))))
	    ;
	  else if ((tem = invariant_p (src))
		   && (dependencies == 0
		       || (tem2 = invariant_p (dependencies)) != 0)
		   && (n_times_set[REGNO (SET_DEST (set))] == 1
		       || (tem1
			   = consec_sets_invariant_p (SET_DEST (set),
						      n_times_set[REGNO (SET_DEST (set))],
						      p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      we can't move it unless it's guaranteed to be executed
		      once the loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      register struct movable *m;
	      register int regno = REGNO (SET_DEST (set));
	      /* A potential lossage case is where two insns can be
		 combined as long as they are both in the loop, but we
		 move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the
		 address of a function being called.

		 Therefore, if this register is marked as being used exactly
		 once if we are in a loop with calls (a "large loop"), see if
		 we can replace the usage of this register with the source
		 of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
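
	      /* A sketch of that replacement, in invented pseudo-RTL:

		     (set (reg 101) (symbol_ref f))  ;; P, only set of reg 101
		     ...
		     (call (mem (reg 101)) ...)      ;; the single use

		 Substituting (symbol_ref f) directly into the call and
		 deleting P keeps the combination available without
		 lengthening the life of reg 101 across the loop.
		 (Register number and symbol are made up.)  */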
	      if (reg_single_usage && reg_single_usage[regno] != 0
		  && reg_single_usage[regno] != const0_rtx
		  && regno_first_uid[regno] == INSN_UID (p)
		  && (regno_last_uid[regno]
		      == INSN_UID (reg_single_usage[regno]))
		  && n_times_set[REGNO (SET_DEST (set))] == 1
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
#ifdef SMALL_REGISTER_CLASSES
		  && ! (GET_CODE (SET_SRC (set)) == REG
			&& REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)
#endif
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   reg_single_usage[regno])
		  && no_labels_between_p (p, reg_single_usage[regno])
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   reg_single_usage[regno]))
		{
		  /* Replace any usage in a REG_EQUAL note.  Must copy the
		     new source, so that we don't get rtx sharing between the
		     SET_SOURCE and REG_NOTES of insn p.  */
		  REG_NOTES (reg_single_usage[regno])
		    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
				   SET_DEST (set), copy_rtx (SET_SRC (set)));

		  PUT_CODE (p, NOTE);
		  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
		  NOTE_SOURCE_FILE (p) = 0;
		  n_times_set[regno] = 0;
		  continue;
		}

	      m = (struct movable *) alloca (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either invariant_p or consec_sets_invariant_p
		 returned 2 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = (uid_luid[regno_last_uid[regno]] > INSN_LUID (end)
			   || uid_luid[regno_first_uid[regno]] < INSN_LUID (loop_start));
	      m->match = 0;
	      m->lifetime = (uid_luid[regno_last_uid[regno]]
			     - uid_luid[regno_first_uid[regno]]);
	      m->savings = n_times_used[regno];
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      n_times_set[regno] = move_insn ? -2 : -1;
	      /* Add M to the end of the chain MOVABLES.  */
	      if (movables == 0)
		movables = m;
	      else
		last_movable->next = m;
	      last_movable = m;

	      if (m->consec > 0)
		{
		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);

		  /* We must now reset m->move_insn, m->is_equiv, and possibly
		     m->set_src to correspond to the effects of all the
		     insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }

		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      register int regno = REGNO (SET_DEST (set));
	      if (n_times_set[regno] == 2)
		{
		  register struct movable *m;
		  m = (struct movable *) alloca (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || (uid_luid[regno_last_uid[regno]]
				   > INSN_LUID (end))
			       || (uid_luid[regno_first_uid[regno]]
				   < INSN_LUID (p))
			       || (labels_in_range_p
				   (p, uid_luid[regno_first_uid[regno]])));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = (uid_luid[regno_last_uid[regno]]
				 - uid_luid[regno_first_uid[regno]]);
		  m->savings = 1;
		  n_times_set[regno] = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  if (movables == 0)
		    movables = m;
		  else
		    last_movable->next = m;
		  last_movable = m;
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
	 so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* This code appears in three places, once in scan_loop, and twice
	 in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier and then the loop end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
		     && NEXT_INSN (NEXT_INSN (p)) == end
		     && simplejump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg it loads dies, at its last
     use, in another conditionally movable insn.  If so, record that
     the second insn "forces" the first one, since the second can be
     moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold,
		 insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
		     insn_count, loop_start, end);
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx (EXPR_LIST, VOIDmode, in_this, *output);
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (regno_first_uid[regno] != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (regno_last_uid[regno] == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (regno_last_uid[regno] == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;
	}
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a
				   library routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}
/* For each movable insn, see if the reg it loads dies, at its last
   use, in another conditionally movable insn.  If so, record that
   the second insn "forces" the first one, since the second can be
   moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == regno_last_uid[regno]
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m->forces = m1;
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
	register struct movable *m1;
	int regno = m->regno;

	bzero (matched_regs, nregs);
	matched_regs[regno] = 1;

	for (m1 = movables; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->done = 1;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    register struct movable *m1;
	    int first = uid_luid[regno_first_uid[m->regno]];
	    int last = uid_luid[regno_last_uid[m->regno]];

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
		continue;
	      }

	    /* Make sure they extend to the same mode.
	       (Almost always true.)  */
	    if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
	      continue;

	    /* We already have one: check for overlap with those
	       already combined together.  */
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1 == m0 || (m1->partial && m1->match == m0))
		if (! (uid_luid[regno_first_uid[m1->regno]] > last
		       || uid_luid[regno_last_uid[m1->regno]] < first))
		  goto overlap;

	    /* No overlap: we can combine this with the others.  */
	    m0->lifetime += m->lifetime;
	    m0->savings += m->savings;
	    m->done = 1;
	    m->match = m0;

	  overlap: ;
	  }
    }
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */
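
/* As an illustration (register number invented): if some movable M
   with M->move_insn set loads (reg 105) from the constant 42, then
   n_times_set[105] is -2 and the code below treats (reg 105) and
   (const_int 42) as equal, so two expressions differing only in that
   way can still match.  */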
static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    {
      for (m = movables; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (x)
	    && rtx_equal_p (m->set_src, y))
	  return 1;
    }
  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
	   && CONSTANT_P (x))
    {
      for (m = movables; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (y)
	    && rtx_equal_p (m->set_src, x))
	  return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_loop_p (XVECEXP (x, i, j),
				      XVECEXP (y, i, j), movables) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
	    return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use that reference.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      rtx next = next_real_insn (XEXP (x, 0));

      /* Don't record labels that refer to dispatch tables.
	 This is not necessary, since the tablejump references the same label.
	 And if we did record them, flow.c would make worse code.  */
      if (next == 0
	  || ! (GET_CODE (next) == JUMP_INSN
		&& (GET_CODE (PATTERN (next)) == ADDR_VEC
		    || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
	{
	  for (insn = insns; insn; insn = NEXT_INSN (insn))
	    if (reg_mentioned_p (XEXP (x, 0), insn))
	      REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL, XEXP (x, 0),
					  REG_NOTES (insn));
	}

      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insns);
    }
}
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (movables, threshold, insn_count, loop_start, end, nregs)
     struct movable *movables;
     int threshold;
     int insn_count;
     rtx loop_start;
     rtx end;
     int nregs;
{
  rtx new_start = 0;
  register struct movable *m;
  register rtx p;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
  char *already_moved = (char *) alloca (nregs);

  bzero (already_moved, nregs);
  bzero ((char *) reg_map, nregs * sizeof (rtx));

  num_movables = 0;

  for (m = movables; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
	{
	  fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
		   INSN_UID (m->insn), m->regno, m->lifetime);
	  if (m->consec > 0)
	    fprintf (loop_dump_stream, "consec %d, ", m->consec);
	  if (m->cond)
	    fprintf (loop_dump_stream, "cond ");
	  if (m->force)
	    fprintf (loop_dump_stream, "force ");
	  if (m->global)
	    fprintf (loop_dump_stream, "global ");
	  if (m->done)
	    fprintf (loop_dump_stream, "done ");
	  if (m->move_insn)
	    fprintf (loop_dump_stream, "move-insn ");
	  if (m->match)
	    fprintf (loop_dump_stream, "matches %d ",
		     INSN_UID (m->match->insn));
	  if (m->forces)
	    fprintf (loop_dump_stream, "forces %d ",
		     INSN_UID (m->forces->insn));
	}

      /* Count movables.  Value used in heuristics in strength_reduce.  */
      num_movables++;

      /* Ignore the insn if it's already done (it matched something else).
	 Otherwise, see if it is now safe to move.  */

      if (!m->done
	  && (! m->cond
	      || (1 == invariant_p (m->set_src)
		  && (m->dependencies == 0
		      || 1 == invariant_p (m->dependencies))
		  && (m->consec == 0
		      || 1 == consec_sets_invariant_p (m->set_dest,
						       m->consec + 1,
						       m->insn))))
	  && (! m->forces || m->forces->done))
	{
	  register int regno;
	  register rtx p;
	  int savings = m->savings;

	  /* We have an insn that is safe to move.
	     Compute its desirability.  */

	  p = m->insn;
	  regno = m->regno;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "savings %d ", savings);

	  if (moved_once[regno])
	    {
	      insn_count *= 2;

	      if (loop_dump_stream)
		fprintf (loop_dump_stream, "halved since already moved ");
	    }

	  /* An insn MUST be moved if we already moved something else
	     which is safe only if this one is moved too: that is,
	     if already_moved[REGNO] is nonzero.  */

	  /* An insn is desirable to move if the new lifetime of the
	     register is no more than THRESHOLD times the old lifetime.
	     If it's not desirable, it means the loop is so big
	     that moving won't speed things up much,
	     and it is liable to make register usage worse.  */

	  /* It is also desirable to move if it can be moved at no
	     extra cost because something else was already moved.  */

	  if (already_moved[regno]
	      || (threshold * savings * m->lifetime) >= insn_count
	      || (m->forces && m->forces->done
		  && n_times_used[m->forces->regno] == 1))
	    {
	      int count;
	      register struct movable *m1;
	      rtx first;
	      /* Now move the insns that set the reg.  */

	      if (m->partial && m->match)
		{
		  rtx newpat, i1;
		  rtx r1, r2;
		  /* Find the end of this chain of matching regs.
		     Thus, we load each reg in the chain from that one reg.
		     And that reg is loaded with 0 directly,
		     since it has ->match == 0.  */
		  for (m1 = m; m1->match; m1 = m1->match);
		  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
					  SET_DEST (PATTERN (m1->insn)));
		  i1 = emit_insn_before (newpat, loop_start);

		  /* Mark the moved, invariant reg as being allowed to
		     share a hard reg with the other matching invariant.  */
		  REG_NOTES (i1) = REG_NOTES (m->insn);
		  r1 = SET_DEST (PATTERN (m->insn));
		  r2 = SET_DEST (PATTERN (m1->insn));
		  regs_may_share = gen_rtx (EXPR_LIST, VOIDmode, r1,
					    gen_rtx (EXPR_LIST, VOIDmode, r2,
						     regs_may_share));
		  delete_insn (m->insn);

		  if (new_start == 0)
		    new_start = i1;

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
		}
	      /* If we are to re-generate the item being moved with a
		 new move insn, first delete what we have and then emit
		 the move insn before the loop.  */
	      else if (m->move_insn)
		{
		  rtx i1, temp;

		  for (count = m->consec; count >= 0; count--)
		    {
		      /* If this is the first insn of a library call sequence,
			 skip to the end.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If this is the last insn of a libcall sequence, then
			 delete every insn in the sequence except the last.
			 The last insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
			  temp = XEXP (temp, 0);
			  while (temp != p)
			    temp = delete_insn (temp);
			}

		      p = delete_insn (p);
		      while (p && GET_CODE (p) == NOTE)
			p = NEXT_INSN (p);
		    }

		  start_sequence ();
		  emit_move_insn (m->set_dest, m->set_src);
		  temp = get_insns ();
		  end_sequence ();

		  add_label_notes (m->set_src, temp);

		  i1 = emit_insns_before (temp, loop_start);
		  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
		    REG_NOTES (i1)
		      = gen_rtx (EXPR_LIST,
				 m->is_equiv ? REG_EQUIV : REG_EQUAL,
				 m->set_src, REG_NOTES (i1));

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));

		  /* The more regs we move, the less we like moving them.  */
		  threshold -= 3;
		}
	      else
		{
		  for (count = m->consec; count >= 0; count--)
		    {
		      rtx i1, temp;

		      /* If first insn of libcall sequence, skip to end.  */
		      /* Do this at start of loop, since p is guaranteed to
			 be an insn here.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If last insn of libcall sequence, move all
			 insns except the last before the loop.  The last
			 insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
			  rtx fn_address = 0;
			  rtx fn_reg = 0;
			  rtx fn_address_insn = 0;

			  first = 0;
			  for (temp = XEXP (temp, 0); temp != p;
			       temp = NEXT_INSN (temp))
			    {
			      rtx body;
			      rtx n;
			      rtx next;

			      if (GET_CODE (temp) == NOTE)
				continue;

			      body = PATTERN (temp);

			      /* Find the next insn after TEMP,
				 not counting USE or NOTE insns.  */
			      for (next = NEXT_INSN (temp); next != p;
				   next = NEXT_INSN (next))
				if (! (GET_CODE (next) == INSN
				       && GET_CODE (PATTERN (next)) == USE)
				    && GET_CODE (next) != NOTE)
				  break;

			      /* If that is the call, this may be the insn
				 that loads the function address.

				 Extract the function address from the insn
				 that loads it into a register.
				 If this insn was cse'd, we get incorrect code.

				 So emit a new move insn that copies the
				 function address into the register that the
				 call insn will use.  flow.c will delete any
				 redundant stores that we have created.  */
			      if (GET_CODE (next) == CALL_INSN
				  && GET_CODE (body) == SET
				  && GET_CODE (SET_DEST (body)) == REG
				  && (n = find_reg_note (temp, REG_EQUAL,
							 NULL_RTX)))
				{
				  fn_reg = SET_SRC (body);
				  if (GET_CODE (fn_reg) != REG)
				    fn_reg = SET_DEST (body);
				  fn_address = XEXP (n, 0);
				  fn_address_insn = temp;
				}

			      /* We have the call insn.
				 If it uses the register we suspect it might,
				 load it with the correct address directly.  */
			      if (GET_CODE (temp) == CALL_INSN
				  && fn_address != 0
				  && reg_referenced_p (fn_reg, body))
				emit_insn_after (gen_move_insn (fn_reg,
								fn_address),
						 fn_address_insn);

			      if (GET_CODE (temp) == CALL_INSN)
				{
				  i1 = emit_call_insn_before (body, loop_start);
				  /* Because the USAGE information potentially
				     contains objects other than hard registers
				     we need to copy it.  */
				  if (CALL_INSN_FUNCTION_USAGE (temp))
				    CALL_INSN_FUNCTION_USAGE (i1) =
				      copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
				}
			      else
				i1 = emit_insn_before (body, loop_start);
			      if (first == 0)
				first = i1;
			      if (temp == fn_address_insn)
				fn_address_insn = i1;
			      REG_NOTES (i1) = REG_NOTES (temp);
			      delete_insn (temp);
			    }
			}
1812 if (m->savemode != VOIDmode)
1814 /* P sets REG to zero; but we should clear only
1815 the bits that are not covered by the mode
1816 m->savemode. */
1817 rtx reg = m->set_dest;
1818 rtx sequence;
1819 rtx tem;
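/* The mask computed below is (1 << GET_MODE_BITSIZE (m->savemode)) - 1,
   e.g. 0xff when savemode is QImode; ANDing REG with it clears every
   bit outside the saved mode while leaving the low part intact.  */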
1821 start_sequence ();
1822 tem = expand_binop
1823 (GET_MODE (reg), and_optab, reg,
1824 GEN_INT ((((HOST_WIDE_INT) 1
1825 << GET_MODE_BITSIZE (m->savemode)))
1826 - 1),
1827 reg, 1, OPTAB_LIB_WIDEN);
1828 if (tem == 0)
1829 abort ();
1830 if (tem != reg)
1831 emit_move_insn (reg, tem);
1832 sequence = gen_sequence ();
1833 end_sequence ();
1834 i1 = emit_insn_before (sequence, loop_start);
1836 else if (GET_CODE (p) == CALL_INSN)
1838 i1 = emit_call_insn_before (PATTERN (p), loop_start);
1839 /* Because the USAGE information potentially
1840 contains objects other than hard registers
1841 we need to copy it. */
1842 if (CALL_INSN_FUNCTION_USAGE (p))
1843 CALL_INSN_FUNCTION_USAGE (i1) =
1844 copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1846 else
1847 i1 = emit_insn_before (PATTERN (p), loop_start);
1849 REG_NOTES (i1) = REG_NOTES (p);
1851 /* If there is a REG_EQUAL note present whose value is
1852 not loop invariant, then delete it, since it may
1853 cause problems with later optimization passes.
1854 It is possible for cse to create such notes
1855 like this as a result of record_jump_cond. */
1857 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
1858 && ! invariant_p (XEXP (temp, 0)))
1859 remove_note (i1, temp);
1861 if (new_start == 0)
1862 new_start = i1;
1864 if (loop_dump_stream)
1865 fprintf (loop_dump_stream, " moved to %d",
1866 INSN_UID (i1));
1868 #if 0
1869 /* This isn't needed because REG_NOTES is copied
1870 above and is wrong since P might be a PARALLEL. */
1871 if (REG_NOTES (i1) == 0
1872 && ! m->partial /* But not if it's a zero-extend clr. */
1873 && ! m->global /* and not if used outside the loop
1874 (since it might get set outside). */
1875 && CONSTANT_P (SET_SRC (PATTERN (p))))
1876 REG_NOTES (i1)
1877 = gen_rtx (EXPR_LIST, REG_EQUAL,
1878 SET_SRC (PATTERN (p)), REG_NOTES (i1));
1879 #endif
1881 /* If library call, now fix the REG_NOTES that contain
1882 insn pointers, namely REG_LIBCALL on FIRST
1883 and REG_RETVAL on I1. */
1884 if (temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))
1886 XEXP (temp, 0) = first;
1887 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
1888 XEXP (temp, 0) = i1;
1891 delete_insn (p);
1892 do p = NEXT_INSN (p);
1893 while (p && GET_CODE (p) == NOTE);
1896 /* The more regs we move, the less we like moving them. */
1897 threshold -= 3;
1900 /* Any other movable that loads the same register
1901 MUST be moved. */
1902 already_moved[regno] = 1;
1904 /* This reg has been moved out of one loop. */
1905 moved_once[regno] = 1;
1907 /* The reg set here is now invariant. */
1908 if (! m->partial)
1909 n_times_set[regno] = 0;
1911 m->done = 1;
1913 /* Change the length-of-life info for the register
1914 to say it lives at least the full length of this loop.
1915 This will help guide optimizations in outer loops. */
1917 if (uid_luid[regno_first_uid[regno]] > INSN_LUID (loop_start))
1918 /* This is the old insn before all the moved insns.
1919 We can't use the moved insn because it is out of range
1920 in uid_luid. Only the old insns have luids. */
1921 regno_first_uid[regno] = INSN_UID (loop_start);
1922 if (uid_luid[regno_last_uid[regno]] < INSN_LUID (end))
1923 regno_last_uid[regno] = INSN_UID (end);
1925 /* Combine with this moved insn any other matching movables. */
1927 if (! m->partial)
1928 for (m1 = movables; m1; m1 = m1->next)
1929 if (m1->match == m)
1931 rtx temp;
1933 /* Schedule the reg loaded by M1
1934 for replacement so that it shares the reg of M.
1935 If the modes differ (only possible in restricted
1936 circumstances), make a SUBREG. */
1937 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
1938 reg_map[m1->regno] = m->set_dest;
1939 else
1940 reg_map[m1->regno]
1941 = gen_lowpart_common (GET_MODE (m1->set_dest),
1942 m->set_dest);
1944 /* Get rid of the matching insn
1945 and prevent further processing of it. */
1946 m1->done = 1;
1948 /* If library call, delete all insns except the last,
1949 which is deleted below. */
1950 if (temp = find_reg_note (m1->insn, REG_RETVAL,
1951 NULL_RTX))
1953 for (temp = XEXP (temp, 0); temp != m1->insn;
1954 temp = NEXT_INSN (temp))
1955 delete_insn (temp);
1957 delete_insn (m1->insn);
1959 /* Any other movable that loads the same register
1960 MUST be moved. */
1961 already_moved[m1->regno] = 1;
1963 /* The reg merged here is now invariant,
1964 if the reg it matches is invariant. */
1965 if (! m->partial)
1966 n_times_set[m1->regno] = 0;
1969 else if (loop_dump_stream)
1970 fprintf (loop_dump_stream, "not desirable");
1972 else if (loop_dump_stream && !m->match)
1973 fprintf (loop_dump_stream, "not safe");
1975 if (loop_dump_stream)
1976 fprintf (loop_dump_stream, "\n");
1979 if (new_start == 0)
1980 new_start = loop_start;
1982 /* Go through all the instructions in the loop, making
1983 all the register substitutions scheduled in REG_MAP. */
1984 for (p = new_start; p != end; p = NEXT_INSN (p))
1985 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1986 || GET_CODE (p) == CALL_INSN)
1988 replace_regs (PATTERN (p), reg_map, nregs, 0);
1989 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
1990 INSN_CODE (p) = -1;
1994 #if 0
1995 /* Scan X and replace the address of any MEM in it with ADDR.
1996 REG is the address that MEM should have before the replacement. */
1998 static void
1999 replace_call_address (x, reg, addr)
2000 rtx x, reg, addr;
2002 register enum rtx_code code;
2003 register int i;
2004 register char *fmt;
2006 if (x == 0)
2007 return;
2008 code = GET_CODE (x);
2009 switch (code)
2011 case PC:
2012 case CC0:
2013 case CONST_INT:
2014 case CONST_DOUBLE:
2015 case CONST:
2016 case SYMBOL_REF:
2017 case LABEL_REF:
2018 case REG:
2019 return;
2021 case SET:
2022 /* Short cut for very common case. */
2023 replace_call_address (XEXP (x, 1), reg, addr);
2024 return;
2026 case CALL:
2027 /* Short cut for very common case. */
2028 replace_call_address (XEXP (x, 0), reg, addr);
2029 return;
2031 case MEM:
2032 /* If this MEM uses a reg other than the one we expected,
2033 something is wrong. */
2034 if (XEXP (x, 0) != reg)
2035 abort ();
2036 XEXP (x, 0) = addr;
2037 return;
2040 fmt = GET_RTX_FORMAT (code);
2041 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2043 if (fmt[i] == 'e')
2044 replace_call_address (XEXP (x, i), reg, addr);
2045 if (fmt[i] == 'E')
2047 register int j;
2048 for (j = 0; j < XVECLEN (x, i); j++)
2049 replace_call_address (XVECEXP (x, i, j), reg, addr);
2053 #endif
2055 /* Return the number of memory refs to addresses that vary
2056 in the rtx X. */
2058 static int
2059 count_nonfixed_reads (x)
2060 rtx x;
2062 register enum rtx_code code;
2063 register int i;
2064 register char *fmt;
2065 int value;
2067 if (x == 0)
2068 return 0;
2070 code = GET_CODE (x);
2071 switch (code)
2073 case PC:
2074 case CC0:
2075 case CONST_INT:
2076 case CONST_DOUBLE:
2077 case CONST:
2078 case SYMBOL_REF:
2079 case LABEL_REF:
2080 case REG:
2081 return 0;
2083 case MEM:
2084 return ((invariant_p (XEXP (x, 0)) != 1)
2085 + count_nonfixed_reads (XEXP (x, 0)));
2088 value = 0;
2089 fmt = GET_RTX_FORMAT (code);
2090 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2092 if (fmt[i] == 'e')
2093 value += count_nonfixed_reads (XEXP (x, i));
2094 if (fmt[i] == 'E')
2096 register int j;
2097 for (j = 0; j < XVECLEN (x, i); j++)
2098 value += count_nonfixed_reads (XVECEXP (x, i, j));
2101 return value;
2105 #if 0
2106 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2107 Replace it with an instruction to load just the low bytes
2108 if the machine supports such an instruction,
2109 and insert above LOOP_START an instruction to clear the register. */
2111 static void
2112 constant_high_bytes (p, loop_start)
2113 rtx p, loop_start;
2115 register rtx new;
2116 register int insn_code_number;
2118 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2119 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2121 new = gen_rtx (SET, VOIDmode,
2122 gen_rtx (STRICT_LOW_PART, VOIDmode,
2123 gen_rtx (SUBREG, GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2124 SET_DEST (PATTERN (p)),
2125 0)),
2126 XEXP (SET_SRC (PATTERN (p)), 0));
2127 insn_code_number = recog (new, p);
2129 if (insn_code_number)
2131 register int i;
2133 /* Clear destination register before the loop. */
2134 emit_insn_before (gen_rtx (SET, VOIDmode,
2135 SET_DEST (PATTERN (p)),
2136 const0_rtx),
2137 loop_start);
2139 /* Inside the loop, just load the low part. */
2140 PATTERN (p) = new;
2143 #endif
2145 /* Scan a loop setting the variables `unknown_address_altered',
2146 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2147 and `loop_has_volatile'.
2148 Also, fill in the array `loop_store_mems'. */
2150 static void
2151 prescan_loop (start, end)
2152 rtx start, end;
2154 register int level = 1;
2155 register rtx insn;
2157 unknown_address_altered = 0;
2158 loop_has_call = 0;
2159 loop_has_volatile = 0;
2160 loop_store_mems_idx = 0;
2162 num_mem_sets = 0;
2163 loops_enclosed = 1;
2164 loop_continue = 0;
2166 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2167 insn = NEXT_INSN (insn))
2169 if (GET_CODE (insn) == NOTE)
2171 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2173 ++level;
2174 /* Count number of loops contained in this one. */
2175 loops_enclosed++;
2177 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2179 --level;
2180 if (level == 0)
2182 end = insn;
2183 break;
2186 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2188 if (level == 1)
2189 loop_continue = insn;
2192 else if (GET_CODE (insn) == CALL_INSN)
2194 unknown_address_altered = 1;
2195 loop_has_call = 1;
2197 else
2199 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2201 if (volatile_refs_p (PATTERN (insn)))
2202 loop_has_volatile = 1;
2204 note_stores (PATTERN (insn), note_addr_stored);
2210 /* Scan the function looking for loops. Record the start and end of each loop.
2211 Also mark as invalid loops any loops that contain a setjmp or are branched
2212 to from outside the loop. */
2214 static void
2215 find_and_verify_loops (f)
2216 rtx f;
2218 rtx insn, label;
2219 int current_loop = -1;
2220 int next_loop = -1;
2221 int loop;
2223 /* If there are jumps to undefined labels,
2224 treat them as jumps out of any/all loops.
2225 This also avoids writing past end of tables when there are no loops. */
2226 uid_loop_num[0] = -1;
2228 /* Find boundaries of loops, mark which loops are contained within
2229 loops, and invalidate loops that have setjmp. */
2231 for (insn = f; insn; insn = NEXT_INSN (insn))
2233 if (GET_CODE (insn) == NOTE)
2234 switch (NOTE_LINE_NUMBER (insn))
2236 case NOTE_INSN_LOOP_BEG:
2237 loop_number_loop_starts[++next_loop] = insn;
2238 loop_number_loop_ends[next_loop] = 0;
2239 loop_outer_loop[next_loop] = current_loop;
2240 loop_invalid[next_loop] = 0;
2241 loop_number_exit_labels[next_loop] = 0;
2242 current_loop = next_loop;
2243 break;
2245 case NOTE_INSN_SETJMP:
2246 /* In this case, we must invalidate our current loop and any
2247 enclosing loop. */
2248 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2250 loop_invalid[loop] = 1;
2251 if (loop_dump_stream)
2252 fprintf (loop_dump_stream,
2253 "\nLoop at %d ignored due to setjmp.\n",
2254 INSN_UID (loop_number_loop_starts[loop]));
2256 break;
2258 case NOTE_INSN_LOOP_END:
2259 if (current_loop == -1)
2260 abort ();
2262 loop_number_loop_ends[current_loop] = insn;
2263 current_loop = loop_outer_loop[current_loop];
2264 break;
2268 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2269 enclosing loop, but this doesn't matter. */
2270 uid_loop_num[INSN_UID (insn)] = current_loop;
2273 /* Any loop containing a label used in an initializer must be invalidated,
2274 because it can be jumped into from anywhere. */
2276 for (label = forced_labels; label; label = XEXP (label, 1))
2278 int loop_num;
2280 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2281 loop_num != -1;
2282 loop_num = loop_outer_loop[loop_num])
2283 loop_invalid[loop_num] = 1;
2286 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2287 loop that it is not contained within, that loop is marked invalid.
2288 If any INSN or CALL_INSN uses a label's address, then the loop containing
2289 that label is marked invalid, because it could be jumped into from
2290 anywhere.
2292 Also look for blocks of code ending in an unconditional branch that
2293 exits the loop. If such a block is surrounded by a conditional
2294 branch around the block, move the block elsewhere (see below) and
2295 invert the jump to point to the code block. This may eliminate a
2296 label in our loop and will simplify processing by both us and a
2297 possible second cse pass. */
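/* An illustrative sketch (hypothetical labels): a loop fragment

	if (cond) goto L1;
	goto LEXIT;
     L1: ...

   becomes `if (! cond) goto LNEW;' with the block `LNEW: goto LEXIT;'
   moved after a BARRIER elsewhere, so L1 can disappear from the loop.  */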
2299 for (insn = f; insn; insn = NEXT_INSN (insn))
2300 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2302 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2304 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2306 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2307 if (note)
2309 int loop_num;
2311 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2312 loop_num != -1;
2313 loop_num = loop_outer_loop[loop_num])
2314 loop_invalid[loop_num] = 1;
2318 if (GET_CODE (insn) != JUMP_INSN)
2319 continue;
2321 mark_loop_jump (PATTERN (insn), this_loop_num);
2323 /* See if this is an unconditional branch outside the loop. */
2324 if (this_loop_num != -1
2325 && (GET_CODE (PATTERN (insn)) == RETURN
2326 || (simplejump_p (insn)
2327 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2328 != this_loop_num)))
2329 && get_max_uid () < max_uid_for_loop)
2331 rtx p;
2332 rtx our_next = next_real_insn (insn);
2334 /* Go backwards until we reach the start of the loop, a label,
2335 or a JUMP_INSN. */
2336 for (p = PREV_INSN (insn);
2337 GET_CODE (p) != CODE_LABEL
2338 && ! (GET_CODE (p) == NOTE
2339 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2340 && GET_CODE (p) != JUMP_INSN;
2341 p = PREV_INSN (p))
2344 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2345 we have a block of code to try to move.
2347 We look backward and then forward from the target of INSN
2348 to find a BARRIER at the same loop depth as the target.
2349 If we find such a BARRIER, we make a new label for the start
2350 of the block, invert the jump in P and point it to that label,
2351 and move the block of code to the spot we found. */
2353 if (GET_CODE (p) == JUMP_INSN
2354 && JUMP_LABEL (p) != 0
2355 /* Just ignore jumps to labels that were never emitted.
2356 These always indicate compilation errors. */
2357 && INSN_UID (JUMP_LABEL (p)) != 0
2358 && condjump_p (p)
2359 && ! simplejump_p (p)
2360 && next_real_insn (JUMP_LABEL (p)) == our_next)
2362 rtx target
2363 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2364 int target_loop_num = uid_loop_num[INSN_UID (target)];
2365 rtx loc;
2367 for (loc = target; loc; loc = PREV_INSN (loc))
2368 if (GET_CODE (loc) == BARRIER
2369 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2370 break;
2372 if (loc == 0)
2373 for (loc = target; loc; loc = NEXT_INSN (loc))
2374 if (GET_CODE (loc) == BARRIER
2375 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2376 break;
2378 if (loc)
2380 rtx cond_label = JUMP_LABEL (p);
2381 rtx new_label = get_label_after (p);
2383 /* Ensure our label doesn't go away. */
2384 LABEL_NUSES (cond_label)++;
2386 /* Verify that uid_loop_num is large enough and that
2387 we can invert P. */
2388 if (invert_jump (p, new_label))
2390 rtx q, r;
2392 /* Include the BARRIER after INSN and copy the
2393 block after LOC. */
2394 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2395 reorder_insns (new_label, NEXT_INSN (insn), loc);
2397 /* All those insns are now in TARGET_LOOP_NUM. */
2398 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2399 q = NEXT_INSN (q))
2400 uid_loop_num[INSN_UID (q)] = target_loop_num;
2402 /* The label jumped to by INSN is no longer a loop exit.
2403 Unless INSN does not have a label (e.g., it is a
2404 RETURN insn), search loop_number_exit_labels to find
2405 its label_ref, and remove it. Also turn off
2406 LABEL_OUTSIDE_LOOP_P bit. */
2407 if (JUMP_LABEL (insn))
2409 for (q = 0,
2410 r = loop_number_exit_labels[this_loop_num];
2411 r; q = r, r = LABEL_NEXTREF (r))
2412 if (XEXP (r, 0) == JUMP_LABEL (insn))
2414 LABEL_OUTSIDE_LOOP_P (r) = 0;
2415 if (q)
2416 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2417 else
2418 loop_number_exit_labels[this_loop_num]
2419 = LABEL_NEXTREF (r);
2420 break;
2423 /* If we didn't find it, then something is wrong. */
2424 if (! r)
2425 abort ();
2428 /* P is now a jump outside the loop, so it must be put
2429 in loop_number_exit_labels, and marked as such.
2430 The easiest way to do this is to just call
2431 mark_loop_jump again for P. */
2432 mark_loop_jump (PATTERN (p), this_loop_num);
2434 /* If INSN now jumps to the insn after it,
2435 delete INSN. */
2436 if (JUMP_LABEL (insn) != 0
2437 && (next_real_insn (JUMP_LABEL (insn))
2438 == next_real_insn (insn)))
2439 delete_insn (insn);
2442 /* Continue the loop after where the conditional
2443 branch used to jump, since the only branch insn
2444 in the block (if it still remains) is an inter-loop
2445 branch and hence needs no processing. */
2446 insn = NEXT_INSN (cond_label);
2448 if (--LABEL_NUSES (cond_label) == 0)
2449 delete_insn (cond_label);
2451 /* This loop will be continued with NEXT_INSN (insn). */
2452 insn = PREV_INSN (insn);
2459 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2460 loops it is contained in, mark the target loop invalid.
2462 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2464 static void
2465 mark_loop_jump (x, loop_num)
2466 rtx x;
2467 int loop_num;
2469 int dest_loop;
2470 int outer_loop;
2471 int i;
2473 switch (GET_CODE (x))
2475 case PC:
2476 case USE:
2477 case CLOBBER:
2478 case REG:
2479 case MEM:
2480 case CONST_INT:
2481 case CONST_DOUBLE:
2482 case RETURN:
2483 return;
2485 case CONST:
2486 /* There could be a label reference in here. */
2487 mark_loop_jump (XEXP (x, 0), loop_num);
2488 return;
2490 case PLUS:
2491 case MINUS:
2492 case MULT:
2493 mark_loop_jump (XEXP (x, 0), loop_num);
2494 mark_loop_jump (XEXP (x, 1), loop_num);
2495 return;
2497 case SIGN_EXTEND:
2498 case ZERO_EXTEND:
2499 mark_loop_jump (XEXP (x, 0), loop_num);
2500 return;
2502 case LABEL_REF:
2503 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2505 /* Link together all labels that branch outside the loop. This
2506 is used by final_[bg]iv_value and the loop unrolling code. Also
2507 mark this LABEL_REF so we know that this branch should predict
2508 false. */
2510 if (dest_loop != loop_num && loop_num != -1)
2512 LABEL_OUTSIDE_LOOP_P (x) = 1;
2513 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2514 loop_number_exit_labels[loop_num] = x;
2517 /* If this is inside a loop, but not in the current loop or one enclosed
2518 by it, it invalidates at least one loop. */
2520 if (dest_loop == -1)
2521 return;
2523 /* We must invalidate every nested loop containing the target of this
2524 label, except those that also contain the jump insn. */
2526 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2528 /* Stop when we reach a loop that also contains the jump insn. */
2529 for (outer_loop = loop_num; outer_loop != -1;
2530 outer_loop = loop_outer_loop[outer_loop])
2531 if (dest_loop == outer_loop)
2532 return;
2534 /* If we get here, we know we need to invalidate a loop. */
2535 if (loop_dump_stream && ! loop_invalid[dest_loop])
2536 fprintf (loop_dump_stream,
2537 "\nLoop at %d ignored due to multiple entry points.\n",
2538 INSN_UID (loop_number_loop_starts[dest_loop]));
2540 loop_invalid[dest_loop] = 1;
2542 return;
2544 case SET:
2545 /* If this sets pc, scan the source of the set; otherwise ignore. */
2546 if (SET_DEST (x) == pc_rtx)
2547 mark_loop_jump (SET_SRC (x), loop_num);
2548 return;
2550 case IF_THEN_ELSE:
2551 mark_loop_jump (XEXP (x, 1), loop_num);
2552 mark_loop_jump (XEXP (x, 2), loop_num);
2553 return;
2555 case PARALLEL:
2556 case ADDR_VEC:
2557 for (i = 0; i < XVECLEN (x, 0); i++)
2558 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2559 return;
2561 case ADDR_DIFF_VEC:
2562 for (i = 0; i < XVECLEN (x, 1); i++)
2563 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2564 return;
2566 default:
2567 /* Treat anything else (such as a symbol_ref)
2568 as a branch out of this loop, but not into any loop. */
2570 if (loop_num != -1)
2571 loop_number_exit_labels[loop_num] = x;
2573 return;
2577 /* Return nonzero if there is a label in the range from
2578 insn INSN to and including the insn whose luid is END.
2579 INSN must have an assigned luid (i.e., it must not have
2580 been previously created by loop.c). */
2582 static int
2583 labels_in_range_p (insn, end)
2584 rtx insn;
2585 int end;
2587 while (insn && INSN_LUID (insn) <= end)
2589 if (GET_CODE (insn) == CODE_LABEL)
2590 return 1;
2591 insn = NEXT_INSN (insn);
2594 return 0;
2597 /* Record that a memory reference X is being set. */
2599 static void
2600 note_addr_stored (x)
2601 rtx x;
2603 register int i;
2605 if (x == 0 || GET_CODE (x) != MEM)
2606 return;
2608 /* Count number of memory writes.
2609 This affects heuristics in strength_reduce. */
2610 num_mem_sets++;
2612 /* BLKmode MEM means all memory is clobbered. */
2613 if (GET_MODE (x) == BLKmode)
2614 unknown_address_altered = 1;
2616 if (unknown_address_altered)
2617 return;
2619 for (i = 0; i < loop_store_mems_idx; i++)
2620 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2621 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2623 /* We are storing at the same address as previously noted. Save the
2624 wider reference. */
2625 if (GET_MODE_SIZE (GET_MODE (x))
2626 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2627 loop_store_mems[i] = x;
2628 break;
2631 if (i == NUM_STORES)
2632 unknown_address_altered = 1;
2634 else if (i == loop_store_mems_idx)
2635 loop_store_mems[loop_store_mems_idx++] = x;
2638 /* Return nonzero if the rtx X is invariant over the current loop.
2640 The value is 2 if we refer to something only conditionally invariant.
2642 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2643 Otherwise, a memory ref is invariant if it does not conflict with
2644 anything stored in `loop_store_mems'. */
2647 invariant_p (x)
2648 register rtx x;
2650 register int i;
2651 register enum rtx_code code;
2652 register char *fmt;
2653 int conditional = 0;
2655 if (x == 0)
2656 return 1;
2657 code = GET_CODE (x);
2658 switch (code)
2660 case CONST_INT:
2661 case CONST_DOUBLE:
2662 case SYMBOL_REF:
2663 case CONST:
2664 return 1;
2666 case LABEL_REF:
2667 /* A LABEL_REF is normally invariant. However, if we are unrolling
2668 loops, and this label is inside the loop, then it isn't invariant.
2669 This is because each unrolled copy of the loop body will have
2670 a copy of this label. If this was invariant, then an insn loading
2671 the address of this label into a register might get moved outside
2672 the loop, and then each loop body would end up using the same label.
2674 We don't know the loop bounds here though, so just fail for all
2675 labels. */
2676 if (flag_unroll_loops)
2677 return 0;
2678 else
2679 return 1;
2681 case PC:
2682 case CC0:
2683 case UNSPEC_VOLATILE:
2684 return 0;
2686 case REG:
2687 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2688 since the reg might be set by initialization within the loop. */
2689 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2690 || x == arg_pointer_rtx)
2691 return 1;
2692 if (loop_has_call
2693 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2694 return 0;
2695 if (n_times_set[REGNO (x)] < 0)
2696 return 2;
2697 return n_times_set[REGNO (x)] == 0;
2699 case MEM:
2700 /* Volatile memory references must be rejected. Do this before
2701 checking for read-only items, so that volatile read-only items
2702 will be rejected also. */
2703 if (MEM_VOLATILE_P (x))
2704 return 0;
2706 /* Read-only items (such as constants in a constant pool) are
2707 invariant if their address is. */
2708 if (RTX_UNCHANGING_P (x))
2709 break;
2711 /* If we filled the table (or had a subroutine call), any location
2712 in memory could have been clobbered. */
2713 if (unknown_address_altered)
2714 return 0;
2716 /* See if there is any dependence between a store and this load. */
2717 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2718 if (true_dependence (loop_store_mems[i], x))
2719 return 0;
2721 /* It's not invalidated by a store in memory
2722 but we must still verify the address is invariant. */
2723 break;
2725 case ASM_OPERANDS:
2726 /* Don't mess with insns declared volatile. */
2727 if (MEM_VOLATILE_P (x))
2728 return 0;
2731 fmt = GET_RTX_FORMAT (code);
2732 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2734 if (fmt[i] == 'e')
2736 int tem = invariant_p (XEXP (x, i));
2737 if (tem == 0)
2738 return 0;
2739 if (tem == 2)
2740 conditional = 1;
2742 else if (fmt[i] == 'E')
2744 register int j;
2745 for (j = 0; j < XVECLEN (x, i); j++)
2747 int tem = invariant_p (XVECEXP (x, i, j));
2748 if (tem == 0)
2749 return 0;
2750 if (tem == 2)
2751 conditional = 1;
2757 return 1 + conditional;
2761 /* Return nonzero if all the insns in the loop that set REG
2762 are INSN and the immediately following insns,
2763 and if each of those insns sets REG in an invariant way
2764 (not counting uses of REG in them).
2766 The value is 2 if some of these insns are only conditionally invariant.
2768 We assume that INSN itself is the first set of REG
2769 and that its source is invariant. */
2771 static int
2772 consec_sets_invariant_p (reg, n_sets, insn)
2773 int n_sets;
2774 rtx reg, insn;
2776 register rtx p = insn;
2777 register int regno = REGNO (reg);
2778 rtx temp;
2779 /* Number of sets we have to insist on finding after INSN. */
2780 int count = n_sets - 1;
2781 int old = n_times_set[regno];
2782 int value = 0;
2783 int this;
2785 /* If N_SETS hit the limit, we can't rely on its value. */
2786 if (n_sets == 127)
2787 return 0;
2789 n_times_set[regno] = 0;
2791 while (count > 0)
2793 register enum rtx_code code;
2794 rtx set;
2796 p = NEXT_INSN (p);
2797 code = GET_CODE (p);
2799 /* If library call, skip to end of it. */
2800 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2801 p = XEXP (temp, 0);
2803 this = 0;
2804 if (code == INSN
2805 && (set = single_set (p))
2806 && GET_CODE (SET_DEST (set)) == REG
2807 && REGNO (SET_DEST (set)) == regno)
2809 this = invariant_p (SET_SRC (set));
2810 if (this != 0)
2811 value |= this;
2812 else if (temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
2814 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
2815 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
2816 notes are OK. */
2817 this = (CONSTANT_P (XEXP (temp, 0))
2818 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
2819 && invariant_p (XEXP (temp, 0))));
2820 if (this != 0)
2821 value |= this;
2824 if (this != 0)
2825 count--;
2826 else if (code != NOTE)
2828 n_times_set[regno] = old;
2829 return 0;
2833 n_times_set[regno] = old;
2834 /* If invariant_p ever returned 2, VALUE has bit 1 set and we return 3, i.e. conditionally invariant. */
2835 return 1 + (value & 2);
2838 #if 0
2839 /* I don't think this condition is sufficient to allow INSN
2840 to be moved, so we no longer test it. */
2842 /* Return 1 if all insns in the basic block of INSN and following INSN
2843 that set REG are invariant according to TABLE. */
2845 static int
2846 all_sets_invariant_p (reg, insn, table)
2847 rtx reg, insn;
2848 short *table;
2850 register rtx p = insn;
2851 register int regno = REGNO (reg);
2853 while (1)
2855 register enum rtx_code code;
2856 p = NEXT_INSN (p);
2857 code = GET_CODE (p);
2858 if (code == CODE_LABEL || code == JUMP_INSN)
2859 return 1;
2860 if (code == INSN && GET_CODE (PATTERN (p)) == SET
2861 && GET_CODE (SET_DEST (PATTERN (p))) == REG
2862 && REGNO (SET_DEST (PATTERN (p))) == regno)
2864 if (!invariant_p (SET_SRC (PATTERN (p)), table))
2865 return 0;
2869 #endif /* 0 */
2871 /* Look at all uses (not sets) of registers in X. For each, if it is
2872 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
2873 a different insn, set USAGE[REGNO] to const0_rtx. */
2875 static void
2876 find_single_use_in_loop (insn, x, usage)
2877 rtx insn;
2878 rtx x;
2879 rtx *usage;
2881 enum rtx_code code = GET_CODE (x);
2882 char *fmt = GET_RTX_FORMAT (code);
2883 int i, j;
2885 if (code == REG)
2886 usage[REGNO (x)]
2887 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
2888 ? const0_rtx : insn;
2890 else if (code == SET)
2892 /* Don't count SET_DEST if it is a REG; otherwise count things
2893 in SET_DEST, because if a register is partially modified it won't
2894 show up as a potential movable, so we don't care how USAGE is set
2895 for it. */
2896 if (GET_CODE (SET_DEST (x)) != REG)
2897 find_single_use_in_loop (insn, SET_DEST (x), usage);
2898 find_single_use_in_loop (insn, SET_SRC (x), usage);
2900 else
2901 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2903 if (fmt[i] == 'e' && XEXP (x, i) != 0)
2904 find_single_use_in_loop (insn, XEXP (x, i), usage);
2905 else if (fmt[i] == 'E')
2906 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2907 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
2911 /* Increment N_TIMES_SET at the index of each register
2912 that is modified by an insn between FROM and TO.
2913 If the value of an element of N_TIMES_SET becomes 127 or more,
2914 stop incrementing it, to avoid overflow.
2916 Store in SINGLE_USAGE[I] the single insn in which register I is
2917 used, if it is only used once. Otherwise, it is set to 0 (for no
2918 uses) or const0_rtx for more than one use. This parameter may be zero,
2919 in which case this processing is not done.
2921 Store in *COUNT_PTR the number of actual instructions
2922 in the loop. We use this to decide what is worth moving out. */
2924 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
2925 In that case, it is the insn that last set reg n. */
2927 static void
2928 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
2929 register rtx from, to;
2930 char *may_not_move;
2931 rtx *single_usage;
2932 int *count_ptr;
2933 int nregs;
2935 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
2936 register rtx insn;
2937 register int count = 0;
2938 register rtx dest;
2940 bzero ((char *) last_set, nregs * sizeof (rtx));
2941 for (insn = from; insn != to; insn = NEXT_INSN (insn))
2943 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2945 ++count;
2947 /* If requested, record registers that have exactly one use. */
2948 if (single_usage)
2950 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
2952 /* Include uses in REG_EQUAL notes. */
2953 if (REG_NOTES (insn))
2954 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
2957 if (GET_CODE (PATTERN (insn)) == CLOBBER
2958 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
2959 /* Don't move a reg that has an explicit clobber.
2960 We might do so sometimes, but it's not worth the pain. */
2961 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
2963 if (GET_CODE (PATTERN (insn)) == SET
2964 || GET_CODE (PATTERN (insn)) == CLOBBER)
2966 dest = SET_DEST (PATTERN (insn));
2967 while (GET_CODE (dest) == SUBREG
2968 || GET_CODE (dest) == ZERO_EXTRACT
2969 || GET_CODE (dest) == SIGN_EXTRACT
2970 || GET_CODE (dest) == STRICT_LOW_PART)
2971 dest = XEXP (dest, 0);
2972 if (GET_CODE (dest) == REG)
2974 register int regno = REGNO (dest);
2975 /* If this is the first setting of this reg
2976 in current basic block, and it was set before,
2977 it must be set in two basic blocks, so it cannot
2978 be moved out of the loop. */
2979 if (n_times_set[regno] > 0 && last_set[regno] == 0)
2980 may_not_move[regno] = 1;
2981 /* If this is not the first setting in the current basic block,
2982 see if the reg was used between the previous setting and this one.
2983 If so, neither one can be moved. */
2984 if (last_set[regno] != 0
2985 && reg_used_between_p (dest, last_set[regno], insn))
2986 may_not_move[regno] = 1;
2987 if (n_times_set[regno] < 127)
2988 ++n_times_set[regno];
2989 last_set[regno] = insn;
2992 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2994 register int i;
2995 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
2997 register rtx x = XVECEXP (PATTERN (insn), 0, i);
2998 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
2999 /* Don't move a reg that has an explicit clobber.
3000 It's not worth the pain to try to do it correctly. */
3001 may_not_move[REGNO (XEXP (x, 0))] = 1;
3003 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3005 dest = SET_DEST (x);
3006 while (GET_CODE (dest) == SUBREG
3007 || GET_CODE (dest) == ZERO_EXTRACT
3008 || GET_CODE (dest) == SIGN_EXTRACT
3009 || GET_CODE (dest) == STRICT_LOW_PART)
3010 dest = XEXP (dest, 0);
3011 if (GET_CODE (dest) == REG)
3013 register int regno = REGNO (dest);
3014 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3015 may_not_move[regno] = 1;
3016 if (last_set[regno] != 0
3017 && reg_used_between_p (dest, last_set[regno], insn))
3018 may_not_move[regno] = 1;
3019 if (n_times_set[regno] < 127)
3020 ++n_times_set[regno];
3021 last_set[regno] = insn;
3028 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3029 bzero ((char *) last_set, nregs * sizeof (rtx));
3031 *count_ptr = count;
3034 /* Given a loop that is bounded by LOOP_START and LOOP_END
3035 and that is entered at SCAN_START,
3036 return 1 if the register set in SET contained in insn INSN is used by
3037 any insn that precedes INSN in cyclic order starting
3038 from the loop entry point.
3040 We don't want to use INSN_LUID here because if we restrict INSN to those
3041 that have a valid INSN_LUID, it means we cannot move an invariant out
3042 from an inner loop past two loops. */
3044 static int
3045 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3046 rtx set, insn, loop_start, scan_start, loop_end;
3048 rtx reg = SET_DEST (set);
3049 rtx p;
3051 /* Scan forward checking for register usage. If we hit INSN, we
3052 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3053 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3055 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3056 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3057 return 1;
3059 if (p == loop_end)
3060 p = loop_start;
3063 return 0;
3066 /* A "basic induction variable" or biv is a pseudo reg that is set
3067 (within this loop) only by incrementing or decrementing it. */
3068 /* A "general induction variable" or giv is a pseudo reg whose
3069 value is a linear function of a biv. */
3071 /* Bivs are recognized by `basic_induction_var';
3072 Givs by `general_induction_var'. */
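#if 0
/* An illustrative sketch, not part of the pass: in the loop below,
   `i' is a biv, since inside the loop it changes only by having a
   constant added to it; the address `&p[i]' (p plus i times the size
   of an int) is a giv, a linear function of the biv.  */
static void
example_biv_giv (p, n)
     int *p, n;
{
  register int i;

  for (i = 0; i < n; i++)	/* I is the biv.  */
    p[i] = 0;			/* The address p + i * sizeof (int) is a giv of I.  */
}
#endif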
3074 /* Indexed by register number, indicates whether or not register is an
3075 induction variable, and if so what type. */
3077 enum iv_mode *reg_iv_type;
3079 /* Indexed by register number, contains pointer to `struct induction'
3080 if register is an induction variable. This holds general info for
3081 all induction variables. */
3083 struct induction **reg_iv_info;
3085 /* Indexed by register number, contains pointer to `struct iv_class'
3086 if register is a basic induction variable. This holds info describing
3087 the class (a related group) of induction variables that the biv belongs
3088 to. */
3090 struct iv_class **reg_biv_class;
3092 /* The head of a list which links together (via the next field)
3093 every iv class for the current loop. */
3095 struct iv_class *loop_iv_list;
3097 /* Communication with routines called via `note_stores'. */
3099 static rtx note_insn;
3101 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3103 static rtx addr_placeholder;
3105 /* ??? Unfinished optimizations, and possible future optimizations,
3106 for the strength reduction code. */
3108 /* ??? There is one more optimization you might be interested in doing: to
3109 allocate pseudo registers for frequently-accessed memory locations.
3110 If the same memory location is referenced each time around, it might
3111 be possible to copy it into a register before and out after.
3112 This is especially useful when the memory location is a variable which
3113 is in a stack slot because somewhere its address is taken. If the
3114 loop doesn't contain a function call and the variable isn't volatile,
3115 it is safe to keep the value in a register for the duration of the
3116 loop. One tricky thing is that the copying of the value back from the
3117 register has to be done on all exits from the loop. You need to check that
3118 all the exits from the loop go to the same place. */
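#if 0
/* A sketch of the promotion described above (hypothetical example;
   this pass does not perform it).  The memory location *COUNTP is
   copied into a register before the loop and copied back on exit:  */
static void
example_promote (countp, n)
     int *countp, n;
{
  register int tmp = *countp;	/* Load once before the loop.  */
  register int i;

  for (i = 0; i < n; i++)
    tmp += i;			/* Was: *countp += i;  */

  *countp = tmp;		/* Store back on every exit from the loop.  */
}
#endif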
3120 /* ??? The interaction of biv elimination, and recognition of 'constant'
3121 bivs, may cause problems. */
3123 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3124 performance problems.
3126 Perhaps don't eliminate things that can be combined with an addressing
3127 mode. Find all givs that have the same biv, mult_val, and add_val;
3128 then for each giv, check to see if its only use dies in a following
3129 memory address. If so, generate a new memory address and check to see
3130 if it is valid. If it is valid, then store the modified memory address,
3131 otherwise, mark the giv as not done so that it will get its own iv. */
3133 /* ??? Could try to optimize branches when it is known that a biv is always
3134 positive. */
3136 /* ??? When replacing a biv in a compare insn, we should replace with the closest
3137 giv so that an optimized branch can still be recognized by the combiner,
3138 e.g. the VAX acb insn. */
3140 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3141 was rerun in loop_optimize whenever a register was added or moved.
3142 Also, some of the optimizations could be a little less conservative. */
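/* A worked example of what strength reduction does (illustrative only).
   For the source loop

	for (i = 0; i < n; i++)
	  *(int *) (base + i * 4) = i;

   the giv `base + i*4' gets a reduced register T, set to BASE before
   the loop and incremented by 4 at each update of the biv I:

	for (i = 0, t = base; i < n; i++, t += 4)
	  *(int *) t = i;

   so the multiply (or shift) vanishes from the loop body.  */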
3144 /* Perform strength reduction and induction variable elimination. */
3146 /* Pseudo registers created during this function will be beyond the last
3147 valid index in several tables including n_times_set and regno_last_uid.
3148 This does not cause a problem here, because the added registers cannot be
3149 givs outside of their loop, and hence will never be reconsidered.
3150 But scan_loop must check regnos to make sure they are in bounds. */
3152 static void
3153 strength_reduce (scan_start, end, loop_top, insn_count,
3154 loop_start, loop_end)
3155 rtx scan_start;
3156 rtx end;
3157 rtx loop_top;
3158 int insn_count;
3159 rtx loop_start;
3160 rtx loop_end;
3162 rtx p;
3163 rtx set;
3164 rtx inc_val;
3165 rtx mult_val;
3166 rtx dest_reg;
3167 /* This is 1 if current insn is not executed at least once for every loop
3168 iteration. */
3169 int not_every_iteration = 0;
3170 /* This is 1 if current insn may be executed more than once for every
3171 loop iteration. */
3172 int maybe_multiple = 0;
3173 /* Temporary list pointers for traversing loop_iv_list. */
3174 struct iv_class *bl, **backbl;
3175 /* Ratio of extra register life span we can justify
3176 for saving an instruction. More if loop doesn't call subroutines
3177 since in that case saving an insn makes more difference
3178 and more registers are available. */
3179 /* ??? could set this to last value of threshold in move_movables */
3180 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3181 /* Map of pseudo-register replacements. */
3182 rtx *reg_map;
3183 int call_seen;
3184 rtx test;
3185 rtx end_insert_before;
3186 int loop_depth = 0;
3188 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3189 * sizeof (enum iv_mode));
3190 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode));
3191 reg_iv_info = (struct induction **)
3192 alloca (max_reg_before_loop * sizeof (struct induction *));
3193 bzero ((char *) reg_iv_info, (max_reg_before_loop
3194 * sizeof (struct induction *)));
3195 reg_biv_class = (struct iv_class **)
3196 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3197 bzero ((char *) reg_biv_class, (max_reg_before_loop
3198 * sizeof (struct iv_class *)));
3200 loop_iv_list = 0;
3201 addr_placeholder = gen_reg_rtx (Pmode);
3203 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3204 must be put before this insn, so that they will appear in the right
3205 order (i.e. loop order).
3207 If loop_end is the end of the current function, then emit a
3208 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3209 dummy note insn. */
3210 if (NEXT_INSN (loop_end) != 0)
3211 end_insert_before = NEXT_INSN (loop_end);
3212 else
3213 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3215 /* Scan through loop to find all possible bivs. */
3217 p = scan_start;
3218 while (1)
3220 p = NEXT_INSN (p);
3221 /* At end of a straight-in loop, we are done.
3222 At end of a loop entered at the bottom, scan the top. */
3223 if (p == scan_start)
3224 break;
3225 if (p == end)
3227 if (loop_top != 0)
3228 p = loop_top;
3229 else
3230 break;
3231 if (p == scan_start)
3232 break;
3235 if (GET_CODE (p) == INSN
3236 && (set = single_set (p))
3237 && GET_CODE (SET_DEST (set)) == REG)
3239 dest_reg = SET_DEST (set);
3240 if (REGNO (dest_reg) < max_reg_before_loop
3241 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3242 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3244 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3245 dest_reg, p, &inc_val, &mult_val))
3247 /* It is a possible basic induction variable.
3248 Create and initialize an induction structure for it. */
3250 struct induction *v
3251 = (struct induction *) alloca (sizeof (struct induction));
3253 record_biv (v, p, dest_reg, inc_val, mult_val,
3254 not_every_iteration, maybe_multiple);
3255 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3257 else if (REGNO (dest_reg) < max_reg_before_loop)
3258 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3262 /* Past CODE_LABEL, we get to insns that may be executed multiple
3263 times. The only way we can be sure that they can't is if every
3264 jump insn between here and the end of the loop either
3265 returns, exits the loop, or is a forward jump. */
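/* For example (illustrative), the insns following the label that heads
   an inner loop run once per inner iteration, i.e. possibly many times
   for each iteration of the loop being scanned here.  */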
3267 if (GET_CODE (p) == CODE_LABEL)
3269 rtx insn = p;
3271 maybe_multiple = 0;
3273 while (1)
3275 insn = NEXT_INSN (insn);
3276 if (insn == scan_start)
3277 break;
3278 if (insn == end)
3280 if (loop_top != 0)
3281 insn = loop_top;
3282 else
3283 break;
3284 if (insn == scan_start)
3285 break;
3288 if (GET_CODE (insn) == JUMP_INSN
3289 && GET_CODE (PATTERN (insn)) != RETURN
3290 && (! condjump_p (insn)
3291 || (JUMP_LABEL (insn) != 0
3292 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3293 || INSN_UID (insn) >= max_uid_for_loop
3294 || (INSN_LUID (JUMP_LABEL (insn))
3295 < INSN_LUID (insn))))))
3297 maybe_multiple = 1;
3298 break;
3303 /* Past a label or a jump, we get to insns for which we can't count
3304 on whether or how many times they will be executed during each
3305 iteration. */
3306 /* This code appears in three places, once in scan_loop, and twice
3307 in strength_reduce. */
3308 if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
3309 /* If we enter the loop in the middle, and scan around to the
3310 beginning, don't set not_every_iteration for that.
3311 This can be any kind of jump, since we want to know if insns
3312 will be executed if the loop is executed. */
3313 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
3314 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3315 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3316 not_every_iteration = 1;
3318 else if (GET_CODE (p) == NOTE)
3320 /* At the virtual top of a converted loop, insns are again known to
3321 be executed each iteration: logically, the loop begins here
3322 even though the exit code has been duplicated. */
3323 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3324 not_every_iteration = 0;
3325 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3326 loop_depth++;
3327 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3328 loop_depth--;
3331 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3332 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3333 or not an insn is known to be executed each iteration of the
3334 loop, whether or not any iterations are known to occur.
3336 Therefore, if we have just passed a label and have no more labels
3337 between here and the test insn of the loop, we know these insns
3338 will be executed each iteration. This can also happen if we
3339 have just passed a jump, for example, when there are nested loops. */
3341 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3342 && no_labels_between_p (p, loop_end))
3343 not_every_iteration = 0;
3346 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3347 Make a sanity check against n_times_set. */
3348 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3350 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3351 /* Above happens if register modified by subreg, etc. */
3352 /* Make sure it is not recognized as a basic induction var: */
3353 || n_times_set[bl->regno] != bl->biv_count
3354 /* If never incremented, it is invariant that we decided not to
3355 move. So leave it alone. */
3356 || ! bl->incremented)
3358 if (loop_dump_stream)
3359 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3360 bl->regno,
3361 (reg_iv_type[bl->regno] != BASIC_INDUCT
3362 ? "not induction variable"
3363 : (! bl->incremented ? "never incremented"
3364 : "count error")));
3366 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3367 *backbl = bl->next;
3369 else
3371 backbl = &bl->next;
3373 if (loop_dump_stream)
3374 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3378 /* Exit if there are no bivs. */
3379 if (! loop_iv_list)
3381 /* Can still unroll the loop anyway, but indicate that there is no
3382 strength reduction info available. */
3383 if (flag_unroll_loops)
3384 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3386 return;
3389 /* Find initial value for each biv by searching backwards from loop_start,
3390 halting at first label. Also record any test condition. */
3392 call_seen = 0;
3393 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3395 note_insn = p;
3397 if (GET_CODE (p) == CALL_INSN)
3398 call_seen = 1;
3400 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3401 || GET_CODE (p) == CALL_INSN)
3402 note_stores (PATTERN (p), record_initial);
3404 /* Record any test of a biv that branches around the loop if no store
3405 between it and the start of loop. We only care about tests with
3406 constants and registers and only certain of those. */
3407 if (GET_CODE (p) == JUMP_INSN
3408 && JUMP_LABEL (p) != 0
3409 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3410 && (test = get_condition_for_loop (p)) != 0
3411 && GET_CODE (XEXP (test, 0)) == REG
3412 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3413 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3414 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3415 && bl->init_insn == 0)
3417 /* If an NE test, we have an initial value! */
3418 if (GET_CODE (test) == NE)
3420 bl->init_insn = p;
3421 bl->init_set = gen_rtx (SET, VOIDmode,
3422 XEXP (test, 0), XEXP (test, 1));
3424 else
3425 bl->initial_test = test;
3429 /* Look at each biv and see if we can say anything better about its
3430 initial value from any initializing insns set up above. (This is done
3431 in two passes to avoid missing SETs in a PARALLEL.) */
3432 for (bl = loop_iv_list; bl; bl = bl->next)
3434 rtx src;
3436 if (! bl->init_insn)
3437 continue;
3439 src = SET_SRC (bl->init_set);
3441 if (loop_dump_stream)
3442 fprintf (loop_dump_stream,
3443 "Biv %d initialized at insn %d: initial value ",
3444 bl->regno, INSN_UID (bl->init_insn));
3446 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3447 || GET_MODE (src) == VOIDmode)
3448 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3450 bl->initial_value = src;
3452 if (loop_dump_stream)
3454 if (GET_CODE (src) == CONST_INT)
3455 fprintf (loop_dump_stream, "%d\n", INTVAL (src));
3456 else
3458 print_rtl (loop_dump_stream, src);
3459 fprintf (loop_dump_stream, "\n");
3463 else
3465 /* Biv initial value is not a simple move,
3466 so let it keep the initial value of "itself". */
3468 if (loop_dump_stream)
3469 fprintf (loop_dump_stream, "is complex\n");
3473 /* Search the loop for general induction variables. */
3475 /* A register is a giv if: it is only set once, it is a function of a
3476 biv and a constant (or invariant), and it is not a biv. */
3478 not_every_iteration = 0;
3479 loop_depth = 0;
3480 p = scan_start;
3481 while (1)
3483 p = NEXT_INSN (p);
3484 /* At end of a straight-in loop, we are done.
3485 At end of a loop entered at the bottom, scan the top. */
3486 if (p == scan_start)
3487 break;
3488 if (p == end)
3490 if (loop_top != 0)
3491 p = loop_top;
3492 else
3493 break;
3494 if (p == scan_start)
3495 break;
3498 /* Look for a general induction variable in a register. */
3499 if (GET_CODE (p) == INSN
3500 && (set = single_set (p))
3501 && GET_CODE (SET_DEST (set)) == REG
3502 && ! may_not_optimize[REGNO (SET_DEST (set))])
3504 rtx src_reg;
3505 rtx add_val;
3506 rtx mult_val;
3507 int benefit;
3508 rtx regnote = 0;
3510 dest_reg = SET_DEST (set);
3511 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3512 continue;
3514 if (/* SET_SRC is a giv. */
3515 ((benefit = general_induction_var (SET_SRC (set),
3516 &src_reg, &add_val,
3517 &mult_val))
3518 /* Equivalent expression is a giv. */
3519 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3520 && (benefit = general_induction_var (XEXP (regnote, 0),
3521 &src_reg,
3522 &add_val, &mult_val))))
3523 /* Don't try to handle any regs made by loop optimization.
3524 We have nothing on them in regno_first_uid, etc. */
3525 && REGNO (dest_reg) < max_reg_before_loop
3526 /* Don't recognize a BASIC_INDUCT_VAR here. */
3527 && dest_reg != src_reg
3528 /* This must be the only place where the register is set. */
3529 && (n_times_set[REGNO (dest_reg)] == 1
3530 /* or all sets must be consecutive and make a giv. */
3531 || (benefit = consec_sets_giv (benefit, p,
3532 src_reg, dest_reg,
3533 &add_val, &mult_val))))
3535 int count;
3536 struct induction *v
3537 = (struct induction *) alloca (sizeof (struct induction));
3538 rtx temp;
3540 /* If this is a library call, increase benefit. */
3541 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3542 benefit += libcall_benefit (p);
3544 /* Skip the consecutive insns, if there are any. */
3545 for (count = n_times_set[REGNO (dest_reg)] - 1;
3546 count > 0; count--)
3548 /* If first insn of libcall sequence, skip to end.
3549 Do this at start of loop, since INSN is guaranteed to
3550 be an insn here. */
3551 if (GET_CODE (p) != NOTE
3552 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3553 p = XEXP (temp, 0);
3555 do p = NEXT_INSN (p);
3556 while (GET_CODE (p) == NOTE);
3559 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3560 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3561 loop_end);
3566 #ifndef DONT_REDUCE_ADDR
3567 /* Look for givs which are memory addresses. */
3568 /* This resulted in worse code on a VAX 8600. I wonder if it
3569 still does. */
3570 if (GET_CODE (p) == INSN)
3571 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3572 loop_end);
3573 #endif
3575 /* Update the status of whether giv can derive other givs. This can
3576 change when we pass a label or an insn that updates a biv. */
3577 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3578 || GET_CODE (p) == CODE_LABEL)
3579 update_giv_derive (p);
3581 /* Past a label or a jump, we get to insns for which we can't count
3582 on whether or how many times they will be executed during each
3583 iteration. */
3584 /* This code appears in three places, once in scan_loop, and twice
3585 in strength_reduce. */
3586 if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
3587 /* If we enter the loop in the middle, and scan around
3588 to the beginning, don't set not_every_iteration for that.
3589 This can be any kind of jump, since we want to know if insns
3590 will be executed if the loop is executed. */
3591 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
3592 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3593 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3594 not_every_iteration = 1;
3596 else if (GET_CODE (p) == NOTE)
3598 /* At the virtual top of a converted loop, insns are again known to
3599 be executed each iteration: logically, the loop begins here
3600 even though the exit code has been duplicated. */
3601 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3602 not_every_iteration = 0;
3603 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3604 loop_depth++;
3605 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3606 loop_depth--;
3609 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3610 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3611 or not an insn is known to be executed each iteration of the
3612 loop, whether or not any iterations are known to occur.
3614 Therefore, if we have just passed a label and have no more labels
3615 between here and the test insn of the loop, we know these insns
3616 will be executed each iteration. */
3618 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3619 && no_labels_between_p (p, loop_end))
3620 not_every_iteration = 0;
3623 /* Try to calculate and save the number of loop iterations. This is
3624 set to zero if the actual number can not be calculated. This must
3625 be called after all giv's have been identified, since otherwise it may
3626 fail if the iteration variable is a giv. */
3628 loop_n_iterations = loop_iterations (loop_start, loop_end);
3630 /* Now for each giv for which we still don't know whether or not it is
3631 replaceable, check to see if it is replaceable because its final value
3632 can be calculated. This must be done after loop_iterations is called,
3633 so that final_giv_value will work correctly. */
3635 for (bl = loop_iv_list; bl; bl = bl->next)
3637 struct induction *v;
3639 for (v = bl->giv; v; v = v->next_iv)
3640 if (! v->replaceable && ! v->not_replaceable)
3641 check_final_value (v, loop_start, loop_end);
3644 /* Try to prove that the loop counter variable (if any) is always
3645 nonnegative; if so, record that fact with a REG_NONNEG note
3646 so that "decrement and branch until zero" insn can be used. */
3647 check_dbra_loop (loop_end, insn_count, loop_start);
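/* For instance (illustrative), once I is known to stay nonnegative,
   an endtest of the form `if (--i >= 0) goto top' can be emitted as a
   single decrement-and-branch insn such as the m68k dbra.  */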
3649 /* Create reg_map to hold substitutions for replaceable giv regs. */
3650 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3651 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3653 /* Examine each iv class for feasibility of strength reduction/induction
3654 variable elimination. */
3656 for (bl = loop_iv_list; bl; bl = bl->next)
3658 struct induction *v;
3659 int benefit;
3660 int all_reduced;
3661 rtx final_value = 0;
3663 /* Test whether it will be possible to eliminate this biv
3664 provided all givs are reduced. This is possible if either
3665 the reg is not used outside the loop, or we can compute
3666 what its final value will be.
3668 For architectures with a decrement_and_branch_until_zero insn,
3669 don't do this if we put a REG_NONNEG note on the endtest for
3670 this biv. */
3672 /* Compare against bl->init_insn rather than loop_start.
3673 We aren't concerned with any uses of the biv between
3674 init_insn and loop_start since these won't be affected
3675 by the value of the biv elsewhere in the function, so
3676 long as init_insn doesn't use the biv itself.
3677 March 14, 1989 -- self@bayes.arc.nasa.gov */
3679 if ((uid_luid[regno_last_uid[bl->regno]] < INSN_LUID (loop_end)
3680 && bl->init_insn
3681 && INSN_UID (bl->init_insn) < max_uid_for_loop
3682 && uid_luid[regno_first_uid[bl->regno]] >= INSN_LUID (bl->init_insn)
3683 #ifdef HAVE_decrement_and_branch_until_zero
3684 && ! bl->nonneg
3685 #endif
3686 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3687 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3688 #ifdef HAVE_decrement_and_branch_until_zero
3689 && ! bl->nonneg
3690 #endif
3692 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3693 threshold, insn_count);
3694 else
3696 if (loop_dump_stream)
3698 fprintf (loop_dump_stream,
3699 "Cannot eliminate biv %d.\n",
3700 bl->regno);
3701 fprintf (loop_dump_stream,
3702 "First use: insn %d, last use: insn %d.\n",
3703 regno_first_uid[bl->regno],
3704 regno_last_uid[bl->regno]);
3708 /* Combine all giv's for this iv_class. */
3709 combine_givs (bl);
3711 /* This will be true at the end, if all givs which depend on this
3712 biv have been strength reduced.
3713 We can't (currently) eliminate the biv unless this is so. */
3714 all_reduced = 1;
3716 /* Check each giv in this class to see if we will benefit by reducing
3717 it. Skip giv's combined with others. */
3718 for (v = bl->giv; v; v = v->next_iv)
3720 struct induction *tv;
3722 if (v->ignore || v->same)
3723 continue;
3725 benefit = v->benefit;
3727 /* Reduce benefit if not replaceable, since we will insert
3728 a move-insn to replace the insn that calculates this giv.
3729 Don't do this unless the giv is a user variable, since it
3730 will often be marked non-replaceable because of the duplication
3731 of the exit code outside the loop. In such a case, the copies
3732 we insert are dead and will be deleted. So they don't have
3733 a cost. Similar situations exist. */
3734 /* ??? The new final_[bg]iv_value code does a much better job
3735 of finding replaceable giv's, and hence this code may no longer
3736 be necessary. */
3737 if (! v->replaceable && ! bl->eliminable
3738 && REG_USERVAR_P (v->dest_reg))
3739 benefit -= copy_cost;
3741 /* Decrease the benefit to count the add-insns that we will
3742 insert to increment the reduced reg for the giv. */
3743 benefit -= add_cost * bl->biv_count;
3745 /* Decide whether to strength-reduce this giv or to leave the code
3746 unchanged (recompute it from the biv each time it is used).
3747 This decision can be made independently for each giv. */
3749 /* ??? Perhaps attempt to guess whether autoincrement will handle
3750 some of the new add insns; if so, can increase BENEFIT
3751 (undo the subtraction of add_cost that was done above). */
3753 /* If an insn is not to be strength reduced, then set its ignore
3754 flag, and clear all_reduced. */
3756 /* A giv that depends on a reversed biv must be reduced if it is
3757 used after the loop exit, otherwise, it would have the wrong
3758 value after the loop exit. To make it simple, just reduce all
3759 of such giv's whether or not we know they are used after the loop
3760 exit. */
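/* For example (illustrative), with a THRESHOLD of 2, a giv with benefit 4
   whose lifetime spans 3 insns scores 2*3*4 == 24, and so is reduced
   only if the loop contains at most 24 insns.  */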
3762 if (v->lifetime * threshold * benefit < insn_count
3763 && ! bl->reversed)
3765 if (loop_dump_stream)
3766 fprintf (loop_dump_stream,
3767 "giv of insn %d not worth while, %d vs %d.\n",
3768 INSN_UID (v->insn),
3769 v->lifetime * threshold * benefit, insn_count);
3770 v->ignore = 1;
3771 all_reduced = 0;
3773 else
3775 /* Check that we can increment the reduced giv without a
3776 multiply insn. If not, reject it. */
3778 for (tv = bl->biv; tv; tv = tv->next_iv)
3779 if (tv->mult_val == const1_rtx
3780 && ! product_cheap_p (tv->add_val, v->mult_val))
3782 if (loop_dump_stream)
3783 fprintf (loop_dump_stream,
3784 "giv of insn %d: would need a multiply.\n",
3785 INSN_UID (v->insn));
3786 v->ignore = 1;
3787 all_reduced = 0;
3788 break;
3793 /* Reduce each giv that we decided to reduce. */
3795 for (v = bl->giv; v; v = v->next_iv)
3797 struct induction *tv;
3798 if (! v->ignore && v->same == 0)
3800 v->new_reg = gen_reg_rtx (v->mode);
3802 /* For each place where the biv is incremented,
3803 add an insn to increment the new, reduced reg for the giv. */
3804 for (tv = bl->biv; tv; tv = tv->next_iv)
3806 if (tv->mult_val == const1_rtx)
3807 emit_iv_add_mult (tv->add_val, v->mult_val,
3808 v->new_reg, v->new_reg, tv->insn);
3809 else /* tv->mult_val == const0_rtx */
3810 /* A multiply is acceptable here
3811 since this is presumed to be seldom executed. */
3812 emit_iv_add_mult (tv->add_val, v->mult_val,
3813 v->add_val, v->new_reg, tv->insn);
3816 /* Add code at loop start to initialize giv's reduced reg. */
3818 emit_iv_add_mult (bl->initial_value, v->mult_val,
3819 v->add_val, v->new_reg, loop_start);
3823 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
3824 as not reduced.
3826 For each giv register that can be reduced now: if replaceable,
3827 substitute reduced reg wherever the old giv occurs;
3828 else add new move insn "giv_reg = reduced_reg".
3830 Also check for givs whose first use is their definition and whose
3831 last use is the definition of another giv. If so, it is likely
3832 dead and should not be used to eliminate a biv. */
3833 for (v = bl->giv; v; v = v->next_iv)
3835 if (v->same && v->same->ignore)
3836 v->ignore = 1;
3838 if (v->ignore)
3839 continue;
3841 if (v->giv_type == DEST_REG
3842 && regno_first_uid[REGNO (v->dest_reg)] == INSN_UID (v->insn))
3844 struct induction *v1;
3846 for (v1 = bl->giv; v1; v1 = v1->next_iv)
3847 if (regno_last_uid[REGNO (v->dest_reg)] == INSN_UID (v1->insn))
3848 v->maybe_dead = 1;
3851 /* Update expression if this was combined, in case other giv was
3852 replaced. */
3853 if (v->same)
3854 v->new_reg = replace_rtx (v->new_reg,
3855 v->same->dest_reg, v->same->new_reg);
3857 if (v->giv_type == DEST_ADDR)
3858 /* Store reduced reg as the address in the memref where we found
3859 this giv. */
3860 validate_change (v->insn, v->location, v->new_reg, 0);
3861 else if (v->replaceable)
3863 reg_map[REGNO (v->dest_reg)] = v->new_reg;
3865 #if 0
3866 /* I can no longer duplicate the original problem. Perhaps
3867 this is unnecessary now? */
3869 /* Replaceable; it isn't strictly necessary to delete the old
3870 insn and emit a new one, because v->dest_reg is now dead.
3872 However, especially when unrolling loops, the special
3873 handling for (set REG0 REG1) in the second cse pass may
3874 make v->dest_reg live again. To avoid this problem, emit
3875 an insn to set the original giv reg from the reduced giv.
3876 We can not delete the original insn, since it may be part
3877 of a LIBCALL, and the code in flow that eliminates dead
3878 libcalls will fail if it is deleted. */
3879 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
3880 v->insn);
3881 #endif
3883 else
3885 /* Not replaceable; emit an insn to set the original giv reg from
3886 the reduced giv, same as above. */
3887 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
3888 v->insn);
3891 /* When a loop is reversed, givs which depend on the reversed
3892 biv, and which are live outside the loop, must be set to their
3893 correct final value. This insn is only needed if the giv is
3894 not replaceable. The correct final value is the same as the
3895 value that the giv starts the reversed loop with. */
3896 if (bl->reversed && ! v->replaceable)
3897 emit_iv_add_mult (bl->initial_value, v->mult_val,
3898 v->add_val, v->dest_reg, end_insert_before);
3899 else if (v->final_value)
3901 rtx insert_before;
3903 /* If the loop has multiple exits, emit the insn before the
3904 loop to ensure that it will always be executed no matter
3905 how the loop exits. Otherwise, emit the insn after the loop,
3906 since this is slightly more efficient. */
3907 if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
3908 insert_before = loop_start;
3909 else
3910 insert_before = end_insert_before;
3911 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
3912 insert_before);
3914 #if 0
3915 /* If the insn to set the final value of the giv was emitted
3916 before the loop, then we must delete the insn inside the loop
3917 that sets it. If this is a LIBCALL, then we must delete
3918 every insn in the libcall. Note, however, that
3919 final_giv_value will only succeed when there are multiple
3920 exits if the giv is dead at each exit, hence it does not
3921 matter that the original insn remains because it is dead
3922 anyway. */
3923 /* Delete the insn inside the loop that sets the giv since
3924 the giv is now set before (or after) the loop. */
3925 delete_insn (v->insn);
3926 #endif
3929 if (loop_dump_stream)
3931 fprintf (loop_dump_stream, "giv at %d reduced to ",
3932 INSN_UID (v->insn));
3933 print_rtl (loop_dump_stream, v->new_reg);
3934 fprintf (loop_dump_stream, "\n");
3938 /* All the givs based on the biv bl have been reduced if they
3939 merit it. */
3941 /* For each giv not marked as maybe dead that has been combined with a
3942 second giv, clear any "maybe dead" mark on that second giv.
3943 v->new_reg will either be or refer to the register of the giv it
3944 combined with.
3946 Doing this clearing avoids problems in biv elimination where a
3947 giv's new_reg is a complex value that can't be put in the insn but
3948 the giv combined with (with a reg as new_reg) is marked maybe_dead.
3949 Since the register will be used in either case, we'd prefer it be
3950 used from the simpler giv. */
3952 for (v = bl->giv; v; v = v->next_iv)
3953 if (! v->maybe_dead && v->same)
3954 v->same->maybe_dead = 0;
3956 /* Try to eliminate the biv, if it is a candidate.
3957 This won't work if ! all_reduced,
3958 since the givs we planned to use might not have been reduced.
3960 We have to be careful that we didn't initially think we could eliminate
3961 this biv because of a giv that we now think may be dead and shouldn't
3962 be used as a biv replacement.
3964 Also, there is the possibility that we may have a giv that looks
3965 like it can be used to eliminate a biv, but the resulting insn
3966 isn't valid. This can happen, for example, on the 88k, where a
3967 JUMP_INSN can compare a register only with zero. Attempts to
3968 replace it with a compare with a constant will fail.
3970 Note that in cases where this call fails, we may have replaced some
3971 of the occurrences of the biv with a giv, but no harm was done in
3972 doing so in the rare cases where it can occur. */
3974 if (all_reduced == 1 && bl->eliminable
3975 && maybe_eliminate_biv (bl, loop_start, end, 1,
3976 threshold, insn_count))
3979 /* ?? If we created a new test to bypass the loop entirely,
3980 or otherwise drop straight in, based on this test, then
3981 we might want to rewrite it also. This way some later
3982 pass has more hope of removing the initialization of this
3983 biv entirely. */
3985 /* If final_value != 0, then the biv may be used after loop end
3986 and we must emit an insn to set it just in case.
3988 Reversed bivs already have an insn after the loop setting their
3989 value, so we don't need another one. We can't calculate the
3990 proper final value for such a biv here anyway. */
3991 if (final_value != 0 && ! bl->reversed)
3993 rtx insert_before;
3995 /* If the loop has multiple exits, emit the insn before the
3996 loop to ensure that it will always be executed no matter
3997 how the loop exits. Otherwise, emit the insn after the
3998 loop, since this is slightly more efficient. */
3999 if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
4000 insert_before = loop_start;
4001 else
4002 insert_before = end_insert_before;
4004 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4005 insert_before);
4008 #if 0
4009 /* Delete all of the instructions inside the loop which set
4010 the biv, as they are all dead. It is safe to delete them,
4011 because an insn setting a biv will never be part of a libcall. */
4012 /* However, deleting them will invalidate the regno_last_uid info,
4013 so keeping them around is more convenient. Final_biv_value
4014 will only succeed when there are multiple exits if the biv
4015 is dead at each exit, hence it does not matter that the original
4016 insn remains, because it is dead anyway. */
4017 for (v = bl->biv; v; v = v->next_iv)
4018 delete_insn (v->insn);
4019 #endif
4021 if (loop_dump_stream)
4022 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4023 bl->regno);
4027 /* Go through all the instructions in the loop, making all the
4028 register substitutions scheduled in REG_MAP. */
4030 for (p = loop_start; p != end; p = NEXT_INSN (p))
4031 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4032 || GET_CODE (p) == CALL_INSN)
4034 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4035 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4036 INSN_CODE (p) = -1;
4039 /* Unroll loops from within strength reduction so that we can use the
4040 induction variable information that strength_reduce has already
4041 collected. */
4043 if (flag_unroll_loops)
4044 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4046 if (loop_dump_stream)
4047 fprintf (loop_dump_stream, "\n");
4050 /* Return 1 if X is a valid source for an initial value (or as the value
4051 being compared against in an initial test).
4053 X must be either a register or constant and must not be clobbered between
4054 the current insn and the start of the loop.
4056 INSN is the insn containing X. */
4058 static int
4059 valid_initial_value_p (x, insn, call_seen, loop_start)
4060 rtx x;
4061 rtx insn;
4062 int call_seen;
4063 rtx loop_start;
4065 if (CONSTANT_P (x))
4066 return 1;
4068 /* Only consider pseudos we know about, initialized in insns whose luids
4069 we know. */
4070 if (GET_CODE (x) != REG
4071 || REGNO (x) >= max_reg_before_loop)
4072 return 0;
4074 /* Don't use a call-clobbered register across a call which clobbers it. On
4075 some machines, don't use any hard registers at all. */
4076 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4077 #ifndef SMALL_REGISTER_CLASSES
4078 && call_used_regs[REGNO (x)] && call_seen
4079 #endif
4081 return 0;
4083 /* Don't use registers that have been clobbered before the start of the
4084 loop. */
4085 if (reg_set_between_p (x, insn, loop_start))
4086 return 0;
4088 return 1;
4091 /* Scan X for memory refs and check each memory address
4092 as a possible giv. INSN is the insn whose pattern X comes from.
4093 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4094 every loop iteration. */
4096 static void
4097 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4098 rtx x;
4099 rtx insn;
4100 int not_every_iteration;
4101 rtx loop_start, loop_end;
4103 register int i, j;
4104 register enum rtx_code code;
4105 register char *fmt;
4107 if (x == 0)
4108 return;
4110 code = GET_CODE (x);
4111 switch (code)
4113 case REG:
4114 case CONST_INT:
4115 case CONST:
4116 case CONST_DOUBLE:
4117 case SYMBOL_REF:
4118 case LABEL_REF:
4119 case PC:
4120 case CC0:
4121 case ADDR_VEC:
4122 case ADDR_DIFF_VEC:
4123 case USE:
4124 case CLOBBER:
4125 return;
4127 case MEM:
4129 rtx src_reg;
4130 rtx add_val;
4131 rtx mult_val;
4132 int benefit;
4134 benefit = general_induction_var (XEXP (x, 0),
4135 &src_reg, &add_val, &mult_val);
4137 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4138 Such a giv isn't useful. */
4139 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4141 /* Found one; record it. */
4142 struct induction *v
4143 = (struct induction *) oballoc (sizeof (struct induction));
4145 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4146 add_val, benefit, DEST_ADDR, not_every_iteration,
4147 &XEXP (x, 0), loop_start, loop_end);
4149 v->mem_mode = GET_MODE (x);
4151 return;
4155 /* Recursively scan the subexpressions for other mem refs. */
4157 fmt = GET_RTX_FORMAT (code);
4158 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4159 if (fmt[i] == 'e')
4160 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4161 loop_end);
4162 else if (fmt[i] == 'E')
4163 for (j = 0; j < XVECLEN (x, i); j++)
4164 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4165 loop_start, loop_end);
4168 /* Fill in the data about one biv update.
4169 V is the `struct induction' in which we record the biv. (It is
4170 allocated by the caller, with alloca.)
4171 INSN is the insn that sets it.
4172 DEST_REG is the biv's reg.
4174 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4175 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4176 being set to INC_VAL.
4178 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4179 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4180 can be executed more than once per iteration. If MAYBE_MULTIPLE
4181 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4182 executed exactly once per iteration. */
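/* For example, the biv update "i = i + 4" is recorded with MULT_VAL ==
   const1_rtx and INC_VAL == (const_int 4), while an assignment "i = j"
   of a loop invariant J is recorded with MULT_VAL == const0_rtx and
   INC_VAL == the rtx for J.  */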
4184 static void
4185 record_biv (v, insn, dest_reg, inc_val, mult_val,
4186 not_every_iteration, maybe_multiple)
4187 struct induction *v;
4188 rtx insn;
4189 rtx dest_reg;
4190 rtx inc_val;
4191 rtx mult_val;
4192 int not_every_iteration;
4193 int maybe_multiple;
4195 struct iv_class *bl;
4197 v->insn = insn;
4198 v->src_reg = dest_reg;
4199 v->dest_reg = dest_reg;
4200 v->mult_val = mult_val;
4201 v->add_val = inc_val;
4202 v->mode = GET_MODE (dest_reg);
4203 v->always_computable = ! not_every_iteration;
4204 v->maybe_multiple = maybe_multiple;
4206 /* Add this to the reg's iv_class, creating a class
4207 if this is the first incrementation of the reg. */
4209 bl = reg_biv_class[REGNO (dest_reg)];
4210 if (bl == 0)
4212 /* Create and initialize new iv_class. */
4214 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4216 bl->regno = REGNO (dest_reg);
4217 bl->biv = 0;
4218 bl->giv = 0;
4219 bl->biv_count = 0;
4220 bl->giv_count = 0;
4222 /* Set initial value to the reg itself. */
4223 bl->initial_value = dest_reg;
4224 /* We haven't seen the initializing insn yet. */
4225 bl->init_insn = 0;
4226 bl->init_set = 0;
4227 bl->initial_test = 0;
4228 bl->incremented = 0;
4229 bl->eliminable = 0;
4230 bl->nonneg = 0;
4231 bl->reversed = 0;
4232 bl->total_benefit = 0;
4234 /* Add this class to loop_iv_list. */
4235 bl->next = loop_iv_list;
4236 loop_iv_list = bl;
4238 /* Put it in the array of biv register classes. */
4239 reg_biv_class[REGNO (dest_reg)] = bl;
4242 /* Update IV_CLASS entry for this biv. */
4243 v->next_iv = bl->biv;
4244 bl->biv = v;
4245 bl->biv_count++;
4246 if (mult_val == const1_rtx)
4247 bl->incremented = 1;
4249 if (loop_dump_stream)
4251 fprintf (loop_dump_stream,
4252 "Insn %d: possible biv, reg %d,",
4253 INSN_UID (insn), REGNO (dest_reg));
4254 if (GET_CODE (inc_val) == CONST_INT)
4255 fprintf (loop_dump_stream, " const = %d\n",
4256 INTVAL (inc_val));
4257 else
4259 fprintf (loop_dump_stream, " const = ");
4260 print_rtl (loop_dump_stream, inc_val);
4261 fprintf (loop_dump_stream, "\n");
4266 /* Fill in the data about one giv.
4267 V is the `struct induction' in which we record the giv. (It is
4268 allocated by the caller, with alloca.)
4269 INSN is the insn that sets it.
4270 BENEFIT estimates the savings from deleting this insn.
4271 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4272 into a register or is used as a memory address.
4274 SRC_REG is the biv reg which the giv is computed from.
4275 DEST_REG is the giv's reg (if the giv is stored in a reg).
4276 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4277 LOCATION points to the place where this giv's value appears in INSN. */
4279 static void
4280 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4281 type, not_every_iteration, location, loop_start, loop_end)
4282 struct induction *v;
4283 rtx insn;
4284 rtx src_reg;
4285 rtx dest_reg;
4286 rtx mult_val, add_val;
4287 int benefit;
4288 enum g_types type;
4289 int not_every_iteration;
4290 rtx *location;
4291 rtx loop_start, loop_end;
4293 struct induction *b;
4294 struct iv_class *bl;
4295 rtx set = single_set (insn);
4296 rtx p;
4298 v->insn = insn;
4299 v->src_reg = src_reg;
4300 v->giv_type = type;
4301 v->dest_reg = dest_reg;
4302 v->mult_val = mult_val;
4303 v->add_val = add_val;
4304 v->benefit = benefit;
4305 v->location = location;
4306 v->cant_derive = 0;
4307 v->combined_with = 0;
4308 v->maybe_multiple = 0;
4309 v->maybe_dead = 0;
4310 v->derive_adjustment = 0;
4311 v->same = 0;
4312 v->ignore = 0;
4313 v->new_reg = 0;
4314 v->final_value = 0;
4315 v->same_insn = 0;
4317 /* The v->always_computable field is used in update_giv_derive, to
4318 determine whether a giv can be used to derive another giv. For a
4319 DEST_REG giv, INSN computes a new value for the giv, so its value
4320 isn't computable if INSN isn't executed every iteration.
4321 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4322 it does not compute a new value. Hence the value is always computable
4323 regardless of whether INSN is executed each iteration. */
4325 if (type == DEST_ADDR)
4326 v->always_computable = 1;
4327 else
4328 v->always_computable = ! not_every_iteration;
4330 if (type == DEST_ADDR)
4332 v->mode = GET_MODE (*location);
4333 v->lifetime = 1;
4334 v->times_used = 1;
4336 else /* type == DEST_REG */
4338 v->mode = GET_MODE (SET_DEST (set));
4340 v->lifetime = (uid_luid[regno_last_uid[REGNO (dest_reg)]]
4341 - uid_luid[regno_first_uid[REGNO (dest_reg)]]);
4343 v->times_used = n_times_used[REGNO (dest_reg)];
4345 /* If the lifetime is zero, it means that this register is
4346 really a dead store. So mark this as a giv that can be
4347 ignored. This will not prevent the biv from being eliminated. */
4348 if (v->lifetime == 0)
4349 v->ignore = 1;
4351 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4352 reg_iv_info[REGNO (dest_reg)] = v;
4355 /* Add the giv to the class of givs computed from one biv. */
4357 bl = reg_biv_class[REGNO (src_reg)];
4358 if (bl)
4360 v->next_iv = bl->giv;
4361 bl->giv = v;
4362 /* Don't count DEST_ADDR. This is supposed to count the number of
4363 insns that calculate givs. */
4364 if (type == DEST_REG)
4365 bl->giv_count++;
4366 bl->total_benefit += benefit;
4368 else
4369 /* Fatal error, biv missing for this giv? */
4370 abort ();
4372 if (type == DEST_ADDR)
4373 v->replaceable = 1;
4374 else
4376 /* The giv can be replaced outright by the reduced register only if all
4377 of the following conditions are true:
4378 - the insn that sets the giv is always executed on any iteration
4379 on which the giv is used at all
4380 (there are two ways to deduce this:
4381 either the insn is executed on every iteration,
4382 or all uses follow that insn in the same basic block),
4383 - the giv is not used outside the loop
4384 - no assignments to the biv occur during the giv's lifetime. */
4386 if (regno_first_uid[REGNO (dest_reg)] == INSN_UID (insn)
4387 /* Previous line always fails if INSN was moved by loop opt. */
4388 && uid_luid[regno_last_uid[REGNO (dest_reg)]] < INSN_LUID (loop_end)
4389 && (! not_every_iteration
4390 || last_use_this_basic_block (dest_reg, insn)))
4392 /* Now check that there are no assignments to the biv within the
4393 giv's lifetime. This requires two separate checks. */
4395 /* Check each biv update, and fail if any are between the first
4396 and last use of the giv.
4398 If this loop contains an inner loop that was unrolled, then
4399 the insn modifying the biv may have been emitted by the loop
4400 unrolling code, and hence does not have a valid luid. Just
4401 mark the giv as not replaceable in this case. The biv is not
4402 very useful anyway, because it is used in two different loops.
4403 It is very unlikely that we would be able to optimize the giv
4404 using this biv. */
4406 v->replaceable = 1;
4407 for (b = bl->biv; b; b = b->next_iv)
4409 if (INSN_UID (b->insn) >= max_uid_for_loop
4410 || ((uid_luid[INSN_UID (b->insn)]
4411 >= uid_luid[regno_first_uid[REGNO (dest_reg)]])
4412 && (uid_luid[INSN_UID (b->insn)]
4413 <= uid_luid[regno_last_uid[REGNO (dest_reg)]])))
4415 v->replaceable = 0;
4416 v->not_replaceable = 1;
4417 break;
4421 /* Check each insn between the first and last use of the giv,
4422 and fail if any of them are branches that jump to a named label
4423 outside this range, but still inside the loop. This catches
4424 cases of spaghetti code where the execution order of insns
4425 is not linear, and hence the above test fails. For example,
4426 in the following code, j is not replaceable:
4427 for (i = 0; i < 100; ) {
4428 L0: j = 4*i; goto L1;
4429 L2: k = j; goto L3;
4430 L1: i++; goto L2;
4431 L3: ; }
4432 printf ("k = %d\n", k); }
4433 This test is conservative, but it succeeds rarely enough
4434 that it isn't a problem. See also check_final_value below. */
4436 if (v->replaceable)
4437 for (p = insn;
4438 INSN_UID (p) >= max_uid_for_loop
4439 || INSN_LUID (p) < uid_luid[regno_last_uid[REGNO (dest_reg)]];
4440 p = NEXT_INSN (p))
4442 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4443 && LABEL_NAME (JUMP_LABEL (p))
4444 && ((INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start)
4445 && (INSN_LUID (JUMP_LABEL (p))
4446 < uid_luid[regno_first_uid[REGNO (dest_reg)]]))
4447 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end)
4448 && (INSN_LUID (JUMP_LABEL (p))
4449 > uid_luid[regno_last_uid[REGNO (dest_reg)]]))))
4451 v->replaceable = 0;
4452 v->not_replaceable = 1;
4454 if (loop_dump_stream)
4455 fprintf (loop_dump_stream,
4456 "Found branch outside giv lifetime.\n");
4458 break;
4462 else
4464 /* May still be replaceable; we don't have enough info here to
4465 decide. */
4466 v->replaceable = 0;
4467 v->not_replaceable = 0;
4471 if (loop_dump_stream)
4473 if (type == DEST_REG)
4474 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4475 INSN_UID (insn), REGNO (dest_reg));
4476 else
4477 fprintf (loop_dump_stream, "Insn %d: dest address",
4478 INSN_UID (insn));
4480 fprintf (loop_dump_stream, " src reg %d benefit %d",
4481 REGNO (src_reg), v->benefit);
4482 fprintf (loop_dump_stream, " used %d lifetime %d",
4483 v->times_used, v->lifetime);
4485 if (v->replaceable)
4486 fprintf (loop_dump_stream, " replaceable");
4488 if (GET_CODE (mult_val) == CONST_INT)
4489 fprintf (loop_dump_stream, " mult %d",
4490 INTVAL (mult_val));
4491 else
4493 fprintf (loop_dump_stream, " mult ");
4494 print_rtl (loop_dump_stream, mult_val);
4497 if (GET_CODE (add_val) == CONST_INT)
4498 fprintf (loop_dump_stream, " add %d",
4499 INTVAL (add_val));
4500 else
4502 fprintf (loop_dump_stream, " add ");
4503 print_rtl (loop_dump_stream, add_val);
4507 if (loop_dump_stream)
4508 fprintf (loop_dump_stream, "\n");
4513 /* All this does is determine whether a giv can be made replaceable because
4514 its final value can be calculated. This code can not be part of record_giv
4515 above, because final_giv_value requires that the number of loop iterations
4516 be known, and that can not be accurately calculated until after all givs
4517 have been identified. */
4519 static void
4520 check_final_value (v, loop_start, loop_end)
4521 struct induction *v;
4522 rtx loop_start, loop_end;
4524 struct iv_class *bl;
4525 rtx final_value = 0;
4527 bl = reg_biv_class[REGNO (v->src_reg)];
4529 /* DEST_ADDR givs will never reach here, because they are always marked
4530 replaceable above in record_giv. */
4532 /* The giv can be replaced outright by the reduced register only if all
4533 of the following conditions are true:
4534 - the insn that sets the giv is always executed on any iteration
4535 on which the giv is used at all
4536 (there are two ways to deduce this:
4537 either the insn is executed on every iteration,
4538 or all uses follow that insn in the same basic block),
4539 - its final value can be calculated (this condition is different
4540 than the one above in record_giv)
4541 - no assignments to the biv occur during the giv's lifetime. */
4543 #if 0
4544 /* This is only called now when replaceable is known to be false. */
4545 /* Clear replaceable, so that it won't confuse final_giv_value. */
4546 v->replaceable = 0;
4547 #endif
4549 if ((final_value = final_giv_value (v, loop_start, loop_end))
4550 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4552 int biv_increment_seen = 0;
4553 rtx p = v->insn;
4554 rtx last_giv_use;
4556 v->replaceable = 1;
4558 /* When trying to determine whether or not a biv increment occurs
4559 during the lifetime of the giv, we can ignore uses of the variable
4560 outside the loop because final_value is true. Hence we can not
4561 use regno_last_uid and regno_first_uid as above in record_giv. */
4563 /* Search the loop to determine whether any assignments to the
4564 biv occur during the giv's lifetime. Start with the insn
4565 that sets the giv, and search around the loop until we come
4566 back to that insn again.
4568 Also fail if there is a jump within the giv's lifetime that jumps
4569 to somewhere outside the lifetime but still within the loop. This
4570 catches spaghetti code where the execution order is not linear, and
4571 hence the above test fails. Here we assume that the giv lifetime
4572 does not extend from one iteration of the loop to the next, so as
4573 to make the test easier. Since the lifetime isn't known yet,
4574 this requires two loops. See also record_giv above. */
4576 last_giv_use = v->insn;
4578 while (1)
4580 p = NEXT_INSN (p);
4581 if (p == loop_end)
4582 p = NEXT_INSN (loop_start);
4583 if (p == v->insn)
4584 break;
4586 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4587 || GET_CODE (p) == CALL_INSN)
4589 if (biv_increment_seen)
4591 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4593 v->replaceable = 0;
4594 v->not_replaceable = 1;
4595 break;
4598 else if (GET_CODE (PATTERN (p)) == SET
4599 && SET_DEST (PATTERN (p)) == v->src_reg)
4600 biv_increment_seen = 1;
4601 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4602 last_giv_use = p;
4606 /* Now that the lifetime of the giv is known, check for branches
4607 from within the lifetime to outside the lifetime if it is still
4608 replaceable. */
4610 if (v->replaceable)
4612 p = v->insn;
4613 while (1)
4615 p = NEXT_INSN (p);
4616 if (p == loop_end)
4617 p = NEXT_INSN (loop_start);
4618 if (p == last_giv_use)
4619 break;
4621 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4622 && LABEL_NAME (JUMP_LABEL (p))
4623 && ((INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
4624 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
4625 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
4626 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
4628 v->replaceable = 0;
4629 v->not_replaceable = 1;
4631 if (loop_dump_stream)
4632 fprintf (loop_dump_stream,
4633 "Found branch outside giv lifetime.\n");
4635 break;
4640 /* If it is replaceable, then save the final value. */
4641 if (v->replaceable)
4642 v->final_value = final_value;
4645 if (loop_dump_stream && v->replaceable)
4646 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
4647 INSN_UID (v->insn), REGNO (v->dest_reg));
4650 /* Update the status of whether a giv can derive other givs.
4652 We need to do something special if there is or may be an update to the biv
4653 between the time the giv is defined and the time it is used to derive
4654 another giv.
4656 In addition, a giv that is only conditionally set is not allowed to
4657 derive another giv once a label has been passed.
4659 The cases we look at are when a label or an update to a biv is passed. */
4661 static void
4662 update_giv_derive (p)
4663 rtx p;
4665 struct iv_class *bl;
4666 struct induction *biv, *giv;
4667 rtx tem;
4668 int dummy;
4670 /* Search all IV classes, then all bivs, and finally all givs.
4672 There are three cases we are concerned with. First we have the situation
4673 of a giv that is only updated conditionally. In that case, it may not
4674 derive any givs after a label is passed.
4676 The second case is when a biv update occurs, or may occur, after the
4677 definition of a giv. For certain biv updates (see below) that are
4678 known to occur between the giv definition and use, we can adjust the
4679 giv definition. For others, or when the biv update is conditional,
4680 we must prevent the giv from deriving any other givs. There are two
4681 sub-cases within this case.
4683 If this is a label, we are concerned with any biv update that is done
4684 conditionally, since it may be done after the giv is defined followed by
4685 a branch here (actually, we need to pass both a jump and a label, but
4686 this extra tracking doesn't seem worth it).
4688 If this is a jump, we are concerned about any biv update that may be
4689 executed multiple times. We are actually only concerned about
4690 backward jumps, but it is probably not worth performing the test
4691 on the jump again here.
4693 If this is a biv update, we must adjust the giv status to show that a
4694 subsequent biv update was performed. If this adjustment cannot be done,
4695 the giv cannot derive further givs. */
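/* For example (illustrative), if giv G was computed as 3*B + 5 and the
   biv B is then incremented by 4, the expression 3*B + 5 now exceeds the
   value held in G by 3*4 == 12; that product is accumulated in
   derive_adjustment and subtracted whenever G is later used to derive
   another giv.  */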
4697 for (bl = loop_iv_list; bl; bl = bl->next)
4698 for (biv = bl->biv; biv; biv = biv->next_iv)
4699 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
4700 || biv->insn == p)
4702 for (giv = bl->giv; giv; giv = giv->next_iv)
4704 /* If cant_derive is already true, there is no point in
4705 checking all of these conditions again. */
4706 if (giv->cant_derive)
4707 continue;
4709 /* If this giv is conditionally set and we have passed a label,
4710 it cannot derive anything. */
4711 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
4712 giv->cant_derive = 1;
4714 /* Skip givs that have mult_val == 0, since
4715 they are really invariants. Also skip those that are
4716 replaceable, since we know their lifetime doesn't contain
4717 any biv update. */
4718 else if (giv->mult_val == const0_rtx || giv->replaceable)
4719 continue;
4721 /* The only way we can allow this giv to derive another
4722 is if this is a biv increment and we can form the product
4723 of biv->add_val and giv->mult_val. In this case, we will
4724 be able to compute a compensation. */
4725 else if (biv->insn == p)
4727 tem = 0;
4729 if (biv->mult_val == const1_rtx)
4730 tem = simplify_giv_expr (gen_rtx (MULT, giv->mode,
4731 biv->add_val,
4732 giv->mult_val),
4733 &dummy);
4735 if (tem && giv->derive_adjustment)
4736 tem = simplify_giv_expr (gen_rtx (PLUS, giv->mode, tem,
4737 giv->derive_adjustment),
4738 &dummy);
4739 if (tem)
4740 giv->derive_adjustment = tem;
4741 else
4742 giv->cant_derive = 1;
4744 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
4745 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
4746 giv->cant_derive = 1;
4751 /* Check whether an insn is an increment legitimate for a basic induction var.
4752 X is the source of insn P, or a part of it.
4753 MODE is the mode in which X should be interpreted.
4755 DEST_REG is the putative biv, also the destination of the insn.
4756 We accept patterns of these forms:
4757 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
4758 REG = INVARIANT + REG
4760 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
4761 and store the additive term into *INC_VAL.
4763 If X is an assignment of an invariant into DEST_REG, we set
4764 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
4766 We also want to detect a BIV when it corresponds to a variable
4767 whose mode was promoted via PROMOTED_MODE. In that case, an increment
4768 of the variable may be a PLUS that adds a SUBREG of that variable to
4769 an invariant and then sign- or zero-extends the result of the PLUS
4770 into the variable.
4772 Most GIVs in such cases will be in the promoted mode, since that is
4773 probably the natural computation mode (and almost certainly the mode
4774 used for addresses) on the machine. So we view the pseudo-reg containing
4775 the variable as the BIV, as if it were simply incremented.
4777 Note that treating the entire pseudo as a BIV will result in making
4778 simple increments to any GIVs based on it. However, if the variable
4779 overflows in its declared mode but not its promoted mode, the result will
4780 be incorrect. This is acceptable if the variable is signed, since
4781 overflows in such cases are undefined, but not if it is unsigned, since
4782 those overflows are defined. So we only check for SIGN_EXTEND and
4783 not ZERO_EXTEND.
4785 If we cannot find a biv, we return 0. */
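/* For example, given the insn (set (reg i) (plus (reg i) (const_int 4))),
   we return 1 with *MULT_VAL == const1_rtx and *INC_VAL == (const_int 4).  */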
4787 static int
4788 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
4789 register rtx x;
4790 enum machine_mode mode;
4791 rtx p;
4792 rtx dest_reg;
4793 rtx *inc_val;
4794 rtx *mult_val;
4796 register enum rtx_code code;
4797 rtx arg;
4798 rtx insn, set = 0;
4800 code = GET_CODE (x);
4801 switch (code)
4803 case PLUS:
4804 if (XEXP (x, 0) == dest_reg
4805 || (GET_CODE (XEXP (x, 0)) == SUBREG
4806 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
4807 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
4808 arg = XEXP (x, 1);
4809 else if (XEXP (x, 1) == dest_reg
4810 || (GET_CODE (XEXP (x, 1)) == SUBREG
4811 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
4812 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
4813 arg = XEXP (x, 0);
4814 else
4815 return 0;
4817 if (invariant_p (arg) != 1)
4818 return 0;
4820 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
4821 *mult_val = const1_rtx;
4822 return 1;
4824 case SUBREG:
4825 /* If this is a SUBREG for a promoted variable, check the inner
4826 value. */
4827 if (SUBREG_PROMOTED_VAR_P (x))
4828 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
4829 dest_reg, p, inc_val, mult_val);
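/* ... fall through ... */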
4831 case REG:
4832 /* If this register is assigned in the previous insn, look at its
4833 source, but don't go outside the loop or past a label. */
4835 for (insn = PREV_INSN (p);
4836 (insn && GET_CODE (insn) == NOTE
4837 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
4838 insn = PREV_INSN (insn))
4841 if (insn)
4842 set = single_set (insn);
4844 if (set != 0
4845 && (SET_DEST (set) == x
4846 || (GET_CODE (SET_DEST (set)) == SUBREG
4847 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
4848 <= UNITS_PER_WORD)
4849 && SUBREG_REG (SET_DEST (set)) == x)))
4850 return basic_induction_var (SET_SRC (set),
4851 (GET_MODE (SET_SRC (set)) == VOIDmode
4852 ? GET_MODE (x)
4853 : GET_MODE (SET_SRC (set))),
4854 dest_reg, insn,
4855 inc_val, mult_val);
4856 /* ... fall through ... */
4858 /* Can accept constant setting of biv only when inside the innermost loop.
4859 Otherwise, a biv of an inner loop may be incorrectly recognized
4860 as a biv of the outer loop,
4861 causing code to be moved INTO the inner loop. */
4862 case MEM:
4863 if (invariant_p (x) != 1)
4864 return 0;
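/* ... fall through ... */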
4865 case CONST_INT:
4866 case SYMBOL_REF:
4867 case CONST:
4868 if (loops_enclosed == 1)
4870 /* Possible bug here? Perhaps we don't know the mode of X. */
4871 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
4872 *mult_val = const0_rtx;
4873 return 1;
4875 else
4876 return 0;
4878 case SIGN_EXTEND:
4879 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
4880 dest_reg, p, inc_val, mult_val);
4881 case ASHIFTRT:
4882 /* Similar, since this can be a sign extension. */
4883 for (insn = PREV_INSN (p);
4884 (insn && GET_CODE (insn) == NOTE
4885 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
4886 insn = PREV_INSN (insn))
4889 if (insn)
4890 set = single_set (insn);
4892 if (set && SET_DEST (set) == XEXP (x, 0)
4893 && GET_CODE (XEXP (x, 1)) == CONST_INT
4894 && INTVAL (XEXP (x, 1)) >= 0
4895 && GET_CODE (SET_SRC (set)) == ASHIFT
4896 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
4897 return basic_induction_var (XEXP (SET_SRC (set), 0),
4898 GET_MODE (XEXP (x, 0)),
4899 dest_reg, insn, inc_val, mult_val);
4900 return 0;
4902 default:
4903 return 0;
4907 /* A general induction variable (giv) is any quantity that is a linear
4908 function of a basic induction variable,
4909 i.e. giv = biv * mult_val + add_val.
4910 The coefficients can be any loop invariant quantity.
4911 A giv need not be computed directly from the biv;
4912 it can be computed by way of other givs. */
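/* For example (illustrative), if I is a biv and A is a loop-invariant int
   array on a machine with 4-byte ints, the address of a[i], computed as
   A + 4*I, is a giv with mult_val == 4 and add_val == A.  */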
4914 /* Determine whether X computes a giv.
4915 If it does, return a nonzero value
4916 which is the benefit from eliminating the computation of X;
4917 set *SRC_REG to the register of the biv that it is computed from;
4918 set *ADD_VAL and *MULT_VAL to the coefficients,
4919 such that the value of X is biv * mult + add. */
4921 static int
4922 general_induction_var (x, src_reg, add_val, mult_val)
4923 rtx x;
4924 rtx *src_reg;
4925 rtx *add_val;
4926 rtx *mult_val;
4928 rtx orig_x = x;
4929 int benefit = 0;
4930 char *storage;
4932 /* If this is an invariant, forget it, it isn't a giv. */
4933 if (invariant_p (x) == 1)
4934 return 0;
4936 /* See if the expression could be a giv and get its form.
4937 Mark our place on the obstack in case we don't find a giv. */
4938 storage = (char *) oballoc (0);
4939 x = simplify_giv_expr (x, &benefit);
4940 if (x == 0)
4942 obfree (storage);
4943 return 0;
4946 switch (GET_CODE (x))
4948 case USE:
4949 case CONST_INT:
4950 /* Since this is now an invariant and wasn't before, it must be a giv
4951 with MULT_VAL == 0. It doesn't matter which BIV we associate this
4952 with. */
4953 *src_reg = loop_iv_list->biv->dest_reg;
4954 *mult_val = const0_rtx;
4955 *add_val = x;
4956 break;
4958 case REG:
4959 /* This is equivalent to a BIV. */
4960 *src_reg = x;
4961 *mult_val = const1_rtx;
4962 *add_val = const0_rtx;
4963 break;
4965 case PLUS:
4966 /* Either (plus (biv) (invar)) or
4967 (plus (mult (biv) (invar_1)) (invar_2)). */
4968 if (GET_CODE (XEXP (x, 0)) == MULT)
4970 *src_reg = XEXP (XEXP (x, 0), 0);
4971 *mult_val = XEXP (XEXP (x, 0), 1);
4973 else
4975 *src_reg = XEXP (x, 0);
4976 *mult_val = const1_rtx;
4978 *add_val = XEXP (x, 1);
4979 break;
4981 case MULT:
4982 /* ADD_VAL is zero. */
4983 *src_reg = XEXP (x, 0);
4984 *mult_val = XEXP (x, 1);
4985 *add_val = const0_rtx;
4986 break;
4988 default:
4989 abort ();
4992 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
4993 one unless they are CONST_INT). */
4994 if (GET_CODE (*add_val) == USE)
4995 *add_val = XEXP (*add_val, 0);
4996 if (GET_CODE (*mult_val) == USE)
4997 *mult_val = XEXP (*mult_val, 0);
4999 benefit += rtx_cost (orig_x, SET);
5001 /* Always return some benefit if this is a giv so it will be detected
5002 as such. This allows elimination of bivs that might otherwise
5003 not be eliminated. */
5004 return benefit == 0 ? 1 : benefit;
5007 /* Given an expression, X, try to form it as a linear function of a biv.
5008 We will canonicalize it to be of the form
5009 (plus (mult (BIV) (invar_1))
5010 (invar_2))
5011 with possible degeneracies.
5013 The invariant expressions must each be of a form that can be used as a
5014 machine operand. We surround them with a USE rtx (a hack, but localized
5015 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5016 routine; it is the caller's responsibility to strip them.
5018 If no such canonicalization is possible (i.e., two biv's are used or an
5019 expression that is neither invariant nor a biv or giv), this routine
5020 returns 0.
5022 For a non-zero return, the result will have a code of CONST_INT, USE,
5023 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5025 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
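/* For example, if B is a biv, the expression (B + 1) * 4 is canonicalized
   here to (plus (mult B 4) (const_int 4)): the MULT is distributed over
   the inner PLUS and the invariant parts are folded.  */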
5027 static rtx
5028 simplify_giv_expr (x, benefit)
5029 rtx x;
5030 int *benefit;
5032 enum machine_mode mode = GET_MODE (x);
5033 rtx arg0, arg1;
5034 rtx tem;
5036 /* If this is not an integer mode, or if we cannot do arithmetic in this
5037 mode, this can't be a giv. */
5038 if (mode != VOIDmode
5039 && (GET_MODE_CLASS (mode) != MODE_INT
5040 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5041 return 0;
5043 switch (GET_CODE (x))
5045 case PLUS:
5046 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5047 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5048 if (arg0 == 0 || arg1 == 0)
5049 return 0;
5051 /* Put constant last, CONST_INT last if both constant. */
5052 if ((GET_CODE (arg0) == USE
5053 || GET_CODE (arg0) == CONST_INT)
5054 && GET_CODE (arg1) != CONST_INT)
5055 tem = arg0, arg0 = arg1, arg1 = tem;
5057 /* Handle addition of zero, then addition of an invariant. */
5058 if (arg1 == const0_rtx)
5059 return arg0;
5060 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5061 switch (GET_CODE (arg0))
5063 case CONST_INT:
5064 case USE:
5065 /* Both invariant. Only valid if sum is machine operand.
5066 First strip off possible USE on first operand. */
5067 if (GET_CODE (arg0) == USE)
5068 arg0 = XEXP (arg0, 0);
5070 tem = 0;
5071 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5073 tem = plus_constant (arg0, INTVAL (arg1));
5074 if (GET_CODE (tem) != CONST_INT)
5075 tem = gen_rtx (USE, mode, tem);
5078 return tem;
5080 case REG:
5081 case MULT:
5082 /* biv + invar or mult + invar. Return sum. */
5083 return gen_rtx (PLUS, mode, arg0, arg1);
5085 case PLUS:
5086 /* (a + invar_1) + invar_2. Associate. */
5087 return simplify_giv_expr (gen_rtx (PLUS, mode,
5088 XEXP (arg0, 0),
5089 gen_rtx (PLUS, mode,
5090 XEXP (arg0, 1), arg1)),
5091 benefit);
5093 default:
5094 abort ();
5097 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5098 MULT to reduce cases. */
5099 if (GET_CODE (arg0) == REG)
5100 arg0 = gen_rtx (MULT, mode, arg0, const1_rtx);
5101 if (GET_CODE (arg1) == REG)
5102 arg1 = gen_rtx (MULT, mode, arg1, const1_rtx);
5104 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5105 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5106 Recurse to associate the second PLUS. */
5107 if (GET_CODE (arg1) == MULT)
5108 tem = arg0, arg0 = arg1, arg1 = tem;
5110 if (GET_CODE (arg1) == PLUS)
5111 return simplify_giv_expr (gen_rtx (PLUS, mode,
5112 gen_rtx (PLUS, mode,
5113 arg0, XEXP (arg1, 0)),
5114 XEXP (arg1, 1)),
5115 benefit);
5117 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5118 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5119 abort ();
5121 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5122 return 0;
5124 return simplify_giv_expr (gen_rtx (MULT, mode,
5125 XEXP (arg0, 0),
5126 gen_rtx (PLUS, mode,
5127 XEXP (arg0, 1),
5128 XEXP (arg1, 1))),
5129 benefit);
5131 case MINUS:
5132 /* Handle "a - b" as "a + b * (-1)". */
5133 return simplify_giv_expr (gen_rtx (PLUS, mode,
5134 XEXP (x, 0),
5135 gen_rtx (MULT, mode,
5136 XEXP (x, 1), constm1_rtx)),
5137 benefit);
5139 case MULT:
5140 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5141 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5142 if (arg0 == 0 || arg1 == 0)
5143 return 0;
5145 /* Put constant last, CONST_INT last if both constant. */
5146 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5147 && GET_CODE (arg1) != CONST_INT)
5148 tem = arg0, arg0 = arg1, arg1 = tem;
5150 /* If second argument is not now constant, not giv. */
5151 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5152 return 0;
5154 /* Handle multiply by 0 or 1. */
5155 if (arg1 == const0_rtx)
5156 return const0_rtx;
5158 else if (arg1 == const1_rtx)
5159 return arg0;
5161 switch (GET_CODE (arg0))
5163 case REG:
5164 /* biv * invar. Done. */
5165 return gen_rtx (MULT, mode, arg0, arg1);
5167 case CONST_INT:
5168 /* Product of two constants. */
5169 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5171 case USE:
5172 /* invar * invar. Not giv. */
5173 return 0;
5175 case MULT:
5176 /* (a * invar_1) * invar_2. Associate. */
5177 return simplify_giv_expr (gen_rtx (MULT, mode,
5178 XEXP (arg0, 0),
5179 gen_rtx (MULT, mode,
5180 XEXP (arg0, 1), arg1)),
5181 benefit);
5183 case PLUS:
5184 /* (a + invar_1) * invar_2. Distribute. */
5185 return simplify_giv_expr (gen_rtx (PLUS, mode,
5186 gen_rtx (MULT, mode,
5187 XEXP (arg0, 0), arg1),
5188 gen_rtx (MULT, mode,
5189 XEXP (arg0, 1), arg1)),
5190 benefit);
5192 default:
5193 abort ();
5196 case ASHIFT:
5197 /* Shift by constant is multiply by power of two. */
5198 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5199 return 0;
5201 return simplify_giv_expr (gen_rtx (MULT, mode,
5202 XEXP (x, 0),
5203 GEN_INT ((HOST_WIDE_INT) 1
5204 << INTVAL (XEXP (x, 1)))),
5205 benefit);
5207 case NEG:
5208 /* "-a" is "a * (-1)" */
5209 return simplify_giv_expr (gen_rtx (MULT, mode, XEXP (x, 0), constm1_rtx),
5210 benefit);
5212 case NOT:
5213 /* "~a" is "-a - 1". Silly, but easy. */
5214 return simplify_giv_expr (gen_rtx (MINUS, mode,
5215 gen_rtx (NEG, mode, XEXP (x, 0)),
5216 const1_rtx),
5217 benefit);
5219 case USE:
5220 /* Already in proper form for invariant. */
5221 return x;
5223 case REG:
5224 /* If this is a new register, we can't deal with it. */
5225 if (REGNO (x) >= max_reg_before_loop)
5226 return 0;
5228 /* Check for biv or giv. */
5229 switch (reg_iv_type[REGNO (x)])
5231 case BASIC_INDUCT:
5232 return x;
5233 case GENERAL_INDUCT:
5235 struct induction *v = reg_iv_info[REGNO (x)];
5237 /* Form expression from giv and add benefit. Ensure this giv
5238 can derive another and subtract any needed adjustment if so. */
5239 *benefit += v->benefit;
5240 if (v->cant_derive)
5241 return 0;
5243 tem = gen_rtx (PLUS, mode, gen_rtx (MULT, mode,
5244 v->src_reg, v->mult_val),
5245 v->add_val);
5246 if (v->derive_adjustment)
5247 tem = gen_rtx (MINUS, mode, tem, v->derive_adjustment);
5248 return simplify_giv_expr (tem, benefit);
5252 /* Fall through to general case. */
5253 default:
5254 /* If invariant, return as USE (unless CONST_INT).
5255 Otherwise, not giv. */
5256 if (GET_CODE (x) == USE)
5257 x = XEXP (x, 0);
5259 if (invariant_p (x) == 1)
5261 if (GET_CODE (x) == CONST_INT)
5262 return x;
5263 else
5264 return gen_rtx (USE, mode, x);
5266 else
5267 return 0;
5271 /* Help detect a giv that is calculated by several consecutive insns;
5272 for example,
5273 giv = biv * M
5274 giv = giv + A
5275 The caller has already identified the first insn P as having a giv as dest;
5276 we check that all other insns that set the same register follow
5277 immediately after P, that they alter nothing else,
5278 and that the result of the last is still a giv.
5280 The value is 0 if the reg set in P is not really a giv.
5281 Otherwise, the value is the amount gained by eliminating
5282 all the consecutive insns that compute the value.
5284 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5285 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5287 The coefficients of the ultimate giv value are stored in
5288 *MULT_VAL and *ADD_VAL. */
5290 static int
5291 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5292 add_val, mult_val)
5293 int first_benefit;
5294 rtx p;
5295 rtx src_reg;
5296 rtx dest_reg;
5297 rtx *add_val;
5298 rtx *mult_val;
5300 int count;
5301 enum rtx_code code;
5302 int benefit;
5303 rtx temp;
5304 rtx set;
5306 /* Indicate that this is a giv so that we can update the value produced in
5307 each insn of the multi-insn sequence.
5309 This induction structure will be used only by the call to
5310 general_induction_var below, so we can allocate it on our stack.
5311 If this is a giv, our caller will replace the induct var entry with
5312 a new induction structure. */
5313 struct induction *v
5314 = (struct induction *) alloca (sizeof (struct induction));
5315 v->src_reg = src_reg;
5316 v->mult_val = *mult_val;
5317 v->add_val = *add_val;
5318 v->benefit = first_benefit;
5319 v->cant_derive = 0;
5320 v->derive_adjustment = 0;
5322 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5323 reg_iv_info[REGNO (dest_reg)] = v;
5325 count = n_times_set[REGNO (dest_reg)] - 1;
5327 while (count > 0)
5329 p = NEXT_INSN (p);
5330 code = GET_CODE (p);
5332 /* If libcall, skip to end of call sequence. */
5333 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5334 p = XEXP (temp, 0);
5336 if (code == INSN
5337 && (set = single_set (p))
5338 && GET_CODE (SET_DEST (set)) == REG
5339 && SET_DEST (set) == dest_reg
5340 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5341 add_val, mult_val))
5342 /* Giv created by equivalent expression. */
5343 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5344 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5345 add_val, mult_val))))
5346 && src_reg == v->src_reg)
5348 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5349 benefit += libcall_benefit (p);
5351 count--;
5352 v->mult_val = *mult_val;
5353 v->add_val = *add_val;
5354 v->benefit = benefit;
5356 else if (code != NOTE)
5358 /* Allow insns that set something other than this giv to a
5359 constant. Such insns are needed on machines which cannot
5360 include long constants and should not disqualify a giv. */
5361 if (code == INSN
5362 && (set = single_set (p))
5363 && SET_DEST (set) != dest_reg
5364 && CONSTANT_P (SET_SRC (set)))
5365 continue;
5367 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5368 return 0;
5372 return v->benefit;
5375 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5376 represented by G1. If no such expression can be found, or it is clear that
5377 it cannot possibly be a valid address, 0 is returned.
5379 To perform the computation, we note that
5380 G1 = a * v + b and
5381 G2 = c * v + d
5382 where `v' is the biv.
5384 So G2 = (c/a) * G1 + (d - b*c/a) */
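/* For example, if G1 = 2*v + 1 and G2 = 4*v + 6, then c/a == 2 and
   d - b*c/a == 6 - 1*2 == 4, so G2 == 2*G1 + 4.  */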
5386 #ifdef ADDRESS_COST
5387 static rtx
5388 express_from (g1, g2)
5389 struct induction *g1, *g2;
5391 rtx mult, add;
5393 /* The value that G1 will be multiplied by must be a constant integer. Also,
5394 the only chance we have of getting a valid address is if b*c/a (see above
5395 for notation) is also an integer. */
5396 if (GET_CODE (g1->mult_val) != CONST_INT
5397 || GET_CODE (g2->mult_val) != CONST_INT
5398 || GET_CODE (g1->add_val) != CONST_INT
5399 || g1->mult_val == const0_rtx
5400 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5401 return 0;
5403 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5404 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5406 /* Form simplified final result. */
5407 if (mult == const0_rtx)
5408 return add;
5409 else if (mult == const1_rtx)
5410 mult = g1->dest_reg;
5411 else
5412 mult = gen_rtx (MULT, g2->mode, g1->dest_reg, mult);
5414 if (add == const0_rtx)
5415 return mult;
5416 else
5417 return gen_rtx (PLUS, g2->mode, mult, add);
5419 #endif
5421 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5422 (either directly or via an address expression) a register used to represent
5423 G1. Set g2->new_reg to a representation of G1 (normally just
5424 g1->dest_reg). */
5426 static int
5427 combine_givs_p (g1, g2)
5428 struct induction *g1, *g2;
5430 rtx tem;
5432 /* If these givs are identical, they can be combined. */
5433 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5434 && rtx_equal_p (g1->add_val, g2->add_val))
5436 g2->new_reg = g1->dest_reg;
5437 return 1;
5440 #ifdef ADDRESS_COST
5441 /* If G2 can be expressed as a function of G1 and that function is valid
5442 as an address and no more expensive than using a register for G2,
5443 the expression of G2 in terms of G1 can be used. */
5444 if (g2->giv_type == DEST_ADDR
5445 && (tem = express_from (g1, g2)) != 0
5446 && memory_address_p (g2->mem_mode, tem)
5447 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5449 g2->new_reg = tem;
5450 return 1;
5452 #endif
5454 return 0;
/* Check all pairs of givs for iv_class BL and see if any can be combined with
   any other.  If so, point SAME to the giv combined with and set NEW_REG to
   be an expression (in terms of the other giv's DEST_REG) equivalent to the
   giv.  Also, update BENEFIT and related fields for cost/benefit analysis.  */

static void
combine_givs (bl)
     struct iv_class *bl;
{
  struct induction *g1, *g2;
  int pass;

  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    for (pass = 0; pass <= 1; pass++)
      for (g2 = bl->giv; g2; g2 = g2->next_iv)
        if (g1 != g2
            /* First try to combine with replaceable givs, then all givs.  */
            && (g1->replaceable || pass == 1)
            /* If either has already been combined or is to be ignored, can't
               combine.  */
            && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
            /* If something has been based on G2, G2 cannot itself be based
               on something else.  */
            && ! g2->combined_with
            && combine_givs_p (g1, g2))
          {
            /* g2->new_reg set by `combine_givs_p'  */
            g2->same = g1;
            g1->combined_with = 1;
            g1->benefit += g2->benefit;
            /* ??? The new final_[bg]iv_value code does a much better job
               of finding replaceable giv's, and hence this code may no
               longer be necessary.  */
            if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
              g1->benefit -= copy_cost;
            g1->lifetime += g2->lifetime;
            g1->times_used += g2->times_used;

            if (loop_dump_stream)
              fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
                       INSN_UID (g2->insn), INSN_UID (g1->insn));
          }
}
/* Emit code before INSERT_BEFORE to set REG = B * M + A.  */

void
emit_iv_add_mult (b, m, a, reg, insert_before)
     rtx b;          /* initial value of basic induction variable */
     rtx m;          /* multiplicative constant */
     rtx a;          /* additive constant */
     rtx reg;        /* destination register */
     rtx insert_before;
{
  rtx seq;
  rtx result;

  /* Prevent unexpected sharing of these rtx.  */
  a = copy_rtx (a);
  b = copy_rtx (b);

  /* Increase the lifetime of any invariants moved further in code.  */
  update_reg_last_use (a, insert_before);
  update_reg_last_use (b, insert_before);
  update_reg_last_use (m, insert_before);

  start_sequence ();
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
  if (reg != result)
    emit_move_insn (reg, result);
  seq = gen_sequence ();
  end_sequence ();

  emit_insn_before (seq, insert_before);
}
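/* A typical use (an illustrative sketch; BL and V name the iv_class and
   induction entries of the strength-reduction code): a reduced giv
   G = biv*mult + add can be given its initial value at the loop entrance
   with a call such as

        emit_iv_add_mult (bl->initial_value, v->mult_val, v->add_val,
                          v->new_reg, loop_start);

   so that expand_mult_add, where possible, synthesizes the multiply from
   shifts and adds instead of emitting a multiply insn.  */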
/* Test whether A * B can be computed without
   an actual multiply insn.  Value is 1 if so.  */

static int
product_cheap_p (a, b)
     rtx a;
     rtx b;
{
  int i;
  rtx tmp;
  struct obstack *old_rtl_obstack = rtl_obstack;
  char *storage = (char *) obstack_alloc (&temp_obstack, 0);
  int win = 1;

  /* If only one is constant, make it B.  */
  if (GET_CODE (a) == CONST_INT)
    tmp = a, a = b, b = tmp;

  /* If first constant, both constant, so don't need multiply.  */
  if (GET_CODE (a) == CONST_INT)
    return 1;

  /* If second not constant, neither is constant, so would need multiply.  */
  if (GET_CODE (b) != CONST_INT)
    return 0;

  /* One operand is constant, so might not need multiply insn.  Generate the
     code for the multiply and see if a call or multiply, or long sequence
     of insns is generated.  */

  rtl_obstack = &temp_obstack;
  start_sequence ();
  expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
  tmp = gen_sequence ();
  end_sequence ();

  if (GET_CODE (tmp) == SEQUENCE)
    {
      if (XVEC (tmp, 0) == 0)
        win = 1;
      else if (XVECLEN (tmp, 0) > 3)
        win = 0;
      else
        for (i = 0; i < XVECLEN (tmp, 0); i++)
          {
            rtx insn = XVECEXP (tmp, 0, i);

            if (GET_CODE (insn) != INSN
                || (GET_CODE (PATTERN (insn)) == SET
                    && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
                || (GET_CODE (PATTERN (insn)) == PARALLEL
                    && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
                    && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
              {
                win = 0;
                break;
              }
          }
    }
  else if (GET_CODE (tmp) == SET
           && GET_CODE (SET_SRC (tmp)) == MULT)
    win = 0;
  else if (GET_CODE (tmp) == PARALLEL
           && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
           && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
    win = 0;

  /* Free any storage we obtained in generating this multiply and restore rtl
     allocation to its normal obstack.  */
  obstack_free (&temp_obstack, storage);
  rtl_obstack = old_rtl_obstack;

  return win;
}
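/* For example (illustrative): product_cheap_p (i_reg, GEN_INT (9)) asks
   expand_mult to synthesize i*9; if the target can do that as a shift and
   an add (two insns, no MULT), the sequence passes the tests above and the
   result is 1.  A product of two registers falls through to the "would
   need multiply" case and yields 0 at once.  */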
/* Check to see if loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
   Also try reversing an increment loop to a decrement loop
   to see if the optimization can be performed.
   Value is nonzero if optimization was performed.  */

/* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n to a loop
   which decrements from n to 0.  A loop that decrements to zero is usually
   faster than one that increments from zero.  */

/* ??? This could be rewritten to use some of the loop unrolling procedures,
   such as approx_final_value, biv_total_increment, loop_iterations, and
   final_[bg]iv_value.  */
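/* As a source-level illustration (hypothetical C, not taken from this file):
   a counting loop such as

        for (i = 0; i < 100; i++)
          body ();

   where the body makes no other use of I behaves as if it had been written

        for (i = 99; i >= 0; i--)
          body ();

   so the exit test becomes a compare against zero.  */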
static int
check_dbra_loop (loop_end, insn_count, loop_start)
     rtx loop_end;
     int insn_count;
     rtx loop_start;
{
  struct iv_class *bl;
  rtx reg;
  rtx jump_label;
  rtx final_value;
  rtx start_value;
  rtx new_add_val;
  rtx comparison;
  rtx before_comparison;
  rtx p;

  /* If last insn is a conditional branch, and the insn before tests a
     register value, try to optimize it.  Otherwise, we can't do anything.  */

  comparison = get_condition_for_loop (PREV_INSN (loop_end));
  if (comparison == 0)
    return 0;

  /* Check all of the bivs to see if the compare uses one of them.
     Skip biv's set more than once because we can't guarantee that
     it will be zero on the last iteration.  Also skip if the biv is
     used between its update and the test insn.  */

  for (bl = loop_iv_list; bl; bl = bl->next)
    {
      if (bl->biv_count == 1
          && bl->biv->dest_reg == XEXP (comparison, 0)
          && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
                                   PREV_INSN (PREV_INSN (loop_end))))
        break;
    }

  if (! bl)
    return 0;

  /* Look for the case where the basic induction variable is always
     nonnegative, and equals zero on the last iteration.
     In this case, add a reg_note REG_NONNEG, which allows the
     m68k DBRA instruction to be used.  */
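  /* A numeric example of the test below (hypothetical values): a biv with
     initial value 10 and add_val -2, compared with (NE biv 0), takes the
     values 10, 8, ..., 2, 0.  Since 10 % 2 == 0 it reaches exactly 0 and
     is never negative, so the REG_NONNEG note is justified.  */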
  if (((GET_CODE (comparison) == GT
        && GET_CODE (XEXP (comparison, 1)) == CONST_INT
        && INTVAL (XEXP (comparison, 1)) == -1)
       || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
      && GET_CODE (bl->biv->add_val) == CONST_INT
      && INTVAL (bl->biv->add_val) < 0)
    {
      /* Initial value must be greater than 0,
         init_val % -dec_value == 0 to ensure that it equals zero on
         the last iteration.  */

      if (GET_CODE (bl->initial_value) == CONST_INT
          && INTVAL (bl->initial_value) > 0
          && (INTVAL (bl->initial_value)
              % (-INTVAL (bl->biv->add_val))) == 0)
        {
          /* register always nonnegative, add REG_NOTE to branch */
          REG_NOTES (PREV_INSN (loop_end))
            = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
                       REG_NOTES (PREV_INSN (loop_end)));
          bl->nonneg = 1;

          return 1;
        }

      /* If the decrement is 1 and the value was tested as >= 0 before
         the loop, then we can safely optimize.  */
      for (p = loop_start; p; p = PREV_INSN (p))
        {
          if (GET_CODE (p) == CODE_LABEL)
            break;
          if (GET_CODE (p) != JUMP_INSN)
            continue;

          before_comparison = get_condition_for_loop (p);
          if (before_comparison
              && XEXP (before_comparison, 0) == bl->biv->dest_reg
              && GET_CODE (before_comparison) == LT
              && XEXP (before_comparison, 1) == const0_rtx
              && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
              && INTVAL (bl->biv->add_val) == -1)
            {
              REG_NOTES (PREV_INSN (loop_end))
                = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
                           REG_NOTES (PREV_INSN (loop_end)));
              bl->nonneg = 1;

              return 1;
            }
        }
    }
  else if (num_mem_sets <= 1)
    {
      /* Try to change inc to dec, so can apply above optimization.  */
      /* Can do this if:
         all registers modified are induction variables or invariant,
         all memory references have non-overlapping addresses
         (obviously true if only one write)
         allow 2 insns for the compare/jump at the end of the loop.  */
      int num_nonfixed_reads = 0;
      /* 1 if the iteration var is used only to count iterations.  */
      int no_use_except_counting = 0;
      /* 1 if the loop has no memory store, or it has a single memory store
         which is reversible.  */
      int reversible_mem_store = 1;

      for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
        if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
          num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));

      if (bl->giv_count == 0
          && ! loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
        {
          rtx bivreg = regno_reg_rtx[bl->regno];

          /* If there are no givs for this biv, and the only exit is the
             fall through at the end of the loop, then
             see if perhaps there are no uses except to count.  */
          no_use_except_counting = 1;
          for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
            if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
              {
                rtx set = single_set (p);

                if (set && GET_CODE (SET_DEST (set)) == REG
                    && REGNO (SET_DEST (set)) == bl->regno)
                  /* An insn that sets the biv is okay.  */
                  ;
                else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
                         || p == prev_nonnote_insn (loop_end))
                  /* Don't bother about the end test.  */
                  ;
                else if (reg_mentioned_p (bivreg, PATTERN (p)))
                  /* Any other use of the biv is no good.  */
                  {
                    no_use_except_counting = 0;
                    break;
                  }
              }
        }

      /* If the loop has a single store, and the destination address is
         invariant, then we can't reverse the loop, because this address
         might then have the wrong value at loop exit.
         This would work if the source was invariant also, however, in that
         case, the insn should have been moved out of the loop.  */

      if (num_mem_sets == 1)
        reversible_mem_store
          = (! unknown_address_altered
             && ! invariant_p (XEXP (loop_store_mems[0], 0)));

      /* This code only acts for innermost loops.  Also it simplifies
         the memory address check by only reversing loops with
         zero or one memory access.
         Two memory accesses could involve parts of the same array,
         and that can't be reversed.  */

      if (num_nonfixed_reads <= 1
          && !loop_has_call
          && !loop_has_volatile
          && reversible_mem_store
          && (no_use_except_counting
              || (bl->giv_count + bl->biv_count + num_mem_sets
                  + num_movables + 2 == insn_count)))
        {
          rtx tem;

          /* Loop can be reversed.  */
          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Can reverse loop\n");

          /* Now check other conditions:
             initial_value must be zero,
             final_value % add_val == 0, so that when reversed, the
             biv will be zero on the last iteration.

             This test can probably be improved since +/- 1 in the constant
             can be obtained by changing LT to LE and vice versa; this is
             confusing.  */
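          /* Worked numbers for the test below (illustrative): with initial
             value 0, add_val 1 and exit test (LT biv 100), 100 % 1 == 0,
             so start_value becomes 100 - 1 = 99, the increment becomes a
             decrement of 1, and the exit branch becomes (GE biv 0).  */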
          if (comparison && bl->initial_value == const0_rtx
              && GET_CODE (XEXP (comparison, 1)) == CONST_INT
              /* LE gets turned into LT */
              && GET_CODE (comparison) == LT
              && (INTVAL (XEXP (comparison, 1))
                  % INTVAL (bl->biv->add_val)) == 0)
            {
              /* Register will always be nonnegative, with value
                 0 on last iteration if loop reversed */

              /* Save some info needed to produce the new insns.  */
              reg = bl->biv->dest_reg;
              jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
              new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));

              final_value = XEXP (comparison, 1);
              start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
                                     - INTVAL (bl->biv->add_val));

              /* Initialize biv to start_value before loop start.
                 The old initializing insn will be deleted as a
                 dead store by flow.c.  */
              emit_insn_before (gen_move_insn (reg, start_value), loop_start);

              /* Add insn to decrement register, and delete insn
                 that incremented the register.  */
              p = emit_insn_before (gen_add2_insn (reg, new_add_val),
                                    bl->biv->insn);
              delete_insn (bl->biv->insn);

              /* Update biv info to reflect its new status.  */
              bl->biv->insn = p;
              bl->initial_value = start_value;
              bl->biv->add_val = new_add_val;

              /* Inc LABEL_NUSES so that delete_insn will
                 not delete the label.  */
              LABEL_NUSES (XEXP (jump_label, 0)) ++;

              /* Emit an insn after the end of the loop to set the biv's
                 proper exit value if it is used anywhere outside the loop.  */
              if ((regno_last_uid[bl->regno]
                   != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
                  || ! bl->init_insn
                  || regno_first_uid[bl->regno] != INSN_UID (bl->init_insn))
                emit_insn_after (gen_move_insn (reg, final_value),
                                 loop_end);

              /* Delete compare/branch at end of loop.  */
              delete_insn (PREV_INSN (loop_end));
              delete_insn (PREV_INSN (loop_end));

              /* Add new compare/branch insn at end of loop.  */
              start_sequence ();
              emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
                             GET_MODE (reg), 0, 0);
              emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
              tem = gen_sequence ();
              end_sequence ();
              emit_jump_insn_before (tem, loop_end);

              for (tem = PREV_INSN (loop_end);
                   tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
                ;
              if (tem)
                {
                  JUMP_LABEL (tem) = XEXP (jump_label, 0);

                  /* Increment of LABEL_NUSES done above.  */
                  /* Register is now always nonnegative,
                     so add REG_NONNEG note to the branch.  */
                  REG_NOTES (tem) = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
                                             REG_NOTES (tem));
                }

              bl->nonneg = 1;

              /* Mark that this biv has been reversed.  Each giv which depends
                 on this biv, and which is also live past the end of the loop
                 will have to be fixed up.  */

              bl->reversed = 1;

              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "Reversed loop and added reg_nonneg\n");

              return 1;
            }
        }
    }

  return 0;
}
/* Verify whether the biv BL appears to be eliminable,
   based on the insns in the loop that refer to it.
   LOOP_START is the first insn of the loop, and END is the end insn.

   If ELIMINATE_P is non-zero, actually do the elimination.

   THRESHOLD and INSN_COUNT are from loop_optimize and are used to
   determine whether invariant insns should be placed inside or at the
   start of the loop.  */

static int
maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
     struct iv_class *bl;
     rtx loop_start;
     rtx end;
     int eliminate_p;
     int threshold, insn_count;
{
  rtx reg = bl->biv->dest_reg;
  rtx p;

  /* Scan all insns in the loop, stopping if we find one that uses the
     biv in a way that we cannot eliminate.  */

  for (p = loop_start; p != end; p = NEXT_INSN (p))
    {
      enum rtx_code code = GET_CODE (p);
      rtx where = threshold >= insn_count ? loop_start : p;

      if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
          && reg_mentioned_p (reg, PATTERN (p))
          && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                     "Cannot eliminate biv %d: biv used in insn %d.\n",
                     bl->regno, INSN_UID (p));
          break;
        }
    }

  if (p == end)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
                 bl->regno, eliminate_p ? "was" : "can be");
      return 1;
    }

  return 0;
}
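/* A sketch of the intended calling pattern (illustrative, mirroring how
   strength reduction can drive this): call once with ELIMINATE_P zero as a
   dry run, and only if that succeeds call again with ELIMINATE_P nonzero,

        if (maybe_eliminate_biv (bl, loop_start, end, 0, threshold, insn_count))
          maybe_eliminate_biv (bl, loop_start, end, 1, threshold, insn_count);

   so that no insn is modified unless every use of the biv is eliminable.  */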
/* If BL appears in X (part of the pattern of INSN), see if we can
   eliminate its use.  If so, return 1.  If not, return 0.

   If BIV does not appear in X, return 1.

   If ELIMINATE_P is non-zero, actually do the elimination.  WHERE indicates
   where extra insns should be added.  Depending on how many items have been
   moved out of the loop, it will either be before INSN or at the start of
   the loop.  */

static int
maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
     rtx x, insn;
     struct iv_class *bl;
     int eliminate_p;
     rtx where;
{
  enum rtx_code code = GET_CODE (x);
  rtx reg = bl->biv->dest_reg;
  enum machine_mode mode = GET_MODE (reg);
  struct induction *v;
  rtx arg, new, tem;
  int arg_operand;
  char *fmt;
  int i, j;

  switch (code)
    {
    case REG:
      /* If we haven't already been able to do something with this BIV,
         we can't eliminate it.  */
      if (x == reg)
        return 0;
      return 1;

    case SET:
      /* If this sets the BIV, it is not a problem.  */
      if (SET_DEST (x) == reg)
        return 1;

      /* If this is an insn that defines a giv, it is also ok because
         it will go away when the giv is reduced.  */
      for (v = bl->giv; v; v = v->next_iv)
        if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
          return 1;

#ifdef HAVE_cc0
      if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
        {
          /* Can replace with any giv that was reduced and
             that has (MULT_VAL != 0) and (ADD_VAL == 0).
             Require a constant for MULT_VAL, so we know it's nonzero.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
                && v->add_val == const0_rtx
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode)
              {
                if (! eliminate_p)
                  return 1;

                /* If the giv has the opposite direction of change,
                   then reverse the comparison.  */
                if (INTVAL (v->mult_val) < 0)
                  new = gen_rtx (COMPARE, GET_MODE (v->new_reg),
                                 const0_rtx, v->new_reg);
                else
                  new = v->new_reg;

                /* We can probably test that giv's reduced reg.  */
                if (validate_change (insn, &SET_SRC (x), new, 0))
                  return 1;
              }

          /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
             replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
             Require a constant for MULT_VAL, so we know it's nonzero.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode)
              {
                if (! eliminate_p)
                  return 1;

                /* If the giv has the opposite direction of change,
                   then reverse the comparison.  */
                if (INTVAL (v->mult_val) < 0)
                  new = gen_rtx (COMPARE, VOIDmode, copy_rtx (v->add_val),
                                 v->new_reg);
                else
                  new = gen_rtx (COMPARE, VOIDmode, v->new_reg,
                                 copy_rtx (v->add_val));

                /* Replace biv with the giv's reduced register.  */
                update_reg_last_use (v->add_val, insn);
                if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
                  return 1;

                /* Insn doesn't support that constant or invariant.  Copy it
                   into a register (it will be a loop invariant.)  */
                tem = gen_reg_rtx (GET_MODE (v->new_reg));

                emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
                                  where);

                if (validate_change (insn, &SET_SRC (PATTERN (insn)),
                                     gen_rtx (COMPARE, VOIDmode,
                                              v->new_reg, tem), 0))
                  return 1;
              }
        }
#endif
      break;

    case COMPARE:
    case EQ:  case NE:
    case GT:  case GE:  case GTU:  case GEU:
    case LT:  case LE:  case LTU:  case LEU:
      /* See if either argument is the biv.  */
      if (XEXP (x, 0) == reg)
        arg = XEXP (x, 1), arg_operand = 1;
      else if (XEXP (x, 1) == reg)
        arg = XEXP (x, 0), arg_operand = 0;
      else
        break;

      if (CONSTANT_P (arg))
        {
          /* First try to replace with any giv that has constant positive
             mult_val and constant add_val.  We might be able to support
             negative mult_val, but it seems complex to do it in general.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                && CONSTANT_P (v->add_val)
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode)
              {
                if (! eliminate_p)
                  return 1;

                /* Replace biv with the giv's reduced reg.  */
                XEXP (x, 1-arg_operand) = v->new_reg;

                /* If all constants are actually constant integers and
                   the derived constant can be directly placed in the COMPARE,
                   do so.  */
                if (GET_CODE (arg) == CONST_INT
                    && GET_CODE (v->mult_val) == CONST_INT
                    && GET_CODE (v->add_val) == CONST_INT
                    && validate_change (insn, &XEXP (x, arg_operand),
                                        GEN_INT (INTVAL (arg)
                                                 * INTVAL (v->mult_val)
                                                 + INTVAL (v->add_val)), 0))
                  return 1;

                /* Otherwise, load it into a register.  */
                tem = gen_reg_rtx (mode);
                emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
                if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
                  return 1;

                /* If that failed, put back the change we made above.  */
                XEXP (x, 1-arg_operand) = reg;
              }

          /* Look for giv with positive constant mult_val and nonconst add_val.
             Insert insns to calculate new compare value.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode)
              {
                rtx tem;

                if (! eliminate_p)
                  return 1;

                tem = gen_reg_rtx (mode);

                /* Replace biv with giv's reduced register.  */
                validate_change (insn, &XEXP (x, 1 - arg_operand),
                                 v->new_reg, 1);

                /* Compute value to compare against.  */
                emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
                /* Use it in this insn.  */
                validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                if (apply_change_group ())
                  return 1;
              }
        }
      else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
        {
          if (invariant_p (arg) == 1)
            {
              /* Look for giv with constant positive mult_val and nonconst
                 add_val.  Insert insns to compute new compare value.  */

              for (v = bl->giv; v; v = v->next_iv)
                if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
                    && ! v->ignore && ! v->maybe_dead && v->always_computable
                    && v->mode == mode)
                  {
                    rtx tem;

                    if (! eliminate_p)
                      return 1;

                    tem = gen_reg_rtx (mode);

                    /* Replace biv with giv's reduced register.  */
                    validate_change (insn, &XEXP (x, 1 - arg_operand),
                                     v->new_reg, 1);

                    /* Compute value to compare against.  */
                    emit_iv_add_mult (arg, v->mult_val, v->add_val,
                                      tem, where);
                    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                    if (apply_change_group ())
                      return 1;
                  }
            }

          /* This code has problems.  Basically, you can't know when
             seeing if we will eliminate BL, whether a particular giv
             of ARG will be reduced.  If it isn't going to be reduced,
             we can't eliminate BL.  We can try forcing it to be reduced,
             but that can generate poor code.

             The problem is that the benefit of reducing TV, below should
             be increased if BL can actually be eliminated, but this means
             we might have to do a topological sort of the order in which
             we try to process biv.  It doesn't seem worthwhile to do
             this sort of thing now.  */

#if 0
          /* Otherwise the reg compared with had better be a biv.  */
          if (GET_CODE (arg) != REG
              || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
            return 0;

          /* Look for a pair of givs, one for each biv,
             with identical coefficients.  */
          for (v = bl->giv; v; v = v->next_iv)
            {
              struct induction *tv;

              if (v->ignore || v->maybe_dead || v->mode != mode)
                continue;

              for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
                if (! tv->ignore && ! tv->maybe_dead
                    && rtx_equal_p (tv->mult_val, v->mult_val)
                    && rtx_equal_p (tv->add_val, v->add_val)
                    && tv->mode == mode)
                  {
                    if (! eliminate_p)
                      return 1;

                    /* Replace biv with its giv's reduced reg.  */
                    XEXP (x, 1-arg_operand) = v->new_reg;
                    /* Replace other operand with the other giv's
                       reduced reg.  */
                    XEXP (x, arg_operand) = tv->new_reg;
                    return 1;
                  }
            }
#endif
        }

      /* If we get here, the biv can't be eliminated.  */
      return 0;

    case MEM:
      /* If this address is a DEST_ADDR giv, it doesn't matter if the
         biv is used in it, since it will be replaced.  */
      for (v = bl->giv; v; v = v->next_iv)
        if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
          return 1;
      break;
    }

  /* See if any subexpression fails elimination.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'e':
          if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
                                       eliminate_p, where))
            return 0;
          break;

        case 'E':
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
                                         eliminate_p, where))
              return 0;
          break;
        }
    }

  return 1;
}
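/* Worked example for the comparison case above (hypothetical values): if
   biv I is compared by (LT I 100) and a reduced giv G = 4*I + 8 exists,
   the biv use can be rewritten as (LT G 408), since 4*100 + 8 = 408 is
   exactly the constant GEN_INT (INTVAL (arg) * INTVAL (v->mult_val)
   + INTVAL (v->add_val)) computed above.  */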
/* Return nonzero if the last use of REG
   is in an insn following INSN in the same basic block.  */

static int
last_use_this_basic_block (reg, insn)
     rtx reg;
     rtx insn;
{
  rtx n;
  for (n = insn;
       n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
       n = NEXT_INSN (n))
    {
      if (regno_last_uid[REGNO (reg)] == INSN_UID (n))
        return 1;
    }
  return 0;
}
/* Called via `note_stores' to record the initial value of a biv.  Here we
   just record the location of the set and process it later.  */

static void
record_initial (dest, set)
     rtx dest;
     rtx set;
{
  struct iv_class *bl;

  if (GET_CODE (dest) != REG
      || REGNO (dest) >= max_reg_before_loop
      || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
    return;

  bl = reg_biv_class[REGNO (dest)];

  /* If this is the first set found, record it.  */
  if (bl->init_insn == 0)
    {
      bl->init_insn = note_insn;
      bl->init_set = set;
    }
}
/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid so we can't
   use it.  */

static void
update_reg_last_use (x, insn)
     rtx x;
     rtx insn;
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify the regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of x.  */
  if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && uid_luid[regno_last_uid[REGNO (x)]] < uid_luid[INSN_UID (insn)])
    regno_last_uid[REGNO (x)] = INSN_UID (insn);
  else
    {
      register int i, j;
      register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            update_reg_last_use (XEXP (x, i), insn);
          else if (fmt[i] == 'E')
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
              update_reg_last_use (XVECEXP (x, i, j), insn);
        }
    }
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is non-zero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   The condition will be returned in a canonical form to simplify testing by
   callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.  */

rtx
get_condition (jump, earliest)
     rtx jump;
     rtx *earliest;
{
  enum rtx_code code;
  rtx prev = jump;
  rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  int did_reverse_condition = 0;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (GET_CODE (jump) != JUMP_INSN
      || ! condjump_p (jump) || simplejump_p (jump))
    return 0;

  code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
  op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
  op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);

  if (earliest)
    *earliest = jump;

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
    code = reverse_condition (code), did_reverse_condition ^= 1;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
    {
      /* Set non-zero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || GET_CODE (prev) != INSN
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }
#endif

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (GET_CODE (op0) != REG)
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */

      if ((prev = prev_nonnote_insn (prev)) == 0
          || GET_CODE (prev) != INSN
          || FIND_REG_INC_NOTE (prev, 0)
          || (set = single_set (prev)) == 0)
        break;

      /* If this is setting OP0, get what it sets it to if it looks
         relevant.  */
      if (SET_DEST (set) == op0)
        {
          enum machine_mode inner_mode = GET_MODE (SET_SRC (set));

          if ((GET_CODE (SET_SRC (set)) == COMPARE
               || (((code == NE
                     || (code == LT
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == LT
                         && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
                         && FLOAT_STORE_FLAG_VALUE < 0)
#endif
                     )
                    && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && (GET_MODE_BITSIZE (inner_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                         && GET_MODE_CLASS (inner_mode) == MODE_INT
                         && (STORE_FLAG_VALUE
                             & ((HOST_WIDE_INT) 1
                                << (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
                         && FLOAT_STORE_FLAG_VALUE < 0)
#endif
                     )
                    && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
            {
              /* We might have reversed a LT to get a GE here.  But this wasn't
                 actually the comparison of data, so we don't flag that we
                 have had to reverse the condition.  */
              did_reverse_condition ^= 1;
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else
            break;
        }
      else if (reg_set_p (op0, prev))
        /* If this sets OP0, but not directly, we have to give up.  */
        break;

      if (x)
        {
          if (GET_RTX_CLASS (GET_CODE (x)) == '<')
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reverse_condition (code);
              did_reverse_condition ^= 1;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if (const_val != max_val >> 1)
            code = LT, op1 = GEN_INT (const_val + 1);
          break;

        case GE:
          if (const_val
              != (((HOST_WIDE_INT) 1
                   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
            code = GT, op1 = GEN_INT (const_val - 1);
          break;

        case LEU:
          if (uconst_val != max_val)
            code = LTU, op1 = GEN_INT (uconst_val + 1);
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = GEN_INT (uconst_val - 1);
          break;
        }
    }

  /* If this was floating-point and we reversed anything other than an
     EQ or NE, return zero.  */
  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
      && did_reverse_condition && code != NE && code != EQ
      && ! flag_fast_math
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return 0;

#ifdef HAVE_cc0
  /* Never return CC0; return zero instead.  */
  if (op0 == cc0_rtx)
    return 0;
#endif

  return gen_rtx (code, VOIDmode, op0, op1);
}
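/* Examples of the canonicalizations above (illustrative): with a 32-bit
   operand, (LE x 4) becomes (LT x 5) and (GEU x 1) becomes (GTU x 0);
   (GT 4 x) first has its constant moved last, yielding (LT x 4).  */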
/* Similar to above routine, except that we also put an invariant last
   unless both operands are invariants.  */

rtx
get_condition_for_loop (x)
     rtx x;
{
  rtx comparison = get_condition (x, NULL_PTR);

  if (comparison == 0
      || ! invariant_p (XEXP (comparison, 0))
      || invariant_p (XEXP (comparison, 1)))
    return comparison;

  return gen_rtx (swap_condition (GET_CODE (comparison)), VOIDmode,
                  XEXP (comparison, 1), XEXP (comparison, 0));
}
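/* For example (illustrative): if N is loop-invariant and I is not, a
   condition (GT n_reg i_reg) from get_condition is returned here as
   (LT i_reg n_reg), so callers such as check_dbra_loop can assume the
   invariant operand is second.  */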