/* Move constant computations out of loops.
   Copyright (C) 1987, 88, 89, 91, 92, 93, 1994 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */

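/* As an illustration of the zero-extension case: on a machine where the
   extension is expressed as a clear followed by a low-part copy, a loop
   containing

	(set (reg:SI r) (const_int 0))
	(set (strict_low_part (subreg:QI (reg:SI r) 0)) (mem:QI addr))

   keeps only the low-part copy inside the loop, while the clear of the
   full register is moved out in front of it.  */
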
#include <stdio.h>
#include "config.h"
#include "rtl.h"
#include "obstack.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include return
   instructions on this list.  */

rtx *loop_number_exit_labels;

/* Holds the number of loop iterations.  It is zero if the number could not be
   calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.
   (unknown_address_altered is also nonzero in this case.)  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* loop_continue is the NOTE_INSN_LOOP_CONT of the
   current loop.  A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static short *n_times_set;

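/* For example, after the candidate pass n_times_set[R] == -2 means that
   pseudo R is known equal to a constant; rtx_equal_for_loop_p below relies
   on this to treat such a reg and its constant as equal.  */
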
/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static short *n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static char *may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 20
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

extern char *oballoc ();

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is a libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  int regno;			/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};

FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops ();
static void mark_loop_jump ();
static void prescan_loop ();
static int reg_in_basic_block_p ();
static int consec_sets_invariant_p ();
static rtx libcall_other_reg ();
static int labels_in_range_p ();
static void count_loop_regs_set ();
static void note_addr_stored ();
static int loop_reg_used_before_p ();
static void scan_loop ();
static void replace_call_address ();
static rtx skip_consec_insns ();
static int libcall_benefit ();
static void ignore_some_movables ();
static void force_movables ();
static void combine_movables ();
static int rtx_equal_for_loop_p ();
static void move_movables ();
static void strength_reduce ();
static int valid_initial_value_p ();
static void find_mem_givs ();
static void record_biv ();
static void check_final_value ();
static void record_giv ();
static void update_giv_derive ();
static int basic_induction_var ();
static rtx simplify_giv_expr ();
static int general_induction_var ();
static int consec_sets_giv ();
static int check_dbra_loop ();
static rtx express_from ();
static int combine_givs_p ();
static void combine_givs ();
static int product_cheap_p ();
static int maybe_eliminate_biv ();
static int maybe_eliminate_biv_1 ();
static int last_use_this_basic_block ();
static void record_initial ();
static void update_reg_last_use ();

/* Relative gain of eliminating various kinds of operations.  */
int add_cost;
#if 0
int shift_cost;
int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
int copy_cost;

void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx (REG, word_mode, 0);

  add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}

/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
{
  register rtx insn;
  register int i;
  rtx last_insn;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();
  init_alias_analysis ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they need
     not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      last_insn = insn;
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
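  /* First seed slot 0 with the earliest nonzero luid so the back-filling
     loop below always has a defined value to propagate forward.  */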
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num-1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
		 max_reg_num ());

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (flag_unroll_loops && write_symbols != NO_DEBUG)
    unroll_block_trees ();
}

/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, nregs)
     rtx loop_start, end;
     int nregs;
{
  register int i;
  register rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  rtx *reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  n_times_set = (short *) alloca (nregs * sizeof (short));
  n_times_used = (short *) alloca (nregs * sizeof (short));
  may_not_optimize = (char *) alloca (nregs);

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

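  /* Illustratively, such a rotated loop looks like this:

	   NOTE_INSN_LOOP_BEG
	   (jump to L2)		<- loop_entry_jump
       L1:			<- loop_top
	   ... loop body ...
       L2:			<- scan_start
	   ... exit test; conditional jump back to L1 ...
	   NOTE_INSN_LOOP_END

     so the scan below starts at L2 and wraps around to L1.  */
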
  for (p = NEXT_INSN (loop_start);
       p != end
	 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
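  /* THRESHOLD is the amount of extra register lifetime, in luids, we will
     tolerate per insn saved by moving it out of the loop.  Loops that
     contain calls get half the tolerance, since registers are scarcer
     across calls and a saved insn matters less there.  */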
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_UID (JUMP_LABEL (p)) < max_uid_for_loop
	  && INSN_LUID (JUMP_LABEL (p)) >= INSN_LUID (loop_start)
	  && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (end))
	{
	  loop_top = next_label (scan_start);
	  scan_start = JUMP_LABEL (p);
	}
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set may_not_optimize[I] if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     reg_single_usage[I].  */

  bzero ((char *) n_times_set, nregs * sizeof (short));
  bzero (may_not_optimize, nregs);

  if (loop_has_call)
    {
      reg_single_usage = (rtx *) alloca (nregs * sizeof (rtx));
      bzero ((char *) reg_single_usage, nregs * sizeof (rtx));
    }

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
		       may_not_optimize, reg_single_usage, &insn_count, nregs);

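  /* Hard registers (numbers below FIRST_PSEUDO_REGISTER) are never
     candidates for motion: mark them as unoptimizable and as set once.  */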
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    may_not_optimize[i] = 1, n_times_set[i] = 1;
  bcopy ((char *) n_times_set, (char *) n_times_used, nregs * sizeof (short));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop_continue));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  p = scan_start;
  while (1)
    {
      p = NEXT_INSN (p);
      /* At end of a straight-in loop, we are done.
	 At end of a loop entered at the bottom, scan the top.  */
      if (p == scan_start)
	break;
      if (p == end)
	{
	  if (loop_top != 0)
	    p = loop_top;
	  else
	    break;
	  if (p == scan_start)
	    break;
	}

      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && find_reg_note (p, REG_LIBCALL, NULL_RTX))
	in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	       && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall = 0;

      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! may_not_optimize[REGNO (SET_DEST (set))])
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;

	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }

	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  /* In order to move a register, we need to have one of three cases:
	     (1) it is used only in the same basic block as the set
	     (2) it is not a user variable and it is not used in the
		 exit test (this can cause the variable to be used
		 before it is set just like a user-variable).
	     (3) the set is guaranteed to be executed once the loop starts,
		 and the reg is not used until after that.  */
	  else if (! ((! maybe_never
		       && ! loop_reg_used_before_p (set, p, loop_start,
						    scan_start, end))
		      || (! REG_USERVAR_P (SET_DEST (set))
			  && ! REG_LOOP_TEST_P (SET_DEST (set)))
		      || reg_in_basic_block_p (p, SET_DEST (set))))
	    ;
	  else if ((tem = invariant_p (src))
		   && (dependencies == 0
		       || (tem2 = invariant_p (dependencies)) != 0)
		   && (n_times_set[REGNO (SET_DEST (set))] == 1
		       || (tem1
			   = consec_sets_invariant_p (SET_DEST (set),
						      n_times_set[REGNO (SET_DEST (set))],
						      p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      register struct movable *m;
	      register int regno = REGNO (SET_DEST (set));

	      /* A potential lossage is the case where two insns
		 can be combined as long as they are both in the loop, but
		 we move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the address
		 of a function being called.

		 Therefore, if this register is marked as being used exactly
		 once and we are in a loop with calls (a "large loop"), see if
		 we can replace the usage of this register with the source
		 of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

	      if (reg_single_usage && reg_single_usage[regno] != 0
		  && reg_single_usage[regno] != const0_rtx
		  && regno_first_uid[regno] == INSN_UID (p)
		  && (regno_last_uid[regno]
		      == INSN_UID (reg_single_usage[regno]))
		  && n_times_set[REGNO (SET_DEST (set))] == 1
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
#ifdef SMALL_REGISTER_CLASSES
		  && ! (GET_CODE (SET_SRC (set)) == REG
			&& REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)
#endif
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   reg_single_usage[regno])
		  && no_labels_between_p (p, reg_single_usage[regno])
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   reg_single_usage[regno]))
		{
		  /* Replace any usage in a REG_EQUAL note.  */
		  REG_NOTES (reg_single_usage[regno])
		    = replace_rtx (REG_NOTES (reg_single_usage[regno]),
				   SET_DEST (set), SET_SRC (set));

		  PUT_CODE (p, NOTE);
		  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
		  NOTE_SOURCE_FILE (p) = 0;
		  n_times_set[regno] = 0;
		  continue;
		}

	      m = (struct movable *) alloca (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = n_times_set[REGNO (SET_DEST (set))] - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either invariant_p or consec_sets_invariant_p
		 returned 2 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = (uid_luid[regno_last_uid[regno]] > INSN_LUID (end)
			   || uid_luid[regno_first_uid[regno]] < INSN_LUID (loop_start));
	      m->match = 0;
	      m->lifetime = (uid_luid[regno_last_uid[regno]]
			     - uid_luid[regno_first_uid[regno]]);
	      m->savings = n_times_used[regno];
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      n_times_set[regno] = move_insn ? -2 : -1;
	      /* Add M to the end of the chain MOVABLES.  */
	      if (movables == 0)
		movables = m;
	      else
		last_movable->next = m;
	      last_movable = m;

	      if (m->consec > 0)
		{
		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);

		  /* We must now reset m->move_insn, m->is_equiv, and possibly
		     m->set_src to correspond to the effects of all the
		     insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }

		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      register int regno = REGNO (SET_DEST (set));
	      if (n_times_set[regno] == 2)
		{
		  register struct movable *m;
		  m = (struct movable *) alloca (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || (uid_luid[regno_last_uid[regno]]
				   > INSN_LUID (end))
			       || (uid_luid[regno_first_uid[regno]]
				   < INSN_LUID (p))
			       || (labels_in_range_p
				   (p, uid_luid[regno_first_uid[regno]])));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = (uid_luid[regno_last_uid[regno]]
				 - uid_luid[regno_first_uid[regno]]);
		  m->savings = 1;
		  n_times_set[regno] = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  if (movables == 0)
		    movables = m;
		  else
		    last_movable->next = m;
		  last_movable = m;
		}
	    }

	  /* Past a call insn, we get to insns which might not be executed
	     because the call might exit.  This matters for insns that trap.
	     Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
	     so they don't count.  */
	  else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
	    call_passed = 1;
	  /* Past a label or a jump, we get to insns for which we
	     can't count on whether or how many times they will be
	     executed during each iteration.  Therefore, we can
	     only move out sets of trivial variables
	     (those not used after the loop).  */
	  /* This code appears in three places, once in scan_loop, and twice
	     in strength_reduce.  */
	  else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
		   /* If we enter the loop in the middle, and scan around to the
		      beginning, don't set maybe_never for that.  This must be an
		      unconditional jump, otherwise the code at the top of the
		      loop might never be executed.  Unconditional jumps are
		      followed by a barrier and then the loop end.  */
		   && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
			 && NEXT_INSN (NEXT_INSN (p)) == end
			 && simplejump_p (p)))
	    maybe_never = 1;
	  else if (GET_CODE (p) == NOTE)
	    {
	      /* At the virtual top of a converted loop, insns are again known to
		 be executed: logically, the loop begins here even though the exit
		 code has been duplicated.  */
	      if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
		maybe_never = call_passed = 0;
	      else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
		loop_depth++;
	      else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
		loop_depth--;
	    }
	}

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads feeds,
     at the point where it dies, right into another conditionally
     movable insn.  If so, record that the second insn "forces" the
     first one, since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold,
		 insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top,
		     insn_count, loop_start, end);
}

/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx (EXPR_LIST, VOIDmode, in_this, *output);
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}

/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}

/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (regno_first_uid[regno] != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (regno_last_uid[regno] == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (regno_last_uid[regno] == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;
	}
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}

/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a library
				   routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}

/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}

/* For each movable insn, see if the reg that it loads feeds,
   at the point where it dies, right into another conditionally
   movable insn.  If so, record that the second insn "forces" the
   first one, since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == regno_last_uid[regno]
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m->forces = m1;
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}

/* Find invariant expressions that are equal and can be combined into
   one register.  */

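/* For example, two pseudos that are each loaded once with the same
   invariant value (commonly the address of a function called in the loop)
   can share one register: the later movable is marked `done' and points
   at the earlier one through its `match' field.  */
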
static void
combine_movables (movables, nregs)
     struct movable *movables;
     int nregs;
{
  register struct movable *m;
  char *matched_regs = (char *) alloca (nregs);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables; m; m = m->next)
    if (m->match == 0 && n_times_used[m->regno] == 1 && !m->partial)
      {
	register struct movable *m1;
	int regno = m->regno;

	bzero (matched_regs, nregs);
	matched_regs[regno] = 1;

	for (m1 = movables; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0 && n_times_used[m1->regno] == 1
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->done = 1;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    register struct movable *m1;
	    int first = uid_luid[regno_first_uid[m->regno]];
	    int last = uid_luid[regno_last_uid[m->regno]];

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
		continue;
	      }

	    /* Make sure they extend to the same mode.
	       (Almost always true.)  */
	    if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
	      continue;

	    /* We already have one: check for overlap with those
	       already combined together.  */
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1 == m0 || (m1->partial && m1->match == m0))
		if (! (uid_luid[regno_first_uid[m1->regno]] > last
		       || uid_luid[regno_last_uid[m1->regno]] < first))
		  goto overlap;

	    /* No overlap: we can combine this with the others.  */
	    m0->lifetime += m->lifetime;
	    m0->savings += m->savings;
	    m->done = 1;
	    m->match = m0;

	  overlap: ;
	  }
    }
}

/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  int xn = REGNO (x);
  int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables)
     rtx x, y;
     struct movable *movables;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && n_times_set[REGNO (x)] == -2
      && CONSTANT_P (y))
    for (m = movables; m; m = m->next)
      if (m->move_insn && m->regno == REGNO (x)
	  && rtx_equal_p (m->set_src, y))
	return 1;

  else if (GET_CODE (y) == REG && n_times_set[REGNO (y)] == -2
	   && CONSTANT_P (x))
    for (m = movables; m; m = m->next)
      if (m->move_insn && m->regno == REGNO (y)
	  && rtx_equal_p (m->set_src, x))
	return 1;

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
	    return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}

/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use the reference.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      rtx next = next_real_insn (XEXP (x, 0));

      /* Don't record labels that refer to dispatch tables.
	 This is not necessary, since the tablejump references the same label.
	 And if we did record them, flow.c would make worse code.  */
      if (next == 0
	  || ! (GET_CODE (next) == JUMP_INSN
		&& (GET_CODE (PATTERN (next)) == ADDR_VEC
		    || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)))
	{
	  for (insn = insns; insn; insn = NEXT_INSN (insn))
	    if (reg_mentioned_p (XEXP (x, 0), insn))
	      REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL, XEXP (x, 0),
					  REG_NOTES (insn));
	}
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insns);
    }
}

/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (movables, threshold, insn_count, loop_start, end, nregs)
     struct movable *movables;
     int threshold;
     int insn_count;
     rtx loop_start;
     rtx end;
     int nregs;
{
  rtx new_start = 0;
  register struct movable *m;
  register rtx p;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
  char *already_moved = (char *) alloca (nregs);

  bzero (already_moved, nregs);
  bzero ((char *) reg_map, nregs * sizeof (rtx));

  num_movables = 0;

  for (m = movables; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
	{
	  fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
		   INSN_UID (m->insn), m->regno, m->lifetime);
	  if (m->consec > 0)
	    fprintf (loop_dump_stream, "consec %d, ", m->consec);
	  if (m->cond)
	    fprintf (loop_dump_stream, "cond ");
	  if (m->force)
	    fprintf (loop_dump_stream, "force ");
	  if (m->global)
	    fprintf (loop_dump_stream, "global ");
	  if (m->done)
	    fprintf (loop_dump_stream, "done ");
	  if (m->move_insn)
	    fprintf (loop_dump_stream, "move-insn ");
	  if (m->match)
	    fprintf (loop_dump_stream, "matches %d ",
		     INSN_UID (m->match->insn));
	  if (m->forces)
	    fprintf (loop_dump_stream, "forces %d ",
		     INSN_UID (m->forces->insn));
	}

      /* Count movables.  Value used in heuristics in strength_reduce.  */
      num_movables++;

      /* Ignore the insn if it's already done (it matched something else).
	 Otherwise, see if it is now safe to move.  */

      if (!m->done
	  && (! m->cond
	      || (1 == invariant_p (m->set_src)
		  && (m->dependencies == 0
		      || 1 == invariant_p (m->dependencies))
		  && (m->consec == 0
		      || 1 == consec_sets_invariant_p (m->set_dest,
						       m->consec + 1,
						       m->insn))))
	  && (! m->forces || m->forces->done))
	{
	  register int regno;
	  register rtx p;
	  int savings = m->savings;

	  /* We have an insn that is safe to move.
	     Compute its desirability.  */

	  p = m->insn;
	  regno = m->regno;

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "savings %d ", savings);

	  if (moved_once[regno])
	    {
	      insn_count *= 2;

	      if (loop_dump_stream)
		fprintf (loop_dump_stream, "halved since already moved ");
	    }

	  /* An insn MUST be moved if we already moved something else
	     which is safe only if this one is moved too: that is,
	     if already_moved[REGNO] is nonzero.  */

	  /* An insn is desirable to move if the new lifetime of the
	     register is no more than THRESHOLD times the old lifetime.
	     If it's not desirable, it means the loop is so big
	     that moving won't speed things up much,
	     and it is liable to make register usage worse.  */

	  /* It is also desirable to move if it can be moved at no
	     extra cost because something else was already moved.  */

	  if (already_moved[regno]
	      || (threshold * savings * m->lifetime) >= insn_count
	      || (m->forces && m->forces->done
		  && n_times_used[m->forces->regno] == 1))
	    {
	      int count;
	      register struct movable *m1;
	      rtx first;

	      /* Now move the insns that set the reg.  */

	      if (m->partial && m->match)
		{
		  rtx newpat, i1;
		  rtx r1, r2;
		  /* Find the end of this chain of matching regs.
		     Thus, we load each reg in the chain from that one reg.
		     And that reg is loaded with 0 directly,
		     since it has ->match == 0.  */
		  for (m1 = m; m1->match; m1 = m1->match);
		  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
					  SET_DEST (PATTERN (m1->insn)));
		  i1 = emit_insn_before (newpat, loop_start);

		  /* Mark the moved, invariant reg as being allowed to
		     share a hard reg with the other matching invariant.  */
		  REG_NOTES (i1) = REG_NOTES (m->insn);
		  r1 = SET_DEST (PATTERN (m->insn));
		  r2 = SET_DEST (PATTERN (m1->insn));
		  regs_may_share = gen_rtx (EXPR_LIST, VOIDmode, r1,
					    gen_rtx (EXPR_LIST, VOIDmode, r2,
						     regs_may_share));
		  delete_insn (m->insn);

		  if (new_start == 0)
		    new_start = i1;

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
		}
	      /* If we are to re-generate the item being moved with a
		 new move insn, first delete what we have and then emit
		 the move insn before the loop.  */
	      else if (m->move_insn)
		{
		  rtx i1, temp;

		  for (count = m->consec; count >= 0; count--)
		    {
		      /* If this is the first insn of a library call sequence,
			 skip to the end.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
			p = XEXP (temp, 0);

		      /* If this is the last insn of a libcall sequence, then
			 delete every insn in the sequence except the last.
			 The last insn is handled in the normal manner.  */
		      if (GET_CODE (p) != NOTE
			  && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
			{
			  temp = XEXP (temp, 0);
			  while (temp != p)
			    temp = delete_insn (temp);
			}

		      p = delete_insn (p);
		    }

		  start_sequence ();
		  emit_move_insn (m->set_dest, m->set_src);
		  temp = get_insns ();
		  end_sequence ();

		  add_label_notes (m->set_src, temp);

		  i1 = emit_insns_before (temp, loop_start);
		  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
		    REG_NOTES (i1)
		      = gen_rtx (EXPR_LIST,
				 m->is_equiv ? REG_EQUIV : REG_EQUAL,
				 m->set_src, REG_NOTES (i1));

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));

		  /* The more regs we move, the less we like moving them.  */
		  threshold -= 3;
		}
1710 else
1712 for (count = m->consec; count >= 0; count--)
1714 rtx i1, temp;
1716 /* If first insn of libcall sequence, skip to end. */
1717 /* Do this at start of loop, since p is guaranteed to
1718 be an insn here. */
1719 if (GET_CODE (p) != NOTE
1720 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1721 p = XEXP (temp, 0);
1723 /* If last insn of libcall sequence, move all
1724 insns except the last before the loop. The last
1725 insn is handled in the normal manner. */
1726 if (GET_CODE (p) != NOTE
1727 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1729 rtx fn_address = 0;
1730 rtx fn_reg = 0;
1731 rtx fn_address_insn = 0;
1733 first = 0;
1734 for (temp = XEXP (temp, 0); temp != p;
1735 temp = NEXT_INSN (temp))
1737 rtx body;
1738 rtx n;
1739 rtx next;
1741 if (GET_CODE (temp) == NOTE)
1742 continue;
1744 body = PATTERN (temp);
1746 /* Find the next insn after TEMP,
1747 not counting USE or NOTE insns. */
1748 for (next = NEXT_INSN (temp); next != p;
1749 next = NEXT_INSN (next))
1750 if (! (GET_CODE (next) == INSN
1751 && GET_CODE (PATTERN (next)) == USE)
1752 && GET_CODE (next) != NOTE)
1753 break;
1755 /* If that is the call, this may be the insn
1756 that loads the function address.
1758 Extract the function address from the insn
1759 that loads it into a register.
1760 If this insn was cse'd, we get incorrect code.
1762 So emit a new move insn that copies the
1763 function address into the register that the
1764 call insn will use. flow.c will delete any
1765 redundant stores that we have created. */
1766 if (GET_CODE (next) == CALL_INSN
1767 && GET_CODE (body) == SET
1768 && GET_CODE (SET_DEST (body)) == REG
1769 && (n = find_reg_note (temp, REG_EQUAL,
1770 NULL_RTX)))
1772 fn_reg = SET_SRC (body);
1773 if (GET_CODE (fn_reg) != REG)
1774 fn_reg = SET_DEST (body);
1775 fn_address = XEXP (n, 0);
1776 fn_address_insn = temp;
1778 /* We have the call insn.
1779 If it uses the register we suspect it might,
1780 load it with the correct address directly. */
1781 if (GET_CODE (temp) == CALL_INSN
1782 && fn_address != 0
1783 && reg_referenced_p (fn_reg, body))
1784 emit_insn_after (gen_move_insn (fn_reg,
1785 fn_address),
1786 fn_address_insn);
1788 if (GET_CODE (temp) == CALL_INSN)
1790 i1 = emit_call_insn_before (body, loop_start);
		      /* Because the USAGE information potentially
			 contains objects other than hard registers,
			 we need to copy it.  */
1794 CALL_INSN_FUNCTION_USAGE (i1) =
1795 copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1797 else
1798 i1 = emit_insn_before (body, loop_start);
1799 if (first == 0)
1800 first = i1;
1801 if (temp == fn_address_insn)
1802 fn_address_insn = i1;
1803 REG_NOTES (i1) = REG_NOTES (temp);
1804 delete_insn (temp);
1807 if (m->savemode != VOIDmode)
1809 /* P sets REG to zero; but we should clear only
1810 the bits that are not covered by the mode
1811 m->savemode. */
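		  /* For instance, if m->savemode is QImode, the mask
		     computed below is 0xff: the AND emitted before the
		     loop clears every bit above the low byte while
		     leaving the low byte, which the loop still sets,
		     untouched.  */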
1812 rtx reg = m->set_dest;
1813 rtx sequence;
1814 rtx tem;
1816 start_sequence ();
1817 tem = expand_binop
1818 (GET_MODE (reg), and_optab, reg,
1819 GEN_INT ((((HOST_WIDE_INT) 1
1820 << GET_MODE_BITSIZE (m->savemode)))
1821 - 1),
1822 reg, 1, OPTAB_LIB_WIDEN);
1823 if (tem == 0)
1824 abort ();
1825 if (tem != reg)
1826 emit_move_insn (reg, tem);
1827 sequence = gen_sequence ();
1828 end_sequence ();
1829 i1 = emit_insn_before (sequence, loop_start);
1831 else if (GET_CODE (p) == CALL_INSN)
1833 i1 = emit_call_insn_before (PATTERN (p), loop_start);
		  /* Because the USAGE information potentially
		     contains objects other than hard registers,
		     we need to copy it.  */
1837 CALL_INSN_FUNCTION_USAGE (i1) =
1838 copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1840 else
1841 i1 = emit_insn_before (PATTERN (p), loop_start);
1843 REG_NOTES (i1) = REG_NOTES (p);
1845 /* If there is a REG_EQUAL note present whose value is
1846 not loop invariant, then delete it, since it may
1847 cause problems with later optimization passes.
		     It is possible for cse to create such notes
		     as a result of record_jump_cond.  */
1851 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
1852 && ! invariant_p (XEXP (temp, 0)))
1853 remove_note (i1, temp);
1855 if (new_start == 0)
1856 new_start = i1;
1858 if (loop_dump_stream)
1859 fprintf (loop_dump_stream, " moved to %d",
1860 INSN_UID (i1));
1862 #if 0
		  /* This isn't needed because REG_NOTES is copied
		     above, and is wrong since P might be a PARALLEL.  */
1865 if (REG_NOTES (i1) == 0
1866 && ! m->partial /* But not if it's a zero-extend clr. */
1867 && ! m->global /* and not if used outside the loop
1868 (since it might get set outside). */
1869 && CONSTANT_P (SET_SRC (PATTERN (p))))
1870 REG_NOTES (i1)
1871 = gen_rtx (EXPR_LIST, REG_EQUAL,
1872 SET_SRC (PATTERN (p)), REG_NOTES (i1));
1873 #endif
1875 /* If library call, now fix the REG_NOTES that contain
1876 insn pointers, namely REG_LIBCALL on FIRST
1877 and REG_RETVAL on I1. */
1878 if (temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))
1880 XEXP (temp, 0) = first;
1881 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
1882 XEXP (temp, 0) = i1;
1885 delete_insn (p);
1886 do p = NEXT_INSN (p);
1887 while (p && GET_CODE (p) == NOTE);
1890 /* The more regs we move, the less we like moving them. */
1891 threshold -= 3;
1894 /* Any other movable that loads the same register
1895 MUST be moved. */
1896 already_moved[regno] = 1;
1898 /* This reg has been moved out of one loop. */
1899 moved_once[regno] = 1;
1901 /* The reg set here is now invariant. */
1902 if (! m->partial)
1903 n_times_set[regno] = 0;
1905 m->done = 1;
1907 /* Change the length-of-life info for the register
1908 to say it lives at least the full length of this loop.
1909 This will help guide optimizations in outer loops. */
1911 if (uid_luid[regno_first_uid[regno]] > INSN_LUID (loop_start))
1912 /* This is the old insn before all the moved insns.
1913 We can't use the moved insn because it is out of range
1914 in uid_luid. Only the old insns have luids. */
1915 regno_first_uid[regno] = INSN_UID (loop_start);
1916 if (uid_luid[regno_last_uid[regno]] < INSN_LUID (end))
1917 regno_last_uid[regno] = INSN_UID (end);
1919 /* Combine with this moved insn any other matching movables. */
1921 if (! m->partial)
1922 for (m1 = movables; m1; m1 = m1->next)
1923 if (m1->match == m)
1925 rtx temp;
	      /* Schedule the reg loaded by M1
		 for replacement so that it shares the reg of M.
		 If the modes differ (only possible in restricted
		 circumstances), make a SUBREG.  */
1931 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
1932 reg_map[m1->regno] = m->set_dest;
1933 else
1934 reg_map[m1->regno]
1935 = gen_lowpart_common (GET_MODE (m1->set_dest),
1936 m->set_dest);
1938 /* Get rid of the matching insn
1939 and prevent further processing of it. */
1940 m1->done = 1;
	      /* If library call, delete all insns except the last,
		 which is deleted below.  */
1944 if (temp = find_reg_note (m1->insn, REG_RETVAL,
1945 NULL_RTX))
1947 for (temp = XEXP (temp, 0); temp != m1->insn;
1948 temp = NEXT_INSN (temp))
1949 delete_insn (temp);
1951 delete_insn (m1->insn);
1953 /* Any other movable that loads the same register
1954 MUST be moved. */
1955 already_moved[m1->regno] = 1;
1957 /* The reg merged here is now invariant,
1958 if the reg it matches is invariant. */
1959 if (! m->partial)
1960 n_times_set[m1->regno] = 0;
1963 else if (loop_dump_stream)
1964 fprintf (loop_dump_stream, "not desirable");
1966 else if (loop_dump_stream && !m->match)
1967 fprintf (loop_dump_stream, "not safe");
1969 if (loop_dump_stream)
1970 fprintf (loop_dump_stream, "\n");
1973 if (new_start == 0)
1974 new_start = loop_start;
1976 /* Go through all the instructions in the loop, making
1977 all the register substitutions scheduled in REG_MAP. */
1978 for (p = new_start; p != end; p = NEXT_INSN (p))
1979 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1980 || GET_CODE (p) == CALL_INSN)
1982 replace_regs (PATTERN (p), reg_map, nregs, 0);
1983 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
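	  /* Clear the cached insn code so that the modified pattern
	     will be re-recognized the next time it is needed.  */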
1984 INSN_CODE (p) = -1;
1988 #if 0
1989 /* Scan X and replace the address of any MEM in it with ADDR.
1990 REG is the address that MEM should have before the replacement. */
1992 static void
1993 replace_call_address (x, reg, addr)
1994 rtx x, reg, addr;
1996 register enum rtx_code code;
1997 register int i;
1998 register char *fmt;
2000 if (x == 0)
2001 return;
2002 code = GET_CODE (x);
2003 switch (code)
2005 case PC:
2006 case CC0:
2007 case CONST_INT:
2008 case CONST_DOUBLE:
2009 case CONST:
2010 case SYMBOL_REF:
2011 case LABEL_REF:
2012 case REG:
2013 return;
2015 case SET:
2016 /* Short cut for very common case. */
2017 replace_call_address (XEXP (x, 1), reg, addr);
2018 return;
2020 case CALL:
2021 /* Short cut for very common case. */
2022 replace_call_address (XEXP (x, 0), reg, addr);
2023 return;
2025 case MEM:
2026 /* If this MEM uses a reg other than the one we expected,
2027 something is wrong. */
2028 if (XEXP (x, 0) != reg)
2029 abort ();
2030 XEXP (x, 0) = addr;
2031 return;
2034 fmt = GET_RTX_FORMAT (code);
2035 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2037 if (fmt[i] == 'e')
2038 replace_call_address (XEXP (x, i), reg, addr);
2039 if (fmt[i] == 'E')
2041 register int j;
2042 for (j = 0; j < XVECLEN (x, i); j++)
2043 replace_call_address (XVECEXP (x, i, j), reg, addr);
2047 #endif
2049 /* Return the number of memory refs to addresses that vary
2050 in the rtx X. */
2052 static int
2053 count_nonfixed_reads (x)
2054 rtx x;
2056 register enum rtx_code code;
2057 register int i;
2058 register char *fmt;
2059 int value;
2061 if (x == 0)
2062 return 0;
2064 code = GET_CODE (x);
2065 switch (code)
2067 case PC:
2068 case CC0:
2069 case CONST_INT:
2070 case CONST_DOUBLE:
2071 case CONST:
2072 case SYMBOL_REF:
2073 case LABEL_REF:
2074 case REG:
2075 return 0;
2077 case MEM:
2078 return ((invariant_p (XEXP (x, 0)) != 1)
2079 + count_nonfixed_reads (XEXP (x, 0)));
2082 value = 0;
2083 fmt = GET_RTX_FORMAT (code);
2084 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2086 if (fmt[i] == 'e')
2087 value += count_nonfixed_reads (XEXP (x, i));
2088 if (fmt[i] == 'E')
2090 register int j;
2091 for (j = 0; j < XVECLEN (x, i); j++)
2092 value += count_nonfixed_reads (XVECEXP (x, i, j));
2095 return value;
2099 #if 0
2100 /* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2101 Replace it with an instruction to load just the low bytes
2102 if the machine supports such an instruction,
2103 and insert above LOOP_START an instruction to clear the register. */
2105 static void
2106 constant_high_bytes (p, loop_start)
2107 rtx p, loop_start;
2109 register rtx new;
2110 register int insn_code_number;
2112 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2113 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2115 new = gen_rtx (SET, VOIDmode,
2116 gen_rtx (STRICT_LOW_PART, VOIDmode,
2117 gen_rtx (SUBREG, GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2118 SET_DEST (PATTERN (p)),
2119 0)),
2120 XEXP (SET_SRC (PATTERN (p)), 0));
2121 insn_code_number = recog (new, p);
2123 if (insn_code_number)
2125 register int i;
2127 /* Clear destination register before the loop. */
2128 emit_insn_before (gen_rtx (SET, VOIDmode,
2129 SET_DEST (PATTERN (p)),
2130 const0_rtx),
2131 loop_start);
2133 /* Inside the loop, just load the low part. */
2134 PATTERN (p) = new;
2137 #endif
2139 /* Scan a loop setting the variables `unknown_address_altered',
   `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2141 and `loop_has_volatile'.
2142 Also, fill in the array `loop_store_mems'. */
2144 static void
2145 prescan_loop (start, end)
2146 rtx start, end;
2148 register int level = 1;
2149 register rtx insn;
2151 unknown_address_altered = 0;
2152 loop_has_call = 0;
2153 loop_has_volatile = 0;
2154 loop_store_mems_idx = 0;
2156 num_mem_sets = 0;
2157 loops_enclosed = 1;
2158 loop_continue = 0;
2160 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2161 insn = NEXT_INSN (insn))
2163 if (GET_CODE (insn) == NOTE)
2165 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2167 ++level;
2168 /* Count number of loops contained in this one. */
2169 loops_enclosed++;
2171 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2173 --level;
2174 if (level == 0)
2176 end = insn;
2177 break;
2180 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2182 if (level == 1)
2183 loop_continue = insn;
2186 else if (GET_CODE (insn) == CALL_INSN)
2188 unknown_address_altered = 1;
2189 loop_has_call = 1;
2191 else
2193 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2195 if (volatile_refs_p (PATTERN (insn)))
2196 loop_has_volatile = 1;
2198 note_stores (PATTERN (insn), note_addr_stored);
2204 /* Scan the function looking for loops. Record the start and end of each loop.
2205 Also mark as invalid loops any loops that contain a setjmp or are branched
2206 to from outside the loop. */
2208 static void
2209 find_and_verify_loops (f)
2210 rtx f;
2212 rtx insn, label;
2213 int current_loop = -1;
2214 int next_loop = -1;
2215 int loop;
2217 /* If there are jumps to undefined labels,
2218 treat them as jumps out of any/all loops.
2219 This also avoids writing past end of tables when there are no loops. */
2220 uid_loop_num[0] = -1;
2222 /* Find boundaries of loops, mark which loops are contained within
2223 loops, and invalidate loops that have setjmp. */
2225 for (insn = f; insn; insn = NEXT_INSN (insn))
2227 if (GET_CODE (insn) == NOTE)
2228 switch (NOTE_LINE_NUMBER (insn))
2230 case NOTE_INSN_LOOP_BEG:
2231 loop_number_loop_starts[++next_loop] = insn;
2232 loop_number_loop_ends[next_loop] = 0;
2233 loop_outer_loop[next_loop] = current_loop;
2234 loop_invalid[next_loop] = 0;
2235 loop_number_exit_labels[next_loop] = 0;
2236 current_loop = next_loop;
2237 break;
2239 case NOTE_INSN_SETJMP:
2240 /* In this case, we must invalidate our current loop and any
2241 enclosing loop. */
2242 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2244 loop_invalid[loop] = 1;
2245 if (loop_dump_stream)
2246 fprintf (loop_dump_stream,
2247 "\nLoop at %d ignored due to setjmp.\n",
2248 INSN_UID (loop_number_loop_starts[loop]));
2250 break;
2252 case NOTE_INSN_LOOP_END:
2253 if (current_loop == -1)
2254 abort ();
2256 loop_number_loop_ends[current_loop] = insn;
2257 current_loop = loop_outer_loop[current_loop];
2258 break;
2262 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2263 enclosing loop, but this doesn't matter. */
2264 uid_loop_num[INSN_UID (insn)] = current_loop;
2267 /* Any loop containing a label used in an initializer must be invalidated,
2268 because it can be jumped into from anywhere. */
2270 for (label = forced_labels; label; label = XEXP (label, 1))
2272 int loop_num;
2274 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2275 loop_num != -1;
2276 loop_num = loop_outer_loop[loop_num])
2277 loop_invalid[loop_num] = 1;
2280 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2281 loop that it is not contained within, that loop is marked invalid.
2282 If any INSN or CALL_INSN uses a label's address, then the loop containing
2283 that label is marked invalid, because it could be jumped into from
2284 anywhere.
2286 Also look for blocks of code ending in an unconditional branch that
2287 exits the loop. If such a block is surrounded by a conditional
2288 branch around the block, move the block elsewhere (see below) and
2289 invert the jump to point to the code block. This may eliminate a
2290 label in our loop and will simplify processing by both us and a
2291 possible second cse pass. */
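/* Schematically, a loop containing

	P:    if (cond) goto L1
	INSN: goto Lexit
	L1:   ...

   is rewritten by inverting P and moving the exit block next to a
   BARRIER at the target's loop depth, giving roughly

	P:    if (! cond) goto Lexit
	L1:   ...

   with INSN deleted once it ends up jumping to the insn just after
   its new location.  */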
2293 for (insn = f; insn; insn = NEXT_INSN (insn))
2294 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2296 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2298 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2300 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2301 if (note)
2303 int loop_num;
2305 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2306 loop_num != -1;
2307 loop_num = loop_outer_loop[loop_num])
2308 loop_invalid[loop_num] = 1;
2312 if (GET_CODE (insn) != JUMP_INSN)
2313 continue;
2315 mark_loop_jump (PATTERN (insn), this_loop_num);
2317 /* See if this is an unconditional branch outside the loop. */
2318 if (this_loop_num != -1
2319 && (GET_CODE (PATTERN (insn)) == RETURN
2320 || (simplejump_p (insn)
2321 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2322 != this_loop_num)))
2323 && get_max_uid () < max_uid_for_loop)
2325 rtx p;
2326 rtx our_next = next_real_insn (insn);
2328 /* Go backwards until we reach the start of the loop, a label,
2329 or a JUMP_INSN. */
2330 for (p = PREV_INSN (insn);
2331 GET_CODE (p) != CODE_LABEL
2332 && ! (GET_CODE (p) == NOTE
2333 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2334 && GET_CODE (p) != JUMP_INSN;
2335 p = PREV_INSN (p))
2338 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2339 we have a block of code to try to move.
2341 We look backward and then forward from the target of INSN
2342 to find a BARRIER at the same loop depth as the target.
2343 If we find such a BARRIER, we make a new label for the start
2344 of the block, invert the jump in P and point it to that label,
2345 and move the block of code to the spot we found. */
2347 if (GET_CODE (p) == JUMP_INSN
2348 && JUMP_LABEL (p) != 0
2349 /* Just ignore jumps to labels that were never emitted.
2350 These always indicate compilation errors. */
2351 && INSN_UID (JUMP_LABEL (p)) != 0
2352 && condjump_p (p)
2353 && ! simplejump_p (p)
2354 && next_real_insn (JUMP_LABEL (p)) == our_next)
2356 rtx target
2357 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2358 int target_loop_num = uid_loop_num[INSN_UID (target)];
2359 rtx loc;
2361 for (loc = target; loc; loc = PREV_INSN (loc))
2362 if (GET_CODE (loc) == BARRIER
2363 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2364 break;
2366 if (loc == 0)
2367 for (loc = target; loc; loc = NEXT_INSN (loc))
2368 if (GET_CODE (loc) == BARRIER
2369 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2370 break;
2372 if (loc)
2374 rtx cond_label = JUMP_LABEL (p);
2375 rtx new_label = get_label_after (p);
2377 /* Ensure our label doesn't go away. */
2378 LABEL_NUSES (cond_label)++;
2380 /* Verify that uid_loop_num is large enough and that
2381 we can invert P. */
2382 if (invert_jump (p, new_label))
2384 rtx q, r;
2386 /* Include the BARRIER after INSN and copy the
2387 block after LOC. */
2388 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2389 reorder_insns (new_label, NEXT_INSN (insn), loc);
2391 /* All those insns are now in TARGET_LOOP_NUM. */
2392 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2393 q = NEXT_INSN (q))
2394 uid_loop_num[INSN_UID (q)] = target_loop_num;
2396 /* The label jumped to by INSN is no longer a loop exit.
2397 Unless INSN does not have a label (e.g., it is a
2398 RETURN insn), search loop_number_exit_labels to find
2399 its label_ref, and remove it. Also turn off
2400 LABEL_OUTSIDE_LOOP_P bit. */
2401 if (JUMP_LABEL (insn))
2403 for (q = 0,
2404 r = loop_number_exit_labels[this_loop_num];
2405 r; q = r, r = LABEL_NEXTREF (r))
2406 if (XEXP (r, 0) == JUMP_LABEL (insn))
2408 LABEL_OUTSIDE_LOOP_P (r) = 0;
2409 if (q)
2410 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2411 else
2412 loop_number_exit_labels[this_loop_num]
2413 = LABEL_NEXTREF (r);
2414 break;
2417 /* If we didn't find it, then something is wrong. */
2418 if (! r)
2419 abort ();
2422 /* P is now a jump outside the loop, so it must be put
2423 in loop_number_exit_labels, and marked as such.
2424 The easiest way to do this is to just call
2425 mark_loop_jump again for P. */
2426 mark_loop_jump (PATTERN (p), this_loop_num);
2428 /* If INSN now jumps to the insn after it,
2429 delete INSN. */
2430 if (JUMP_LABEL (insn) != 0
2431 && (next_real_insn (JUMP_LABEL (insn))
2432 == next_real_insn (insn)))
2433 delete_insn (insn);
2436 /* Continue the loop after where the conditional
2437 branch used to jump, since the only branch insn
2438 in the block (if it still remains) is an inter-loop
2439 branch and hence needs no processing. */
2440 insn = NEXT_INSN (cond_label);
2442 if (--LABEL_NUSES (cond_label) == 0)
2443 delete_insn (cond_label);
2445 /* This loop will be continued with NEXT_INSN (insn). */
2446 insn = PREV_INSN (insn);
2453 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2454 loops it is contained in, mark the target loop invalid.
2456 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2458 static void
2459 mark_loop_jump (x, loop_num)
2460 rtx x;
2461 int loop_num;
2463 int dest_loop;
2464 int outer_loop;
2465 int i;
2467 switch (GET_CODE (x))
2469 case PC:
2470 case USE:
2471 case CLOBBER:
2472 case REG:
2473 case MEM:
2474 case CONST_INT:
2475 case CONST_DOUBLE:
2476 case RETURN:
2477 return;
2479 case CONST:
2480 /* There could be a label reference in here. */
2481 mark_loop_jump (XEXP (x, 0), loop_num);
2482 return;
2484 case PLUS:
2485 case MINUS:
2486 case MULT:
2487 mark_loop_jump (XEXP (x, 0), loop_num);
2488 mark_loop_jump (XEXP (x, 1), loop_num);
2489 return;
2491 case SIGN_EXTEND:
2492 case ZERO_EXTEND:
2493 mark_loop_jump (XEXP (x, 0), loop_num);
2494 return;
2496 case LABEL_REF:
2497 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2499 /* Link together all labels that branch outside the loop. This
2500 is used by final_[bg]iv_value and the loop unrolling code. Also
2501 mark this LABEL_REF so we know that this branch should predict
2502 false. */
2504 if (dest_loop != loop_num && loop_num != -1)
2506 LABEL_OUTSIDE_LOOP_P (x) = 1;
2507 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2508 loop_number_exit_labels[loop_num] = x;
2511 /* If this is inside a loop, but not in the current loop or one enclosed
2512 by it, it invalidates at least one loop. */
2514 if (dest_loop == -1)
2515 return;
2517 /* We must invalidate every nested loop containing the target of this
2518 label, except those that also contain the jump insn. */
2520 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2522 /* Stop when we reach a loop that also contains the jump insn. */
2523 for (outer_loop = loop_num; outer_loop != -1;
2524 outer_loop = loop_outer_loop[outer_loop])
2525 if (dest_loop == outer_loop)
2526 return;
2528 /* If we get here, we know we need to invalidate a loop. */
2529 if (loop_dump_stream && ! loop_invalid[dest_loop])
2530 fprintf (loop_dump_stream,
2531 "\nLoop at %d ignored due to multiple entry points.\n",
2532 INSN_UID (loop_number_loop_starts[dest_loop]));
2534 loop_invalid[dest_loop] = 1;
2536 return;
2538 case SET:
2539 /* If this is not setting pc, ignore. */
2540 if (SET_DEST (x) == pc_rtx)
2541 mark_loop_jump (SET_SRC (x), loop_num);
2542 return;
2544 case IF_THEN_ELSE:
2545 mark_loop_jump (XEXP (x, 1), loop_num);
2546 mark_loop_jump (XEXP (x, 2), loop_num);
2547 return;
2549 case PARALLEL:
2550 case ADDR_VEC:
2551 for (i = 0; i < XVECLEN (x, 0); i++)
2552 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2553 return;
2555 case ADDR_DIFF_VEC:
2556 for (i = 0; i < XVECLEN (x, 1); i++)
2557 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2558 return;
2560 default:
2561 /* Treat anything else (such as a symbol_ref)
2562 as a branch out of this loop, but not into any loop. */
2564 if (loop_num != -1)
2566 LABEL_OUTSIDE_LOOP_P (x) = 1;
2567 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2568 loop_number_exit_labels[loop_num] = x;
2571 return;
2575 /* Return nonzero if there is a label in the range from
   insn INSN to and including the insn whose luid is END.
   INSN must have an assigned luid (i.e., it must not have
2578 been previously created by loop.c). */
2580 static int
2581 labels_in_range_p (insn, end)
2582 rtx insn;
2583 int end;
2585 while (insn && INSN_LUID (insn) <= end)
2587 if (GET_CODE (insn) == CODE_LABEL)
2588 return 1;
2589 insn = NEXT_INSN (insn);
2592 return 0;
2595 /* Record that a memory reference X is being set. */
2597 static void
2598 note_addr_stored (x)
2599 rtx x;
2601 register int i;
2603 if (x == 0 || GET_CODE (x) != MEM)
2604 return;
2606 /* Count number of memory writes.
2607 This affects heuristics in strength_reduce. */
2608 num_mem_sets++;
2610 /* BLKmode MEM means all memory is clobbered. */
2611 if (GET_MODE (x) == BLKmode)
2612 unknown_address_altered = 1;
2614 if (unknown_address_altered)
2615 return;
2617 for (i = 0; i < loop_store_mems_idx; i++)
2618 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
2619 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
2621 /* We are storing at the same address as previously noted. Save the
2622 wider reference. */
2623 if (GET_MODE_SIZE (GET_MODE (x))
2624 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
2625 loop_store_mems[i] = x;
2626 break;
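  /* If the table was already full and X's address was not found in it,
     we can no longer track stores individually; otherwise, if the
     address is new, record it.  */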
2629 if (i == NUM_STORES)
2630 unknown_address_altered = 1;
2632 else if (i == loop_store_mems_idx)
2633 loop_store_mems[loop_store_mems_idx++] = x;
2636 /* Return nonzero if the rtx X is invariant over the current loop.
2638 The value is 2 if we refer to something only conditionally invariant.
2640 If `unknown_address_altered' is nonzero, no memory ref is invariant.
2641 Otherwise, a memory ref is invariant if it does not conflict with
2642 anything stored in `loop_store_mems'. */
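/* A return value of 2 ("conditionally invariant") arises for a register
   whose n_times_set entry is negative, i.e. one whose sets have already
   been recognized as candidates for motion; a reference to such a
   register is invariant only if those candidate sets really are moved
   out of the loop.  */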
2645 invariant_p (x)
2646 register rtx x;
2648 register int i;
2649 register enum rtx_code code;
2650 register char *fmt;
2651 int conditional = 0;
2653 if (x == 0)
2654 return 1;
2655 code = GET_CODE (x);
2656 switch (code)
2658 case CONST_INT:
2659 case CONST_DOUBLE:
2660 case SYMBOL_REF:
2661 case CONST:
2662 return 1;
2664 case LABEL_REF:
2665 /* A LABEL_REF is normally invariant, however, if we are unrolling
2666 loops, and this label is inside the loop, then it isn't invariant.
2667 This is because each unrolled copy of the loop body will have
2668 a copy of this label. If this was invariant, then an insn loading
2669 the address of this label into a register might get moved outside
2670 the loop, and then each loop body would end up using the same label.
2672 We don't know the loop bounds here though, so just fail for all
2673 labels. */
2674 if (flag_unroll_loops)
2675 return 0;
2676 else
2677 return 1;
2679 case PC:
2680 case CC0:
2681 case UNSPEC_VOLATILE:
2682 return 0;
2684 case REG:
2685 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
2686 since the reg might be set by initialization within the loop. */
2687 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2688 || x == arg_pointer_rtx)
2689 return 1;
2690 if (loop_has_call
2691 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
2692 return 0;
2693 if (n_times_set[REGNO (x)] < 0)
2694 return 2;
2695 return n_times_set[REGNO (x)] == 0;
2697 case MEM:
2698 /* Read-only items (such as constants in a constant pool) are
2699 invariant if their address is. */
2700 if (RTX_UNCHANGING_P (x))
2701 break;
2703 /* If we filled the table (or had a subroutine call), any location
2704 in memory could have been clobbered. */
2705 if (unknown_address_altered
2706 /* Don't mess with volatile memory references. */
2707 || MEM_VOLATILE_P (x))
2708 return 0;
2710 /* See if there is any dependence between a store and this load. */
2711 for (i = loop_store_mems_idx - 1; i >= 0; i--)
2712 if (true_dependence (loop_store_mems[i], x))
2713 return 0;
2715 /* It's not invalidated by a store in memory
2716 but we must still verify the address is invariant. */
2717 break;
2719 case ASM_OPERANDS:
2720 /* Don't mess with insns declared volatile. */
2721 if (MEM_VOLATILE_P (x))
2722 return 0;
2725 fmt = GET_RTX_FORMAT (code);
2726 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2728 if (fmt[i] == 'e')
2730 int tem = invariant_p (XEXP (x, i));
2731 if (tem == 0)
2732 return 0;
2733 if (tem == 2)
2734 conditional = 1;
2736 else if (fmt[i] == 'E')
2738 register int j;
2739 for (j = 0; j < XVECLEN (x, i); j++)
2741 int tem = invariant_p (XVECEXP (x, i, j));
2742 if (tem == 0)
2743 return 0;
2744 if (tem == 2)
2745 conditional = 1;
2751 return 1 + conditional;
2755 /* Return nonzero if all the insns in the loop that set REG
2756 are INSN and the immediately following insns,
2757 and if each of those insns sets REG in an invariant way
2758 (not counting uses of REG in them).
2760 The value is 2 if some of these insns are only conditionally invariant.
2762 We assume that INSN itself is the first set of REG
2763 and that its source is invariant. */
2765 static int
2766 consec_sets_invariant_p (reg, n_sets, insn)
2767 int n_sets;
2768 rtx reg, insn;
2770 register rtx p = insn;
2771 register int regno = REGNO (reg);
2772 rtx temp;
2773 /* Number of sets we have to insist on finding after INSN. */
2774 int count = n_sets - 1;
2775 int old = n_times_set[regno];
2776 int value = 0;
2777 int this;
2779 /* If N_SETS hit the limit, we can't rely on its value. */
2780 if (n_sets == 127)
2781 return 0;
2783 n_times_set[regno] = 0;
2785 while (count > 0)
2787 register enum rtx_code code;
2788 rtx set;
2790 p = NEXT_INSN (p);
2791 code = GET_CODE (p);
      /* If library call, skip to end of it.  */
2794 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2795 p = XEXP (temp, 0);
2797 this = 0;
2798 if (code == INSN
2799 && (set = single_set (p))
2800 && GET_CODE (SET_DEST (set)) == REG
2801 && REGNO (SET_DEST (set)) == regno)
2803 this = invariant_p (SET_SRC (set));
2804 if (this != 0)
2805 value |= this;
2806 else if (temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
2808 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
2809 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
2810 notes are OK. */
2811 this = (CONSTANT_P (XEXP (temp, 0))
2812 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
2813 && invariant_p (XEXP (temp, 0))));
2814 if (this != 0)
2815 value |= this;
2818 if (this != 0)
2819 count--;
2820 else if (code != NOTE)
2822 n_times_set[regno] = old;
2823 return 0;
2827 n_times_set[regno] = old;
2828 /* If invariant_p ever returned 2, we return 2. */
2829 return 1 + (value & 2);
2832 #if 0
2833 /* I don't think this condition is sufficient to allow INSN
2834 to be moved, so we no longer test it. */
2836 /* Return 1 if all insns in the basic block of INSN and following INSN
2837 that set REG are invariant according to TABLE. */
2839 static int
2840 all_sets_invariant_p (reg, insn, table)
2841 rtx reg, insn;
2842 short *table;
2844 register rtx p = insn;
2845 register int regno = REGNO (reg);
2847 while (1)
2849 register enum rtx_code code;
2850 p = NEXT_INSN (p);
2851 code = GET_CODE (p);
2852 if (code == CODE_LABEL || code == JUMP_INSN)
2853 return 1;
2854 if (code == INSN && GET_CODE (PATTERN (p)) == SET
2855 && GET_CODE (SET_DEST (PATTERN (p))) == REG
2856 && REGNO (SET_DEST (PATTERN (p))) == regno)
2858 if (!invariant_p (SET_SRC (PATTERN (p)), table))
2859 return 0;
2863 #endif /* 0 */
2865 /* Look at all uses (not sets) of registers in X. For each, if it is
2866 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
2867 a different insn, set USAGE[REGNO] to const0_rtx. */
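/* After the scan, USAGE[R] is thus 0 if register R was never used,
   the using insn if R was used in exactly one insn, and const0_rtx
   if R was used in two or more insns.  */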
2869 static void
2870 find_single_use_in_loop (insn, x, usage)
2871 rtx insn;
2872 rtx x;
2873 rtx *usage;
2875 enum rtx_code code = GET_CODE (x);
2876 char *fmt = GET_RTX_FORMAT (code);
2877 int i, j;
2879 if (code == REG)
2880 usage[REGNO (x)]
2881 = (usage[REGNO (x)] != 0 && usage[REGNO (x)] != insn)
2882 ? const0_rtx : insn;
2884 else if (code == SET)
2886 /* Don't count SET_DEST if it is a REG; otherwise count things
2887 in SET_DEST because if a register is partially modified, it won't
	 show up as a potential movable, so we don't care how USAGE is set
2889 for it. */
2890 if (GET_CODE (SET_DEST (x)) != REG)
2891 find_single_use_in_loop (insn, SET_DEST (x), usage);
2892 find_single_use_in_loop (insn, SET_SRC (x), usage);
2894 else
2895 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2897 if (fmt[i] == 'e' && XEXP (x, i) != 0)
2898 find_single_use_in_loop (insn, XEXP (x, i), usage);
2899 else if (fmt[i] == 'E')
2900 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2901 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
2905 /* Increment N_TIMES_SET at the index of each register
2906 that is modified by an insn between FROM and TO.
2907 If the value of an element of N_TIMES_SET becomes 127 or more,
2908 stop incrementing it, to avoid overflow.
2910 Store in SINGLE_USAGE[I] the single insn in which register I is
2911 used, if it is only used once. Otherwise, it is set to 0 (for no
2912 uses) or const0_rtx for more than one use. This parameter may be zero,
2913 in which case this processing is not done.
   Store in *COUNT_PTR the number of actual instructions
   in the loop.  We use this to decide what is worth moving out.  */
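/* (The cap at 127 matches consec_sets_invariant_p, which refuses to
   trust a set count of exactly 127 because it may have saturated.)  */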
2918 /* last_set[n] is nonzero iff reg n has been set in the current basic block.
2919 In that case, it is the insn that last set reg n. */
2921 static void
2922 count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
2923 register rtx from, to;
2924 char *may_not_move;
2925 rtx *single_usage;
2926 int *count_ptr;
2927 int nregs;
2929 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
2930 register rtx insn;
2931 register int count = 0;
2932 register rtx dest;
2934 bzero ((char *) last_set, nregs * sizeof (rtx));
2935 for (insn = from; insn != to; insn = NEXT_INSN (insn))
2937 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2939 ++count;
2941 /* If requested, record registers that have exactly one use. */
2942 if (single_usage)
2944 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
2946 /* Include uses in REG_EQUAL notes. */
2947 if (REG_NOTES (insn))
2948 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
2951 if (GET_CODE (PATTERN (insn)) == CLOBBER
2952 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
2953 /* Don't move a reg that has an explicit clobber.
2954 We might do so sometimes, but it's not worth the pain. */
2955 may_not_move[REGNO (XEXP (PATTERN (insn), 0))] = 1;
2957 if (GET_CODE (PATTERN (insn)) == SET
2958 || GET_CODE (PATTERN (insn)) == CLOBBER)
2960 dest = SET_DEST (PATTERN (insn));
2961 while (GET_CODE (dest) == SUBREG
2962 || GET_CODE (dest) == ZERO_EXTRACT
2963 || GET_CODE (dest) == SIGN_EXTRACT
2964 || GET_CODE (dest) == STRICT_LOW_PART)
2965 dest = XEXP (dest, 0);
2966 if (GET_CODE (dest) == REG)
2968 register int regno = REGNO (dest);
2969 /* If this is the first setting of this reg
2970 in current basic block, and it was set before,
2971 it must be set in two basic blocks, so it cannot
2972 be moved out of the loop. */
2973 if (n_times_set[regno] > 0 && last_set[regno] == 0)
2974 may_not_move[regno] = 1;
2975 /* If this is not first setting in current basic block,
2976 see if reg was used in between previous one and this.
2977 If so, neither one can be moved. */
2978 if (last_set[regno] != 0
2979 && reg_used_between_p (dest, last_set[regno], insn))
2980 may_not_move[regno] = 1;
2981 if (n_times_set[regno] < 127)
2982 ++n_times_set[regno];
2983 last_set[regno] = insn;
2986 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2988 register int i;
2989 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
2991 register rtx x = XVECEXP (PATTERN (insn), 0, i);
2992 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
2993 /* Don't move a reg that has an explicit clobber.
2994 It's not worth the pain to try to do it correctly. */
2995 may_not_move[REGNO (XEXP (x, 0))] = 1;
2997 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
2999 dest = SET_DEST (x);
3000 while (GET_CODE (dest) == SUBREG
3001 || GET_CODE (dest) == ZERO_EXTRACT
3002 || GET_CODE (dest) == SIGN_EXTRACT
3003 || GET_CODE (dest) == STRICT_LOW_PART)
3004 dest = XEXP (dest, 0);
3005 if (GET_CODE (dest) == REG)
3007 register int regno = REGNO (dest);
3008 if (n_times_set[regno] > 0 && last_set[regno] == 0)
3009 may_not_move[regno] = 1;
3010 if (last_set[regno] != 0
3011 && reg_used_between_p (dest, last_set[regno], insn))
3012 may_not_move[regno] = 1;
3013 if (n_times_set[regno] < 127)
3014 ++n_times_set[regno];
3015 last_set[regno] = insn;
3022 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
3023 bzero ((char *) last_set, nregs * sizeof (rtx));
3025 *count_ptr = count;
3028 /* Given a loop that is bounded by LOOP_START and LOOP_END
3029 and that is entered at SCAN_START,
3030 return 1 if the register set in SET contained in insn INSN is used by
3031 any insn that precedes INSN in cyclic order starting
3032 from the loop entry point.
3034 We don't want to use INSN_LUID here because if we restrict INSN to those
3035 that have a valid INSN_LUID, it means we cannot move an invariant out
3036 from an inner loop past two loops. */
3038 static int
3039 loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3040 rtx set, insn, loop_start, scan_start, loop_end;
3042 rtx reg = SET_DEST (set);
3043 rtx p;
3045 /* Scan forward checking for register usage. If we hit INSN, we
3046 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3047 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3049 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3050 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3051 return 1;
3053 if (p == loop_end)
3054 p = loop_start;
3057 return 0;
3060 /* A "basic induction variable" or biv is a pseudo reg that is set
3061 (within this loop) only by incrementing or decrementing it. */
3062 /* A "general induction variable" or giv is a pseudo reg whose
3063 value is a linear function of a biv. */
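/* For example, in

       for (i = 0; i < n; i++)
	 a[i] = 0;

   the counter i is a biv (its only set within the loop is i = i + 1),
   while a register holding the address of a[i], computed as, say,
   base + i * 4 for 4-byte elements, is a giv.  */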
3065 /* Bivs are recognized by `basic_induction_var';
   Givs by `general_induction_var'.  */
3068 /* Indexed by register number, indicates whether or not register is an
3069 induction variable, and if so what type. */
3071 enum iv_mode *reg_iv_type;
3073 /* Indexed by register number, contains pointer to `struct induction'
3074 if register is an induction variable. This holds general info for
3075 all induction variables. */
3077 struct induction **reg_iv_info;
3079 /* Indexed by register number, contains pointer to `struct iv_class'
3080 if register is a basic induction variable. This holds info describing
3081 the class (a related group) of induction variables that the biv belongs
3082 to. */
3084 struct iv_class **reg_biv_class;
3086 /* The head of a list which links together (via the next field)
3087 every iv class for the current loop. */
3089 struct iv_class *loop_iv_list;
3091 /* Communication with routines called via `note_stores'. */
3093 static rtx note_insn;
3095 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3097 static rtx addr_placeholder;
3099 /* ??? Unfinished optimizations, and possible future optimizations,
3100 for the strength reduction code. */
3102 /* ??? There is one more optimization you might be interested in doing: to
3103 allocate pseudo registers for frequently-accessed memory locations.
3104 If the same memory location is referenced each time around, it might
3105 be possible to copy it into a register before and out after.
3106 This is especially useful when the memory location is a variable which
3107 is in a stack slot because somewhere its address is taken. If the
3108 loop doesn't contain a function call and the variable isn't volatile,
3109 it is safe to keep the value in a register for the duration of the
3110 loop. One tricky thing is that the copying of the value back from the
3111 register has to be done on all exits from the loop. You need to check that
3112 all the exits from the loop go to the same place. */
3114 /* ??? The interaction of biv elimination, and recognition of 'constant'
3115 bivs, may cause problems. */
3117 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3118 performance problems.
3120 Perhaps don't eliminate things that can be combined with an addressing
3121 mode. Find all givs that have the same biv, mult_val, and add_val;
3122 then for each giv, check to see if its only use dies in a following
3123 memory address. If so, generate a new memory address and check to see
3124 if it is valid. If it is valid, then store the modified memory address,
3125 otherwise, mark the giv as not done so that it will get its own iv. */
3127 /* ??? Could try to optimize branches when it is known that a biv is always
3128 positive. */
/* ??? When replacing a biv in a compare insn, we should replace it with the
   closest giv, so that an optimized branch can still be recognized by the
   combiner, e.g. the VAX acb insn.  */
3134 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3135 was rerun in loop_optimize whenever a register was added or moved.
3136 Also, some of the optimizations could be a little less conservative. */
3138 /* Perform strength reduction and induction variable elimination. */
3140 /* Pseudo registers created during this function will be beyond the last
3141 valid index in several tables including n_times_set and regno_last_uid.
3142 This does not cause a problem here, because the added registers cannot be
3143 givs outside of their loop, and hence will never be reconsidered.
3144 But scan_loop must check regnos to make sure they are in bounds. */
3146 static void
3147 strength_reduce (scan_start, end, loop_top, insn_count,
3148 loop_start, loop_end)
3149 rtx scan_start;
3150 rtx end;
3151 rtx loop_top;
3152 int insn_count;
3153 rtx loop_start;
3154 rtx loop_end;
3156 rtx p;
3157 rtx set;
3158 rtx inc_val;
3159 rtx mult_val;
3160 rtx dest_reg;
3161 /* This is 1 if current insn is not executed at least once for every loop
3162 iteration. */
3163 int not_every_iteration = 0;
3164 /* This is 1 if current insn may be executed more than once for every
3165 loop iteration. */
3166 int maybe_multiple = 0;
3167 /* Temporary list pointers for traversing loop_iv_list. */
3168 struct iv_class *bl, **backbl;
3169 /* Ratio of extra register life span we can justify
3170 for saving an instruction. More if loop doesn't call subroutines
3171 since in that case saving an insn makes more difference
3172 and more registers are available. */
3173 /* ??? could set this to last value of threshold in move_movables */
3174 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3175 /* Map of pseudo-register replacements. */
3176 rtx *reg_map;
3177 int call_seen;
3178 rtx test;
3179 rtx end_insert_before;
3180 int loop_depth = 0;
3182 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3183 * sizeof (enum iv_mode *));
3184 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode *));
3185 reg_iv_info = (struct induction **)
3186 alloca (max_reg_before_loop * sizeof (struct induction *));
3187 bzero ((char *) reg_iv_info, (max_reg_before_loop
3188 * sizeof (struct induction *)));
3189 reg_biv_class = (struct iv_class **)
3190 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3191 bzero ((char *) reg_biv_class, (max_reg_before_loop
3192 * sizeof (struct iv_class *)));
3194 loop_iv_list = 0;
3195 addr_placeholder = gen_reg_rtx (Pmode);
3197 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3198 must be put before this insn, so that they will appear in the right
3199 order (i.e. loop order).
3201 If loop_end is the end of the current function, then emit a
3202 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3203 dummy note insn. */
3204 if (NEXT_INSN (loop_end) != 0)
3205 end_insert_before = NEXT_INSN (loop_end);
3206 else
3207 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3209 /* Scan through loop to find all possible bivs. */
3211 p = scan_start;
3212 while (1)
3214 p = NEXT_INSN (p);
3215 /* At end of a straight-in loop, we are done.
3216 At end of a loop entered at the bottom, scan the top. */
3217 if (p == scan_start)
3218 break;
3219 if (p == end)
3221 if (loop_top != 0)
3222 p = loop_top;
3223 else
3224 break;
3225 if (p == scan_start)
3226 break;
3229 if (GET_CODE (p) == INSN
3230 && (set = single_set (p))
3231 && GET_CODE (SET_DEST (set)) == REG)
3233 dest_reg = SET_DEST (set);
3234 if (REGNO (dest_reg) < max_reg_before_loop
3235 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3236 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3238 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3239 dest_reg, p, &inc_val, &mult_val))
3241 /* It is a possible basic induction variable.
3242 Create and initialize an induction structure for it. */
3244 struct induction *v
3245 = (struct induction *) alloca (sizeof (struct induction));
3247 record_biv (v, p, dest_reg, inc_val, mult_val,
3248 not_every_iteration, maybe_multiple);
3249 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3251 else if (REGNO (dest_reg) < max_reg_before_loop)
3252 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3256 /* Past CODE_LABEL, we get to insns that may be executed multiple
	 times.  The only way we can be sure that they can't is if every
	 jump insn between here and the end of the loop either
3259 returns, exits the loop, or is a forward jump. */
3261 if (GET_CODE (p) == CODE_LABEL)
3263 rtx insn = p;
3265 maybe_multiple = 0;
3267 while (1)
3269 insn = NEXT_INSN (insn);
3270 if (insn == scan_start)
3271 break;
3272 if (insn == end)
3274 if (loop_top != 0)
3275 insn = loop_top;
3276 else
3277 break;
3278 if (insn == scan_start)
3279 break;
3282 if (GET_CODE (insn) == JUMP_INSN
3283 && GET_CODE (PATTERN (insn)) != RETURN
3284 && (! condjump_p (insn)
3285 || (JUMP_LABEL (insn) != 0
3286 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3287 || INSN_UID (insn) >= max_uid_for_loop
3288 || (INSN_LUID (JUMP_LABEL (insn))
3289 < INSN_LUID (insn))))))
3291 maybe_multiple = 1;
3292 break;
3297 /* Past a label or a jump, we get to insns for which we can't count
3298 on whether or how many times they will be executed during each
3299 iteration. */
3300 /* This code appears in three places, once in scan_loop, and twice
3301 in strength_reduce. */
3302 if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
3303 /* If we enter the loop in the middle, and scan around to the
3304 beginning, don't set not_every_iteration for that.
3305 This can be any kind of jump, since we want to know if insns
3306 will be executed if the loop is executed. */
3307 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
3308 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3309 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3310 not_every_iteration = 1;
3312 else if (GET_CODE (p) == NOTE)
3314 /* At the virtual top of a converted loop, insns are again known to
3315 be executed each iteration: logically, the loop begins here
3316 even though the exit code has been duplicated. */
3317 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3318 not_every_iteration = 0;
3319 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3320 loop_depth++;
3321 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3322 loop_depth--;
3325 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3326 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3327 or not an insn is known to be executed each iteration of the
3328 loop, whether or not any iterations are known to occur.
3330 Therefore, if we have just passed a label and have no more labels
3331 between here and the test insn of the loop, we know these insns
3332 will be executed each iteration. This can also happen if we
3333 have just passed a jump, for example, when there are nested loops. */
3335 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3336 && no_labels_between_p (p, loop_end))
3337 not_every_iteration = 0;
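      /* For example, in a loop body of the form

	     if (c) x = ...;
	     y = ...;

	 the set of x follows a conditional jump and is flagged with
	 not_every_iteration, while the set of y follows the join label
	 and, when no further labels occur before the end test, is again
	 known to execute on every iteration.  */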
3340 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3341 Make a sanity check against n_times_set. */
3342 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3344 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3345 /* Above happens if register modified by subreg, etc. */
3346 /* Make sure it is not recognized as a basic induction var: */
3347 || n_times_set[bl->regno] != bl->biv_count
3348 /* If never incremented, it is invariant that we decided not to
3349 move. So leave it alone. */
3350 || ! bl->incremented)
3352 if (loop_dump_stream)
3353 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3354 bl->regno,
3355 (reg_iv_type[bl->regno] != BASIC_INDUCT
3356 ? "not induction variable"
3357 : (! bl->incremented ? "never incremented"
3358 : "count error")));
3360 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3361 *backbl = bl->next;
3363 else
3365 backbl = &bl->next;
3367 if (loop_dump_stream)
3368 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3372 /* Exit if there are no bivs. */
3373 if (! loop_iv_list)
      /* Can still unroll the loop anyway, but indicate that there is no
3376 strength reduction info available. */
3377 if (flag_unroll_loops)
3378 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3380 return;
3383 /* Find initial value for each biv by searching backwards from loop_start,
3384 halting at first label. Also record any test condition. */
3386 call_seen = 0;
3387 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3389 note_insn = p;
3391 if (GET_CODE (p) == CALL_INSN)
3392 call_seen = 1;
3394 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3395 || GET_CODE (p) == CALL_INSN)
3396 note_stores (PATTERN (p), record_initial);
      /* Record any test of a biv that branches around the loop if there is no
	 store between it and the start of the loop.  We only care about tests with
3400 constants and registers and only certain of those. */
3401 if (GET_CODE (p) == JUMP_INSN
3402 && JUMP_LABEL (p) != 0
3403 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3404 && (test = get_condition_for_loop (p)) != 0
3405 && GET_CODE (XEXP (test, 0)) == REG
3406 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3407 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3408 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3409 && bl->init_insn == 0)
3411 /* If an NE test, we have an initial value! */
3412 if (GET_CODE (test) == NE)
3414 bl->init_insn = p;
3415 bl->init_set = gen_rtx (SET, VOIDmode,
3416 XEXP (test, 0), XEXP (test, 1));
3418 else
3419 bl->initial_test = test;
  /* Look at each biv and see if we can say anything better about its
3424 initial value from any initializing insns set up above. (This is done
3425 in two passes to avoid missing SETs in a PARALLEL.) */
3426 for (bl = loop_iv_list; bl; bl = bl->next)
3428 rtx src;
3430 if (! bl->init_insn)
3431 continue;
3433 src = SET_SRC (bl->init_set);
3435 if (loop_dump_stream)
3436 fprintf (loop_dump_stream,
3437 "Biv %d initialized at insn %d: initial value ",
3438 bl->regno, INSN_UID (bl->init_insn));
3440 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3441 || GET_MODE (src) == VOIDmode)
3442 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3444 bl->initial_value = src;
3446 if (loop_dump_stream)
3448 if (GET_CODE (src) == CONST_INT)
3449 fprintf (loop_dump_stream, "%d\n", INTVAL (src));
3450 else
3452 print_rtl (loop_dump_stream, src);
3453 fprintf (loop_dump_stream, "\n");
3457 else
	  /* Biv initial value is not a simple move,
	     so let it keep its initial value of "itself".  */
3462 if (loop_dump_stream)
3463 fprintf (loop_dump_stream, "is complex\n");
3467 /* Search the loop for general induction variables. */
3469 /* A register is a giv if: it is only set once, it is a function of a
3470 biv and a constant (or invariant), and it is not a biv. */
3472 not_every_iteration = 0;
3473 loop_depth = 0;
3474 p = scan_start;
3475 while (1)
3477 p = NEXT_INSN (p);
3478 /* At end of a straight-in loop, we are done.
3479 At end of a loop entered at the bottom, scan the top. */
3480 if (p == scan_start)
3481 break;
3482 if (p == end)
3484 if (loop_top != 0)
3485 p = loop_top;
3486 else
3487 break;
3488 if (p == scan_start)
3489 break;
3492 /* Look for a general induction variable in a register. */
3493 if (GET_CODE (p) == INSN
3494 && (set = single_set (p))
3495 && GET_CODE (SET_DEST (set)) == REG
3496 && ! may_not_optimize[REGNO (SET_DEST (set))])
3498 rtx src_reg;
3499 rtx add_val;
3500 rtx mult_val;
3501 int benefit;
3502 rtx regnote = 0;
3504 dest_reg = SET_DEST (set);
3505 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3506 continue;
3508 if (/* SET_SRC is a giv. */
3509 ((benefit = general_induction_var (SET_SRC (set),
3510 &src_reg, &add_val,
3511 &mult_val))
3512 /* Equivalent expression is a giv. */
3513 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3514 && (benefit = general_induction_var (XEXP (regnote, 0),
3515 &src_reg,
3516 &add_val, &mult_val))))
3517 /* Don't try to handle any regs made by loop optimization.
3518 We have nothing on them in regno_first_uid, etc. */
3519 && REGNO (dest_reg) < max_reg_before_loop
3520 /* Don't recognize a BASIC_INDUCT_VAR here. */
3521 && dest_reg != src_reg
3522 /* This must be the only place where the register is set. */
3523 && (n_times_set[REGNO (dest_reg)] == 1
3524 /* or all sets must be consecutive and make a giv. */
3525 || (benefit = consec_sets_giv (benefit, p,
3526 src_reg, dest_reg,
3527 &add_val, &mult_val))))
3529 int count;
3530 struct induction *v
3531 = (struct induction *) alloca (sizeof (struct induction));
3532 rtx temp;
3534 /* If this is a library call, increase benefit. */
3535 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3536 benefit += libcall_benefit (p);
3538 /* Skip the consecutive insns, if there are any. */
3539 for (count = n_times_set[REGNO (dest_reg)] - 1;
3540 count > 0; count--)
3542 /* If first insn of libcall sequence, skip to end.
3543 Do this at start of loop, since INSN is guaranteed to
3544 be an insn here. */
3545 if (GET_CODE (p) != NOTE
3546 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3547 p = XEXP (temp, 0);
3549 do p = NEXT_INSN (p);
3550 while (GET_CODE (p) == NOTE);
3553 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
3554 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
3555 loop_end);
3560 #ifndef DONT_REDUCE_ADDR
3561 /* Look for givs which are memory addresses. */
3562 /* This resulted in worse code on a VAX 8600. I wonder if it
3563 still does. */
3564 if (GET_CODE (p) == INSN)
3565 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
3566 loop_end);
3567 #endif
3569 /* Update the status of whether giv can derive other givs. This can
3570 change when we pass a label or an insn that updates a biv. */
3571 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3572 || GET_CODE (p) == CODE_LABEL)
3573 update_giv_derive (p);
3575 /* Past a label or a jump, we get to insns for which we can't count
3576 on whether or how many times they will be executed during each
3577 iteration. */
3578 /* This code appears in three places, once in scan_loop, and twice
3579 in strength_reduce. */
3580 if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
3581 /* If we enter the loop in the middle, and scan around
3582 to the beginning, don't set not_every_iteration for that.
3583 This can be any kind of jump, since we want to know if insns
3584 will be executed if the loop is executed. */
3585 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
3586 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3587 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3588 not_every_iteration = 1;
3590 else if (GET_CODE (p) == NOTE)
3592 /* At the virtual top of a converted loop, insns are again known to
3593 be executed each iteration: logically, the loop begins here
3594 even though the exit code has been duplicated. */
3595 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3596 not_every_iteration = 0;
3597 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3598 loop_depth++;
3599 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3600 loop_depth--;
3603 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3604 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3605 or not an insn is known to be executed each iteration of the
3606 loop, whether or not any iterations are known to occur.
3608 Therefore, if we have just passed a label and have no more labels
3609 between here and the test insn of the loop, we know these insns
3610 will be executed each iteration. */
3612 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3613 && no_labels_between_p (p, loop_end))
3614 not_every_iteration = 0;
3617 /* Try to calculate and save the number of loop iterations. This is
3618 set to zero if the actual number can not be calculated. This must
3619 be called after all giv's have been identified, since otherwise it may
3620 fail if the iteration variable is a giv. */
3622 loop_n_iterations = loop_iterations (loop_start, loop_end);
3624 /* Now for each giv for which we still don't know whether or not it is
3625 replaceable, check to see if it is replaceable because its final value
3626 can be calculated. This must be done after loop_iterations is called,
3627 so that final_giv_value will work correctly. */
3629 for (bl = loop_iv_list; bl; bl = bl->next)
3631 struct induction *v;
3633 for (v = bl->giv; v; v = v->next_iv)
3634 if (! v->replaceable && ! v->not_replaceable)
3635 check_final_value (v, loop_start, loop_end);
3638 /* Try to prove that the loop counter variable (if any) is always
3639 nonnegative; if so, record that fact with a REG_NONNEG note
3640 so that "decrement and branch until zero" insn can be used. */
3641 check_dbra_loop (loop_end, insn_count, loop_start);
3643 /* Create reg_map to hold substitutions for replaceable giv regs. */
3644 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
3645 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
3647 /* Examine each iv class for feasibility of strength reduction/induction
3648 variable elimination. */
3650 for (bl = loop_iv_list; bl; bl = bl->next)
3652 struct induction *v;
3653 int benefit;
3654 int all_reduced;
3655 rtx final_value = 0;
3657 /* Test whether it will be possible to eliminate this biv
3658 provided all givs are reduced. This is possible if either
3659 the reg is not used outside the loop, or we can compute
3660 what its final value will be.
3662 For architectures with a decrement_and_branch_until_zero insn,
3663 don't do this if we put a REG_NONNEG note on the endtest for
3664 this biv. */
3666 /* Compare against bl->init_insn rather than loop_start.
3667 We aren't concerned with any uses of the biv between
3668 init_insn and loop_start since these won't be affected
3669 by the value of the biv elsewhere in the function, so
3670 long as init_insn doesn't use the biv itself.
3671 March 14, 1989 -- self@bayes.arc.nasa.gov */
3673 if ((uid_luid[regno_last_uid[bl->regno]] < INSN_LUID (loop_end)
3674 && bl->init_insn
3675 && INSN_UID (bl->init_insn) < max_uid_for_loop
3676 && uid_luid[regno_first_uid[bl->regno]] >= INSN_LUID (bl->init_insn)
3677 #ifdef HAVE_decrement_and_branch_until_zero
3678 && ! bl->nonneg
3679 #endif
3680 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3681 || ((final_value = final_biv_value (bl, loop_start, loop_end))
3682 #ifdef HAVE_decrement_and_branch_until_zero
3683 && ! bl->nonneg
3679 #endif
3685 ))
3686 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
3687 threshold, insn_count);
3688 else
3690 if (loop_dump_stream)
3692 fprintf (loop_dump_stream,
3693 "Cannot eliminate biv %d.\n",
3694 bl->regno);
3695 fprintf (loop_dump_stream,
3696 "First use: insn %d, last use: insn %d.\n",
3697 regno_first_uid[bl->regno],
3698 regno_last_uid[bl->regno]);
3702 /* Combine all giv's for this iv_class. */
3703 combine_givs (bl);
3705 /* This will be true at the end, if all givs which depend on this
3706 biv have been strength reduced.
3707 We can't (currently) eliminate the biv unless this is so. */
3708 all_reduced = 1;
3710 /* Check each giv in this class to see if we will benefit by reducing
3711 it. Skip giv's combined with others. */
3712 for (v = bl->giv; v; v = v->next_iv)
3714 struct induction *tv;
3716 if (v->ignore || v->same)
3717 continue;
3719 benefit = v->benefit;
3721 /* Reduce benefit if not replaceable, since we will insert
3722 a move-insn to replace the insn that calculates this giv.
3723 Don't do this unless the giv is a user variable, since it
3724 will often be marked non-replaceable because of the duplication
3725 of the exit code outside the loop. In such a case, the copies
3726 we insert are dead and will be deleted. So they don't have
3727 a cost. Similar situations exist. */
3728 /* ??? The new final_[bg]iv_value code does a much better job
3729 of finding replaceable giv's, and hence this code may no longer
3730 be necessary. */
3731 if (! v->replaceable && ! bl->eliminable
3732 && REG_USERVAR_P (v->dest_reg))
3733 benefit -= copy_cost;
3735 /* Decrease the benefit to count the add-insns that we will
3736 insert to increment the reduced reg for the giv. */
3737 benefit -= add_cost * bl->biv_count;
3739 /* Decide whether to strength-reduce this giv or to leave the code
3740 unchanged (recompute it from the biv each time it is used).
3741 This decision can be made independently for each giv. */
3743 /* ??? Perhaps attempt to guess whether autoincrement will handle
3744 some of the new add insns; if so, can increase BENEFIT
3745 (undo the subtraction of add_cost that was done above). */
3747 /* If an insn is not to be strength reduced, then set its ignore
3748 flag, and clear all_reduced. */
3750 /* A giv that depends on a reversed biv must be reduced if it is
3751 used after the loop exit, otherwise, it would have the wrong
3752 value after the loop exit. To make it simple, just reduce all
3753 such giv's whether or not we know they are used after the loop
3754 exit. */
3756 if (v->lifetime * threshold * benefit < insn_count
3757 && ! bl->reversed)
3759 if (loop_dump_stream)
3760 fprintf (loop_dump_stream,
3761 "giv of insn %d not worth while, %d vs %d.\n",
3762 INSN_UID (v->insn),
3763 v->lifetime * threshold * benefit, insn_count);
3764 v->ignore = 1;
3765 all_reduced = 0;
3767 else
3769 /* Check that we can increment the reduced giv without a
3770 multiply insn. If not, reject it. */
3772 for (tv = bl->biv; tv; tv = tv->next_iv)
3773 if (tv->mult_val == const1_rtx
3774 && ! product_cheap_p (tv->add_val, v->mult_val))
3776 if (loop_dump_stream)
3777 fprintf (loop_dump_stream,
3778 "giv of insn %d: would need a multiply.\n",
3779 INSN_UID (v->insn));
3780 v->ignore = 1;
3781 all_reduced = 0;
3782 break;
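/* As a worked instance of the threshold test above (numbers invented
   for illustration): with threshold == 2, a giv whose lifetime spans
   10 insns and whose adjusted benefit is 3 scores 10 * 2 * 3 == 60.
   If insn_count is 60 or more, the giv is judged not worth while and
   left to be recomputed from the biv; if the loop is smaller than
   that, the giv is strength-reduced. */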
3787 /* Reduce each giv that we decided to reduce. */
3789 for (v = bl->giv; v; v = v->next_iv)
3791 struct induction *tv;
3792 if (! v->ignore && v->same == 0)
3794 v->new_reg = gen_reg_rtx (v->mode);
3796 /* For each place where the biv is incremented,
3797 add an insn to increment the new, reduced reg for the giv. */
3798 for (tv = bl->biv; tv; tv = tv->next_iv)
3800 if (tv->mult_val == const1_rtx)
3801 emit_iv_add_mult (tv->add_val, v->mult_val,
3802 v->new_reg, v->new_reg, tv->insn);
3803 else /* tv->mult_val == const0_rtx */
3804 /* A multiply is acceptable here
3805 since this is presumed to be seldom executed. */
3806 emit_iv_add_mult (tv->add_val, v->mult_val,
3807 v->add_val, v->new_reg, tv->insn);
3810 /* Add code at loop start to initialize giv's reduced reg. */
3812 emit_iv_add_mult (bl->initial_value, v->mult_val,
3813 v->add_val, v->new_reg, loop_start);
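/* Seen at the source level, the reduction just emitted amounts roughly
   to the following (an illustrative sketch; the pass works on RTL, and
   the giv in question is the address computation a + i*4):

	before:	for (i = 0; i < n; i++) a[i] = 0;

	after:	int *t = a;
		for (i = 0; i < n; i++) *t++ = 0;

   The multiply implicit in a[i] becomes an add of 4 performed at each
   increment of the biv i. */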
3817 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
3818 as not reduced.
3820 For each giv register that can be reduced now: if replaceable,
3821 substitute reduced reg wherever the old giv occurs;
3822 else add new move insn "giv_reg = reduced_reg".
3824 Also check for givs whose first use is their definition and whose
3825 last use is the definition of another giv. If so, it is likely
3826 dead and should not be used to eliminate a biv. */
3827 for (v = bl->giv; v; v = v->next_iv)
3829 if (v->same && v->same->ignore)
3830 v->ignore = 1;
3832 if (v->ignore)
3833 continue;
3835 if (v->giv_type == DEST_REG
3836 && regno_first_uid[REGNO (v->dest_reg)] == INSN_UID (v->insn))
3838 struct induction *v1;
3840 for (v1 = bl->giv; v1; v1 = v1->next_iv)
3841 if (regno_last_uid[REGNO (v->dest_reg)] == INSN_UID (v1->insn))
3842 v->maybe_dead = 1;
3845 /* Update expression if this was combined, in case other giv was
3846 replaced. */
3847 if (v->same)
3848 v->new_reg = replace_rtx (v->new_reg,
3849 v->same->dest_reg, v->same->new_reg);
3851 if (v->giv_type == DEST_ADDR)
3852 /* Store reduced reg as the address in the memref where we found
3853 this giv. */
3854 validate_change (v->insn, v->location, v->new_reg, 0);
3855 else if (v->replaceable)
3857 reg_map[REGNO (v->dest_reg)] = v->new_reg;
3859 #if 0
3860 /* I can no longer duplicate the original problem. Perhaps
3861 this is unnecessary now? */
3863 /* Replaceable; it isn't strictly necessary to delete the old
3864 insn and emit a new one, because v->dest_reg is now dead.
3866 However, especially when unrolling loops, the special
3867 handling for (set REG0 REG1) in the second cse pass may
3868 make v->dest_reg live again. To avoid this problem, emit
3869 an insn to set the original giv reg from the reduced giv.
3870 We cannot delete the original insn, since it may be part
3871 of a LIBCALL, and the code in flow that eliminates dead
3872 libcalls will fail if it is deleted. */
3873 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
3874 v->insn);
3875 #endif
3877 else
3879 /* Not replaceable; emit an insn to set the original giv reg from
3880 the reduced giv, same as above. */
3881 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
3882 v->insn);
3885 /* When a loop is reversed, givs which depend on the reversed
3886 biv, and which are live outside the loop, must be set to their
3887 correct final value. This insn is only needed if the giv is
3888 not replaceable. The correct final value is the same as the
3889 value that the giv starts the reversed loop with. */
3890 if (bl->reversed && ! v->replaceable)
3891 emit_iv_add_mult (bl->initial_value, v->mult_val,
3892 v->add_val, v->dest_reg, end_insert_before);
3893 else if (v->final_value)
3895 rtx insert_before;
3897 /* If the loop has multiple exits, emit the insn before the
3898 loop to ensure that it will always be executed no matter
3899 how the loop exits. Otherwise, emit the insn after the loop,
3900 since this is slightly more efficient. */
3901 if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
3902 insert_before = loop_start;
3903 else
3904 insert_before = end_insert_before;
3905 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
3906 insert_before);
3908 #if 0
3909 /* If the insn to set the final value of the giv was emitted
3910 before the loop, then we must delete the insn inside the loop
3911 that sets it. If this is a LIBCALL, then we must delete
3912 every insn in the libcall. Note, however, that
3913 final_giv_value will only succeed when there are multiple
3914 exits if the giv is dead at each exit, hence it does not
3915 matter that the original insn remains because it is dead
3916 anyway. */
3917 /* Delete the insn inside the loop that sets the giv since
3918 the giv is now set before (or after) the loop. */
3919 delete_insn (v->insn);
3920 #endif
3923 if (loop_dump_stream)
3925 fprintf (loop_dump_stream, "giv at %d reduced to ",
3926 INSN_UID (v->insn));
3927 print_rtl (loop_dump_stream, v->new_reg);
3928 fprintf (loop_dump_stream, "\n");
3932 /* All the givs based on the biv bl have been reduced if they
3933 merit it. */
3935 /* For each giv not marked as maybe dead that has been combined with a
3936 second giv, clear any "maybe dead" mark on that second giv.
3937 v->new_reg will either be or refer to the register of the giv it
3938 combined with.
3940 Doing this clearing avoids problems in biv elimination where a
3941 giv's new_reg is a complex value that can't be put in the insn but
3942 the giv it was combined with (whose new_reg is a reg) is marked maybe_dead.
3943 Since the register will be used in either case, we'd prefer it be
3944 used from the simpler giv. */
3946 for (v = bl->giv; v; v = v->next_iv)
3947 if (! v->maybe_dead && v->same)
3948 v->same->maybe_dead = 0;
3950 /* Try to eliminate the biv, if it is a candidate.
3951 This won't work if ! all_reduced,
3952 since the givs we planned to use might not have been reduced.
3954 We have to be careful that we didn't initially think we could eliminate
3955 this biv because of a giv that we now think may be dead and shouldn't
3956 be used as a biv replacement.
3958 Also, there is the possibility that we may have a giv that looks
3959 like it can be used to eliminate a biv, but the resulting insn
3960 isn't valid. This can happen, for example, on the 88k, where a
3961 JUMP_INSN can compare a register only with zero. Attempts to
3962 replace it with a compare with a constant will fail.
3964 Note that in cases where this call fails, we may have replaced some
3965 of the occurrences of the biv with a giv, but no harm was done in
3966 doing so in the rare cases where it can occur. */
3968 if (all_reduced == 1 && bl->eliminable
3969 && maybe_eliminate_biv (bl, loop_start, end, 1,
3970 threshold, insn_count))
3973 /* ?? If we created a new test to bypass the loop entirely,
3974 or otherwise drop straight in, based on this test, then
3975 we might want to rewrite it also. This way some later
3976 pass has more hope of removing the initialization of this
3977 biv entirely. */
3979 /* If final_value != 0, then the biv may be used after loop end
3980 and we must emit an insn to set it just in case.
3982 Reversed bivs already have an insn after the loop setting their
3983 value, so we don't need another one. We can't calculate the
3984 proper final value for such a biv here anyway. */
3985 if (final_value != 0 && ! bl->reversed)
3987 rtx insert_before;
3989 /* If the loop has multiple exits, emit the insn before the
3990 loop to ensure that it will always be executed no matter
3991 how the loop exits. Otherwise, emit the insn after the
3992 loop, since this is slightly more efficient. */
3993 if (loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
3994 insert_before = loop_start;
3995 else
3996 insert_before = end_insert_before;
3998 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
3999 insert_before);
4002 #if 0
4003 /* Delete all of the instructions inside the loop which set
4004 the biv, as they are all dead. It is safe to delete them,
4005 because an insn setting a biv will never be part of a libcall. */
4006 /* However, deleting them will invalidate the regno_last_uid info,
4007 so keeping them around is more convenient. Final_biv_value
4008 will only succeed when there are multiple exits if the biv
4009 is dead at each exit, hence it does not matter that the original
4010 insn remains, because it is dead anyway. */
4011 for (v = bl->biv; v; v = v->next_iv)
4012 delete_insn (v->insn);
4013 #endif
4015 if (loop_dump_stream)
4016 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4017 bl->regno);
4021 /* Go through all the instructions in the loop, making all the
4022 register substitutions scheduled in REG_MAP. */
4024 for (p = loop_start; p != end; p = NEXT_INSN (p))
4025 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4026 || GET_CODE (p) == CALL_INSN)
4028 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4029 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
4030 INSN_CODE (p) = -1;
4033 /* Unroll loops from within strength reduction so that we can use the
4034 induction variable information that strength_reduce has already
4035 collected. */
4037 if (flag_unroll_loops)
4038 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4040 if (loop_dump_stream)
4041 fprintf (loop_dump_stream, "\n");
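/* Taken together, loop reversal (check_dbra_loop) and biv elimination
   can turn a counted loop such as

	for (i = 0; i < 100; i++) *p++ = 0;

   into the equivalent of

	i = 100; do { *p++ = 0; } while (--i != 0);

   letting a single decrement-and-branch-until-zero insn implement the
   entire end test. (Illustrative sketch; this happens only on targets
   that HAVE_decrement_and_branch_until_zero and when i has no other
   uses that prevent reversal.) */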
4044 /* Return 1 if X is a valid source for an initial value (or as value being
4045 compared against in an initial test).
4047 X must be either a register or constant and must not be clobbered between
4048 the current insn and the start of the loop.
4050 INSN is the insn containing X. */
4052 static int
4053 valid_initial_value_p (x, insn, call_seen, loop_start)
4054 rtx x;
4055 rtx insn;
4056 int call_seen;
4057 rtx loop_start;
4059 if (CONSTANT_P (x))
4060 return 1;
4062 /* Only consider pseudos we know about initialized in insns whose luids
4063 we know. */
4064 if (GET_CODE (x) != REG
4065 || REGNO (x) >= max_reg_before_loop)
4066 return 0;
4068 /* Don't use call-clobbered registers across a call which clobbers it. On
4069 some machines, don't use any hard registers at all. */
4070 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4071 #ifndef SMALL_REGISTER_CLASSES
4072 && call_used_regs[REGNO (x)] && call_seen
4073 #endif
4074 )
4075 return 0;
4077 /* Don't use registers that have been clobbered before the start of the
4078 loop. */
4079 if (reg_set_between_p (x, insn, loop_start))
4080 return 0;
4082 return 1;
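/* For example, given

	p = q;				q is not set again before the loop
	for (i = 0; i < n; i++) ...

   q is a valid initial value for the biv initialized from it, whereas
   a register set again between this insn and the loop start, or a
   call-used hard register with an intervening call, is rejected.
   (Illustrative; p and q here are hypothetical pseudos.) */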
4085 /* Scan X for memory refs and check each memory address
4086 as a possible giv. INSN is the insn whose pattern X comes from.
4087 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4088 every loop iteration. */
4090 static void
4091 find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4092 rtx x;
4093 rtx insn;
4094 int not_every_iteration;
4095 rtx loop_start, loop_end;
4097 register int i, j;
4098 register enum rtx_code code;
4099 register char *fmt;
4101 if (x == 0)
4102 return;
4104 code = GET_CODE (x);
4105 switch (code)
4107 case REG:
4108 case CONST_INT:
4109 case CONST:
4110 case CONST_DOUBLE:
4111 case SYMBOL_REF:
4112 case LABEL_REF:
4113 case PC:
4114 case CC0:
4115 case ADDR_VEC:
4116 case ADDR_DIFF_VEC:
4117 case USE:
4118 case CLOBBER:
4119 return;
4121 case MEM:
4123 rtx src_reg;
4124 rtx add_val;
4125 rtx mult_val;
4126 int benefit;
4128 benefit = general_induction_var (XEXP (x, 0),
4129 &src_reg, &add_val, &mult_val);
4131 /* Don't make a DEST_ADDR giv with mult_val == 1 && add_val == 0.
4132 Such a giv isn't useful. */
4133 if (benefit > 0 && (mult_val != const1_rtx || add_val != const0_rtx))
4135 /* Found one; record it. */
4136 struct induction *v
4137 = (struct induction *) oballoc (sizeof (struct induction));
4139 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4140 add_val, benefit, DEST_ADDR, not_every_iteration,
4141 &XEXP (x, 0), loop_start, loop_end);
4143 v->mem_mode = GET_MODE (x);
4145 return;
4149 /* Recursively scan the subexpressions for other mem refs. */
4151 fmt = GET_RTX_FORMAT (code);
4152 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4153 if (fmt[i] == 'e')
4154 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4155 loop_end);
4156 else if (fmt[i] == 'E')
4157 for (j = 0; j < XVECLEN (x, i); j++)
4158 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4159 loop_start, loop_end);
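/* For example, with biv i and 4-byte elements, the memory reference

	a[i]			whose address is  a + i*4

   yields a DEST_ADDR giv for the address expression, with src_reg the
   biv i, mult_val 4 and add_val a. (Illustrative; the multiplier is
   really the element size in the target's addressing units.) */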
4162 /* Fill in the data about one biv update.
4163 V is the `struct induction' in which we record the biv. (It is
4164 allocated by the caller, with alloca.)
4165 INSN is the insn that sets it.
4166 DEST_REG is the biv's reg.
4168 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4169 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4170 being set to INC_VAL.
4172 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4173 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4174 can be executed more than once per iteration. If MAYBE_MULTIPLE
4175 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4176 executed exactly once per iteration. */
4178 static void
4179 record_biv (v, insn, dest_reg, inc_val, mult_val,
4180 not_every_iteration, maybe_multiple)
4181 struct induction *v;
4182 rtx insn;
4183 rtx dest_reg;
4184 rtx inc_val;
4185 rtx mult_val;
4186 int not_every_iteration;
4187 int maybe_multiple;
4189 struct iv_class *bl;
4191 v->insn = insn;
4192 v->src_reg = dest_reg;
4193 v->dest_reg = dest_reg;
4194 v->mult_val = mult_val;
4195 v->add_val = inc_val;
4196 v->mode = GET_MODE (dest_reg);
4197 v->always_computable = ! not_every_iteration;
4198 v->maybe_multiple = maybe_multiple;
4200 /* Add this to the reg's iv_class, creating a class
4201 if this is the first incrementation of the reg. */
4203 bl = reg_biv_class[REGNO (dest_reg)];
4204 if (bl == 0)
4206 /* Create and initialize new iv_class. */
4208 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4210 bl->regno = REGNO (dest_reg);
4211 bl->biv = 0;
4212 bl->giv = 0;
4213 bl->biv_count = 0;
4214 bl->giv_count = 0;
4216 /* Set initial value to the reg itself. */
4217 bl->initial_value = dest_reg;
4218 /* We haven't seen the initializing insn yet. */
4219 bl->init_insn = 0;
4220 bl->init_set = 0;
4221 bl->initial_test = 0;
4222 bl->incremented = 0;
4223 bl->eliminable = 0;
4224 bl->nonneg = 0;
4225 bl->reversed = 0;
4226 bl->total_benefit = 0;
4228 /* Add this class to loop_iv_list. */
4229 bl->next = loop_iv_list;
4230 loop_iv_list = bl;
4232 /* Put it in the array of biv register classes. */
4233 reg_biv_class[REGNO (dest_reg)] = bl;
4236 /* Update IV_CLASS entry for this biv. */
4237 v->next_iv = bl->biv;
4238 bl->biv = v;
4239 bl->biv_count++;
4240 if (mult_val == const1_rtx)
4241 bl->incremented = 1;
4243 if (loop_dump_stream)
4245 fprintf (loop_dump_stream,
4246 "Insn %d: possible biv, reg %d,",
4247 INSN_UID (insn), REGNO (dest_reg));
4248 if (GET_CODE (inc_val) == CONST_INT)
4249 fprintf (loop_dump_stream, " const = %d\n",
4250 INTVAL (inc_val));
4251 else
4253 fprintf (loop_dump_stream, " const = ");
4254 print_rtl (loop_dump_stream, inc_val);
4255 fprintf (loop_dump_stream, "\n");
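/* The two biv update shapes recorded here correspond, at the source
   level, to increments and plain assignments:

	i = i + 4;		mult_val == const1_rtx, inc_val == 4
	i = 5;			mult_val == const0_rtx, inc_val == 5

   (An illustrative mapping of C statements onto the MULT_VAL/INC_VAL
   convention documented above.) */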
4260 /* Fill in the data about one giv.
4261 V is the `struct induction' in which we record the giv. (It is
4262 allocated by the caller, with alloca.)
4263 INSN is the insn that sets it.
4264 BENEFIT estimates the savings from deleting this insn.
4265 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4266 into a register or is used as a memory address.
4268 SRC_REG is the biv reg which the giv is computed from.
4269 DEST_REG is the giv's reg (if the giv is stored in a reg).
4270 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4271 LOCATION points to the place where this giv's value appears in INSN. */
4273 static void
4274 record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4275 type, not_every_iteration, location, loop_start, loop_end)
4276 struct induction *v;
4277 rtx insn;
4278 rtx src_reg;
4279 rtx dest_reg;
4280 rtx mult_val, add_val;
4281 int benefit;
4282 enum g_types type;
4283 int not_every_iteration;
4284 rtx *location;
4285 rtx loop_start, loop_end;
4287 struct induction *b;
4288 struct iv_class *bl;
4289 rtx set = single_set (insn);
4290 rtx p;
4292 v->insn = insn;
4293 v->src_reg = src_reg;
4294 v->giv_type = type;
4295 v->dest_reg = dest_reg;
4296 v->mult_val = mult_val;
4297 v->add_val = add_val;
4298 v->benefit = benefit;
4299 v->location = location;
4300 v->cant_derive = 0;
4301 v->combined_with = 0;
4302 v->maybe_multiple = 0;
4303 v->maybe_dead = 0;
4304 v->derive_adjustment = 0;
4305 v->same = 0;
4306 v->ignore = 0;
4307 v->new_reg = 0;
4308 v->final_value = 0;
4310 /* The v->always_computable field is used in update_giv_derive, to
4311 determine whether a giv can be used to derive another giv. For a
4312 DEST_REG giv, INSN computes a new value for the giv, so its value
4313 isn't computable if INSN isn't executed every iteration.
4314 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4315 it does not compute a new value. Hence the value is always computable
4316 regardless of whether INSN is executed each iteration. */
4318 if (type == DEST_ADDR)
4319 v->always_computable = 1;
4320 else
4321 v->always_computable = ! not_every_iteration;
4323 if (type == DEST_ADDR)
4325 v->mode = GET_MODE (*location);
4326 v->lifetime = 1;
4327 v->times_used = 1;
4329 else /* type == DEST_REG */
4331 v->mode = GET_MODE (SET_DEST (set));
4333 v->lifetime = (uid_luid[regno_last_uid[REGNO (dest_reg)]]
4334 - uid_luid[regno_first_uid[REGNO (dest_reg)]]);
4336 v->times_used = n_times_used[REGNO (dest_reg)];
4338 /* If the lifetime is zero, it means that this register is
4339 really a dead store. So mark this as a giv that can be
4340 ignored. This will not prevent the biv from being eliminated. */
4341 if (v->lifetime == 0)
4342 v->ignore = 1;
4344 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4345 reg_iv_info[REGNO (dest_reg)] = v;
4348 /* Add the giv to the class of givs computed from one biv. */
4350 bl = reg_biv_class[REGNO (src_reg)];
4351 if (bl)
4353 v->next_iv = bl->giv;
4354 bl->giv = v;
4355 /* Don't count DEST_ADDR. This is supposed to count the number of
4356 insns that calculate givs. */
4357 if (type == DEST_REG)
4358 bl->giv_count++;
4359 bl->total_benefit += benefit;
4361 else
4362 /* Fatal error, biv missing for this giv? */
4363 abort ();
4365 if (type == DEST_ADDR)
4366 v->replaceable = 1;
4367 else
4369 /* The giv can be replaced outright by the reduced register only if all
4370 of the following conditions are true:
4371 - the insn that sets the giv is always executed on any iteration
4372 on which the giv is used at all
4373 (there are two ways to deduce this:
4374 either the insn is executed on every iteration,
4375 or all uses follow that insn in the same basic block),
4376 - the giv is not used outside the loop
4377 - no assignments to the biv occur during the giv's lifetime. */
4379 if (regno_first_uid[REGNO (dest_reg)] == INSN_UID (insn)
4380 /* Previous line always fails if INSN was moved by loop opt. */
4381 && uid_luid[regno_last_uid[REGNO (dest_reg)]] < INSN_LUID (loop_end)
4382 && (! not_every_iteration
4383 || last_use_this_basic_block (dest_reg, insn)))
4385 /* Now check that there are no assignments to the biv within the
4386 giv's lifetime. This requires two separate checks. */
4388 /* Check each biv update, and fail if any are between the first
4389 and last use of the giv.
4391 If this loop contains an inner loop that was unrolled, then
4392 the insn modifying the biv may have been emitted by the loop
4393 unrolling code, and hence does not have a valid luid. Just
4394 mark the biv as not replaceable in this case. It is not very
4395 useful as a biv, because it is used in two different loops.
4396 It is very unlikely that we would be able to optimize the giv
4397 using this biv anyway. */
4399 v->replaceable = 1;
4400 for (b = bl->biv; b; b = b->next_iv)
4402 if (INSN_UID (b->insn) >= max_uid_for_loop
4403 || ((uid_luid[INSN_UID (b->insn)]
4404 >= uid_luid[regno_first_uid[REGNO (dest_reg)]])
4405 && (uid_luid[INSN_UID (b->insn)]
4406 <= uid_luid[regno_last_uid[REGNO (dest_reg)]])))
4408 v->replaceable = 0;
4409 v->not_replaceable = 1;
4410 break;
4414 /* Check each insn between the first and last use of the giv,
4415 and fail if any of them are branches that jump to a named label
4416 outside this range, but still inside the loop. This catches
4417 cases of spaghetti code where the execution order of insns
4418 is not linear, and hence the above test fails. For example,
4419 in the following code, j is not replaceable:
4420 for (i = 0; i < 100; ) {
4421 L0: j = 4*i; goto L1;
4422 L2: k = j; goto L3;
4423 L1: i++; goto L2;
4424 L3: ; }
4425 printf ("k = %d\n", k); }
4426 This test is conservative, but it triggers rarely enough
4427 that it isn't a problem. See also check_final_value below. */
4429 if (v->replaceable)
4430 for (p = insn;
4431 INSN_UID (p) >= max_uid_for_loop
4432 || INSN_LUID (p) < uid_luid[regno_last_uid[REGNO (dest_reg)]];
4433 p = NEXT_INSN (p))
4435 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4436 && LABEL_NAME (JUMP_LABEL (p))
4437 && ((INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start)
4438 && (INSN_LUID (JUMP_LABEL (p))
4439 < uid_luid[regno_first_uid[REGNO (dest_reg)]]))
4440 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end)
4441 && (INSN_LUID (JUMP_LABEL (p))
4442 > uid_luid[regno_last_uid[REGNO (dest_reg)]]))))
4444 v->replaceable = 0;
4445 v->not_replaceable = 1;
4447 if (loop_dump_stream)
4448 fprintf (loop_dump_stream,
4449 "Found branch outside giv lifetime.\n");
4451 break;
4455 else
4457 /* May still be replaceable, we don't have enough info here to
4458 decide. */
4459 v->replaceable = 0;
4460 v->not_replaceable = 0;
4464 if (loop_dump_stream)
4466 if (type == DEST_REG)
4467 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
4468 INSN_UID (insn), REGNO (dest_reg));
4469 else
4470 fprintf (loop_dump_stream, "Insn %d: dest address",
4471 INSN_UID (insn));
4473 fprintf (loop_dump_stream, " src reg %d benefit %d",
4474 REGNO (src_reg), v->benefit);
4475 fprintf (loop_dump_stream, " used %d lifetime %d",
4476 v->times_used, v->lifetime);
4478 if (v->replaceable)
4479 fprintf (loop_dump_stream, " replaceable");
4481 if (GET_CODE (mult_val) == CONST_INT)
4482 fprintf (loop_dump_stream, " mult %d",
4483 INTVAL (mult_val));
4484 else
4486 fprintf (loop_dump_stream, " mult ");
4487 print_rtl (loop_dump_stream, mult_val);
4490 if (GET_CODE (add_val) == CONST_INT)
4491 fprintf (loop_dump_stream, " add %d",
4492 INTVAL (add_val));
4493 else
4495 fprintf (loop_dump_stream, " add ");
4496 print_rtl (loop_dump_stream, add_val);
4500 if (loop_dump_stream)
4501 fprintf (loop_dump_stream, "\n");
4506 /* All this does is determine whether a giv can be made replaceable because
4507 its final value can be calculated. This code cannot be part of record_giv
4508 above, because final_giv_value requires that the number of loop iterations
4509 be known, and that cannot be accurately calculated until after all givs
4510 have been identified. */
4512 static void
4513 check_final_value (v, loop_start, loop_end)
4514 struct induction *v;
4515 rtx loop_start, loop_end;
4517 struct iv_class *bl;
4518 rtx final_value = 0;
4520 bl = reg_biv_class[REGNO (v->src_reg)];
4522 /* DEST_ADDR givs will never reach here, because they are always marked
4523 replaceable above in record_giv. */
4525 /* The giv can be replaced outright by the reduced register only if all
4526 of the following conditions are true:
4527 - the insn that sets the giv is always executed on any iteration
4528 on which the giv is used at all
4529 (there are two ways to deduce this:
4530 either the insn is executed on every iteration,
4531 or all uses follow that insn in the same basic block),
4532 - its final value can be calculated (this condition is different
4533 from the one above in record_giv)
4534 - no assignments to the biv occur during the giv's lifetime. */
4536 #if 0
4537 /* This is only called now when replaceable is known to be false. */
4538 /* Clear replaceable, so that it won't confuse final_giv_value. */
4539 v->replaceable = 0;
4540 #endif
4542 if ((final_value = final_giv_value (v, loop_start, loop_end))
4543 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
4545 int biv_increment_seen = 0;
4546 rtx p = v->insn;
4547 rtx last_giv_use;
4549 v->replaceable = 1;
4551 /* When trying to determine whether or not a biv increment occurs
4552 during the lifetime of the giv, we can ignore uses of the variable
4553 outside the loop because final_value is true. Hence we cannot
4554 use regno_last_uid and regno_first_uid as above in record_giv. */
4556 /* Search the loop to determine whether any assignments to the
4557 biv occur during the giv's lifetime. Start with the insn
4558 that sets the giv, and search around the loop until we come
4559 back to that insn again.
4561 Also fail if there is a jump within the giv's lifetime that jumps
4562 to somewhere outside the lifetime but still within the loop. This
4563 catches spaghetti code where the execution order is not linear, and
4564 hence the above test fails. Here we assume that the giv lifetime
4565 does not extend from one iteration of the loop to the next, so as
4566 to make the test easier. Since the lifetime isn't known yet,
4567 this requires two loops. See also record_giv above. */
4569 last_giv_use = v->insn;
4571 while (1)
4573 p = NEXT_INSN (p);
4574 if (p == loop_end)
4575 p = NEXT_INSN (loop_start);
4576 if (p == v->insn)
4577 break;
4579 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4580 || GET_CODE (p) == CALL_INSN)
4582 if (biv_increment_seen)
4584 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4586 v->replaceable = 0;
4587 v->not_replaceable = 1;
4588 break;
4591 else if (GET_CODE (PATTERN (p)) == SET
4592 && SET_DEST (PATTERN (p)) == v->src_reg)
4593 biv_increment_seen = 1;
4594 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
4595 last_giv_use = p;
4599 /* Now that the lifetime of the giv is known, check for branches
4600 from within the lifetime to outside the lifetime if it is still
4601 replaceable. */
4603 if (v->replaceable)
4605 p = v->insn;
4606 while (1)
4608 p = NEXT_INSN (p);
4609 if (p == loop_end)
4610 p = NEXT_INSN (loop_start);
4611 if (p == last_giv_use)
4612 break;
4614 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
4615 && LABEL_NAME (JUMP_LABEL (p))
4616 && ((INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
4617 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
4618 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
4619 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
4621 v->replaceable = 0;
4622 v->not_replaceable = 1;
4624 if (loop_dump_stream)
4625 fprintf (loop_dump_stream,
4626 "Found branch outside giv lifetime.\n");
4628 break;
4633 /* If it is replaceable, then save the final value. */
4634 if (v->replaceable)
4635 v->final_value = final_value;
4638 if (loop_dump_stream && v->replaceable)
4639 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
4640 INSN_UID (v->insn), REGNO (v->dest_reg));
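/* For example, if the loop is known to run exactly 100 times and a giv
   satisfies j == 4*i, with biv i starting at 0 and stepping by 1, the
   final value of j is 4*100 == 400; a use of j after the loop can then
   be fed by a single j = 400 emitted outside the loop, making the giv
   replaceable inside it. (Illustrative numbers.) */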
4643 /* Update the status of whether a giv can derive other givs.
4645 We need to do something special if there is or may be an update to the biv
4646 between the time the giv is defined and the time it is used to derive
4647 another giv.
4649 In addition, a giv that is only conditionally set is not allowed to
4650 derive another giv once a label has been passed.
4652 The cases we look at are when a label or an update to a biv is passed. */
4654 static void
4655 update_giv_derive (p)
4656 rtx p;
4658 struct iv_class *bl;
4659 struct induction *biv, *giv;
4660 rtx tem;
4661 int dummy;
4663 /* Search all IV classes, then all bivs, and finally all givs.
4665 There are three cases we are concerned with. First we have the situation
4666 of a giv that is only updated conditionally. In that case, it may not
4667 derive any givs after a label is passed.
4669 The second case is when a biv update occurs, or may occur, after the
4670 definition of a giv. For certain biv updates (see below) that are
4671 known to occur between the giv definition and use, we can adjust the
4672 giv definition. For others, or when the biv update is conditional,
4673 we must prevent the giv from deriving any other givs. There are two
4674 sub-cases within this case.
4676 If this is a label, we are concerned with any biv update that is done
4677 conditionally, since it may be done after the giv is defined followed by
4678 a branch here (actually, we need to pass both a jump and a label, but
4679 this extra tracking doesn't seem worth it).
4681 If this is a jump, we are concerned about any biv update that may be
4682 executed multiple times. We are actually only concerned about
4683 backward jumps, but it is probably not worth performing the test
4684 on the jump again here.
4686 If this is a biv update, we must adjust the giv status to show that a
4687 subsequent biv update was performed. If this adjustment cannot be done,
4688 the giv cannot derive further givs. */
4690 for (bl = loop_iv_list; bl; bl = bl->next)
4691 for (biv = bl->biv; biv; biv = biv->next_iv)
4692 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
4693 || biv->insn == p)
4695 for (giv = bl->giv; giv; giv = giv->next_iv)
4697 /* If cant_derive is already true, there is no point in
4698 checking all of these conditions again. */
4699 if (giv->cant_derive)
4700 continue;
4702 /* If this giv is conditionally set and we have passed a label,
4703 it cannot derive anything. */
4704 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
4705 giv->cant_derive = 1;
4707 /* Skip givs that have mult_val == 0, since
4708 they are really invariants. Also skip those that are
4709 replaceable, since we know their lifetime doesn't contain
4710 any biv update. */
4711 else if (giv->mult_val == const0_rtx || giv->replaceable)
4712 continue;
4714 /* The only way we can allow this giv to derive another
4715 is if this is a biv increment and we can form the product
4716 of biv->add_val and giv->mult_val. In this case, we will
4717 be able to compute a compensation. */
4718 else if (biv->insn == p)
4720 tem = 0;
4722 if (biv->mult_val == const1_rtx)
4723 tem = simplify_giv_expr (gen_rtx (MULT, giv->mode,
4724 biv->add_val,
4725 giv->mult_val),
4726 &dummy);
4728 if (tem && giv->derive_adjustment)
4729 tem = simplify_giv_expr (gen_rtx (PLUS, giv->mode, tem,
4730 giv->derive_adjustment),
4731 &dummy);
4732 if (tem)
4733 giv->derive_adjustment = tem;
4734 else
4735 giv->cant_derive = 1;
4737 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
4738 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
4739 giv->cant_derive = 1;
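/* As an example of the compensation computed above: if giv g is set
   from biv b by g = b * 3, and b is then incremented by 2 before g is
   used to derive another giv, the previously computed g equals
   (b - 2) * 3 == b*3 - 6 in terms of the updated b, so a
   derive_adjustment of 2*3 == 6 is recorded and later subtracted by
   simplify_giv_expr. (Illustrative numbers.) */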
4744 /* Check whether an insn is an increment legitimate for a basic induction var.
4745 X is the source of insn P, or a part of it.
4746 MODE is the mode in which X should be interpreted.
4748 DEST_REG is the putative biv, also the destination of the insn.
4749 We accept patterns of these forms:
4750 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
4751 REG = INVARIANT + REG
4753 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
4754 and store the additive term into *INC_VAL.
4756 If X is an assignment of an invariant into DEST_REG, we set
4757 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
4759 We also want to detect a BIV when it corresponds to a variable
4760 whose mode was promoted via PROMOTED_MODE. In that case, an increment
4761 of the variable may be a PLUS that adds a SUBREG of that variable to
4762 an invariant and then sign- or zero-extends the result of the PLUS
4763 into the variable.
4765 Most GIVs in such cases will be in the promoted mode, since that is
4766 probably the natural computation mode (and almost certainly the mode
4767 used for addresses) on the machine. So we view the pseudo-reg containing
4768 the variable as the BIV, as if it were simply incremented.
4770 Note that treating the entire pseudo as a BIV will result in making
4771 simple increments to any GIVs based on it. However, if the variable
4772 overflows in its declared mode but not its promoted mode, the result will
4773 be incorrect. This is acceptable if the variable is signed, since
4774 overflows in such cases are undefined, but not if it is unsigned, since
4775 those overflows are defined. So we only check for SIGN_EXTEND and
4776 not ZERO_EXTEND.
4778 If we cannot find a biv, we return 0. */
4780 static int
4781 basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
4782 register rtx x;
4783 enum machine_mode mode;
4784 rtx p;
4785 rtx dest_reg;
4786 rtx *inc_val;
4787 rtx *mult_val;
4789 register enum rtx_code code;
4790 rtx arg;
4791 rtx insn, set = 0;
4793 code = GET_CODE (x);
4794 switch (code)
4796 case PLUS:
4797 if (XEXP (x, 0) == dest_reg
4798 || (GET_CODE (XEXP (x, 0)) == SUBREG
4799 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
4800 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
4801 arg = XEXP (x, 1);
4802 else if (XEXP (x, 1) == dest_reg
4803 || (GET_CODE (XEXP (x, 1)) == SUBREG
4804 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
4805 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
4806 arg = XEXP (x, 0);
4807 else
4808 return 0;
4810 if (invariant_p (arg) != 1)
4811 return 0;
4813 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
4814 *mult_val = const1_rtx;
4815 return 1;
4817 case SUBREG:
4818 /* If this is a SUBREG for a promoted variable, check the inner
4819 value. */
4820 if (SUBREG_PROMOTED_VAR_P (x))
4821 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
4822 dest_reg, p, inc_val, mult_val);
4824 case REG:
4825 /* If this register is assigned in the previous insn, look at its
4826 source, but don't go outside the loop or past a label. */
4828 for (insn = PREV_INSN (p);
4829 (insn && GET_CODE (insn) == NOTE
4830 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
4831 insn = PREV_INSN (insn))
4834 if (insn)
4835 set = single_set (insn);
4837 if (set != 0
4838 && (SET_DEST (set) == x
4839 || (GET_CODE (SET_DEST (set)) == SUBREG
4840 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
4841 <= UNITS_PER_WORD)
4842 && SUBREG_REG (SET_DEST (set)) == x)))
4843 return basic_induction_var (SET_SRC (set),
4844 (GET_MODE (SET_SRC (set)) == VOIDmode
4845 ? GET_MODE (x)
4846 : GET_MODE (SET_SRC (set))),
4847 dest_reg, insn,
4848 inc_val, mult_val);
4849 /* ... fall through ... */
4851 /* Can accept constant setting of biv only when inside innermost loop.
4852 Otherwise, a biv of an inner loop may be incorrectly recognized
4853 as a biv of the outer loop,
4854 causing code to be moved INTO the inner loop. */
4855 case MEM:
4856 if (invariant_p (x) != 1)
4857 return 0;
4858 case CONST_INT:
4859 case SYMBOL_REF:
4860 case CONST:
4861 if (loops_enclosed == 1)
4863 /* Possible bug here? Perhaps we don't know the mode of X. */
4864 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
4865 *mult_val = const0_rtx;
4866 return 1;
4868 else
4869 return 0;
4871 case SIGN_EXTEND:
4872 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
4873 dest_reg, p, inc_val, mult_val);
4874 case ASHIFTRT:
4875 /* Similar, since this can be a sign extension. */
4876 for (insn = PREV_INSN (p);
4877 (insn && GET_CODE (insn) == NOTE
4878 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
4879 insn = PREV_INSN (insn))
4882 if (insn)
4883 set = single_set (insn);
4885 if (set && SET_DEST (set) == XEXP (x, 0)
4886 && GET_CODE (XEXP (x, 1)) == CONST_INT
4887 && INTVAL (XEXP (x, 1)) >= 0
4888 && GET_CODE (SET_SRC (set)) == ASHIFT
4889 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
4890 return basic_induction_var (XEXP (SET_SRC (set), 0),
4891 GET_MODE (XEXP (x, 0)),
4892 dest_reg, insn, inc_val, mult_val);
4893 return 0;
4895 default:
4896 return 0;
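/* Source-level sketches of what this function accepts and rejects
   (illustrative; invar stands for any expression invariant_p accepts):

	i = i + 4;		returns 1, *mult_val = 1, *inc_val = 4
	i = invar + i;		returns 1, *mult_val = 1, *inc_val = invar
	i = 10;			returns 1, *mult_val = 0, *inc_val = 10
				(innermost loop only)
	i = i * 2;		returns 0 -- not a biv  */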
4900 /* A general induction variable (giv) is any quantity that is a linear
4901 function of a basic induction variable,
4902 i.e. giv = biv * mult_val + add_val.
4903 The coefficients can be any loop invariant quantity.
4904 A giv need not be computed directly from the biv;
4905 it can be computed by way of other givs. */
4907 /* Determine whether X computes a giv.
4908 If it does, return a nonzero value
4909 which is the benefit from eliminating the computation of X;
4910 set *SRC_REG to the register of the biv that it is computed from;
4911 set *ADD_VAL and *MULT_VAL to the coefficients,
4912 such that the value of X is biv * mult + add; */
4914 static int
4915 general_induction_var (x, src_reg, add_val, mult_val)
4916 rtx x;
4917 rtx *src_reg;
4918 rtx *add_val;
4919 rtx *mult_val;
4921 rtx orig_x = x;
4922 int benefit = 0;
4923 char *storage;
4925 /* If this is an invariant, forget it, it isn't a giv. */
4926 if (invariant_p (x) == 1)
4927 return 0;
4929 /* See if the expression could be a giv and get its form.
4930 Mark our place on the obstack in case we don't find a giv. */
4931 storage = (char *) oballoc (0);
4932 x = simplify_giv_expr (x, &benefit);
4933 if (x == 0)
4935 obfree (storage);
4936 return 0;
4939 switch (GET_CODE (x))
4941 case USE:
4942 case CONST_INT:
4943 /* Since this is now an invariant and wasn't before, it must be a giv
4944 with MULT_VAL == 0. It doesn't matter which BIV we associate this
4945 with. */
4946 *src_reg = loop_iv_list->biv->dest_reg;
4947 *mult_val = const0_rtx;
4948 *add_val = x;
4949 break;
4951 case REG:
4952 /* This is equivalent to a BIV. */
4953 *src_reg = x;
4954 *mult_val = const1_rtx;
4955 *add_val = const0_rtx;
4956 break;
4958 case PLUS:
4959 /* Either (plus (biv) (invar)) or
4960 (plus (mult (biv) (invar_1)) (invar_2)). */
4961 if (GET_CODE (XEXP (x, 0)) == MULT)
4963 *src_reg = XEXP (XEXP (x, 0), 0);
4964 *mult_val = XEXP (XEXP (x, 0), 1);
4966 else
4968 *src_reg = XEXP (x, 0);
4969 *mult_val = const1_rtx;
4971 *add_val = XEXP (x, 1);
4972 break;
4974 case MULT:
4975 /* ADD_VAL is zero. */
4976 *src_reg = XEXP (x, 0);
4977 *mult_val = XEXP (x, 1);
4978 *add_val = const0_rtx;
4979 break;
4981 default:
4982 abort ();
4985 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
4986 one unless they are CONST_INT). */
4987 if (GET_CODE (*add_val) == USE)
4988 *add_val = XEXP (*add_val, 0);
4989 if (GET_CODE (*mult_val) == USE)
4990 *mult_val = XEXP (*mult_val, 0);
4992 benefit += rtx_cost (orig_x, SET);
4994 /* Always return some benefit if this is a giv so it will be detected
4995 as such. This allows elimination of bivs that might otherwise
4996 not be eliminated. */
4997 return benefit == 0 ? 1 : benefit;
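/* For example, given biv i, the expression (i * 4) + 32 is recognized
   as a giv with *src_reg == i, *mult_val == 4 and *add_val == 32,
   whereas i * j, with j set inside the loop (hence not invariant), is
   rejected. (Illustrative.) */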
5000 /* Given an expression, X, try to form it as a linear function of a biv.
5001 We will canonicalize it to be of the form
5002 (plus (mult (BIV) (invar_1))
5003 (invar_2))
5004 with possible degeneracies.
5006 The invariant expressions must each be of a form that can be used as a
5007 machine operand. We surround them with a USE rtx (a hack, but localized
5008 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5009 routine; it is the caller's responsibility to strip them.
5011 If no such canonicalization is possible (i.e., two biv's are used or an
5012 expression that is neither invariant nor a biv or giv), this routine
5013 returns 0.
5015 For a non-zero return, the result will have a code of CONST_INT, USE,
5016 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5018 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
5020 static rtx
5021 simplify_giv_expr (x, benefit)
5022 rtx x;
5023 int *benefit;
5025 enum machine_mode mode = GET_MODE (x);
5026 rtx arg0, arg1;
5027 rtx tem;
5029 /* If this is not an integer mode, or if we cannot do arithmetic in this
5030 mode, this can't be a giv. */
5031 if (mode != VOIDmode
5032 && (GET_MODE_CLASS (mode) != MODE_INT
5033 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5034 return 0;
5036 switch (GET_CODE (x))
5038 case PLUS:
5039 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5040 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5041 if (arg0 == 0 || arg1 == 0)
5042 return 0;
5044 /* Put constant last, CONST_INT last if both constant. */
5045 if ((GET_CODE (arg0) == USE
5046 || GET_CODE (arg0) == CONST_INT)
5047 && GET_CODE (arg1) != CONST_INT)
5048 tem = arg0, arg0 = arg1, arg1 = tem;
5050 /* Handle addition of zero, then addition of an invariant. */
5051 if (arg1 == const0_rtx)
5052 return arg0;
5053 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5054 switch (GET_CODE (arg0))
5056 case CONST_INT:
5057 case USE:
5058 /* Both invariant. Only valid if sum is machine operand.
5059 First strip off possible USE on first operand. */
5060 if (GET_CODE (arg0) == USE)
5061 arg0 = XEXP (arg0, 0);
5063 tem = 0;
5064 if (CONSTANT_P (arg0) && GET_CODE (arg1) == CONST_INT)
5066 tem = plus_constant (arg0, INTVAL (arg1));
5067 if (GET_CODE (tem) != CONST_INT)
5068 tem = gen_rtx (USE, mode, tem);
5071 return tem;
5073 case REG:
5074 case MULT:
5075 /* biv + invar or mult + invar. Return sum. */
5076 return gen_rtx (PLUS, mode, arg0, arg1);
5078 case PLUS:
5079 /* (a + invar_1) + invar_2. Associate. */
5080 return simplify_giv_expr (gen_rtx (PLUS, mode,
5081 XEXP (arg0, 0),
5082 gen_rtx (PLUS, mode,
5083 XEXP (arg0, 1), arg1)),
5084 benefit);
5086 default:
5087 abort ();
5090 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5091 MULT to reduce cases. */
5092 if (GET_CODE (arg0) == REG)
5093 arg0 = gen_rtx (MULT, mode, arg0, const1_rtx);
5094 if (GET_CODE (arg1) == REG)
5095 arg1 = gen_rtx (MULT, mode, arg1, const1_rtx);
5097 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5098 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5099 Recurse to associate the second PLUS. */
5100 if (GET_CODE (arg1) == MULT)
5101 tem = arg0, arg0 = arg1, arg1 = tem;
5103 if (GET_CODE (arg1) == PLUS)
5104 return simplify_giv_expr (gen_rtx (PLUS, mode,
5105 gen_rtx (PLUS, mode,
5106 arg0, XEXP (arg1, 0)),
5107 XEXP (arg1, 1)),
5108 benefit);
5110 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5111 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5112 abort ();
5114 if (XEXP (arg0, 0) != XEXP (arg1, 0))
5115 return 0;
5117 return simplify_giv_expr (gen_rtx (MULT, mode,
5118 XEXP (arg0, 0),
5119 gen_rtx (PLUS, mode,
5120 XEXP (arg0, 1),
5121 XEXP (arg1, 1))),
5122 benefit);
5124 case MINUS:
5125 /* Handle "a - b" as "a + b * (-1)". */
5126 return simplify_giv_expr (gen_rtx (PLUS, mode,
5127 XEXP (x, 0),
5128 gen_rtx (MULT, mode,
5129 XEXP (x, 1), constm1_rtx)),
5130 benefit);
5132 case MULT:
5133 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5134 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5135 if (arg0 == 0 || arg1 == 0)
5136 return 0;
5138 /* Put constant last, CONST_INT last if both constant. */
5139 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5140 && GET_CODE (arg1) != CONST_INT)
5141 tem = arg0, arg0 = arg1, arg1 = tem;
5143 /* If second argument is not now constant, not giv. */
5144 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5145 return 0;
5147 /* Handle multiply by 0 or 1. */
5148 if (arg1 == const0_rtx)
5149 return const0_rtx;
5151 else if (arg1 == const1_rtx)
5152 return arg0;
5154 switch (GET_CODE (arg0))
5156 case REG:
5157 /* biv * invar. Done. */
5158 return gen_rtx (MULT, mode, arg0, arg1);
5160 case CONST_INT:
5161 /* Product of two constants. */
5162 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5164 case USE:
5165 /* invar * invar. Not giv. */
5166 return 0;
5168 case MULT:
5169 /* (a * invar_1) * invar_2. Associate. */
5170 return simplify_giv_expr (gen_rtx (MULT, mode,
5171 XEXP (arg0, 0),
5172 gen_rtx (MULT, mode,
5173 XEXP (arg0, 1), arg1)),
5174 benefit);
5176 case PLUS:
5177 /* (a + invar_1) * invar_2. Distribute. */
5178 return simplify_giv_expr (gen_rtx (PLUS, mode,
5179 gen_rtx (MULT, mode,
5180 XEXP (arg0, 0), arg1),
5181 gen_rtx (MULT, mode,
5182 XEXP (arg0, 1), arg1)),
5183 benefit);
5185 default:
5186 abort ();
5189 case ASHIFT:
5190 /* Shift by constant is multiply by power of two. */
5191 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5192 return 0;
5194 return simplify_giv_expr (gen_rtx (MULT, mode,
5195 XEXP (x, 0),
5196 GEN_INT ((HOST_WIDE_INT) 1
5197 << INTVAL (XEXP (x, 1)))),
5198 benefit);
5200 case NEG:
5201 /* "-a" is "a * (-1)" */
5202 return simplify_giv_expr (gen_rtx (MULT, mode, XEXP (x, 0), constm1_rtx),
5203 benefit);
5205 case NOT:
5206 /* "~a" is "-a - 1". Silly, but easy. */
5207 return simplify_giv_expr (gen_rtx (MINUS, mode,
5208 gen_rtx (NEG, mode, XEXP (x, 0)),
5209 const1_rtx),
5210 benefit);
5212 case USE:
5213 /* Already in proper form for invariant. */
5214 return x;
5216 case REG:
5217 /* If this is a new register, we can't deal with it. */
5218 if (REGNO (x) >= max_reg_before_loop)
5219 return 0;
5221 /* Check for biv or giv. */
5222 switch (reg_iv_type[REGNO (x)])
5224 case BASIC_INDUCT:
5225 return x;
5226 case GENERAL_INDUCT:
5228 struct induction *v = reg_iv_info[REGNO (x)];
5230 /* Form expression from giv and add benefit. Ensure this giv
5231 can derive another and subtract any needed adjustment if so. */
5232 *benefit += v->benefit;
5233 if (v->cant_derive)
5234 return 0;
5236 tem = gen_rtx (PLUS, mode, gen_rtx (MULT, mode,
5237 v->src_reg, v->mult_val),
5238 v->add_val);
5239 if (v->derive_adjustment)
5240 tem = gen_rtx (MINUS, mode, tem, v->derive_adjustment);
5241 return simplify_giv_expr (tem, benefit);
5245 /* Fall through to general case. */
5246 default:
5247 /* If invariant, return as USE (unless CONST_INT).
5248 Otherwise, not giv. */
5249 if (GET_CODE (x) == USE)
5250 x = XEXP (x, 0);
5252 if (invariant_p (x) == 1)
5254 if (GET_CODE (x) == CONST_INT)
5255 return x;
5256 else
5257 return gen_rtx (USE, mode, x);
5259 else
5260 return 0;
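/* A sample canonicalization (illustrative): with biv i,

	(i - 1) * 4
     => (plus (mult i 4) -4)

   The MINUS becomes a PLUS of a MULT by -1, the product distributes
   over the sum, and the constants fold, yielding the canonical
   (plus (mult BIV invar_1) invar_2) form described above. */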
5264 /* Help detect a giv that is calculated by several consecutive insns;
5265 for example,
5266 giv = biv * M
5267 giv = giv + A
5268 The caller has already identified the first insn P as having a giv as dest;
5269 we check that all other insns that set the same register follow
5270 immediately after P, that they alter nothing else,
5271 and that the result of the last is still a giv.
5273 The value is 0 if the reg set in P is not really a giv.
5274 Otherwise, the value is the amount gained by eliminating
5275 all the consecutive insns that compute the value.
5277 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
5278 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
5280 The coefficients of the ultimate giv value are stored in
5281 *MULT_VAL and *ADD_VAL. */
5283 static int
5284 consec_sets_giv (first_benefit, p, src_reg, dest_reg,
5285 add_val, mult_val)
5286 int first_benefit;
5287 rtx p;
5288 rtx src_reg;
5289 rtx dest_reg;
5290 rtx *add_val;
5291 rtx *mult_val;
5293 int count;
5294 enum rtx_code code;
5295 int benefit;
5296 rtx temp;
5297 rtx set;
5299 /* Indicate that this is a giv so that we can update the value produced in
5300 each insn of the multi-insn sequence.
5302 This induction structure will be used only by the call to
5303 general_induction_var below, so we can allocate it on our stack.
5304 If this is a giv, our caller will replace the induct var entry with
5305 a new induction structure. */
5306 struct induction *v
5307 = (struct induction *) alloca (sizeof (struct induction));
5308 v->src_reg = src_reg;
5309 v->mult_val = *mult_val;
5310 v->add_val = *add_val;
5311 v->benefit = first_benefit;
5312 v->cant_derive = 0;
5313 v->derive_adjustment = 0;
5315 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
5316 reg_iv_info[REGNO (dest_reg)] = v;
5318 count = n_times_set[REGNO (dest_reg)] - 1;
5320 while (count > 0)
5322 p = NEXT_INSN (p);
5323 code = GET_CODE (p);
5325 /* If libcall, skip to end of call sequence. */
5326 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
5327 p = XEXP (temp, 0);
5329 if (code == INSN
5330 && (set = single_set (p))
5331 && GET_CODE (SET_DEST (set)) == REG
5332 && SET_DEST (set) == dest_reg
5333 && ((benefit = general_induction_var (SET_SRC (set), &src_reg,
5334 add_val, mult_val))
5335 /* Giv created by equivalent expression. */
5336 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
5337 && (benefit = general_induction_var (XEXP (temp, 0), &src_reg,
5338 add_val, mult_val))))
5339 && src_reg == v->src_reg)
5341 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5342 benefit += libcall_benefit (p);
5344 count--;
5345 v->mult_val = *mult_val;
5346 v->add_val = *add_val;
5347 v->benefit = benefit;
5349 else if (code != NOTE)
5351 /* Allow insns that set something other than this giv to a
5352 constant. Such insns are needed on machines which cannot
5353 include long constants and should not disqualify a giv. */
5354 if (code == INSN
5355 && (set = single_set (p))
5356 && SET_DEST (set) != dest_reg
5357 && CONSTANT_P (SET_SRC (set)))
5358 continue;
5360 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
5361 return 0;
5365 return v->benefit;
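/* For example, the consecutive pair of insns

	t = i << 2;
	t = t + base;

   is recognized as the single giv t == i*4 + base, provided nothing
   else sets t and the intervening insns are only notes or constant
   loads into other registers. (Illustrative; base stands for a loop
   invariant.) */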
5368 /* Return an rtx, if any, that expresses giv G2 as a function of the register
5369 represented by G1. If no such expression can be found, or it is clear that
5370 it cannot possibly be a valid address, 0 is returned.
5372 To perform the computation, we note that
5373 G1 = a * v + b and
5374 G2 = c * v + d
5375 where `v' is the biv.
5377 So G2 = (c/a) * G1 + (d - b*c/a) */
5379 #ifdef ADDRESS_COST
5380 static rtx
5381 express_from (g1, g2)
5382 struct induction *g1, *g2;
5384 rtx mult, add;
5386 /* The value that G1 will be multiplied by must be a constant integer. Also,
5387 the only chance we have of getting a valid address is if b*c/a (see above
5388 for notation) is also an integer. */
5389 if (GET_CODE (g1->mult_val) != CONST_INT
5390 || GET_CODE (g2->mult_val) != CONST_INT
5391 || GET_CODE (g1->add_val) != CONST_INT
5392 || g1->mult_val == const0_rtx
5393 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
5394 return 0;
5396 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
5397 add = plus_constant (g2->add_val, - INTVAL (g1->add_val) * INTVAL (mult));
5399 /* Form simplified final result. */
5400 if (mult == const0_rtx)
5401 return add;
5402 else if (mult == const1_rtx)
5403 mult = g1->dest_reg;
5404 else
5405 mult = gen_rtx (MULT, g2->mode, g1->dest_reg, mult);
5407 if (add == const0_rtx)
5408 return mult;
5409 else
5410 return gen_rtx (PLUS, g2->mode, mult, add);
5412 #endif
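/* A worked instance of the formula above (invented numbers): with
   G1 = 2*v + 1 and G2 = 6*v + 5, we have c/a == 3 and
   d - b*c/a == 5 - 1*3 == 2, so express_from returns
   (plus (mult G1 3) 2). */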
5414 /* Return 1 if giv G2 can be combined with G1. This means that G2 can use
5415 (either directly or via an address expression) a register used to represent
5416 G1. Set g2->new_reg to a representation of G1 (normally just
5417 g1->dest_reg). */
5419 static int
5420 combine_givs_p (g1, g2)
5421 struct induction *g1, *g2;
5423 rtx tem;
5425 /* If these givs are identical, they can be combined. */
5426 if (rtx_equal_p (g1->mult_val, g2->mult_val)
5427 && rtx_equal_p (g1->add_val, g2->add_val))
5429 g2->new_reg = g1->dest_reg;
5430 return 1;
5433 #ifdef ADDRESS_COST
5434 /* If G2 can be expressed as a function of G1 and that function is valid
5435 as an address and no more expensive than using a register for G2,
5436 the expression of G2 in terms of G1 can be used. */
5437 if (g2->giv_type == DEST_ADDR
5438 && (tem = express_from (g1, g2)) != 0
5439 && memory_address_p (g2->mem_mode, tem)
5440 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location))
5442 g2->new_reg = tem;
5443 return 1;
5445 #endif
5447 return 0;
5450 /* Check all pairs of givs for iv_class BL and see if any can be combined with
5451 any other. If so, point SAME to the giv combined with and set NEW_REG to
5452 be an expression (in terms of the other giv's DEST_REG) equivalent to the
5453 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
5455 static void
5456 combine_givs (bl)
5457 struct iv_class *bl;
5459 struct induction *g1, *g2;
5460 int pass;
5462 for (g1 = bl->giv; g1; g1 = g1->next_iv)
5463 for (pass = 0; pass <= 1; pass++)
5464 for (g2 = bl->giv; g2; g2 = g2->next_iv)
5465 if (g1 != g2
5466 /* First try to combine with replaceable givs, then all givs. */
5467 && (g1->replaceable || pass == 1)
5468 /* If either has already been combined or is to be ignored, can't
5469 combine. */
5470 && ! g1->ignore && ! g2->ignore && ! g1->same && ! g2->same
5471 /* If something has been based on G2, G2 cannot itself be based
5472 on something else. */
5473 && ! g2->combined_with
5474 && combine_givs_p (g1, g2))
5476 /* g2->new_reg set by `combine_givs_p' */
5477 g2->same = g1;
5478 g1->combined_with = 1;
5479 g1->benefit += g2->benefit;
5480 /* ??? The new final_[bg]iv_value code does a much better job
5481 of finding replaceable giv's, and hence this code may no
5482 longer be necessary. */
5483 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
5484 g1->benefit -= copy_cost;
5485 g1->lifetime += g2->lifetime;
5486 g1->times_used += g2->times_used;
5488 if (loop_dump_stream)
5489 fprintf (loop_dump_stream, "giv at %d combined with giv at %d\n",
5490 INSN_UID (g2->insn), INSN_UID (g1->insn));
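/* Editor's note (illustrative, not from the original source): as a
   concrete case, two givs that both compute 4*i + 8 pass the
   rtx_equal_p test above and are combined outright; on a target with
   ADDRESS_COST, a DEST_ADDR giv computing 4*i + 12 can instead be
   rewritten by express_from as G1 + 4 when G1 computes 4*i + 8,
   provided that form is a valid and no-more-expensive address.  */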
5494 /* Emit code before INSERT_BEFORE to set REG = B * M + A. */
5496 void
5497 emit_iv_add_mult (b, m, a, reg, insert_before)
5498 rtx b; /* initial value of basic induction variable */
5499 rtx m; /* multiplicative constant */
5500 rtx a; /* additive constant */
5501 rtx reg; /* destination register */
5502 rtx insert_before;
5504 rtx seq;
5505 rtx result;
5507 /* Prevent unexpected sharing of these rtx. */
5508 a = copy_rtx (a);
5509 b = copy_rtx (b);
5511 /* Increase the lifetime of any invariants moved further in code. */
5512 update_reg_last_use (a, insert_before);
5513 update_reg_last_use (b, insert_before);
5514 update_reg_last_use (m, insert_before);
5516 start_sequence ();
5517 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
5518 if (reg != result)
5519 emit_move_insn (reg, result);
5520 seq = gen_sequence ();
5521 end_sequence ();
5523 emit_insn_before (seq, insert_before);
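/* Editor's sketch of a typical call (operand names are invented, not
   from the original source): to materialize a reduced giv
   g = b0 * 4 + 12 ahead of the loop, one might write

     emit_iv_add_mult (b0_reg, GEN_INT (4), GEN_INT (12),
                       g_reg, loop_start);

   which emits, just before LOOP_START, a (possibly multi-insn)
   sequence computing g_reg = b0_reg * 4 + 12.  */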
5526 /* Test whether A * B can be computed without
5527 an actual multiply insn. Value is 1 if so. */
5529 static int
5530 product_cheap_p (a, b)
5531 rtx a;
5532 rtx b;
5534 int i;
5535 rtx tmp;
5536 struct obstack *old_rtl_obstack = rtl_obstack;
5537 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
5538 int win = 1;
5540 /* If only one is constant, make it B. */
5541 if (GET_CODE (a) == CONST_INT)
5542 tmp = a, a = b, b = tmp;
5544 /* If the first is constant, both are constant, so we don't need a multiply. */
5545 if (GET_CODE (a) == CONST_INT)
5546 return 1;
5548 /* If the second is not constant, neither is constant, so we would need a multiply. */
5549 if (GET_CODE (b) != CONST_INT)
5550 return 0;
5552 /* One operand is constant, so we might not need a multiply insn. Generate
5553 the code for the multiply and see if a call, a multiply, or a long
5554 sequence of insns is generated. */
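/* Editor's note (illustrative): e.g. x * 8 usually expands to a single
   shift and is judged cheap here, while anything that expands to more
   than three insns, to a libcall, or to an actual MULT insn makes the
   product "not cheap".  */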
5556 rtl_obstack = &temp_obstack;
5557 start_sequence ();
5558 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
5559 tmp = gen_sequence ();
5560 end_sequence ();
5562 if (GET_CODE (tmp) == SEQUENCE)
5564 if (XVEC (tmp, 0) == 0)
5565 win = 1;
5566 else if (XVECLEN (tmp, 0) > 3)
5567 win = 0;
5568 else
5569 for (i = 0; i < XVECLEN (tmp, 0); i++)
5571 rtx insn = XVECEXP (tmp, 0, i);
5573 if (GET_CODE (insn) != INSN
5574 || (GET_CODE (PATTERN (insn)) == SET
5575 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
5576 || (GET_CODE (PATTERN (insn)) == PARALLEL
5577 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
5578 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
5580 win = 0;
5581 break;
5585 else if (GET_CODE (tmp) == SET
5586 && GET_CODE (SET_SRC (tmp)) == MULT)
5587 win = 0;
5588 else if (GET_CODE (tmp) == PARALLEL
5589 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
5590 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
5591 win = 0;
5593 /* Free any storage we obtained in generating this multiply and restore rtl
5594 allocation to its normal obstack. */
5595 obstack_free (&temp_obstack, storage);
5596 rtl_obstack = old_rtl_obstack;
5598 return win;
5601 /* Check to see if the loop can be terminated by a "decrement and
5602 branch until zero" instruction. If so, add a REG_NONNEG note to the
5603 branch insn. Also try reversing an increment loop to a decrement loop
5604 to see if the optimization can be performed.
5605 Value is nonzero if the optimization was performed. */
5607 /* This is useful even if the architecture doesn't have such an insn,
5608 because it might change a loop which increments from 0 to n to a loop
5609 which decrements from n to 0. A loop that decrements to zero is usually
5610 faster than one that increments from zero. */
5612 /* ??? This could be rewritten to use some of the loop unrolling procedures,
5613 such as approx_final_value, biv_total_increment, loop_iterations, and
5614 final_[bg]iv_value. */
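/* Editor's illustration (not from the original source): at the source
   level the reversal corresponds to rewriting

     for (i = 0; i < n; i++)          for (i = n - 1; i >= 0; i--)
       body ();                  =>     body ();

   which is valid only when i is used for nothing but counting; the
   decrementing form lets a compare against zero, or a decrement-and-
   branch insn such as the m68k dbra, terminate the loop.  */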
5616 static int
5617 check_dbra_loop (loop_end, insn_count, loop_start)
5618 rtx loop_end;
5619 int insn_count;
5620 rtx loop_start;
5622 struct iv_class *bl;
5623 rtx reg;
5624 rtx jump_label;
5625 rtx final_value;
5626 rtx start_value;
5627 rtx new_add_val;
5628 rtx comparison;
5629 rtx before_comparison;
5630 rtx p;
5632 /* If last insn is a conditional branch, and the insn before tests a
5633 register value, try to optimize it. Otherwise, we can't do anything. */
5635 comparison = get_condition_for_loop (PREV_INSN (loop_end));
5636 if (comparison == 0)
5637 return 0;
5639 /* Check all of the bivs to see if the compare uses one of them.
5640 Skip biv's set more than once because we can't guarantee that
5641 it will be zero on the last iteration. Also skip if the biv is
5642 used between its update and the test insn. */
5644 for (bl = loop_iv_list; bl; bl = bl->next)
5646 if (bl->biv_count == 1
5647 && bl->biv->dest_reg == XEXP (comparison, 0)
5648 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
5649 PREV_INSN (PREV_INSN (loop_end))))
5650 break;
5653 if (! bl)
5654 return 0;
5656 /* Look for the case where the basic induction variable is always
5657 nonnegative, and equals zero on the last iteration.
5658 In this case, add a reg_note REG_NONNEG, which allows the
5659 m68k DBRA instruction to be used. */
5661 if (((GET_CODE (comparison) == GT
5662 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
5663 && INTVAL (XEXP (comparison, 1)) == -1)
5664 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
5665 && GET_CODE (bl->biv->add_val) == CONST_INT
5666 && INTVAL (bl->biv->add_val) < 0)
5668 /* The initial value must be greater than 0, and
5669 init_val % -dec_value == 0 must hold to ensure that the biv equals
5670 zero on the last iteration. */
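/* Editor's example (illustrative): a biv initialized to 12 with
   add_val == -4 takes the values 12, 8, 4, 0, so a NE-0 (or GT -1)
   exit test fails exactly when the biv reaches zero; the requirement
   12 % 4 == 0 is what guarantees 0 is hit rather than stepped over.  */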
5672 if (GET_CODE (bl->initial_value) == CONST_INT
5673 && INTVAL (bl->initial_value) > 0
5674 && (INTVAL (bl->initial_value) %
5675 (-INTVAL (bl->biv->add_val))) == 0)
5677 /* The register is always nonnegative; add a REG_NONNEG note to the branch. */
5678 REG_NOTES (PREV_INSN (loop_end))
5679 = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
5680 REG_NOTES (PREV_INSN (loop_end)));
5681 bl->nonneg = 1;
5683 return 1;
5686 /* If the decrement is 1 and the value was tested as >= 0 before
5687 the loop, then we can safely optimize. */
5688 for (p = loop_start; p; p = PREV_INSN (p))
5690 if (GET_CODE (p) == CODE_LABEL)
5691 break;
5692 if (GET_CODE (p) != JUMP_INSN)
5693 continue;
5695 before_comparison = get_condition_for_loop (p);
5696 if (before_comparison
5697 && XEXP (before_comparison, 0) == bl->biv->dest_reg
5698 && GET_CODE (before_comparison) == LT
5699 && XEXP (before_comparison, 1) == const0_rtx
5700 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
5701 && INTVAL (bl->biv->add_val) == -1)
5703 REG_NOTES (PREV_INSN (loop_end))
5704 = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
5705 REG_NOTES (PREV_INSN (loop_end)));
5706 bl->nonneg = 1;
5708 return 1;
5712 else if (num_mem_sets <= 1)
5714 /* Try to change the increment to a decrement, so we can apply the above optimization. */
5715 /* We can do this if:
5716 all registers modified are induction variables or invariant,
5717 all memory references have non-overlapping addresses
5718 (obviously true if there is only one write),
5719 and we allow 2 insns for the compare/jump at the end of the loop. */
5720 int num_nonfixed_reads = 0;
5721 /* 1 if the iteration var is used only to count iterations. */
5722 int no_use_except_counting = 0;
5723 /* 1 if the loop has no memory store, or it has a single memory store
5724 which is reversible. */
5725 int reversible_mem_store = 1;
5727 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
5728 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
5729 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
5731 if (bl->giv_count == 0
5732 && ! loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]])
5734 rtx bivreg = regno_reg_rtx[bl->regno];
5736 /* If there are no givs for this biv, and the only exit is the
5737 fall through at the end of the loop, then
5738 see if perhaps there are no uses except to count. */
5739 no_use_except_counting = 1;
5740 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
5741 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
5743 rtx set = single_set (p);
5745 if (set && GET_CODE (SET_DEST (set)) == REG
5746 && REGNO (SET_DEST (set)) == bl->regno)
5747 /* An insn that sets the biv is okay. */
5749 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
5750 || p == prev_nonnote_insn (loop_end))
5751 /* Don't bother about the end test. */
5753 else if (reg_mentioned_p (bivreg, PATTERN (p)))
5754 /* Any other use of the biv is no good. */
5756 no_use_except_counting = 0;
5757 break;
5762 /* If the loop has a single store, and the destination address is
5763 invariant, then we can't reverse the loop, because this address
5764 might then have the wrong value at loop exit.
5765 This would work if the source were invariant also; in that case,
5766 however, the insn should already have been moved out of the loop. */
5768 if (num_mem_sets == 1)
5769 reversible_mem_store
5770 = (! unknown_address_altered
5771 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
5773 /* This code only acts for innermost loops. Also it simplifies
5774 the memory address check by only reversing loops with
5775 zero or one memory access.
5776 Two memory accesses could involve parts of the same array,
5777 and that can't be reversed. */
5779 if (num_nonfixed_reads <= 1
5780 && !loop_has_call
5781 && !loop_has_volatile
5782 && reversible_mem_store
5783 && (no_use_except_counting
5784 || (bl->giv_count + bl->biv_count + num_mem_sets
5785 + num_movables + 2 == insn_count)))
5787 rtx tem;
5789 /* Loop can be reversed. */
5790 if (loop_dump_stream)
5791 fprintf (loop_dump_stream, "Can reverse loop\n");
5793 /* Now check other conditions:
5794 initial_value must be zero,
5795 final_value % add_val == 0, so that when reversed, the
5796 biv will be zero on the last iteration.
5798 This test can probably be improved since +/- 1 in the constant
5799 can be obtained by changing LT to LE and vice versa; this is
5800 confusing. */
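/* Editor's example (illustrative): for a biv running 0, 3, 6 with
   exit test "biv < 9", the code below computes start_value = 9 - 3 = 6
   and new_add_val = -3, so the reversed loop runs the body at 6, 3, 0
   (the same three iterations) and exits once the biv drops below 0.  */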
5802 if (comparison && bl->initial_value == const0_rtx
5803 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
5804 /* LE gets turned into LT */
5805 && GET_CODE (comparison) == LT
5806 && (INTVAL (XEXP (comparison, 1))
5807 % INTVAL (bl->biv->add_val)) == 0)
5809 /* The register will always be nonnegative, with value
5810 0 on the last iteration if the loop is reversed. */
5812 /* Save some info needed to produce the new insns. */
5813 reg = bl->biv->dest_reg;
5814 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
5815 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
5817 final_value = XEXP (comparison, 1);
5818 start_value = GEN_INT (INTVAL (XEXP (comparison, 1))
5819 - INTVAL (bl->biv->add_val));
5821 /* Initialize biv to start_value before loop start.
5822 The old initializing insn will be deleted as a
5823 dead store by flow.c. */
5824 emit_insn_before (gen_move_insn (reg, start_value), loop_start);
5826 /* Add insn to decrement register, and delete insn
5827 that incremented the register. */
5828 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
5829 bl->biv->insn);
5830 delete_insn (bl->biv->insn);
5832 /* Update biv info to reflect its new status. */
5833 bl->biv->insn = p;
5834 bl->initial_value = start_value;
5835 bl->biv->add_val = new_add_val;
5837 /* Increment LABEL_NUSES so that delete_insn will
5838 not delete the label. */
5839 LABEL_NUSES (XEXP (jump_label, 0)) ++;
5841 /* Emit an insn after the end of the loop to set the biv's
5842 proper exit value if it is used anywhere outside the loop. */
5843 if ((regno_last_uid[bl->regno]
5844 != INSN_UID (PREV_INSN (PREV_INSN (loop_end))))
5845 || ! bl->init_insn
5846 || regno_first_uid[bl->regno] != INSN_UID (bl->init_insn))
5847 emit_insn_after (gen_move_insn (reg, final_value),
5848 loop_end);
5850 /* Delete compare/branch at end of loop. */
5851 delete_insn (PREV_INSN (loop_end));
5852 delete_insn (PREV_INSN (loop_end));
5854 /* Add new compare/branch insn at end of loop. */
5855 start_sequence ();
5856 emit_cmp_insn (reg, const0_rtx, GE, NULL_RTX,
5857 GET_MODE (reg), 0, 0);
5858 emit_jump_insn (gen_bge (XEXP (jump_label, 0)));
5859 tem = gen_sequence ();
5860 end_sequence ();
5861 emit_jump_insn_before (tem, loop_end);
5863 for (tem = PREV_INSN (loop_end);
5864 tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem))
5866 if (tem)
5868 JUMP_LABEL (tem) = XEXP (jump_label, 0);
5870 /* Increment of LABEL_NUSES done above. */
5871 /* Register is now always nonnegative,
5872 so add REG_NONNEG note to the branch. */
5873 REG_NOTES (tem) = gen_rtx (EXPR_LIST, REG_NONNEG, NULL_RTX,
5874 REG_NOTES (tem));
5877 bl->nonneg = 1;
5879 /* Mark that this biv has been reversed. Each giv which depends
5880 on this biv, and which is also live past the end of the loop
5881 will have to be fixed up. */
5883 bl->reversed = 1;
5885 if (loop_dump_stream)
5886 fprintf (loop_dump_stream,
5887 "Reversed loop and added reg_nonneg\n");
5889 return 1;
5894 return 0;
5897 /* Verify whether the biv BL appears to be eliminable,
5898 based on the insns in the loop that refer to it.
5899 LOOP_START is the first insn of the loop, and END is the end insn.
5901 If ELIMINATE_P is non-zero, actually do the elimination.
5903 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
5904 determine whether invariant insns should be placed inside or at the
5905 start of the loop. */
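/* Editor's note (illustrative): the placement choice appears below as
   "where = threshold >= insn_count ? loop_start : p"; when the loop is
   small relative to the threshold, replacement insns are hoisted to
   LOOP_START, otherwise they are emitted just before the insn being
   rewritten.  */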
5907 static int
5908 maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
5909 struct iv_class *bl;
5910 rtx loop_start;
5911 rtx end;
5912 int eliminate_p;
5913 int threshold, insn_count;
5915 rtx reg = bl->biv->dest_reg;
5916 rtx p;
5918 /* Scan all insns in the loop, stopping if we find one that uses the
5919 biv in a way that we cannot eliminate. */
5921 for (p = loop_start; p != end; p = NEXT_INSN (p))
5923 enum rtx_code code = GET_CODE (p);
5924 rtx where = threshold >= insn_count ? loop_start : p;
5926 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
5927 && reg_mentioned_p (reg, PATTERN (p))
5928 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
5930 if (loop_dump_stream)
5931 fprintf (loop_dump_stream,
5932 "Cannot eliminate biv %d: biv used in insn %d.\n",
5933 bl->regno, INSN_UID (p));
5934 break;
5938 if (p == end)
5940 if (loop_dump_stream)
5941 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
5942 bl->regno, eliminate_p ? "was" : "can be");
5943 return 1;
5946 return 0;
5949 /* If BL appears in X (part of the pattern of INSN), see if we can
5950 eliminate its use. If so, return 1. If not, return 0.
5952 If the biv of BL does not appear in X, return 1.
5954 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
5955 where extra insns should be added. Depending on how many items have been
5956 moved out of the loop, it will either be before INSN or at the start of
5957 the loop. */
5959 static int
5960 maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
5961 rtx x, insn;
5962 struct iv_class *bl;
5963 int eliminate_p;
5964 rtx where;
5966 enum rtx_code code = GET_CODE (x);
5967 rtx reg = bl->biv->dest_reg;
5968 enum machine_mode mode = GET_MODE (reg);
5969 struct induction *v;
5970 rtx arg, new, tem;
5971 int arg_operand;
5972 char *fmt;
5973 int i, j;
5975 switch (code)
5977 case REG:
5978 /* If we haven't already been able to do something with this BIV,
5979 we can't eliminate it. */
5980 if (x == reg)
5981 return 0;
5982 return 1;
5984 case SET:
5985 /* If this sets the BIV, it is not a problem. */
5986 if (SET_DEST (x) == reg)
5987 return 1;
5989 /* If this is an insn that defines a giv, it is also ok because
5990 it will go away when the giv is reduced. */
5991 for (v = bl->giv; v; v = v->next_iv)
5992 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
5993 return 1;
5995 #ifdef HAVE_cc0
5996 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
5998 /* Can replace with any giv that was reduced and
5999 that has (MULT_VAL != 0) and (ADD_VAL == 0).
6000 Require a constant for MULT_VAL, so we know it's nonzero. */
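/* Editor's example (illustrative): with a reduced giv computing
   g = 4*biv and no additive term, a test of "biv <op> 0" can test g
   instead, since multiplying by a positive constant preserves sign;
   a negative mult_val reverses the comparison, which is handled with
   the COMPARE below.  */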
6002 for (v = bl->giv; v; v = v->next_iv)
6003 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6004 && v->add_val == const0_rtx
6005 && ! v->ignore && ! v->maybe_dead && v->always_computable
6006 && v->mode == mode)
6008 if (! eliminate_p)
6009 return 1;
6011 /* If the giv has the opposite direction of change,
6012 then reverse the comparison. */
6013 if (INTVAL (v->mult_val) < 0)
6014 new = gen_rtx (COMPARE, GET_MODE (v->new_reg),
6015 const0_rtx, v->new_reg);
6016 else
6017 new = v->new_reg;
6019 /* We can probably test that giv's reduced reg. */
6020 if (validate_change (insn, &SET_SRC (x), new, 0))
6021 return 1;
6024 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
6025 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
6026 Require a constant for MULT_VAL, so we know it's nonzero. */
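/* Editor's example (illustrative): if a reduced giv computes
   g = 4*biv + 16, then "biv == 0" is equivalent to "g == 16", so the
   test of the biv becomes (cmp g 16); with a negative mult_val the
   operands are swapped to keep the comparison's sense, as below.  */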
6028 for (v = bl->giv; v; v = v->next_iv)
6029 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
6030 && ! v->ignore && ! v->maybe_dead && v->always_computable
6031 && v->mode == mode)
6033 if (! eliminate_p)
6034 return 1;
6036 /* If the giv has the opposite direction of change,
6037 then reverse the comparison. */
6038 if (INTVAL (v->mult_val) < 0)
6039 new = gen_rtx (COMPARE, VOIDmode, copy_rtx (v->add_val),
6040 v->new_reg);
6041 else
6042 new = gen_rtx (COMPARE, VOIDmode, v->new_reg,
6043 copy_rtx (v->add_val));
6045 /* Replace biv with the giv's reduced register. */
6046 update_reg_last_use (v->add_val, insn);
6047 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
6048 return 1;
6050 /* The insn doesn't support that constant or invariant. Copy it
6051 into a register (it will be a loop invariant). */
6052 tem = gen_reg_rtx (GET_MODE (v->new_reg));
6054 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
6055 where);
6057 if (validate_change (insn, &SET_SRC (PATTERN (insn)),
6058 gen_rtx (COMPARE, VOIDmode,
6059 v->new_reg, tem), 0))
6060 return 1;
6063 #endif
6064 break;
6066 case COMPARE:
6067 case EQ: case NE:
6068 case GT: case GE: case GTU: case GEU:
6069 case LT: case LE: case LTU: case LEU:
6070 /* See if either argument is the biv. */
6071 if (XEXP (x, 0) == reg)
6072 arg = XEXP (x, 1), arg_operand = 1;
6073 else if (XEXP (x, 1) == reg)
6074 arg = XEXP (x, 0), arg_operand = 0;
6075 else
6076 break;
6078 if (CONSTANT_P (arg))
6080 /* First try to replace with any giv that has constant positive
6081 mult_val and constant add_val. We might be able to support
6082 negative mult_val, but it seems complex to do it in general. */
6084 for (v = bl->giv; v; v = v->next_iv)
6085 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6086 && CONSTANT_P (v->add_val)
6087 && ! v->ignore && ! v->maybe_dead && v->always_computable
6088 && v->mode == mode)
6090 if (! eliminate_p)
6091 return 1;
6093 /* Replace biv with the giv's reduced reg. */
6094 XEXP (x, 1-arg_operand) = v->new_reg;
6096 /* If all constants are actually constant integers and
6097 the derived constant can be directly placed in the COMPARE,
6098 do so. */
6099 if (GET_CODE (arg) == CONST_INT
6100 && GET_CODE (v->mult_val) == CONST_INT
6101 && GET_CODE (v->add_val) == CONST_INT
6102 && validate_change (insn, &XEXP (x, arg_operand),
6103 GEN_INT (INTVAL (arg)
6104 * INTVAL (v->mult_val)
6105 + INTVAL (v->add_val)), 0))
6106 return 1;
6108 /* Otherwise, load it into a register. */
6109 tem = gen_reg_rtx (mode);
6110 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6111 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
6112 return 1;
6114 /* If that failed, put back the change we made above. */
6115 XEXP (x, 1-arg_operand) = reg;
6118 /* Look for giv with positive constant mult_val and nonconst add_val.
6119 Insert insns to calculate new compare value. */
6121 for (v = bl->giv; v; v = v->next_iv)
6122 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6123 && ! v->ignore && ! v->maybe_dead && v->always_computable
6124 && v->mode == mode)
6126 rtx tem;
6128 if (! eliminate_p)
6129 return 1;
6131 tem = gen_reg_rtx (mode);
6133 /* Replace biv with giv's reduced register. */
6134 validate_change (insn, &XEXP (x, 1 - arg_operand),
6135 v->new_reg, 1);
6137 /* Compute value to compare against. */
6138 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
6139 /* Use it in this insn. */
6140 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6141 if (apply_change_group ())
6142 return 1;
6145 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
6147 if (invariant_p (arg) == 1)
6149 /* Look for giv with constant positive mult_val and nonconst
6150 add_val. Insert insns to compute new compare value. */
6152 for (v = bl->giv; v; v = v->next_iv)
6153 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
6154 && ! v->ignore && ! v->maybe_dead && v->always_computable
6155 && v->mode == mode)
6157 rtx tem;
6159 if (! eliminate_p)
6160 return 1;
6162 tem = gen_reg_rtx (mode);
6164 /* Replace biv with giv's reduced register. */
6165 validate_change (insn, &XEXP (x, 1 - arg_operand),
6166 v->new_reg, 1);
6168 /* Compute value to compare against. */
6169 emit_iv_add_mult (arg, v->mult_val, v->add_val,
6170 tem, where);
6171 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
6172 if (apply_change_group ())
6173 return 1;
6177 /* This code has problems. Basically, while checking whether
6178 we can eliminate BL, there is no way to know whether a particular
6179 giv of ARG will be reduced. If it isn't going to be reduced,
6180 we can't eliminate BL. We can try forcing it to be reduced,
6181 but that can generate poor code.
6183 The problem is that the benefit of reducing TV, below, should
6184 be increased if BL can actually be eliminated, but this means
6185 we might have to do a topological sort of the order in which
6186 we try to process bivs. It doesn't seem worthwhile to do
6187 this sort of thing now. */
6189 #if 0
6190 /* Otherwise the reg compared with had better be a biv. */
6191 if (GET_CODE (arg) != REG
6192 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
6193 return 0;
6195 /* Look for a pair of givs, one for each biv,
6196 with identical coefficients. */
6197 for (v = bl->giv; v; v = v->next_iv)
6199 struct induction *tv;
6201 if (v->ignore || v->maybe_dead || v->mode != mode)
6202 continue;
6204 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
6205 if (! tv->ignore && ! tv->maybe_dead
6206 && rtx_equal_p (tv->mult_val, v->mult_val)
6207 && rtx_equal_p (tv->add_val, v->add_val)
6208 && tv->mode == mode)
6210 if (! eliminate_p)
6211 return 1;
6213 /* Replace biv with its giv's reduced reg. */
6214 XEXP (x, 1-arg_operand) = v->new_reg;
6215 /* Replace other operand with the other giv's
6216 reduced reg. */
6217 XEXP (x, arg_operand) = tv->new_reg;
6218 return 1;
6221 #endif
6224 /* If we get here, the biv can't be eliminated. */
6225 return 0;
6227 case MEM:
6228 /* If this address is a DEST_ADDR giv, it doesn't matter if the
6229 biv is used in it, since it will be replaced. */
6230 for (v = bl->giv; v; v = v->next_iv)
6231 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
6232 return 1;
6233 break;
6236 /* See if any subexpression fails elimination. */
6237 fmt = GET_RTX_FORMAT (code);
6238 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6240 switch (fmt[i])
6242 case 'e':
6243 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
6244 eliminate_p, where))
6245 return 0;
6246 break;
6248 case 'E':
6249 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6250 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
6251 eliminate_p, where))
6252 return 0;
6253 break;
6257 return 1;
6260 /* Return nonzero if the last use of REG
6261 is in an insn following INSN in the same basic block. */
6263 static int
6264 last_use_this_basic_block (reg, insn)
6265 rtx reg;
6266 rtx insn;
6268 rtx n;
6269 for (n = insn;
6270 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
6271 n = NEXT_INSN (n))
6273 if (regno_last_uid[REGNO (reg)] == INSN_UID (n))
6274 return 1;
6276 return 0;
6279 /* Called via `note_stores' to record the initial value of a biv. Here we
6280 just record the location of the set and process it later. */
6282 static void
6283 record_initial (dest, set)
6284 rtx dest;
6285 rtx set;
6287 struct iv_class *bl;
6289 if (GET_CODE (dest) != REG
6290 || REGNO (dest) >= max_reg_before_loop
6291 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
6292 return;
6294 bl = reg_biv_class[REGNO (dest)];
6296 /* If this is the first set found, record it. */
6297 if (bl->init_insn == 0)
6299 bl->init_insn = note_insn;
6300 bl->init_set = set;
6304 /* If any of the registers in X are "old" and currently have a last use earlier
6305 than INSN, update them to have a last use of INSN. Their actual last use
6306 will be the previous insn but it will not have a valid uid_luid so we can't
6307 use it. */
6309 static void
6310 update_reg_last_use (x, insn)
6311 rtx x;
6312 rtx insn;
6314 /* Check for the case where INSN does not have a valid luid. In this case,
6315 there is no need to modify the regno_last_uid, as this can only happen
6316 when code is inserted after the loop_end to set a pseudo's final value,
6317 and hence this insn will never be the last use of x. */
6318 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
6319 && INSN_UID (insn) < max_uid_for_loop
6320 && uid_luid[regno_last_uid[REGNO (x)]] < uid_luid[INSN_UID (insn)])
6321 regno_last_uid[REGNO (x)] = INSN_UID (insn);
6322 else
6324 register int i, j;
6325 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
6326 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6328 if (fmt[i] == 'e')
6329 update_reg_last_use (XEXP (x, i), insn);
6330 else if (fmt[i] == 'E')
6331 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6332 update_reg_last_use (XVECEXP (x, i, j), insn);
6337 /* Given a jump insn JUMP, return the condition that will cause it to branch
6338 to its JUMP_LABEL. If the condition cannot be understood, or is an
6339 inequality floating-point comparison which needs to be reversed, 0 will
6340 be returned.
6342 If EARLIEST is non-zero, it is a pointer to a place where the earliest
6343 insn used in locating the condition was found. If a replacement test
6344 of the condition is desired, it should be placed in front of that
6345 insn and we will be sure that the inputs are still valid.
6347 The condition will be returned in a canonical form to simplify testing by
6348 callers. Specifically:
6350 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
6351 (2) Both operands will be machine operands; (cc0) will have been replaced.
6352 (3) If an operand is a constant, it will be the second operand.
6353 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
6354 for GE, GEU, and LEU. */
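/* Editor's examples of the canonical form (illustrative, not from the
   original source): a branch on (gt (const_int 3) (reg)) is returned
   as (LT reg 3) by rule (3), (le (reg) (const_int 4)) becomes
   (LT reg 5) by rule (4), and the unsigned codes are adjusted the
   same way, e.g. (geu (reg) (const_int 1)) becomes (GTU reg 0).  */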
6356 rtx
6357 get_condition (jump, earliest)
6358 rtx jump;
6359 rtx *earliest;
6361 enum rtx_code code;
6362 rtx prev = jump;
6363 rtx set;
6364 rtx tem;
6365 rtx op0, op1;
6366 int reverse_code = 0;
6367 int did_reverse_condition = 0;
6369 /* If this is not a standard conditional jump, we can't parse it. */
6370 if (GET_CODE (jump) != JUMP_INSN
6371 || ! condjump_p (jump) || simplejump_p (jump))
6372 return 0;
6374 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
6375 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
6376 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
6378 if (earliest)
6379 *earliest = jump;
6381 /* If this branches to JUMP_LABEL when the condition is false, reverse
6382 the condition. */
6383 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
6384 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
6385 code = reverse_condition (code), did_reverse_condition ^= 1;
6387 /* If we are comparing a register with zero, see if the register is set
6388 in the previous insn to a COMPARE or a comparison operation. Perform
6389 the same tests, as a function of STORE_FLAG_VALUE, that
6390 find_comparison_args in cse.c does. */
6392 while (GET_RTX_CLASS (code) == '<' && op1 == const0_rtx)
6394 /* Set non-zero when we find something of interest. */
6395 rtx x = 0;
6397 #ifdef HAVE_cc0
6398 /* If comparison with cc0, import actual comparison from compare
6399 insn. */
6400 if (op0 == cc0_rtx)
6402 if ((prev = prev_nonnote_insn (prev)) == 0
6403 || GET_CODE (prev) != INSN
6404 || (set = single_set (prev)) == 0
6405 || SET_DEST (set) != cc0_rtx)
6406 return 0;
6408 op0 = SET_SRC (set);
6409 op1 = CONST0_RTX (GET_MODE (op0));
6410 if (earliest)
6411 *earliest = prev;
6413 #endif
6415 /* If this is a COMPARE, pick up the two things being compared. */
6416 if (GET_CODE (op0) == COMPARE)
6418 op1 = XEXP (op0, 1);
6419 op0 = XEXP (op0, 0);
6420 continue;
6422 else if (GET_CODE (op0) != REG)
6423 break;
6425 /* Go back to the previous insn. Stop if it is not an INSN. We also
6426 stop if it isn't a single set or if it has a REG_INC note because
6427 we don't want to bother dealing with it. */
6429 if ((prev = prev_nonnote_insn (prev)) == 0
6430 || GET_CODE (prev) != INSN
6431 || FIND_REG_INC_NOTE (prev, 0)
6432 || (set = single_set (prev)) == 0)
6433 break;
6435 /* If this is setting OP0, get what it sets it to if it looks
6436 relevant. */
6437 if (SET_DEST (set) == op0)
6439 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
6441 if ((GET_CODE (SET_SRC (set)) == COMPARE
6442 || (((code == NE
6443 || (code == LT
6444 && GET_MODE_CLASS (inner_mode) == MODE_INT
6445 && (GET_MODE_BITSIZE (inner_mode)
6446 <= HOST_BITS_PER_WIDE_INT)
6447 && (STORE_FLAG_VALUE
6448 & ((HOST_WIDE_INT) 1
6449 << (GET_MODE_BITSIZE (inner_mode) - 1))))
6450 #ifdef FLOAT_STORE_FLAG_VALUE
6451 || (code == LT
6452 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
6453 && FLOAT_STORE_FLAG_VALUE < 0)
6454 #endif
6456 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')))
6457 x = SET_SRC (set);
6458 else if (((code == EQ
6459 || (code == GE
6460 && (GET_MODE_BITSIZE (inner_mode)
6461 <= HOST_BITS_PER_WIDE_INT)
6462 && GET_MODE_CLASS (inner_mode) == MODE_INT
6463 && (STORE_FLAG_VALUE
6464 & ((HOST_WIDE_INT) 1
6465 << (GET_MODE_BITSIZE (inner_mode) - 1))))
6466 #ifdef FLOAT_STORE_FLAG_VALUE
6467 || (code == GE
6468 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
6469 && FLOAT_STORE_FLAG_VALUE < 0)
6470 #endif
6472 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')
6474 /* We might have reversed a LT to get a GE here. But this wasn't
6475 actually the comparison of data, so we don't flag that we
6476 have had to reverse the condition. */
6477 did_reverse_condition ^= 1;
6478 reverse_code = 1;
6479 x = SET_SRC (set);
6481 else
6482 break;
6485 else if (reg_set_p (op0, prev))
6486 /* If this sets OP0, but not directly, we have to give up. */
6487 break;
6489 if (x)
6491 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
6492 code = GET_CODE (x);
6493 if (reverse_code)
6495 code = reverse_condition (code);
6496 did_reverse_condition ^= 1;
6497 reverse_code = 0;
6500 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
6501 if (earliest)
6502 *earliest = prev;
6506 /* If constant is first, put it last. */
6507 if (CONSTANT_P (op0))
6508 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
6510 /* If OP0 is the result of a comparison, we weren't able to find what
6511 was really being compared, so fail. */
6512 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6513 return 0;
6515 /* Canonicalize any ordered comparison with integers involving equality
6516 if we can do computations in the relevant mode and we do not
6517 overflow. */
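/* Editor's note (illustrative): the guards below avoid overflowing the
   rewrite.  E.g. (LE x 0x7fffffff) in SImode is left alone, since
   const+1 would wrap past the signed maximum, and (GEU x 0), which is
   always true, is likewise left untouched.  */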
6519 if (GET_CODE (op1) == CONST_INT
6520 && GET_MODE (op0) != VOIDmode
6521 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
6523 HOST_WIDE_INT const_val = INTVAL (op1);
6524 unsigned HOST_WIDE_INT uconst_val = const_val;
6525 unsigned HOST_WIDE_INT max_val
6526 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
6528 switch (code)
6530 case LE:
6531 if (const_val != max_val >> 1)
6532 code = LT, op1 = GEN_INT (const_val + 1);
6533 break;
6535 case GE:
6536 if (const_val
6537 != (((HOST_WIDE_INT) 1
6538 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
6539 code = GT, op1 = GEN_INT (const_val - 1);
6540 break;
6542 case LEU:
6543 if (uconst_val != max_val)
6544 code = LTU, op1 = GEN_INT (uconst_val + 1);
6545 break;
6547 case GEU:
6548 if (uconst_val != 0)
6549 code = GTU, op1 = GEN_INT (uconst_val - 1);
6550 break;
6554 /* If this was floating-point and we reversed anything other than an
6555 EQ or NE, return zero. */
6556 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
6557 && did_reverse_condition && code != NE && code != EQ
6558 && ! flag_fast_math
6559 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
6560 return 0;
6562 #ifdef HAVE_cc0
6563 /* Never return CC0; return zero instead. */
6564 if (op0 == cc0_rtx)
6565 return 0;
6566 #endif
6568 return gen_rtx (code, VOIDmode, op0, op1);
6571 /* Similar to the routine above, except that we also put an invariant last
6572 unless both operands are invariants. */
6574 rtx
6575 get_condition_for_loop (x)
6576 rtx x;
6578 rtx comparison = get_condition (x, NULL_PTR);
6580 if (comparison == 0
6581 || ! invariant_p (XEXP (comparison, 0))
6582 || invariant_p (XEXP (comparison, 1)))
6583 return comparison;
6585 return gen_rtx (swap_condition (GET_CODE (comparison)), VOIDmode,
6586 XEXP (comparison, 1), XEXP (comparison, 0));