/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995,
   1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.

   Basic induction variables (BIVs) are pseudo registers that are set within
   a loop only by incrementing or decrementing their value.  General induction
   variables (GIVs) are pseudo registers whose value is a linear function
   of a basic induction variable.  BIVs are recognized by `basic_induction_var';
   GIVs by `general_induction_var'.

   Once induction variables are identified, strength reduction is applied to the
   general induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value, and changes these to zero the entire
   register once before the loop and merely copy the low part within the
   loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */
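
/* As a worked example (comment added for exposition; not in the
   original sources), consider a loop like

	for (i = 0; i < n; i++)
	  a[i] = 0;

   The counter `i' is a biv: within the loop it is changed only by the
   increment i++.  Assuming 4-byte array elements, the store address
   `&a + 4*i' is a giv: a linear function mult * biv + add with
   mult == 4 and add == &a.  Strength reduction rewrites that address
   as a fresh pseudo initialized to &a before the loop and bumped by 4
   each iteration, eliminating the implied multiply from the loop body.  */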
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"
#include "ggc.h"
#include "timevar.h"
#include "tree-pass.h"

/* Get the loop info pointer of a loop.  */
#define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux)

/* Get a pointer to the loop movables structure.  */
#define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables)

/* Get a pointer to the loop registers structure.  */
#define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs)

/* Get a pointer to the loop induction variables structure.  */
#define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs)

/* Get the luid of an insn.  Catch the error of trying to reference the LUID
   of an insn added during loop, since these don't have LUIDs.  */

#define INSN_LUID(INSN) \
  (gcc_assert (INSN_UID (INSN) < max_uid_for_loop), uid_luid[INSN_UID (INSN)])

#define REGNO_FIRST_LUID(REGNO) \
  (REGNO_FIRST_UID (REGNO) < max_uid_for_loop \
   ? uid_luid[REGNO_FIRST_UID (REGNO)] \
   : 0)
#define REGNO_LAST_LUID(REGNO) \
  (REGNO_LAST_UID (REGNO) < max_uid_for_loop \
   ? uid_luid[REGNO_LAST_UID (REGNO)] \
   : INT_MAX)
99 /* A "basic induction variable" or biv is a pseudo reg that is set
100 (within this loop) only by incrementing or decrementing it. */
101 /* A "general induction variable" or giv is a pseudo reg whose
102 value is a linear function of a biv. */
104 /* Bivs are recognized by `basic_induction_var';
105 Givs by `general_induction_var'. */
107 /* An enum for the two different types of givs, those that are used
108 as memory addresses and those that are calculated into registers. */
109 enum g_types
111 DEST_ADDR,
112 DEST_REG
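
/* Illustration (comment added for exposition, not in the original
   sources): in `a[i] = x', the address computation feeding the MEM is
   a DEST_ADDR giv; its value lives only inside the address.  In
   `p = base + 4*i', the value is computed into the register `p'
   itself, so that giv is DEST_REG.  */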

/* A `struct induction' is created for every instruction that sets
   an induction variable (either a biv or a giv).  */

struct induction
{
  rtx insn;		     /* The insn that sets a biv or giv */
  rtx new_reg;		     /* New register, containing strength reduced
				version of this giv.  */
  rtx src_reg;		     /* Biv from which this giv is computed.
				(If this is a biv, then this is the biv.)  */
  enum g_types giv_type;     /* Indicate whether DEST_ADDR or DEST_REG */
  rtx dest_reg;		     /* Destination register for insn: this is the
				register which was the biv or giv.
				For a biv, this equals src_reg.
				For a DEST_ADDR type giv, this is 0.  */
  rtx *location;	     /* Place in the insn where this giv occurs.
				If GIV_TYPE is DEST_REG, this is 0.  */
			     /* For a biv, this is the place where add_val
				was found.  */
  enum machine_mode mode;    /* The mode of this biv or giv */
  rtx mem;		     /* For DEST_ADDR, the memory object.  */
  rtx mult_val;		     /* Multiplicative factor for src_reg.  */
  rtx add_val;		     /* Additive constant for that product.  */
  int benefit;		     /* Gain from eliminating this insn.  */
  rtx final_value;	     /* If the giv is used outside the loop, and its
				final value could be calculated, it is put
				here, and the giv is made replaceable.  Set
				the giv to this value before the loop.  */
  unsigned combined_with;    /* The number of givs this giv has been
				combined with.  If nonzero, this giv
				cannot combine with any other giv.  */
  unsigned replaceable : 1;  /* 1 if we can substitute the strength-reduced
				variable for the original variable.
				0 means they must be kept separate and the
				new one must be copied into the old pseudo
				reg each time the old one is set.  */
  unsigned not_replaceable : 1; /* Used to prevent duplicating work.  This is
				   1 if we know that the giv definitely can
				   not be made replaceable, in which case we
				   don't bother checking the variable again
				   even if further info is available.
				   Both this and the above can be zero.  */
  unsigned ignore : 1;	     /* 1 prohibits further processing of giv */
  unsigned always_computable : 1; /* 1 if this value is computable every
				     iteration.  */
  unsigned always_executed : 1; /* 1 if this set occurs each iteration.  */
  unsigned maybe_multiple : 1;	/* Only used for a biv and 1 if this biv
				   update may be done multiple times per
				   iteration.  */
  unsigned cant_derive : 1;  /* For giv's, 1 if this giv cannot derive
				another giv.  This occurs in many cases
				where a giv's lifetime spans an update to
				a biv.  */
  unsigned maybe_dead : 1;   /* 1 if this giv might be dead.  In that case,
				we won't use it to eliminate a biv, it
				would probably lose.  */
  unsigned auto_inc_opt : 1; /* 1 if this giv had its increment output next
				to it to try to form an auto-inc address.  */
  unsigned shared : 1;
  unsigned no_const_addval : 1; /* 1 if add_val does not contain a const.  */
  int lifetime;		     /* Length of life of this giv */
  rtx derive_adjustment;     /* If nonzero, is an adjustment to be
				subtracted from add_val when this giv
				derives another.  This occurs when the
				giv spans a biv update by incrementation.  */
  rtx ext_dependent;	     /* If nonzero, is a sign or zero extension
				of a biv on which this giv is dependent.  */
  struct induction *next_iv; /* For givs, links together all givs that are
				based on the same biv.  For bivs, links
				together all biv entries that refer to the
				same biv register.  */
  struct induction *same;    /* For givs, if the giv has been combined with
				another giv, this points to the base giv.
				The base giv will have COMBINED_WITH nonzero.
				For bivs, if the biv has the same LOCATION
				as another biv, this points to the base
				biv.  */
  struct induction *same_insn; /* If there are multiple identical givs in
				  the same insn, then all but one have this
				  field set, and they all point to the giv
				  that doesn't have this field set.  */
  rtx last_use;		     /* For a giv made from a biv increment, this is
				a substitute for the lifetime information.  */
};
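
/* Example of the giv fields (comment added for exposition, not in the
   original sources): for a giv whose value is 4 * i + base, with `i'
   the biv, SRC_REG holds `i', MULT_VAL is (const_int 4), ADD_VAL is
   the rtx for `base', and NEW_REG, once strength reduction has run, is
   the fresh pseudo that replaces the multiply with an addition per
   iteration.  */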

/* A `struct iv_class' is created for each biv.  */

struct iv_class
{
  unsigned int regno;	     /* Pseudo reg which is the biv.  */
  int biv_count;	     /* Number of insns setting this reg.  */
  struct induction *biv;     /* List of all insns that set this reg.  */
  int giv_count;	     /* Number of DEST_REG givs computed from this
				biv.  The resulting count is only used in
				check_dbra_loop.  */
  struct induction *giv;     /* List of all insns that compute a giv
				from this reg.  */
  int total_benefit;	     /* Sum of BENEFITs of all those givs.  */
  rtx initial_value;	     /* Value of reg at loop start.  */
  rtx initial_test;	     /* Test performed on BIV before loop.  */
  rtx final_value;	     /* Value of reg at loop end, if known.  */
  struct iv_class *next;     /* Links all class structures together.  */
  rtx init_insn;	     /* insn which initializes biv, 0 if none.  */
  rtx init_set;		     /* SET of INIT_INSN, if any.  */
  unsigned incremented : 1;  /* 1 if somewhere incremented/decremented */
  unsigned eliminable : 1;   /* 1 if plausible candidate for
				elimination.  */
  unsigned nonneg : 1;	     /* 1 if we added a REG_NONNEG note for
				this.  */
  unsigned reversed : 1;     /* 1 if we reversed the loop that this
				biv controls.  */
  unsigned all_reduced : 1;  /* 1 if all givs using this biv have
				been reduced.  */
};

/* Definitions used by the basic induction variable discovery code.  */
enum iv_mode
{
  UNKNOWN_INDUCT,
  BASIC_INDUCT,
  NOT_BASIC_INDUCT,
  GENERAL_INDUCT
};

/* A `struct iv' is created for every register.  */

struct iv
{
  enum iv_mode type;
  union
  {
    struct iv_class *class;
    struct induction *info;
  } iv;
};

#define REG_IV_TYPE(ivs, n) ivs->regs[n].type
#define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info
#define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class

struct loop_ivs
{
  /* Indexed by register number, contains pointer to `struct
     iv' if register is an induction variable.  */
  struct iv *regs;

  /* Size of regs array.  */
  unsigned int n_regs;

  /* The head of a list which links together (via the next field)
     every iv class for the current loop.  */
  struct iv_class *list;
};

typedef struct loop_mem_info
{
  rtx mem;	 /* The MEM itself.  */
  rtx reg;	 /* Corresponding pseudo, if any.  */
  int optimize;	 /* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;

struct loop_reg
{
  /* Number of times the reg is set during the loop being scanned.
     During code motion, a negative value indicates a reg that has
     been made a candidate; in particular -2 means that it is a
     candidate that we know is equal to a constant and -1 means that
     it is a candidate not known equal to a constant.  After code
     motion, regs moved have 0 (which is accurate now) while the
     failed candidates have the original number of times set.

     Therefore, at all times, == 0 indicates an invariant register;
     < 0 a conditionally invariant one.  */
  int set_in_loop;

  /* Original value of set_in_loop; same except that this value
     is not set negative for a reg whose sets have been made candidates
     and not set to 0 for a reg that is moved.  */
  int n_times_set;

  /* Contains the insn in which a register was used if it was used
     exactly once; contains const0_rtx if it was used more than once.  */
  rtx single_usage;

  /* Nonzero indicates that the register cannot be moved or strength
     reduced.  */
  char may_not_optimize;

  /* Nonzero means reg N has already been moved out of one loop.
     This reduces the desire to move it out of another.  */
  char moved_once;
};
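
/* Worked example of the set_in_loop encoding (comment added for
   exposition, not in the original sources): a reg set once in the loop
   starts with set_in_loop == 1.  When scan_loop makes it a code-motion
   candidate, the field becomes -1 (or -2 if it is known equal to a
   constant).  If the insn is actually moved, the field is reset to 0,
   matching the "== 0 means invariant" rule above; if the move fails,
   it is restored from n_times_set.  */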

struct loop_regs
{
  int num;			/* Number of regs used in table.  */
  int size;			/* Size of table.  */
  struct loop_reg *array;	/* Register usage info array.  */
  int multiple_uses;		/* Nonzero if a reg has multiple uses.  */
};

struct loop_movables
{
  /* Head of movable chain.  */
  struct movable *head;
  /* Last movable in chain.  */
  struct movable *last;
};

/* Information pertaining to a loop.  */

struct loop_info
{
  /* Nonzero if there is a subroutine call in the current loop.  */
  int has_call;
  /* Nonzero if there is a libcall in the current loop.  */
  int has_libcall;
  /* Nonzero if there is a non-constant call in the current loop.  */
  int has_nonconst_call;
  /* Nonzero if there is a prefetch instruction in the current loop.  */
  int has_prefetch;
  /* Nonzero if there is a volatile memory reference in the current
     loop.  */
  int has_volatile;
  /* Nonzero if there is a tablejump in the current loop.  */
  int has_tablejump;
  /* Nonzero if there are ways to leave the loop other than falling
     off the end.  */
  int has_multiple_exit_targets;
  /* Nonzero if there is an indirect jump in the current function.  */
  int has_indirect_jump;
  /* Register or constant initial loop value.  */
  rtx initial_value;
  /* Register or constant value used for comparison test.  */
  rtx comparison_value;
  /* Register or constant approximate final value.  */
  rtx final_value;
  /* Register or constant initial loop value with term common to
     final_value removed.  */
  rtx initial_equiv_value;
  /* Register or constant final loop value with term common to
     initial_value removed.  */
  rtx final_equiv_value;
  /* Register corresponding to iteration variable.  */
  rtx iteration_var;
  /* Constant loop increment.  */
  rtx increment;
  enum rtx_code comparison_code;
  /* Holds the number of loop iterations.  It is zero if the number
     could not be calculated.  Must be unsigned since the number of
     iterations can be as high as 2^wordsize - 1.  For loops with a
     wider iterator, this number will be zero if the number of loop
     iterations is too large for an unsigned integer to hold.  */
  unsigned HOST_WIDE_INT n_iterations;
  int used_count_register;
  /* The loop iterator induction variable.  */
  struct iv_class *iv;
  /* List of MEMs that are stored in this loop.  */
  rtx store_mems;
  /* Array of MEMs that are used (read or written) in this loop, but
     cannot be aliased by anything in this loop, except perhaps
     themselves.  In other words, if mems[i] is altered during
     the loop, it is altered by an expression that is rtx_equal_p to
     it.  */
  loop_mem_info *mems;
  /* The index of the next available slot in MEMS.  */
  int mems_idx;
  /* The number of elements allocated in MEMS.  */
  int mems_allocated;
  /* Nonzero if we don't know what MEMs were changed in the current
     loop.  This happens if the loop contains a call (in which case
     `has_call' will also be set) or if we store into more than
     NUM_STORES MEMs.  */
  int unknown_address_altered;
  /* The above doesn't count any readonly memory locations that are
     stored.  This does.  */
  int unknown_constant_address_altered;
  /* Count of memory write instructions discovered in the loop.  */
  int num_mem_sets;
  /* The insn where the first of these was found.  */
  rtx first_loop_store_insn;
  /* The chain of movable insns in loop.  */
  struct loop_movables movables;
  /* The registers used in the loop.  */
  struct loop_regs regs;
  /* The induction variable information in loop.  */
  struct loop_ivs ivs;
  /* Nonzero if call is in pre_header extended basic block.  */
  int pre_header_has_call;
};

/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (gcc_unreachable (), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum percentage of memory references that are worth prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif
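
/* With the default of 220, the density threshold works out to
   220 / 256, i.e. roughly 86% (arithmetic spelled out for exposition;
   this comment is not in the original sources).  */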

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
  ((REGNO) < FIRST_PSEUDO_REGISTER \
   ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

static int *uid_luid;
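
/* For instance (comment added for exposition, not in the original
   sources), monotonic luids make a containment test valid even though
   uids are assigned in insn-creation order rather than insn-stream
   order; see INSN_IN_RANGE_P further down, which compares an insn's
   luid against the luids of the loop's START and END insns.  */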

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

static struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

static int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
static unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;		     /* A movable insn */
  rtx set_src;		     /* The expression this reg is set from.  */
  rtx set_dest;		     /* The destination of this SET.  */
  rtx dependencies;	     /* When INSN is libcall, this is an EXPR_LIST
				of any registers used within the LIBCALL.  */
  int consec;		     /* Number of consecutive following insns
				that must be moved with this one.  */
  unsigned int regno;	     /* The register it sets */
  short lifetime;	     /* lifetime of that register;
				may be adjusted when matching movables
				that load the same value are found.  */
  short savings;	     /* Number of insns we can move for this reg,
				including other movables that force this
				or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8; /* Nonzero means it is a mode for
				a low part that we should avoid changing when
				clearing the rest of the reg.  */
  unsigned int cond : 1;     /* 1 if only conditionally movable */
  unsigned int force : 1;    /* 1 means MUST move this insn */
  unsigned int global : 1;   /* 1 means reg is live outside this loop */
			     /* If PARTIAL is 1, GLOBAL means something
				different: that the reg is live outside the
				range from where it is set to the following
				label.  */
  unsigned int done : 1;     /* 1 inhibits further processing of this */

  unsigned int partial : 1;  /* 1 means this reg is used for zero-extending.
				In particular, moving it does not make it
				invariant.  */
  unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
				 load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1; /* Same as above, if this is necessary for
				     the first insn of a consecutive sets
				     group.  */
  unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
				   the original insn with a copy from that
				   pseudo, rather than deleting it.  */
  struct movable *match;     /* First entry for same value */
  struct movable *forces;    /* An insn that must be moved if this is */
  struct movable *next;
};

static FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static rtx find_regs_nested (rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static rtx libcall_other_reg (rtx, rtx);
static void record_excess_regs (rtx, rtx, rtx *);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static HOST_WIDE_INT get_monotonic_increment (struct iv_class *);
static bool biased_biv_fits_mode_p (const struct loop *, struct iv_class *,
				    HOST_WIDE_INT, enum machine_mode,
				    unsigned HOST_WIDE_INT);
static bool biv_fits_mode_p (const struct loop *, struct iv_class *,
			     HOST_WIDE_INT, enum machine_mode, bool);
static bool extension_within_bounds_p (const struct loop *, struct iv_class *,
				       HOST_WIDE_INT, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
static int loop_invariant_p (const struct loop *, rtx);
static rtx loop_insn_hoist (const struct loop *, rtx);
static void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx,
					  rtx, basic_block, rtx);
static rtx loop_insn_emit_before (const struct loop *, basic_block,
				  rtx, rtx);
static int loop_insn_first_p (rtx, rtx);
static rtx get_condition_for_loop (const struct loop *, rtx);
static void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx);
static void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx);
static rtx extend_value_for_giv (struct induction *, rtx);
static rtx loop_insn_sink (const struct loop *, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END) \
  (INSN_UID (INSN) < max_uid_for_loop \
   && INSN_LUID (INSN) >= INSN_LUID (START) \
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}

/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (!NOTE_P (insn)
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}

/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (NOTE_P (insn)
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  gcc_assert (get_max_uid () <= max_uid_for_loop);
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	{
	  scan_loop (loop, flags);
	  ggc_collect ();
	}
    }

  end_alias_analysis ();

  /* Clean up.  */
  for (i = 0; i < (int) loops->num; i++)
    free (loops_info[i].mems);

  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}

/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
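
/* Traversal-order example (comment added for exposition, not in the
   original sources): for a loop entered near the bottom, the insn
   stream looks like  BEG ... TOP: body ... SCAN_START: test ... END.
   Scanning starts at SCAN_START, runs to END, wraps around to TOP, and
   stops upon reaching SCAN_START again, so every insn is visited
   exactly once in execution order.  */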

/* Find any register references hidden inside X and add them to
   the dependency list DEPS.  This is used to look inside CLOBBER (MEM ...)
   when checking whether a PARALLEL can be pulled out of a loop.  */

static rtx
find_regs_nested (rtx deps, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  if (code == REG)
    deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
  else
    {
      const char *fmt = GET_RTX_FORMAT (code);
      int i, j;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    deps = find_regs_nested (deps, XEXP (x, i));
	  else if (fmt[i] == 'E')
	    for (j = 0; j < XVECLEN (x, i); j++)
	      deps = find_regs_nested (deps, XVECEXP (x, i, j));
	}
    }
  return deps;
}

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
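
  /* Shape of such a loop, for illustration (comment added for
     exposition, not in the original sources):

	NOTE_INSN_LOOP_BEG
	jump to TEST
     TOP:
	loop body
     TEST:
	exit test; conditional jump back to TOP
	NOTE_INSN_LOOP_END

     Here the first real insn after the BEG note is the unconditional
     jump, so the loop is really entered at TEST.  */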

  for (p = NEXT_INSN (loop_start);
       p != loop_end
       && !LABEL_P (p) && ! INSN_P (p)
       && (!NOTE_P (p)
	   || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
	       && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (JUMP_P (p)
      /* Loop entry must be unconditional jump (and not a RETURN)  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
	 jumps out of the loop (meaning it's no loop).
	 This case can happen for things like
	 do {..} while (0).  If this label was generated previously
	 by loop, we can't tell anything about it and have to reject
	 the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || !LABEL_P (loop->scan_start))
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	     INSN_UID (loop_start), INSN_UID (loop_end), insn_count);

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall--;
      if (NONJUMP_INSN_P (p))
	{
	  /* Do not scan past an optimization barrier.  */
	  if (GET_CODE (PATTERN (p)) == ASM_INPUT)
	    break;
	  temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
	  if (temp)
	    in_libcall++;
	  if (! in_libcall
	      && (set = single_set (p))
	      && REG_P (SET_DEST (set))
	      && SET_DEST (set) != frame_pointer_rtx
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	      && SET_DEST (set) != pic_offset_table_rtx
#endif
	      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	    {
	      int tem1 = 0;
	      int tem2 = 0;
	      int move_insn = 0;
	      int insert_temp = 0;
	      rtx src = SET_SRC (set);
	      rtx dependencies = 0;

	      /* Figure out what to use as a source of this insn.  If a
		 REG_EQUIV note is given or if a REG_EQUAL note with a
		 constant operand is specified, use it as the source and
		 mark that we should move this insn by calling
		 emit_move_insn rather than duplicating the insn.

		 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
		 note is present.  */
	      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	      if (temp)
		src = XEXP (temp, 0), move_insn = 1;
	      else
		{
		  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		  if (temp && CONSTANT_P (XEXP (temp, 0)))
		    src = XEXP (temp, 0), move_insn = 1;
		  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		    {
		      src = XEXP (temp, 0);
		      /* A libcall block can use regs that don't appear in
			 the equivalent expression.  To move the libcall,
			 we must move those regs too.  */
		      dependencies = libcall_other_reg (p, src);
		    }
		}

	      /* For parallels, add any possible uses to the dependencies, as
		 we can't move the insn without resolving them first.
		 MEMs inside CLOBBERs may also reference registers; these
		 count as implicit uses.  */
	      if (GET_CODE (PATTERN (p)) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		    {
		      rtx x = XVECEXP (PATTERN (p), 0, i);
		      if (GET_CODE (x) == USE)
			dependencies
			  = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					       dependencies);
		      else if (GET_CODE (x) == CLOBBER
			       && MEM_P (XEXP (x, 0)))
			dependencies = find_regs_nested (dependencies,
							 XEXP (XEXP (x, 0), 0));
		    }
		}

	      if (/* The register is used in basic blocks other
		     than the one where it is set (meaning that
		     something after this point in the loop might
		     depend on its value before the set).  */
		  ! reg_in_basic_block_p (p, SET_DEST (set))
		  /* And the set is not guaranteed to be executed once
		     the loop starts, or the value before the set is
		     needed before the set occurs...

		     ??? Note we have quadratic behavior here, mitigated
		     by the fact that the previous test will often fail for
		     large loops.  Rather than re-scanning the entire loop
		     each time for register usage, we should build tables
		     of the register usage and use them here instead.  */
		  && (maybe_never
		      || loop_reg_used_before_p (loop, set, p)))
		/* It is unsafe to move the set.  However, it may be OK to
		   move the source into a new pseudo, and substitute a
		   reg-to-reg copy for the original insn.

		   This code used to consider it OK to move a set of a variable
		   which was not created by the user and not used in an exit
		   test.
		   That behavior is incorrect and was removed.  */
		insert_temp = 1;

	      /* Don't try to optimize a MODE_CC set with a constant
		 source.  It probably will be combined with a conditional
		 jump.  */
	      if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
		  && CONSTANT_P (src))
		;
	      /* Don't try to optimize a register that was made
		 by loop-optimization for an inner loop.
		 We don't know its life-span, so we can't compute
		 the benefit.  */
	      else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
		;
	      /* Don't move the source and add a reg-to-reg copy:
		 - with -Os (this certainly increases size),
		 - if the mode doesn't support copy operations (obviously),
		 - if the source is already a reg (the motion will gain nothing),
		 - if the source is a legitimate constant (likewise).  */
	      else if (insert_temp
		       && (optimize_size
			   || ! can_copy_p (GET_MODE (SET_SRC (set)))
			   || REG_P (SET_SRC (set))
			   || (CONSTANT_P (SET_SRC (set))
			       && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
		;
	      else if ((tem = loop_invariant_p (loop, src))
		       && (dependencies == 0
			   || (tem2
			       = loop_invariant_p (loop, dependencies)) != 0)
		       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
			   || (tem1
			       = consec_sets_invariant_p
			       (loop, SET_DEST (set),
				regs->array[REGNO (SET_DEST (set))].set_in_loop,
				p)))
		       /* If the insn can cause a trap (such as divide by zero),
			  can't move it unless it's guaranteed to be executed
			  once loop is entered.  Even a function call might
			  prevent the trap insn from being reached
			  (since it might exit!)  */
		       && ! ((maybe_never || call_passed)
			     && may_trap_p (src)))
		{
		  struct movable *m;
		  int regno = REGNO (SET_DEST (set));

		  /* A potential lossage is a case where two insns
		     can be combined as long as they are both in the loop, but
		     we move one of them outside the loop.  For large loops,
		     this can lose.  The most common case of this is the address
		     of a function being called.

		     Therefore, if this register is marked as being used
		     exactly once and we are in a loop with calls
		     (a "large loop"), see if we can replace the usage of
		     this register with the source of this SET.  If we can,
		     delete this insn.

		     Don't do this if P has a REG_RETVAL note or if we have
		     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

		  if (loop_info->has_call
		      && regs->array[regno].single_usage != 0
		      && regs->array[regno].single_usage != const0_rtx
		      && REGNO_FIRST_UID (regno) == INSN_UID (p)
		      && (REGNO_LAST_UID (regno)
			  == INSN_UID (regs->array[regno].single_usage))
		      && regs->array[regno].set_in_loop == 1
		      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		      && ! side_effects_p (SET_SRC (set))
		      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		      && (! SMALL_REGISTER_CLASSES
			  || (! (REG_P (SET_SRC (set))
				 && (REGNO (SET_SRC (set))
				     < FIRST_PSEUDO_REGISTER))))
		      && regno >= FIRST_PSEUDO_REGISTER
		      /* This test is not redundant; SET_SRC (set) might be
			 a call-clobbered register and the life of REGNO
			 might span a call.  */
		      && ! modified_between_p (SET_SRC (set), p,
					       regs->array[regno].single_usage)
		      && no_labels_between_p (p,
					      regs->array[regno].single_usage)
		      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					       regs->array[regno].single_usage))
		    {
		      /* Replace any usage in a REG_EQUAL note.  Must copy
			 the new source, so that we don't get rtx sharing
			 between the SET_SOURCE and REG_NOTES of insn p.  */
		      REG_NOTES (regs->array[regno].single_usage)
			= (replace_rtx
			   (REG_NOTES (regs->array[regno].single_usage),
			    SET_DEST (set), copy_rtx (SET_SRC (set))));

		      delete_insn (p);
		      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = 0;
		      continue;
		    }

		  m = xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_src = src;
		  m->dependencies = dependencies;
		  m->set_dest = SET_DEST (set);
		  m->force = 0;
		  m->consec
		    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
		  m->done = 0;
		  m->forces = 0;
		  m->partial = 0;
		  m->move_insn = move_insn;
		  m->move_insn_first = 0;
		  m->insert_temp = insert_temp;
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		  m->savemode = VOIDmode;
		  m->regno = regno;
		  /* Set M->cond if either loop_invariant_p
		     or consec_sets_invariant_p returned 2
		     (only conditionally invariant).  */
		  m->cond = ((tem | tem1 | tem2) > 1);
		  m->global = LOOP_REG_GLOBAL_P (loop, regno);
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = regs->array[regno].n_times_set;
		  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		    m->savings += libcall_benefit (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);

		  if (m->consec > 0)
		    {
		      /* It is possible for the first instruction to have a
			 REG_EQUAL note but a non-invariant SET_SRC, so we must
			 remember the status of the first instruction in case
			 the last instruction doesn't have a REG_EQUAL note.  */
		      m->move_insn_first = m->move_insn;

		      /* Skip this insn, not checking REG_LIBCALL notes.  */
		      p = next_nonnote_insn (p);
		      /* Skip the consecutive insns, if there are any.  */
		      p = skip_consec_insns (p, m->consec);
		      /* Back up to the last insn of the consecutive group.  */
		      p = prev_nonnote_insn (p);

		      /* We must now reset m->move_insn, m->is_equiv, and
			 possibly m->set_src to correspond to the effects of
			 all the insns.  */
		      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		      if (temp)
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			{
			  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
			  if (temp && CONSTANT_P (XEXP (temp, 0)))
			    m->set_src = XEXP (temp, 0), m->move_insn = 1;
			  else
			    m->move_insn = 0;
			}
		      m->is_equiv
			= (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		    }
		}
	      /* If this register is always set within a STRICT_LOW_PART
		 or set to zero, then its high bytes are constant.
		 So clear them outside the loop and within the loop
		 just load the low bytes.
		 We must check that the machine has an instruction to do so.
		 Also, if the value loaded into the register
		 depends on the same register, this cannot be done.  */
	      else if (SET_SRC (set) == const0_rtx
		       && NONJUMP_INSN_P (NEXT_INSN (p))
		       && (set1 = single_set (NEXT_INSN (p)))
		       && GET_CODE (set1) == SET
		       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
			   == SET_DEST (set))
		       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
		{
		  int regno = REGNO (SET_DEST (set));
		  if (regs->array[regno].set_in_loop == 2)
		    {
		      struct movable *m;
		      m = xmalloc (sizeof (struct movable));
		      m->next = 0;
		      m->insn = p;
		      m->set_dest = SET_DEST (set);
		      m->dependencies = 0;
		      m->force = 0;
		      m->consec = 0;
		      m->done = 0;
		      m->forces = 0;
		      m->move_insn = 0;
		      m->move_insn_first = 0;
		      m->insert_temp = insert_temp;
		      m->partial = 1;
		      /* If the insn may not be executed on some cycles,
			 we can't clear the whole reg; clear just the high part.
			 Not even if the reg is used only within this loop.
			 Consider this:
			 while (1)
			   while (s != t) {
			     if (foo ()) x = *s;
			     use (x);
			   }
			 Clearing x before the inner loop could clobber a value
			 being saved from the last time around the outer loop.
			 However, if the reg is not used outside this loop
			 and all uses of the register are in the same
			 basic block as the store, there is no problem.

			 If this insn was made by loop, we don't know its
			 INSN_LUID and hence must make a conservative
			 assumption.  */
		      m->global = (INSN_UID (p) >= max_uid_for_loop
				   || LOOP_REG_GLOBAL_P (loop, regno)
				   || (labels_in_range_p
				       (p, REGNO_FIRST_LUID (regno))));
		      if (maybe_never && m->global)
			m->savemode = GET_MODE (SET_SRC (set1));
		      else
			m->savemode = VOIDmode;
		      m->regno = regno;
		      m->cond = 0;
		      m->match = 0;
		      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		      m->savings = 1;
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = -1;
		      /* Add M to the end of the chain MOVABLES.  */
		      loop_movables_add (movables, m);
		    }
		}
	}
1467 /* Past a call insn, we get to insns which might not be executed
1468 because the call might exit. This matters for insns that trap.
1469 Constant and pure call insns always return, so they don't count. */
1470 else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
1471 call_passed = 1;
1472 /* Past a label or a jump, we get to insns for which we
1473 can't count on whether or how many times they will be
1474 executed during each iteration. Therefore, we can
1475 only move out sets of trivial variables
1476 (those not used after the loop). */
1477 /* Similar code appears twice in strength_reduce. */
1478 else if ((LABEL_P (p) || JUMP_P (p))
1479 /* If we enter the loop in the middle, and scan around to the
1480 beginning, don't set maybe_never for that. This must be an
1481 unconditional jump, otherwise the code at the top of the
1482 loop might never be executed. Unconditional jumps are
1483 followed by a barrier then the loop_end. */
1484 && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
1485 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1486 && any_uncondjump_p (p)))
1487 maybe_never = 1;
1490 /* If one movable subsumes another, ignore that other. */
1492 ignore_some_movables (movables);
1494 /* For each movable insn, see if the reg that it loads
1495 leads when it dies right into another conditionally movable insn.
1496 If so, record that the second insn "forces" the first one,
1497 since the second can be moved only if the first is. */
1499 force_movables (movables);
1501 /* See if there are multiple movable insns that load the same value.
1502 If there are, make all but the first point at the first one
1503 through the `match' field, and add all their priorities
1504 together as the priority of the first. */
1506 combine_movables (movables, regs);
1508 /* Now consider each movable insn to decide whether it is worth moving.
1509 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1511 For machines with few registers this increases code size, so do not
1512 move movables when optimizing for code size on such machines.
1513 (The 18 below is the value for i386.) */
1515 if (!optimize_size
1516 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1518 move_movables (loop, movables, threshold, insn_count);
1520 /* Recalculate regs->array if move_movables has created new
1521 registers. */
1522 if (max_reg_num () > regs->num)
1524 loop_regs_scan (loop, 0);
1525 for (update_start = loop_start;
1526 PREV_INSN (update_start)
1527 && !LABEL_P (PREV_INSN (update_start));
1528 update_start = PREV_INSN (update_start))
1530 update_end = NEXT_INSN (loop_end);
1532 reg_scan_update (update_start, update_end, loop_max_reg);
1533 loop_max_reg = max_reg_num ();
1537 /* Now candidates that still are negative are those not moved.
1538 Change regs->array[I].set_in_loop to indicate that those are not actually
1539 invariant. */
1540 for (i = 0; i < regs->num; i++)
1541 if (regs->array[i].set_in_loop < 0)
1542 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1544 /* Now that we've moved some things out of the loop, we might be able to
1545 hoist even more memory references. */
1546 load_mems (loop);
1548 /* Recalculate regs->array if load_mems has created new registers. */
1549 if (max_reg_num () > regs->num)
1550 loop_regs_scan (loop, 0);
1552 for (update_start = loop_start;
1553 PREV_INSN (update_start)
1554 && !LABEL_P (PREV_INSN (update_start));
1555 update_start = PREV_INSN (update_start))
1557 update_end = NEXT_INSN (loop_end);
1559 reg_scan_update (update_start, update_end, loop_max_reg);
1560 loop_max_reg = max_reg_num ();
1562 if (flag_strength_reduce)
1564 if (update_end && LABEL_P (update_end))
1565 /* Ensure our label doesn't go away. */
1566 LABEL_NUSES (update_end)++;
1568 strength_reduce (loop, flags);
1570 reg_scan_update (update_start, update_end, loop_max_reg);
1571 loop_max_reg = max_reg_num ();
1573 if (update_end && LABEL_P (update_end)
1574 && --LABEL_NUSES (update_end) == 0)
1575 delete_related_insns (update_end);
1579 /* The movable information was required for strength reduction, which is now done; free it. */
1580 loop_movables_free (movables);
1582 free (regs->array);
1583 regs->array = 0;
1584 regs->num = 0;
1587 /* Add elements to *OUTPUT to record all the pseudo-regs
1588 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1590 static void
1591 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1593 enum rtx_code code;
1594 const char *fmt;
1595 int i;
1597 code = GET_CODE (in_this);
1599 switch (code)
1601 case PC:
1602 case CC0:
1603 case CONST_INT:
1604 case CONST_DOUBLE:
1605 case CONST:
1606 case SYMBOL_REF:
1607 case LABEL_REF:
1608 return;
1610 case REG:
1611 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1612 && ! reg_mentioned_p (in_this, not_in_this))
1613 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1614 return;
1616 default:
1617 break;
1620 fmt = GET_RTX_FORMAT (code);
1621 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1623 int j;
1625 switch (fmt[i])
1627 case 'E':
1628 for (j = 0; j < XVECLEN (in_this, i); j++)
1629 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1630 break;
1632 case 'e':
1633 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1634 break;
1639 /* Check what regs are referred to in the libcall block ending with INSN,
1640 aside from those mentioned in the equivalent value.
1641 If there are none, return 0.
1642 If there are one or more, return an EXPR_LIST containing all of them. */
1644 static rtx
1645 libcall_other_reg (rtx insn, rtx equiv)
1647 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1648 rtx p = XEXP (note, 0);
1649 rtx output = 0;
1651 /* First, find all the regs used in the libcall block
1652 that are not mentioned as inputs to the result. */
1654 while (p != insn)
1656 if (INSN_P (p))
1657 record_excess_regs (PATTERN (p), equiv, &output);
1658 p = NEXT_INSN (p);
1661 return output;
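#if 0
/* Editorial sketch, not part of loop.c: the generic 'e'/'E' format walk
   that record_excess_regs above follows is the standard way to traverse
   an rtx.  This minimal instance counts the REG nodes inside X.  */
static int
count_regs_in_rtx (rtx x)
{
  const char *fmt;
  int i, j, n = 0;

  if (x == 0)
    return 0;
  if (REG_P (x))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        n += count_regs_in_rtx (XEXP (x, i));
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          n += count_regs_in_rtx (XVECEXP (x, i, j));
    }
  return n;
}
#endif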
1664 /* Return 1 if all uses of REG
1665 are between INSN and the end of the basic block. */
1667 static int
1668 reg_in_basic_block_p (rtx insn, rtx reg)
1670 int regno = REGNO (reg);
1671 rtx p;
1673 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1674 return 0;
1676 /* Search this basic block for the already recorded last use of the reg. */
1677 for (p = insn; p; p = NEXT_INSN (p))
1679 switch (GET_CODE (p))
1681 case NOTE:
1682 break;
1684 case INSN:
1685 case CALL_INSN:
1686 /* Ordinary insn: if this is the last use, we win. */
1687 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1688 return 1;
1689 break;
1691 case JUMP_INSN:
1692 /* Jump insn: if this is the last use, we win. */
1693 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1694 return 1;
1695 /* Otherwise, it's the end of the basic block, so we lose. */
1696 return 0;
1698 case CODE_LABEL:
1699 case BARRIER:
1700 /* It's the end of the basic block, so we lose. */
1701 return 0;
1703 default:
1704 break;
1708 /* The "last use" that was recorded can't be found after the first
1709 use. This can happen when the last use was deleted while
1710 processing an inner loop, this inner loop was then completely
1711 unrolled, and the outer loop is always exited after the inner loop,
1712 so that everything after the first use becomes a single basic block. */
1713 return 1;
1716 /* Compute the benefit of eliminating the insns in the block whose
1717 last insn is LAST. This may be a group of insns used to compute a
1718 value directly or can contain a library call. */
1720 static int
1721 libcall_benefit (rtx last)
1723 rtx insn;
1724 int benefit = 0;
1726 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1727 insn != last; insn = NEXT_INSN (insn))
1729 if (CALL_P (insn))
1730 benefit += 10; /* Assume at least this many insns in a library
1731 routine. */
1732 else if (NONJUMP_INSN_P (insn)
1733 && GET_CODE (PATTERN (insn)) != USE
1734 && GET_CODE (PATTERN (insn)) != CLOBBER)
1735 benefit++;
1738 return benefit;
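/* Editorial example: with the weights used above, a libcall block
   containing one CALL_INSN and three ordinary value-computing insns
   (no USE or CLOBBER patterns) is credited a benefit of 10 + 3 = 13.  */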
1741 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1743 static rtx
1744 skip_consec_insns (rtx insn, int count)
1746 for (; count > 0; count--)
1748 rtx temp;
1750 /* If first insn of libcall sequence, skip to end. */
1751 /* Do this at start of loop, since INSN is guaranteed to
1752 be an insn here. */
1753 if (!NOTE_P (insn)
1754 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1755 insn = XEXP (temp, 0);
1758 insn = NEXT_INSN (insn);
1759 while (NOTE_P (insn));
1762 return insn;
1765 /* Ignore any movable whose insn falls within a libcall
1766 which is part of another movable.
1767 We make use of the fact that the movable for the libcall value
1768 was made later and so appears later on the chain. */
1770 static void
1771 ignore_some_movables (struct loop_movables *movables)
1773 struct movable *m, *m1;
1775 for (m = movables->head; m; m = m->next)
1777 /* Is this a movable for the value of a libcall? */
1778 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1779 if (note)
1781 rtx insn;
1782 /* Check for earlier movables inside that range,
1783 and mark them invalid. We cannot use LUIDs here because
1784 insns created by loop.c for prior loops don't have LUIDs.
1785 Rather than reject all such insns from movables, we just
1786 explicitly check each insn in the libcall (since invariant
1787 libcalls aren't that common). */
1788 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1789 for (m1 = movables->head; m1 != m; m1 = m1->next)
1790 if (m1->insn == insn)
1791 m1->done = 1;
1796 /* For each movable insn, see if the reg that it loads
1797 leads, when it dies, right into another conditionally movable insn.
1798 If so, record that the second insn "forces" the first one,
1799 since the second can be moved only if the first is. */
1801 static void
1802 force_movables (struct loop_movables *movables)
1804 struct movable *m, *m1;
1806 for (m1 = movables->head; m1; m1 = m1->next)
1807 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1808 if (!m1->partial && !m1->done)
1810 int regno = m1->regno;
1811 for (m = m1->next; m; m = m->next)
1812 /* ??? Could this be a bug? What if CSE caused the
1813 register of M1 to be used after this insn?
1814 Since CSE does not update regno_last_uid,
1815 this insn M->insn might not be where it dies.
1816 But very likely this doesn't matter; what matters is
1817 that M's reg is computed from M1's reg. */
1818 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1819 && !m->done)
1820 break;
1821 if (m != 0 && m->set_src == m1->set_dest
1822 /* If m->consec, m->set_src isn't valid. */
1823 && m->consec == 0)
1824 m = 0;
1826 /* Increase the priority of moving the first insn
1827 since it permits the second to be moved as well.
1828 Likewise for insns already forced by the first insn. */
1829 if (m != 0)
1831 struct movable *m2;
1833 m->forces = m1;
1834 for (m2 = m1; m2; m2 = m2->forces)
1836 m2->lifetime += m->lifetime;
1837 m2->savings += m->savings;
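/* Editorial note on the bookkeeping above: when movable M consumes the
   reg set by movable M1 exactly at the insn where that reg dies,
   M->forces is set to M1, and M's lifetime and savings are folded into
   M1 (and into every movable M1 in turn forces), so the desirability
   test in move_movables sees the payoff of moving the whole chain.  */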
1843 /* Find invariant expressions that are equal and can be combined into
1844 one register. */
1846 static void
1847 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1849 struct movable *m;
1850 char *matched_regs = xmalloc (regs->num);
1851 enum machine_mode mode;
1853 /* Regs that are set more than once are not allowed to match
1854 or be matched. I'm no longer sure why not. */
1855 /* Only pseudo registers are allowed to match or be matched,
1856 since move_movables does not validate the change. */
1857 /* Perhaps testing m->consec_sets would be more appropriate here? */
1859 for (m = movables->head; m; m = m->next)
1860 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1861 && m->regno >= FIRST_PSEUDO_REGISTER
1862 && !m->insert_temp
1863 && !m->partial)
1865 struct movable *m1;
1866 int regno = m->regno;
1868 memset (matched_regs, 0, regs->num);
1869 matched_regs[regno] = 1;
1871 /* We want later insns to match the first one. Don't make the first
1872 one match any later ones. So start this loop at m->next. */
1873 for (m1 = m->next; m1; m1 = m1->next)
1874 if (m != m1 && m1->match == 0
1875 && !m1->insert_temp
1876 && regs->array[m1->regno].n_times_set == 1
1877 && m1->regno >= FIRST_PSEUDO_REGISTER
1878 /* A reg used outside the loop mustn't be eliminated. */
1879 && !m1->global
1880 /* A reg used for zero-extending mustn't be eliminated. */
1881 && !m1->partial
1882 && (matched_regs[m1->regno]
1885 /* Can combine regs with different modes loaded from the
1886 same constant only if the modes are the same or
1887 if both are integer modes with M wider or the same
1888 width as M1. The check for integer is redundant, but
1889 safe, since the only case of differing destination
1890 modes with equal sources is when both sources are
1891 VOIDmode, i.e., CONST_INT. */
1892 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1893 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1894 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1895 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1896 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1897 /* See if the source of M1 says it matches M. */
1898 && ((REG_P (m1->set_src)
1899 && matched_regs[REGNO (m1->set_src)])
1900 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1901 movables, regs))))
1902 && ((m->dependencies == m1->dependencies)
1903 || rtx_equal_p (m->dependencies, m1->dependencies)))
1905 m->lifetime += m1->lifetime;
1906 m->savings += m1->savings;
1907 m1->done = 1;
1908 m1->match = m;
1909 matched_regs[m1->regno] = 1;
1913 /* Now combine the regs used for zero-extension.
1914 This can be done for those not marked `global'
1915 provided their lives don't overlap. */
1917 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1918 mode = GET_MODE_WIDER_MODE (mode))
1920 struct movable *m0 = 0;
1922 /* Combine all the registers for extension from mode MODE.
1923 Don't combine any that are used outside this loop. */
1924 for (m = movables->head; m; m = m->next)
1925 if (m->partial && ! m->global
1926 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1928 struct movable *m1;
1930 int first = REGNO_FIRST_LUID (m->regno);
1931 int last = REGNO_LAST_LUID (m->regno);
1933 if (m0 == 0)
1935 /* First one: don't check for overlap, just record it. */
1936 m0 = m;
1937 continue;
1940 /* Make sure they extend to the same mode.
1941 (Almost always true.) */
1942 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1943 continue;
1945 /* We already have one: check for overlap with those
1946 already combined together. */
1947 for (m1 = movables->head; m1 != m; m1 = m1->next)
1948 if (m1 == m0 || (m1->partial && m1->match == m0))
1949 if (! (REGNO_FIRST_LUID (m1->regno) > last
1950 || REGNO_LAST_LUID (m1->regno) < first))
1951 goto overlap;
1953 /* No overlap: we can combine this with the others. */
1954 m0->lifetime += m->lifetime;
1955 m0->savings += m->savings;
1956 m->done = 1;
1957 m->match = m0;
1959 overlap:
1964 /* Clean up. */
1965 free (matched_regs);
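/* Editorial example: the disjointness test above is the usual
   closed-interval check -- [first, last] and [first1, last1] are
   disjoint iff first1 > last || last1 < first.  So lifetimes [10, 20]
   and [21, 30] may share one zero-extension reg, while [10, 20] and
   [15, 30] may not.  */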
1968 /* Returns the number of movable instructions in LOOP that were not
1969 moved outside the loop. */
1971 static int
1972 num_unmoved_movables (const struct loop *loop)
1974 int num = 0;
1975 struct movable *m;
1977 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1978 if (!m->done)
1979 ++num;
1981 return num;
1985 /* Return 1 if regs X and Y will become the same if moved. */
1987 static int
1988 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1990 unsigned int xn = REGNO (x);
1991 unsigned int yn = REGNO (y);
1992 struct movable *mx, *my;
1994 for (mx = movables->head; mx; mx = mx->next)
1995 if (mx->regno == xn)
1996 break;
1998 for (my = movables->head; my; my = my->next)
1999 if (my->regno == yn)
2000 break;
2002 return (mx && my
2003 && ((mx->match == my->match && mx->match != 0)
2004 || mx->match == my
2005 || mx == my->match));
2008 /* Return 1 if X and Y are identical-looking rtx's.
2009 This is the Lisp function EQUAL for rtx arguments.
2011 If two registers are matching movables or a movable register and an
2012 equivalent constant, consider them equal. */
2014 static int
2015 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
2016 struct loop_regs *regs)
2018 int i;
2019 int j;
2020 struct movable *m;
2021 enum rtx_code code;
2022 const char *fmt;
2024 if (x == y)
2025 return 1;
2026 if (x == 0 || y == 0)
2027 return 0;
2029 code = GET_CODE (x);
2031 /* If we have a register and a constant, they may sometimes be
2032 equal. */
2033 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
2034 && CONSTANT_P (y))
2036 for (m = movables->head; m; m = m->next)
2037 if (m->move_insn && m->regno == REGNO (x)
2038 && rtx_equal_p (m->set_src, y))
2039 return 1;
2041 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
2042 && CONSTANT_P (x))
2044 for (m = movables->head; m; m = m->next)
2045 if (m->move_insn && m->regno == REGNO (y)
2046 && rtx_equal_p (m->set_src, x))
2047 return 1;
2050 /* Otherwise, rtx's of different codes cannot be equal. */
2051 if (code != GET_CODE (y))
2052 return 0;
2054 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
2055 (REG:SI x) and (REG:HI x) are NOT equivalent. */
2057 if (GET_MODE (x) != GET_MODE (y))
2058 return 0;
2060 /* These three types of rtx's can be compared nonrecursively. */
2061 if (code == REG)
2062 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
2064 if (code == LABEL_REF)
2065 return XEXP (x, 0) == XEXP (y, 0);
2066 if (code == SYMBOL_REF)
2067 return XSTR (x, 0) == XSTR (y, 0);
2069 /* Compare the elements. If any pair of corresponding elements
2070 fail to match, return 0 for the whole thing. */
2072 fmt = GET_RTX_FORMAT (code);
2073 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2075 switch (fmt[i])
2077 case 'w':
2078 if (XWINT (x, i) != XWINT (y, i))
2079 return 0;
2080 break;
2082 case 'i':
2083 if (XINT (x, i) != XINT (y, i))
2084 return 0;
2085 break;
2087 case 'E':
2088 /* Two vectors must have the same length. */
2089 if (XVECLEN (x, i) != XVECLEN (y, i))
2090 return 0;
2092 /* And the corresponding elements must match. */
2093 for (j = 0; j < XVECLEN (x, i); j++)
2094 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2095 movables, regs) == 0)
2096 return 0;
2097 break;
2099 case 'e':
2100 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
2101 == 0)
2102 return 0;
2103 break;
2105 case 's':
2106 if (strcmp (XSTR (x, i), XSTR (y, i)))
2107 return 0;
2108 break;
2110 case 'u':
2111 /* These are just backpointers, so they don't matter. */
2112 break;
2114 case '0':
2115 break;
2117 /* It is believed that rtx's at this level will never
2118 contain anything but integers and other rtx's,
2119 except within LABEL_REFs and SYMBOL_REFs. */
2120 default:
2121 gcc_unreachable ();
2124 return 1;
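/* Editorial example: if pseudo 102's movable was matched to pseudo
   101's by combine_movables, regs_match_p accepts the pair, so
   (plus:SI (reg:SI 101) (const_int 4)) and
   (plus:SI (reg:SI 102) (const_int 4)) compare equal here even though
   plain rtx_equal_p would reject them.  The register numbers are
   invented for illustration.  */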
2127 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
2128 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
2129 references is incremented once for each added note. */
2131 static void
2132 add_label_notes (rtx x, rtx insns)
2134 enum rtx_code code = GET_CODE (x);
2135 int i, j;
2136 const char *fmt;
2137 rtx insn;
2139 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2141 /* This code used to ignore labels that referred to dispatch tables to
2142 avoid flow generating (slightly) worse code.
2144 We no longer ignore such label references (see LABEL_REF handling in
2145 mark_jump_label for additional information). */
2146 for (insn = insns; insn; insn = NEXT_INSN (insn))
2147 if (reg_mentioned_p (XEXP (x, 0), insn))
2149 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
2150 REG_NOTES (insn));
2151 if (LABEL_P (XEXP (x, 0)))
2152 LABEL_NUSES (XEXP (x, 0))++;
2156 fmt = GET_RTX_FORMAT (code);
2157 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2159 if (fmt[i] == 'e')
2160 add_label_notes (XEXP (x, i), insns);
2161 else if (fmt[i] == 'E')
2162 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2163 add_label_notes (XVECEXP (x, i, j), insns);
2167 /* Scan MOVABLES, and move the insns that deserve to be moved.
2168 If two matching movables are combined, replace one reg with the
2169 other throughout. */
2171 static void
2172 move_movables (struct loop *loop, struct loop_movables *movables,
2173 int threshold, int insn_count)
2175 struct loop_regs *regs = LOOP_REGS (loop);
2176 int nregs = regs->num;
2177 rtx new_start = 0;
2178 struct movable *m;
2179 rtx p;
2180 rtx loop_start = loop->start;
2181 rtx loop_end = loop->end;
2182 /* Map of pseudo-register replacements to handle combining
2183 when we move several insns that load the same value
2184 into different pseudo-registers. */
2185 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
2186 char *already_moved = xcalloc (nregs, sizeof (char));
2188 for (m = movables->head; m; m = m->next)
2190 /* Describe this movable insn. */
2192 if (loop_dump_stream)
2194 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
2195 INSN_UID (m->insn), m->regno, m->lifetime);
2196 if (m->consec > 0)
2197 fprintf (loop_dump_stream, "consec %d, ", m->consec);
2198 if (m->cond)
2199 fprintf (loop_dump_stream, "cond ");
2200 if (m->force)
2201 fprintf (loop_dump_stream, "force ");
2202 if (m->global)
2203 fprintf (loop_dump_stream, "global ");
2204 if (m->done)
2205 fprintf (loop_dump_stream, "done ");
2206 if (m->move_insn)
2207 fprintf (loop_dump_stream, "move-insn ");
2208 if (m->match)
2209 fprintf (loop_dump_stream, "matches %d ",
2210 INSN_UID (m->match->insn));
2211 if (m->forces)
2212 fprintf (loop_dump_stream, "forces %d ",
2213 INSN_UID (m->forces->insn));
2216 /* Ignore the insn if it's already done (it matched something else).
2217 Otherwise, see if it is now safe to move. */
2219 if (!m->done
2220 && (! m->cond
2221 || (1 == loop_invariant_p (loop, m->set_src)
2222 && (m->dependencies == 0
2223 || 1 == loop_invariant_p (loop, m->dependencies))
2224 && (m->consec == 0
2225 || 1 == consec_sets_invariant_p (loop, m->set_dest,
2226 m->consec + 1,
2227 m->insn))))
2228 && (! m->forces || m->forces->done))
2230 int regno;
2231 rtx p;
2232 int savings = m->savings;
2234 /* We have an insn that is safe to move.
2235 Compute its desirability. */
2237 p = m->insn;
2238 regno = m->regno;
2240 if (loop_dump_stream)
2241 fprintf (loop_dump_stream, "savings %d ", savings);
2243 if (regs->array[regno].moved_once && loop_dump_stream)
2244 fprintf (loop_dump_stream, "halved since already moved ");
2246 /* An insn MUST be moved if we already moved something else
2247 which is safe only if this one is moved too: that is,
2248 if already_moved[REGNO] is nonzero. */
2250 /* An insn is desirable to move if the new lifetime of the
2251 register is no more than THRESHOLD times the old lifetime.
2252 If it's not desirable, it means the loop is so big
2253 that moving won't speed things up much,
2254 and it is liable to make register usage worse. */
2256 /* It is also desirable to move if it can be moved at no
2257 extra cost because something else was already moved. */
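/* Editorial example with illustrative numbers: given threshold 6,
   savings 2 and lifetime 10, the product 6 * 2 * 10 = 120 meets the
   bar for a 100-insn loop (120 >= 100); if the reg had already been
   moved out of another loop, the bar doubles to insn_count * 2 = 200
   and the move is rejected as undesirable.  */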
2259 if (already_moved[regno]
2260 || (threshold * savings * m->lifetime) >=
2261 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
2262 || (m->forces && m->forces->done
2263 && regs->array[m->forces->regno].n_times_set == 1))
2265 int count;
2266 struct movable *m1;
2267 rtx first = NULL_RTX;
2268 rtx newreg = NULL_RTX;
2270 if (m->insert_temp)
2271 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
2273 /* Now move the insns that set the reg. */
2275 if (m->partial && m->match)
2277 rtx newpat, i1;
2278 rtx r1, r2;
2279 /* Find the end of this chain of matching regs.
2280 Thus, we load each reg in the chain from that one reg.
2281 And that reg is loaded with 0 directly,
2282 since it has ->match == 0. */
2283 for (m1 = m; m1->match; m1 = m1->match);
2284 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
2285 SET_DEST (PATTERN (m1->insn)));
2286 i1 = loop_insn_hoist (loop, newpat);
2288 /* Mark the moved, invariant reg as being allowed to
2289 share a hard reg with the other matching invariant. */
2290 REG_NOTES (i1) = REG_NOTES (m->insn);
2291 r1 = SET_DEST (PATTERN (m->insn));
2292 r2 = SET_DEST (PATTERN (m1->insn));
2293 regs_may_share
2294 = gen_rtx_EXPR_LIST (VOIDmode, r1,
2295 gen_rtx_EXPR_LIST (VOIDmode, r2,
2296 regs_may_share));
2297 delete_insn (m->insn);
2299 if (new_start == 0)
2300 new_start = i1;
2302 if (loop_dump_stream)
2303 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2305 /* If we are to re-generate the item being moved with a
2306 new move insn, first delete what we have and then emit
2307 the move insn before the loop. */
2308 else if (m->move_insn)
2310 rtx i1, temp, seq;
2312 for (count = m->consec; count >= 0; count--)
2314 if (!NOTE_P (p))
2316 /* If this is the first insn of a library
2317 call sequence, something is very
2318 wrong. */
2319 gcc_assert (!find_reg_note
2320 (p, REG_LIBCALL, NULL_RTX));
2322 /* If this is the last insn of a libcall
2323 sequence, then delete every insn in the
2324 sequence except the last. The last insn
2325 is handled in the normal manner. */
2326 temp = find_reg_note (p, REG_RETVAL, NULL_RTX);
2328 if (temp)
2330 temp = XEXP (temp, 0);
2331 while (temp != p)
2332 temp = delete_insn (temp);
2336 temp = p;
2337 p = delete_insn (p);
2339 /* simplify_giv_expr expects that it can walk the insns
2340 at m->insn forwards and see this old sequence we are
2341 tossing here. delete_insn does preserve the next
2342 pointers, but when we skip over a NOTE we must fix
2343 it up. Otherwise that code walks into the non-deleted
2344 insn stream. */
2345 while (p && NOTE_P (p))
2346 p = NEXT_INSN (temp) = NEXT_INSN (p);
2348 if (m->insert_temp)
2350 /* Replace the original insn with a move from
2351 our newly created temp. */
2352 start_sequence ();
2353 emit_move_insn (m->set_dest, newreg);
2354 seq = get_insns ();
2355 end_sequence ();
2356 emit_insn_before (seq, p);
2360 start_sequence ();
2361 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2362 m->set_src);
2363 seq = get_insns ();
2364 end_sequence ();
2366 add_label_notes (m->set_src, seq);
2368 i1 = loop_insn_hoist (loop, seq);
2369 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2370 set_unique_reg_note (i1,
2371 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2372 m->set_src);
2374 if (loop_dump_stream)
2375 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2377 /* The more regs we move, the less we like moving them. */
2378 threshold -= 3;
2380 else
2382 for (count = m->consec; count >= 0; count--)
2384 rtx i1, temp;
2386 /* If first insn of libcall sequence, skip to end. */
2387 /* Do this at start of loop, since p is guaranteed to
2388 be an insn here. */
2389 if (!NOTE_P (p)
2390 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2391 p = XEXP (temp, 0);
2393 /* If last insn of libcall sequence, move all
2394 insns except the last before the loop. The last
2395 insn is handled in the normal manner. */
2396 if (!NOTE_P (p)
2397 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2399 rtx fn_address = 0;
2400 rtx fn_reg = 0;
2401 rtx fn_address_insn = 0;
2403 first = 0;
2404 for (temp = XEXP (temp, 0); temp != p;
2405 temp = NEXT_INSN (temp))
2407 rtx body;
2408 rtx n;
2409 rtx next;
2411 if (NOTE_P (temp))
2412 continue;
2414 body = PATTERN (temp);
2416 /* Find the next insn after TEMP,
2417 not counting USE or NOTE insns. */
2418 for (next = NEXT_INSN (temp); next != p;
2419 next = NEXT_INSN (next))
2420 if (! (NONJUMP_INSN_P (next)
2421 && GET_CODE (PATTERN (next)) == USE)
2422 && !NOTE_P (next))
2423 break;
2425 /* If that is the call, this may be the insn
2426 that loads the function address.
2428 Extract the function address from the insn
2429 that loads it into a register.
2430 If this insn was cse'd, we get incorrect code.
2432 So emit a new move insn that copies the
2433 function address into the register that the
2434 call insn will use. flow.c will delete any
2435 redundant stores that we have created. */
2436 if (CALL_P (next)
2437 && GET_CODE (body) == SET
2438 && REG_P (SET_DEST (body))
2439 && (n = find_reg_note (temp, REG_EQUAL,
2440 NULL_RTX)))
2442 fn_reg = SET_SRC (body);
2443 if (!REG_P (fn_reg))
2444 fn_reg = SET_DEST (body);
2445 fn_address = XEXP (n, 0);
2446 fn_address_insn = temp;
2448 /* We have the call insn.
2449 If it uses the register we suspect it might,
2450 load it with the correct address directly. */
2451 if (CALL_P (temp)
2452 && fn_address != 0
2453 && reg_referenced_p (fn_reg, body))
2454 loop_insn_emit_after (loop, 0, fn_address_insn,
2455 gen_move_insn
2456 (fn_reg, fn_address));
2458 if (CALL_P (temp))
2460 i1 = loop_call_insn_hoist (loop, body);
2461 /* Because the USAGE information potentially
2462 contains objects other than hard registers
2463 we need to copy it. */
2464 if (CALL_INSN_FUNCTION_USAGE (temp))
2465 CALL_INSN_FUNCTION_USAGE (i1)
2466 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2468 else
2469 i1 = loop_insn_hoist (loop, body);
2470 if (first == 0)
2471 first = i1;
2472 if (temp == fn_address_insn)
2473 fn_address_insn = i1;
2474 REG_NOTES (i1) = REG_NOTES (temp);
2475 REG_NOTES (temp) = NULL;
2476 delete_insn (temp);
2478 if (new_start == 0)
2479 new_start = first;
2481 if (m->savemode != VOIDmode)
2483 /* P sets REG to zero; but we should clear only
2484 the bits that are not covered by the mode
2485 m->savemode. */
2486 rtx reg = m->set_dest;
2487 rtx sequence;
2488 rtx tem;
2490 start_sequence ();
2491 tem = expand_simple_binop
2492 (GET_MODE (reg), AND, reg,
2493 GEN_INT ((((HOST_WIDE_INT) 1
2494 << GET_MODE_BITSIZE (m->savemode)))
2495 - 1),
2496 reg, 1, OPTAB_LIB_WIDEN);
2497 gcc_assert (tem);
2498 if (tem != reg)
2499 emit_move_insn (reg, tem);
2500 sequence = get_insns ();
2501 end_sequence ();
2502 i1 = loop_insn_hoist (loop, sequence);
2504 else if (CALL_P (p))
2506 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2507 /* Because the USAGE information potentially
2508 contains objects other than hard registers
2509 we need to copy it. */
2510 if (CALL_INSN_FUNCTION_USAGE (p))
2511 CALL_INSN_FUNCTION_USAGE (i1)
2512 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2514 else if (count == m->consec && m->move_insn_first)
2516 rtx seq;
2517 /* The SET_SRC might not be invariant, so we must
2518 use the REG_EQUAL note. */
2519 start_sequence ();
2520 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2521 m->set_src);
2522 seq = get_insns ();
2523 end_sequence ();
2525 add_label_notes (m->set_src, seq);
2527 i1 = loop_insn_hoist (loop, seq);
2528 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2529 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2530 : REG_EQUAL, m->set_src);
2532 else if (m->insert_temp)
2534 rtx *reg_map2 = xcalloc (REGNO (newreg),
2535 sizeof(rtx));
2536 reg_map2[m->regno] = newreg;
2538 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2539 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2540 free (reg_map2);
2542 else
2543 i1 = loop_insn_hoist (loop, PATTERN (p));
2545 if (REG_NOTES (i1) == 0)
2547 REG_NOTES (i1) = REG_NOTES (p);
2548 REG_NOTES (p) = NULL;
2550 /* If there is a REG_EQUAL note present whose value
2551 is not loop invariant, then delete it, since it
2552 may cause problems with later optimization passes.
2553 It is possible for cse to create such notes
2554 as a result of record_jump_cond. */
2556 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2557 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2558 remove_note (i1, temp);
2561 if (new_start == 0)
2562 new_start = i1;
2564 if (loop_dump_stream)
2565 fprintf (loop_dump_stream, " moved to %d",
2566 INSN_UID (i1));
2568 /* If library call, now fix the REG_NOTES that contain
2569 insn pointers, namely REG_LIBCALL on FIRST
2570 and REG_RETVAL on I1. */
2571 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2573 XEXP (temp, 0) = first;
2574 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2575 XEXP (temp, 0) = i1;
2578 temp = p;
2579 delete_insn (p);
2580 p = NEXT_INSN (p);
2582 /* simplify_giv_expr expects that it can walk the insns
2583 at m->insn forwards and see this old sequence we are
2584 tossing here. delete_insn does preserve the next
2585 pointers, but when we skip over a NOTE we must fix
2586 it up. Otherwise that code walks into the non-deleted
2587 insn stream. */
2588 while (p && NOTE_P (p))
2589 p = NEXT_INSN (temp) = NEXT_INSN (p);
2591 if (m->insert_temp)
2593 rtx seq;
2594 /* Replace the original insn with a move from
2595 our newly created temp. */
2596 start_sequence ();
2597 emit_move_insn (m->set_dest, newreg);
2598 seq = get_insns ();
2599 end_sequence ();
2600 emit_insn_before (seq, p);
2604 /* The more regs we move, the less we like moving them. */
2605 threshold -= 3;
2608 m->done = 1;
2610 if (!m->insert_temp)
2612 /* Any other movable that loads the same register
2613 MUST be moved. */
2614 already_moved[regno] = 1;
2616 /* This reg has been moved out of one loop. */
2617 regs->array[regno].moved_once = 1;
2619 /* The reg set here is now invariant. */
2620 if (! m->partial)
2622 int i;
2623 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2624 regs->array[regno+i].set_in_loop = 0;
2627 /* Change the length-of-life info for the register
2628 to say it lives at least the full length of this loop.
2629 This will help guide optimizations in outer loops. */
2631 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2632 /* This is the old insn before all the moved insns.
2633 We can't use the moved insn because it is out of range
2634 in uid_luid. Only the old insns have luids. */
2635 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2636 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2637 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2640 /* Combine with this moved insn any other matching movables. */
2642 if (! m->partial)
2643 for (m1 = movables->head; m1; m1 = m1->next)
2644 if (m1->match == m)
2646 rtx temp;
2648 /* Schedule the reg loaded by M1
2649 for replacement so that it shares the reg of M.
2650 If the modes differ (only possible in restricted
2651 circumstances), make a SUBREG.
2653 Note this assumes that the target dependent files
2654 treat REG and SUBREG equally, including within
2655 GO_IF_LEGITIMATE_ADDRESS and in all the
2656 predicates since we never verify that replacing the
2657 original register with a SUBREG results in a
2658 recognizable insn. */
2659 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2660 reg_map[m1->regno] = m->set_dest;
2661 else
2662 reg_map[m1->regno]
2663 = gen_lowpart_common (GET_MODE (m1->set_dest),
2664 m->set_dest);
2666 /* Get rid of the matching insn
2667 and prevent further processing of it. */
2668 m1->done = 1;
2670 /* If library call, delete all insns. */
2671 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2672 NULL_RTX)))
2673 delete_insn_chain (XEXP (temp, 0), m1->insn);
2674 else
2675 delete_insn (m1->insn);
2677 /* Any other movable that loads the same register
2678 MUST be moved. */
2679 already_moved[m1->regno] = 1;
2681 /* The reg merged here is now invariant,
2682 if the reg it matches is invariant. */
2683 if (! m->partial)
2685 int i;
2686 for (i = 0;
2687 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2688 i++)
2689 regs->array[m1->regno+i].set_in_loop = 0;
2693 else if (loop_dump_stream)
2694 fprintf (loop_dump_stream, "not desirable");
2696 else if (loop_dump_stream && !m->match)
2697 fprintf (loop_dump_stream, "not safe");
2699 if (loop_dump_stream)
2700 fprintf (loop_dump_stream, "\n");
2703 if (new_start == 0)
2704 new_start = loop_start;
2706 /* Go through all the instructions in the loop, making
2707 all the register substitutions scheduled in REG_MAP. */
2708 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2709 if (INSN_P (p))
2711 replace_regs (PATTERN (p), reg_map, nregs, 0);
2712 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2713 INSN_CODE (p) = -1;
2716 /* Clean up. */
2717 free (reg_map);
2718 free (already_moved);
2722 static void
2723 loop_movables_add (struct loop_movables *movables, struct movable *m)
2725 if (movables->head == 0)
2726 movables->head = m;
2727 else
2728 movables->last->next = m;
2729 movables->last = m;
2733 static void
2734 loop_movables_free (struct loop_movables *movables)
2736 struct movable *m;
2737 struct movable *m_next;
2739 for (m = movables->head; m; m = m_next)
2741 m_next = m->next;
2742 free (m);
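#if 0
/* Editorial sketch, not part of loop.c: the minimal lifecycle of the
   movables chain managed by the two helpers above.  */
static void
movables_demo (void)
{
  struct loop_movables movables;
  struct movable *m = xmalloc (sizeof (struct movable));

  movables.head = movables.last = 0;
  memset (m, 0, sizeof (struct movable));

  loop_movables_add (&movables, m);   /* Chain now holds one entry.  */
  loop_movables_free (&movables);     /* Walks the chain, freeing each entry.  */
}
#endif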
2746 #if 0
2747 /* Scan X and replace the address of any MEM in it with ADDR.
2748 REG is the address that MEM should have before the replacement. */
2750 static void
2751 replace_call_address (rtx x, rtx reg, rtx addr)
2753 enum rtx_code code;
2754 int i;
2755 const char *fmt;
2757 if (x == 0)
2758 return;
2759 code = GET_CODE (x);
2760 switch (code)
2762 case PC:
2763 case CC0:
2764 case CONST_INT:
2765 case CONST_DOUBLE:
2766 case CONST:
2767 case SYMBOL_REF:
2768 case LABEL_REF:
2769 case REG:
2770 return;
2772 case SET:
2773 /* Short cut for very common case. */
2774 replace_call_address (XEXP (x, 1), reg, addr);
2775 return;
2777 case CALL:
2778 /* Short cut for very common case. */
2779 replace_call_address (XEXP (x, 0), reg, addr);
2780 return;
2782 case MEM:
2783 /* If this MEM uses a reg other than the one we expected,
2784 something is wrong. */
2785 gcc_assert (XEXP (x, 0) == reg);
2786 XEXP (x, 0) = addr;
2787 return;
2789 default:
2790 break;
2793 fmt = GET_RTX_FORMAT (code);
2794 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2796 if (fmt[i] == 'e')
2797 replace_call_address (XEXP (x, i), reg, addr);
2798 else if (fmt[i] == 'E')
2800 int j;
2801 for (j = 0; j < XVECLEN (x, i); j++)
2802 replace_call_address (XVECEXP (x, i, j), reg, addr);
2806 #endif
2808 /* Return the number of memory refs to addresses that vary
2809 in the rtx X. */
2811 static int
2812 count_nonfixed_reads (const struct loop *loop, rtx x)
2814 enum rtx_code code;
2815 int i;
2816 const char *fmt;
2817 int value;
2819 if (x == 0)
2820 return 0;
2822 code = GET_CODE (x);
2823 switch (code)
2825 case PC:
2826 case CC0:
2827 case CONST_INT:
2828 case CONST_DOUBLE:
2829 case CONST:
2830 case SYMBOL_REF:
2831 case LABEL_REF:
2832 case REG:
2833 return 0;
2835 case MEM:
2836 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2837 + count_nonfixed_reads (loop, XEXP (x, 0)));
2839 default:
2840 break;
2843 value = 0;
2844 fmt = GET_RTX_FORMAT (code);
2845 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2847 if (fmt[i] == 'e')
2848 value += count_nonfixed_reads (loop, XEXP (x, i));
2849 if (fmt[i] == 'E')
2851 int j;
2852 for (j = 0; j < XVECLEN (x, i); j++)
2853 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2856 return value;
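/* Editorial example: for (mem (plus (reg i) (const_int 4))) where
   reg i is set in the loop, the MEM case above contributes 1 (the
   address is not invariant) and the recursive walk of the address
   finds no further MEMs, so the count is 1.  */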
2859 /* Scan a loop setting the elements `loops_enclosed',
2860 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2861 `unknown_address_altered', `unknown_constant_address_altered', and
2862 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2863 list `store_mems' in LOOP. */
2865 static void
2866 prescan_loop (struct loop *loop)
2868 int level = 1;
2869 rtx insn;
2870 struct loop_info *loop_info = LOOP_INFO (loop);
2871 rtx start = loop->start;
2872 rtx end = loop->end;
2873 /* The label after END. Jumping here is just like falling off the
2874 end of the loop. We use next_nonnote_insn instead of next_label
2875 as a hedge against the (pathological) case where some actual insn
2876 might end up between the two. */
2877 rtx exit_target = next_nonnote_insn (end);
2879 loop_info->has_indirect_jump = indirect_jump_in_function;
2880 loop_info->pre_header_has_call = 0;
2881 loop_info->has_call = 0;
2882 loop_info->has_nonconst_call = 0;
2883 loop_info->has_prefetch = 0;
2884 loop_info->has_volatile = 0;
2885 loop_info->has_tablejump = 0;
2886 loop_info->has_multiple_exit_targets = 0;
2887 loop->level = 1;
2889 loop_info->unknown_address_altered = 0;
2890 loop_info->unknown_constant_address_altered = 0;
2891 loop_info->store_mems = NULL_RTX;
2892 loop_info->first_loop_store_insn = NULL_RTX;
2893 loop_info->mems_idx = 0;
2894 loop_info->num_mem_sets = 0;
2896 for (insn = start; insn && !LABEL_P (insn);
2897 insn = PREV_INSN (insn))
2899 if (CALL_P (insn))
2901 loop_info->pre_header_has_call = 1;
2902 break;
2906 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2907 insn = NEXT_INSN (insn))
2909 switch (GET_CODE (insn))
2911 case NOTE:
2912 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2914 ++level;
2915 /* Count number of loops contained in this one. */
2916 loop->level++;
2918 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2919 --level;
2920 break;
2922 case CALL_INSN:
2923 if (! CONST_OR_PURE_CALL_P (insn))
2925 loop_info->unknown_address_altered = 1;
2926 loop_info->has_nonconst_call = 1;
2928 else if (pure_call_p (insn))
2929 loop_info->has_nonconst_call = 1;
2930 loop_info->has_call = 1;
2931 if (can_throw_internal (insn))
2932 loop_info->has_multiple_exit_targets = 1;
2933 break;
2935 case JUMP_INSN:
2936 if (! loop_info->has_multiple_exit_targets)
2938 rtx set = pc_set (insn);
2940 if (set)
2942 rtx src = SET_SRC (set);
2943 rtx label1, label2;
2945 if (GET_CODE (src) == IF_THEN_ELSE)
2947 label1 = XEXP (src, 1);
2948 label2 = XEXP (src, 2);
2950 else
2952 label1 = src;
2953 label2 = NULL_RTX;
2958 if (label1 && label1 != pc_rtx)
2960 if (GET_CODE (label1) != LABEL_REF)
2962 /* Something tricky. */
2963 loop_info->has_multiple_exit_targets = 1;
2964 break;
2966 else if (XEXP (label1, 0) != exit_target
2967 && LABEL_OUTSIDE_LOOP_P (label1))
2969 /* A jump outside the current loop. */
2970 loop_info->has_multiple_exit_targets = 1;
2971 break;
2975 label1 = label2;
2976 label2 = NULL_RTX;
2978 while (label1);
2980 else
2982 /* A return, or something tricky. */
2983 loop_info->has_multiple_exit_targets = 1;
2986 /* Fall through. */
2988 case INSN:
2989 if (volatile_refs_p (PATTERN (insn)))
2990 loop_info->has_volatile = 1;
2992 if (JUMP_P (insn)
2993 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2994 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2995 loop_info->has_tablejump = 1;
2997 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2998 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2999 loop_info->first_loop_store_insn = insn;
3001 if (flag_non_call_exceptions && can_throw_internal (insn))
3002 loop_info->has_multiple_exit_targets = 1;
3003 break;
3005 default:
3006 break;
3010 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
3011 if (/* An exception thrown by a called function might land us
3012 anywhere. */
3013 ! loop_info->has_nonconst_call
3014 /* We don't want loads for MEMs moved to a location before the
3015 one at which their stack memory becomes allocated. (Note
3016 that this is not a problem for malloc, etc., since those
3017 require actual function calls.) */
3018 && ! current_function_calls_alloca
3019 /* There are ways to leave the loop other than falling off the
3020 end. */
3021 && ! loop_info->has_multiple_exit_targets)
3022 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
3023 insn = NEXT_INSN (insn))
3024 for_each_rtx (&insn, insert_loop_mem, loop_info);
3026 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
3027 that loop_invariant_p and load_mems can use true_dependence
3028 to determine what is really clobbered. */
3029 if (loop_info->unknown_address_altered)
3031 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3033 loop_info->store_mems
3034 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3036 if (loop_info->unknown_constant_address_altered)
3038 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3039 MEM_READONLY_P (mem) = 1;
3040 loop_info->store_mems
3041 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3045 /* Invalidate all loops containing LABEL. */
3047 static void
3048 invalidate_loops_containing_label (rtx label)
3050 struct loop *loop;
3051 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
3052 loop->invalid = 1;
3055 /* Scan the function looking for loops. Record the start and end of each loop.
3056 Also mark as invalid loops any loops that contain a setjmp or are branched
3057 to from outside the loop. */
3059 static void
3060 find_and_verify_loops (rtx f, struct loops *loops)
3062 rtx insn;
3063 rtx label;
3064 int num_loops;
3065 struct loop *current_loop;
3066 struct loop *next_loop;
3067 struct loop *loop;
3069 num_loops = loops->num;
3071 compute_luids (f, NULL_RTX, 0);
3073 /* If there are jumps to undefined labels,
3074 treat them as jumps out of any/all loops.
3075 This also avoids writing past end of tables when there are no loops. */
3076 uid_loop[0] = NULL;
3078 /* Find boundaries of loops, mark which loops are contained within
3079 loops, and invalidate loops that have setjmp. */
3081 num_loops = 0;
3082 current_loop = NULL;
3083 for (insn = f; insn; insn = NEXT_INSN (insn))
3085 if (NOTE_P (insn))
3086 switch (NOTE_LINE_NUMBER (insn))
3088 case NOTE_INSN_LOOP_BEG:
3089 next_loop = loops->array + num_loops;
3090 next_loop->num = num_loops;
3091 num_loops++;
3092 next_loop->start = insn;
3093 next_loop->outer = current_loop;
3094 current_loop = next_loop;
3095 break;
3097 case NOTE_INSN_LOOP_END:
3098 gcc_assert (current_loop);
3100 current_loop->end = insn;
3101 current_loop = current_loop->outer;
3102 break;
3104 default:
3105 break;
3108 if (CALL_P (insn)
3109 && find_reg_note (insn, REG_SETJMP, NULL))
3111 /* In this case, we must invalidate our current loop and any
3112 enclosing loop. */
3113 for (loop = current_loop; loop; loop = loop->outer)
3115 loop->invalid = 1;
3116 if (loop_dump_stream)
3117 fprintf (loop_dump_stream,
3118 "\nLoop at %d ignored due to setjmp.\n",
3119 INSN_UID (loop->start));
3123 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
3124 enclosing loop, but this doesn't matter. */
3125 uid_loop[INSN_UID (insn)] = current_loop;
3128 /* Any loop containing a label used in an initializer must be invalidated,
3129 because it can be jumped into from anywhere. */
3130 for (label = forced_labels; label; label = XEXP (label, 1))
3131 invalidate_loops_containing_label (XEXP (label, 0));
3133 /* Any loop containing a label used for an exception handler must be
3134 invalidated, because it can be jumped into from anywhere. */
3135 for_each_eh_label (invalidate_loops_containing_label);
3137 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
3138 loop that it is not contained within, that loop is marked invalid.
3139 If any INSN or CALL_INSN uses a label's address, then the loop containing
3140 that label is marked invalid, because it could be jumped into from
3141 anywhere.
3143 Also look for blocks of code ending in an unconditional branch that
3144 exits the loop. If such a block is surrounded by a conditional
3145 branch around the block, move the block elsewhere (see below) and
3146 invert the jump to point to the code block. This may eliminate a
3147 label in our loop and will simplify processing by both us and a
3148 possible second cse pass. */
3150 for (insn = f; insn; insn = NEXT_INSN (insn))
3151 if (INSN_P (insn))
3153 struct loop *this_loop = uid_loop[INSN_UID (insn)];
3155 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
3157 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
3158 if (note)
3159 invalidate_loops_containing_label (XEXP (note, 0));
3162 if (!JUMP_P (insn))
3163 continue;
3165 mark_loop_jump (PATTERN (insn), this_loop);
3167 /* See if this is an unconditional branch outside the loop. */
3168 if (this_loop
3169 && (GET_CODE (PATTERN (insn)) == RETURN
3170 || (any_uncondjump_p (insn)
3171 && onlyjump_p (insn)
3172 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
3173 != this_loop)))
3174 && get_max_uid () < max_uid_for_loop)
3176 rtx p;
3177 rtx our_next = next_real_insn (insn);
3178 rtx last_insn_to_move = NEXT_INSN (insn);
3179 struct loop *dest_loop;
3180 struct loop *outer_loop = NULL;
3182 /* Go backwards until we reach the start of the loop, a label,
3183 or a JUMP_INSN. */
3184 for (p = PREV_INSN (insn);
3185 !LABEL_P (p)
3186 && ! (NOTE_P (p)
3187 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3188 && !JUMP_P (p);
3189 p = PREV_INSN (p))
3192 /* Check for the case where we have a jump to an inner nested
3193 loop, and do not perform the optimization in that case. */
3195 if (JUMP_LABEL (insn))
3197 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
3198 if (dest_loop)
3200 for (outer_loop = dest_loop; outer_loop;
3201 outer_loop = outer_loop->outer)
3202 if (outer_loop == this_loop)
3203 break;
3207 /* Make sure that the target of P is within the current loop. */
3209 if (JUMP_P (p) && JUMP_LABEL (p)
3210 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
3211 outer_loop = this_loop;
3213 /* If we stopped on a JUMP_INSN to the next insn after INSN,
3214 we have a block of code to try to move.
3216 We look backward and then forward from the target of INSN
3217 to find a BARRIER at the same loop depth as the target.
3218 If we find such a BARRIER, we make a new label for the start
3219 of the block, invert the jump in P and point it to that label,
3220 and move the block of code to the spot we found. */
3222 if (! outer_loop
3223 && JUMP_P (p)
3224 && JUMP_LABEL (p) != 0
3225 /* Just ignore jumps to labels that were never emitted.
3226 These always indicate compilation errors. */
3227 && INSN_UID (JUMP_LABEL (p)) != 0
3228 && any_condjump_p (p) && onlyjump_p (p)
3229 && next_real_insn (JUMP_LABEL (p)) == our_next
3230 /* If it's not safe to move the sequence, then we
3231 mustn't try. */
3232 && insns_safe_to_move_p (p, NEXT_INSN (insn),
3233 &last_insn_to_move))
3235 rtx target
3236 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
3237 struct loop *target_loop = uid_loop[INSN_UID (target)];
3238 rtx loc, loc2;
3239 rtx tmp;
3241 /* Search for possible garbage past the conditional jumps
3242 and look for the last barrier. */
3243 for (tmp = last_insn_to_move;
3244 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
3245 if (BARRIER_P (tmp))
3246 last_insn_to_move = tmp;
3248 for (loc = target; loc; loc = PREV_INSN (loc))
3249 if (BARRIER_P (loc)
3250 /* Don't move things inside a tablejump. */
3251 && ((loc2 = next_nonnote_insn (loc)) == 0
3252 || !LABEL_P (loc2)
3253 || (loc2 = next_nonnote_insn (loc2)) == 0
3254 || !JUMP_P (loc2)
3255 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3256 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3257 && uid_loop[INSN_UID (loc)] == target_loop)
3258 break;
3260 if (loc == 0)
3261 for (loc = target; loc; loc = NEXT_INSN (loc))
3262 if (BARRIER_P (loc)
3263 /* Don't move things inside a tablejump. */
3264 && ((loc2 = next_nonnote_insn (loc)) == 0
3265 || !LABEL_P (loc2)
3266 || (loc2 = next_nonnote_insn (loc2)) == 0
3267 || !JUMP_P (loc2)
3268 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3269 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3270 && uid_loop[INSN_UID (loc)] == target_loop)
3271 break;
3273 if (loc)
3275 rtx cond_label = JUMP_LABEL (p);
3276 rtx new_label = get_label_after (p);
3278 /* Ensure our label doesn't go away. */
3279 LABEL_NUSES (cond_label)++;
3281 /* Verify that uid_loop is large enough and that
3282 we can invert P. */
3283 if (invert_jump (p, new_label, 1))
3285 rtx q, r;
3286 bool only_notes;
3288 /* If no suitable BARRIER was found, create a suitable
3289 one before TARGET. Since TARGET is a fall through
3290 path, we'll need to insert a jump around our block
3291 and add a BARRIER before TARGET.
3293 This creates an extra unconditional jump outside
3294 the loop. However, the benefits of removing rarely
3295 executed instructions from inside the loop usually
3296 outweighs the cost of the extra unconditional jump
3297 outside the loop. */
3298 if (loc == 0)
3300 rtx temp;
3302 temp = gen_jump (JUMP_LABEL (insn));
3303 temp = emit_jump_insn_before (temp, target);
3304 JUMP_LABEL (temp) = JUMP_LABEL (insn);
3305 LABEL_NUSES (JUMP_LABEL (insn))++;
3306 loc = emit_barrier_before (target);
3309 /* Include the BARRIER after INSN and copy the
3310 block after LOC. */
3311 only_notes = squeeze_notes (&new_label,
3312 &last_insn_to_move);
3313 gcc_assert (!only_notes);
3315 reorder_insns (new_label, last_insn_to_move, loc);
3317 /* All those insns are now in TARGET_LOOP. */
3318 for (q = new_label;
3319 q != NEXT_INSN (last_insn_to_move);
3320 q = NEXT_INSN (q))
3321 uid_loop[INSN_UID (q)] = target_loop;
3323 /* The label jumped to by INSN is no longer a loop
3324 exit. Unless INSN does not have a label (e.g.,
3325 it is a RETURN insn), search loop->exit_labels
3326 to find its label_ref, and remove it. Also turn
3327 off LABEL_OUTSIDE_LOOP_P bit. */
3328 if (JUMP_LABEL (insn))
3330 for (q = 0, r = this_loop->exit_labels;
3332 q = r, r = LABEL_NEXTREF (r))
3333 if (XEXP (r, 0) == JUMP_LABEL (insn))
3335 LABEL_OUTSIDE_LOOP_P (r) = 0;
3336 if (q)
3337 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3338 else
3339 this_loop->exit_labels = LABEL_NEXTREF (r);
3340 break;
3343 for (loop = this_loop; loop && loop != target_loop;
3344 loop = loop->outer)
3345 loop->exit_count--;
3347 /* If we didn't find it, then something is
3348 wrong. */
3349 gcc_assert (r);
3352 /* P is now a jump outside the loop, so it must be put
3353 in loop->exit_labels, and marked as such.
3354 The easiest way to do this is to just call
3355 mark_loop_jump again for P. */
3356 mark_loop_jump (PATTERN (p), this_loop);
3358 /* If INSN now jumps to the insn after it,
3359 delete INSN. */
3360 if (JUMP_LABEL (insn) != 0
3361 && (next_real_insn (JUMP_LABEL (insn))
3362 == next_real_insn (insn)))
3363 delete_related_insns (insn);
3366 /* Continue the loop after where the conditional
3367 branch used to jump, since the only branch insn
3368 in the block (if it still remains) is an inter-loop
3369 branch and hence needs no processing. */
3370 insn = NEXT_INSN (cond_label);
3372 if (--LABEL_NUSES (cond_label) == 0)
3373 delete_related_insns (cond_label);
3375 /* This loop will be continued with NEXT_INSN (insn). */
3376 insn = PREV_INSN (insn);
3383 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
3384 loops it is contained in, mark the target loop invalid.
3386 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3388 static void
3389 mark_loop_jump (rtx x, struct loop *loop)
3391 struct loop *dest_loop;
3392 struct loop *outer_loop;
3393 int i;
3395 switch (GET_CODE (x))
3397 case PC:
3398 case USE:
3399 case CLOBBER:
3400 case REG:
3401 case MEM:
3402 case CONST_INT:
3403 case CONST_DOUBLE:
3404 case RETURN:
3405 return;
3407 case CONST:
3408 /* There could be a label reference in here. */
3409 mark_loop_jump (XEXP (x, 0), loop);
3410 return;
3412 case PLUS:
3413 case MINUS:
3414 case MULT:
3415 mark_loop_jump (XEXP (x, 0), loop);
3416 mark_loop_jump (XEXP (x, 1), loop);
3417 return;
3419 case LO_SUM:
3420 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3421 mark_loop_jump (XEXP (x, 1), loop);
3422 return;
3424 case SIGN_EXTEND:
3425 case ZERO_EXTEND:
3426 mark_loop_jump (XEXP (x, 0), loop);
3427 return;
3429 case LABEL_REF:
3430 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3432 /* Link together all labels that branch outside the loop. This
3433 is used by final_[bg]iv_value and the loop unrolling code. Also
3434 mark this LABEL_REF so we know that this branch should predict
3435 false. */
3437 /* A check to make sure the label is not in an inner nested loop,
3438 since this does not count as a loop exit. */
3439 if (dest_loop)
3441 for (outer_loop = dest_loop; outer_loop;
3442 outer_loop = outer_loop->outer)
3443 if (outer_loop == loop)
3444 break;
3446 else
3447 outer_loop = NULL;
3449 if (loop && ! outer_loop)
3451 LABEL_OUTSIDE_LOOP_P (x) = 1;
3452 LABEL_NEXTREF (x) = loop->exit_labels;
3453 loop->exit_labels = x;
3455 for (outer_loop = loop;
3456 outer_loop && outer_loop != dest_loop;
3457 outer_loop = outer_loop->outer)
3458 outer_loop->exit_count++;
3461 /* If this is inside a loop, but not in the current loop or one enclosed
3462 by it, it invalidates at least one loop. */
3464 if (! dest_loop)
3465 return;
3467 /* We must invalidate every nested loop containing the target of this
3468 label, except those that also contain the jump insn. */
3470 for (; dest_loop; dest_loop = dest_loop->outer)
3472 /* Stop when we reach a loop that also contains the jump insn. */
3473 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3474 if (dest_loop == outer_loop)
3475 return;
3477 /* If we get here, we know we need to invalidate a loop. */
3478 if (loop_dump_stream && ! dest_loop->invalid)
3479 fprintf (loop_dump_stream,
3480 "\nLoop at %d ignored due to multiple entry points.\n",
3481 INSN_UID (dest_loop->start));
3483 dest_loop->invalid = 1;
3485 return;
3487 case SET:
3488 /* If this is not setting pc, ignore. */
3489 if (SET_DEST (x) == pc_rtx)
3490 mark_loop_jump (SET_SRC (x), loop);
3491 return;
3493 case IF_THEN_ELSE:
3494 mark_loop_jump (XEXP (x, 1), loop);
3495 mark_loop_jump (XEXP (x, 2), loop);
3496 return;
3498 case PARALLEL:
3499 case ADDR_VEC:
3500 for (i = 0; i < XVECLEN (x, 0); i++)
3501 mark_loop_jump (XVECEXP (x, 0, i), loop);
3502 return;
3504 case ADDR_DIFF_VEC:
3505 for (i = 0; i < XVECLEN (x, 1); i++)
3506 mark_loop_jump (XVECEXP (x, 1, i), loop);
3507 return;
3509 default:
3510 /* Strictly speaking this is not a jump into the loop, only a possible
3511 jump out of the loop. However, we have no way to link the destination
3512 of this jump onto the list of exit labels. To be safe we mark this
3513 loop and any containing loops as invalid. */
3514 if (loop)
3516 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3518 if (loop_dump_stream && ! outer_loop->invalid)
3519 fprintf (loop_dump_stream,
3520 "\nLoop at %d ignored due to unknown exit jump.\n",
3521 INSN_UID (outer_loop->start));
3522 outer_loop->invalid = 1;
3525 return;
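/* Editorial example: for a JUMP_INSN in loop A targeting a label in
   loop C, with nesting A > B > C, the LABEL_REF case above finds that
   the ->outer walk from C reaches A, so the label is not recorded as
   an exit; the invalidation walk then marks C and B invalid (the jump
   enters them from outside) and stops at A, which contains the jump
   itself.  */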
3529 /* Return nonzero if there is a label in the range from
3530 insn INSN to and including the insn whose luid is END.
3531 INSN must have an assigned luid (i.e., it must not have
3532 been previously created by loop.c). */
3534 static int
3535 labels_in_range_p (rtx insn, int end)
3537 while (insn && INSN_LUID (insn) <= end)
3539 if (LABEL_P (insn))
3540 return 1;
3541 insn = NEXT_INSN (insn);
3544 return 0;
3547 /* Record that a memory reference X is being set. */
3549 static void
3550 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3551 void *data ATTRIBUTE_UNUSED)
3553 struct loop_info *loop_info = data;
3555 if (x == 0 || !MEM_P (x))
3556 return;
3558 /* Count number of memory writes.
3559 This affects heuristics in strength_reduce. */
3560 loop_info->num_mem_sets++;
3562 /* BLKmode MEM means all memory is clobbered. */
3563 if (GET_MODE (x) == BLKmode)
3565 if (MEM_READONLY_P (x))
3566 loop_info->unknown_constant_address_altered = 1;
3567 else
3568 loop_info->unknown_address_altered = 1;
3570 return;
3573 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3574 loop_info->store_mems);
3577 /* X is a value modified by an INSN that references a biv inside a loop
3578 exit test (i.e., X is somehow related to the value of the biv). If X
3579 is a pseudo that is used more than once, then the biv is (effectively)
3580 used more than once. DATA is a pointer to a loop_regs structure. */
3582 static void
3583 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3585 struct loop_regs *regs = (struct loop_regs *) data;
3587 if (x == 0)
3588 return;
3590 while (GET_CODE (x) == STRICT_LOW_PART
3591 || GET_CODE (x) == SIGN_EXTRACT
3592 || GET_CODE (x) == ZERO_EXTRACT
3593 || GET_CODE (x) == SUBREG)
3594 x = XEXP (x, 0);
3596 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3597 return;
3599 /* If we do not have usage information, or if we know the register
3600 is used more than once, note that fact for check_dbra_loop. */
3601 if (REGNO (x) >= max_reg_before_loop
3602 || ! regs->array[REGNO (x)].single_usage
3603 || regs->array[REGNO (x)].single_usage == const0_rtx)
3604 regs->multiple_uses = 1;
3607 /* Return nonzero if the rtx X is invariant over the current loop.
3609 The value is 2 if we refer to something only conditionally invariant.
3611 A memory ref is invariant if it is not volatile and does not conflict
3612 with anything stored in `loop_info->store_mems'. */
3614 static int
3615 loop_invariant_p (const struct loop *loop, rtx x)
3617 struct loop_info *loop_info = LOOP_INFO (loop);
3618 struct loop_regs *regs = LOOP_REGS (loop);
3619 int i;
3620 enum rtx_code code;
3621 const char *fmt;
3622 int conditional = 0;
3623 rtx mem_list_entry;
3625 if (x == 0)
3626 return 1;
3627 code = GET_CODE (x);
3628 switch (code)
3630 case CONST_INT:
3631 case CONST_DOUBLE:
3632 case SYMBOL_REF:
3633 case CONST:
3634 return 1;
3636 case LABEL_REF:
3637 return 1;
3639 case PC:
3640 case CC0:
3641 case UNSPEC_VOLATILE:
3642 return 0;
3644 case REG:
3645 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3646 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3647 && ! current_function_has_nonlocal_goto)
3648 return 1;
3650 if (LOOP_INFO (loop)->has_call
3651 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3652 return 0;
3654 /* Out-of-range regs can occur when we are called from unrolling.
3655 These registers created by the unroller are set in the loop,
3656 hence are never invariant.
3657 Other out-of-range regs can be generated by load_mems; those that
3658 are written to in the loop are not invariant, while those that are
3659 not written to are invariant. It would be easy for load_mems
3660 to set n_times_set correctly for these registers; however, there
3661 is no easy way to distinguish them from registers created by the
3662 unroller. */
3664 if (REGNO (x) >= (unsigned) regs->num)
3665 return 0;
3667 if (regs->array[REGNO (x)].set_in_loop < 0)
3668 return 2;
3670 return regs->array[REGNO (x)].set_in_loop == 0;
3672 case MEM:
3673 /* Volatile memory references must be rejected. Do this before
3674 checking for read-only items, so that volatile read-only items
3675 will be rejected also. */
3676 if (MEM_VOLATILE_P (x))
3677 return 0;
3679 /* See if there is any dependence between a store and this load. */
3680 mem_list_entry = loop_info->store_mems;
3681 while (mem_list_entry)
3683 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3684 x, rtx_varies_p))
3685 return 0;
3687 mem_list_entry = XEXP (mem_list_entry, 1);
3690 /* It's not invalidated by a store in memory
3691 but we must still verify the address is invariant. */
3692 break;
3694 case ASM_OPERANDS:
3695 /* Don't mess with insns declared volatile. */
3696 if (MEM_VOLATILE_P (x))
3697 return 0;
3698 break;
3700 default:
3701 break;
3704 fmt = GET_RTX_FORMAT (code);
3705 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3707 if (fmt[i] == 'e')
3709 int tem = loop_invariant_p (loop, XEXP (x, i));
3710 if (tem == 0)
3711 return 0;
3712 if (tem == 2)
3713 conditional = 1;
3715 else if (fmt[i] == 'E')
3717 int j;
3718 for (j = 0; j < XVECLEN (x, i); j++)
3720 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3721 if (tem == 0)
3722 return 0;
3723 if (tem == 2)
3724 conditional = 1;
3730 return 1 + conditional;
3733 /* Return nonzero if all the insns in the loop that set REG
3734 are INSN and the immediately following insns,
3735 and if each of those insns sets REG in an invariant way
3736 (not counting uses of REG in them).
3738 The value is 2 if some of these insns are only conditionally invariant.
3740 We assume that INSN itself is the first set of REG
3741 and that its source is invariant. */
3743 static int
3744 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3745 rtx insn)
3747 struct loop_regs *regs = LOOP_REGS (loop);
3748 rtx p = insn;
3749 unsigned int regno = REGNO (reg);
3750 rtx temp;
3751 /* Number of sets we have to insist on finding after INSN. */
3752 int count = n_sets - 1;
3753 int old = regs->array[regno].set_in_loop;
3754 int value = 0;
3755 int this;
3757 /* If N_SETS hit the limit, we can't rely on its value. */
3758 if (n_sets == 127)
3759 return 0;
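/* Temporarily pretend REG is never set in the loop, so that uses of
   REG within the consecutive sets do not make loop_invariant_p fail;
   the old count is restored before we return.  */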
3761 regs->array[regno].set_in_loop = 0;
3763 while (count > 0)
3765 enum rtx_code code;
3766 rtx set;
3768 p = NEXT_INSN (p);
3769 code = GET_CODE (p);
3771 /* If library call, skip to end of it. */
3772 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3773 p = XEXP (temp, 0);
3775 this = 0;
3776 if (code == INSN
3777 && (set = single_set (p))
3778 && REG_P (SET_DEST (set))
3779 && REGNO (SET_DEST (set)) == regno)
3781 this = loop_invariant_p (loop, SET_SRC (set));
3782 if (this != 0)
3783 value |= this;
3784 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3786 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3787 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3788 notes are OK. */
3789 this = (CONSTANT_P (XEXP (temp, 0))
3790 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3791 && loop_invariant_p (loop, XEXP (temp, 0))));
3792 if (this != 0)
3793 value |= this;
3796 if (this != 0)
3797 count--;
3798 else if (code != NOTE)
3800 regs->array[regno].set_in_loop = old;
3801 return 0;
3805 regs->array[regno].set_in_loop = old;
3806 /* If loop_invariant_p ever returned 2, propagate that conditional bit: the result is then 3 rather than 1. */
3807 return 1 + (value & 2);
3810 /* Look at all uses (not sets) of registers in X. For each, if it is
3811 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3812 a different insn, set USAGE[REGNO] to const0_rtx. */
3814 static void
3815 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3817 enum rtx_code code = GET_CODE (x);
3818 const char *fmt = GET_RTX_FORMAT (code);
3819 int i, j;
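/* REGS->array[R].single_usage encodes three states: zero (no use of
   R seen yet), an insn (R used only within that insn so far), and
   const0_rtx (R used in more than one insn).  */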
3821 if (code == REG)
3822 regs->array[REGNO (x)].single_usage
3823 = (regs->array[REGNO (x)].single_usage != 0
3824 && regs->array[REGNO (x)].single_usage != insn)
3825 ? const0_rtx : insn;
3827 else if (code == SET)
3829 /* Don't count SET_DEST if it is a REG; otherwise count things
3830 in SET_DEST because if a register is partially modified, it won't
3831 show up as a potential movable so we don't care how USAGE is set
3832 for it. */
3833 if (!REG_P (SET_DEST (x)))
3834 find_single_use_in_loop (regs, insn, SET_DEST (x));
3835 find_single_use_in_loop (regs, insn, SET_SRC (x));
3837 else
3838 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3840 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3841 find_single_use_in_loop (regs, insn, XEXP (x, i));
3842 else if (fmt[i] == 'E')
3843 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3844 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3848 /* Count and record any set in X which is contained in INSN. Update
3849 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3850 in X. */
3852 static void
3853 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3855 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3856 /* Don't move a reg that has an explicit clobber.
3857 It's not worth the pain to try to do it correctly. */
3858 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3860 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3862 rtx dest = SET_DEST (x);
3863 while (GET_CODE (dest) == SUBREG
3864 || GET_CODE (dest) == ZERO_EXTRACT
3865 || GET_CODE (dest) == STRICT_LOW_PART)
3866 dest = XEXP (dest, 0);
3867 if (REG_P (dest))
3869 int i;
3870 int regno = REGNO (dest);
3871 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3873 /* If this is the first setting of this reg
3874 in current basic block, and it was set before,
3875 it must be set in two basic blocks, so it cannot
3876 be moved out of the loop. */
3877 if (regs->array[regno].set_in_loop > 0
3878 && last_set[regno] == 0)
3879 regs->array[regno+i].may_not_optimize = 1;
3880 /* If this is not first setting in current basic block,
3881 see if reg was used in between previous one and this.
3882 If so, neither one can be moved. */
3883 if (last_set[regno] != 0
3884 && reg_used_between_p (dest, last_set[regno], insn))
3885 regs->array[regno+i].may_not_optimize = 1;
3886 if (regs->array[regno+i].set_in_loop < 127)
3887 ++regs->array[regno+i].set_in_loop;
3888 last_set[regno+i] = insn;
3894 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3895 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3896 contained in insn INSN is used by any insn that precedes INSN in
3897 cyclic order starting from the loop entry point.
3899 We don't want to use INSN_LUID here because if we restrict INSN to those
3900 that have a valid INSN_LUID, it means we cannot move an invariant out
3901 from an inner loop past two loops. */
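/* That is, we scan forward from LOOP->SCAN_START, wrapping from
   LOOP->END back to LOOP->START, and return 1 if the register is
   used before INSN is reached in that order.  */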
3903 static int
3904 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3906 rtx reg = SET_DEST (set);
3907 rtx p;
3909 /* Scan forward checking for register usage. If we hit INSN, we
3910 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3911 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3913 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3914 return 1;
3916 if (p == loop->end)
3917 p = loop->start;
3920 return 0;
3924 /* Information we collect about arrays that we might want to prefetch. */
3925 struct prefetch_info
3927 struct iv_class *class; /* Class this prefetch is based on. */
3928 struct induction *giv; /* GIV this prefetch is based on. */
3929 rtx base_address; /* Start prefetching from this address plus
3930 index. */
3931 HOST_WIDE_INT index;
3932 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3933 iteration. */
3934 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3935 prefetch area in one iteration. */
3936 unsigned int total_bytes; /* Total bytes loop will access in this block.
3937 This is set only for loops with known
3938 iteration counts and is 0xffffffff
3939 otherwise. */
3940 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3941 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3942 unsigned int write : 1; /* 1 for read/write prefetches. */
3945 /* Data used by check_store function. */
3946 struct check_store_data
3948 rtx mem_address;
3949 int mem_write;
3952 static void check_store (rtx, rtx, void *);
3953 static void emit_prefetch_instructions (struct loop *);
3954 static int rtx_equal_for_prefetch_p (rtx, rtx);
3956 /* Set mem_write when mem_address is found. Used as callback to
3957 note_stores. */
3958 static void
3959 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3961 struct check_store_data *d = (struct check_store_data *) data;
3963 if (MEM_P (x) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3964 d->mem_write = 1;
3967 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3968 important to get some addresses combined. Later more sophisticated
3969 transformations can be added when necessary.
3971 ??? The same trick of swapping operands is done at several other places.
3972 It would be nice to develop some common way to handle this. */
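/* For instance, (plus (reg A) (reg B)) and (plus (reg B) (reg A))
   are considered equal here, although plain rtx_equal_p would
   reject them.  */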
3974 static int
3975 rtx_equal_for_prefetch_p (rtx x, rtx y)
3977 int i;
3978 int j;
3979 enum rtx_code code = GET_CODE (x);
3980 const char *fmt;
3982 if (x == y)
3983 return 1;
3984 if (code != GET_CODE (y))
3985 return 0;
3987 if (COMMUTATIVE_ARITH_P (x))
3989 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3990 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3991 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3992 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3995 /* Compare the elements. If any pair of corresponding elements fails to
3996 match, return 0 for the whole thing. */
3998 fmt = GET_RTX_FORMAT (code);
3999 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4001 switch (fmt[i])
4003 case 'w':
4004 if (XWINT (x, i) != XWINT (y, i))
4005 return 0;
4006 break;
4008 case 'i':
4009 if (XINT (x, i) != XINT (y, i))
4010 return 0;
4011 break;
4013 case 'E':
4014 /* Two vectors must have the same length. */
4015 if (XVECLEN (x, i) != XVECLEN (y, i))
4016 return 0;
4018 /* And the corresponding elements must match. */
4019 for (j = 0; j < XVECLEN (x, i); j++)
4020 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
4021 XVECEXP (y, i, j)) == 0)
4022 return 0;
4023 break;
4025 case 'e':
4026 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
4027 return 0;
4028 break;
4030 case 's':
4031 if (strcmp (XSTR (x, i), XSTR (y, i)))
4032 return 0;
4033 break;
4035 case 'u':
4036 /* These are just backpointers, so they don't matter. */
4037 break;
4039 case '0':
4040 break;
4042 /* It is believed that rtx's at this level will never
4043 contain anything but integers and other rtx's,
4044 except for within LABEL_REFs and SYMBOL_REFs. */
4045 default:
4046 gcc_unreachable ();
4049 return 1;
4052 /* Remove constant addition value from the expression X (when present)
4053 and return it. */
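/* For example, given (plus (reg X) (const_int 8)), *X becomes
   (reg X) and 8 is returned; given (const (plus (symbol_ref S)
   (const_int 4))), *X becomes (symbol_ref S) and 4 is returned.  */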
4055 static HOST_WIDE_INT
4056 remove_constant_addition (rtx *x)
4058 HOST_WIDE_INT addval = 0;
4059 rtx exp = *x;
4061 /* Avoid clobbering a shared CONST expression. */
4062 if (GET_CODE (exp) == CONST)
4064 if (GET_CODE (XEXP (exp, 0)) == PLUS
4065 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
4066 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
4068 *x = XEXP (XEXP (exp, 0), 0);
4069 return INTVAL (XEXP (XEXP (exp, 0), 1));
4071 return 0;
4074 if (GET_CODE (exp) == CONST_INT)
4076 addval = INTVAL (exp);
4077 *x = const0_rtx;
4080 /* For a PLUS expression, recurse on both operands. */
4081 else if (GET_CODE (exp) == PLUS)
4083 addval += remove_constant_addition (&XEXP (exp, 0));
4084 addval += remove_constant_addition (&XEXP (exp, 1));
4086 /* If an operand was a constant, the recursion has replaced it with
4087 zero; remove the extra zero from the expression. */
4088 if (XEXP (exp, 0) == const0_rtx)
4089 *x = XEXP (exp, 1);
4090 else if (XEXP (exp, 1) == const0_rtx)
4091 *x = XEXP (exp, 0);
4094 return addval;
4097 /* Attempt to identify accesses to arrays that are most likely to cause cache
4098 misses, and emit prefetch instructions a few prefetch blocks forward.
4100 To detect the arrays we use the GIV information that was collected by the
4101 strength reduction pass.
4103 The prefetch instructions are generated after the GIV information is done
4104 and before the strength reduction process. The new GIVs are injected into
4105 the strength reduction tables, so the prefetch addresses are optimized as
4106 well.
4108 GIVs are split into base address, stride, and constant addition values.
4109 GIVs with the same address, stride and close addition values are combined
4110 into a single prefetch. Writes to GIVs are also detected, so that
4111 prefetch-for-write instructions can be used for the blocks we write to,
4112 on machines that support write prefetches.
4114 Several heuristics are used to determine when to prefetch. They are
4115 controlled by defined symbols that can be overridden for each target. */
4117 static void
4118 emit_prefetch_instructions (struct loop *loop)
4120 int num_prefetches = 0;
4121 int num_real_prefetches = 0;
4122 int num_real_write_prefetches = 0;
4123 int num_prefetches_before = 0;
4124 int num_write_prefetches_before = 0;
4125 int ahead = 0;
4126 int i;
4127 struct iv_class *bl;
4128 struct induction *iv;
4129 struct prefetch_info info[MAX_PREFETCHES];
4130 struct loop_ivs *ivs = LOOP_IVS (loop);
4132 if (!HAVE_prefetch || PREFETCH_BLOCK == 0)
4133 return;
4135 /* Consider only loops without calls. When a call is present, the loop
4136 is probably slow enough to read the memory without prefetching. */
4137 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
4139 if (loop_dump_stream)
4140 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
4142 return;
4145 /* Don't prefetch in loops known to have few iterations. */
4146 if (PREFETCH_NO_LOW_LOOPCNT
4147 && LOOP_INFO (loop)->n_iterations
4148 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
4150 if (loop_dump_stream)
4151 fprintf (loop_dump_stream,
4152 "Prefetch: ignoring loop: not enough iterations.\n");
4153 return;
4156 /* Search all induction variables and pick those interesting for the prefetch
4157 machinery. */
4158 for (bl = ivs->list; bl; bl = bl->next)
4160 struct induction *biv = bl->biv, *biv1;
4161 int basestride = 0;
4163 biv1 = biv;
4165 /* Expect all BIVs to be executed in each iteration. This makes our
4166 analysis more conservative. */
4167 while (biv1)
4169 /* Discard non-constant additions that we can't handle well yet, and
4170 BIVs that are executed multiple times; such BIVs ought to be
4171 handled in the nested loop. We accept not_every_iteration BIVs,
4172 since these only result in larger strides and make our
4173 heuristics more conservative. */
4174 if (GET_CODE (biv1->add_val) != CONST_INT)
4176 if (loop_dump_stream)
4178 fprintf (loop_dump_stream,
4179 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
4180 REGNO (biv1->src_reg), INSN_UID (biv1->insn));
4181 print_rtl (loop_dump_stream, biv1->add_val);
4182 fprintf (loop_dump_stream, "\n");
4184 break;
4187 if (biv1->maybe_multiple)
4189 if (loop_dump_stream)
4191 fprintf (loop_dump_stream,
4192 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
4193 REGNO (biv1->src_reg), INSN_UID (biv1->insn));
4194 print_rtl (loop_dump_stream, biv1->add_val);
4195 fprintf (loop_dump_stream, "\n");
4197 break;
4200 basestride += INTVAL (biv1->add_val);
4201 biv1 = biv1->next_iv;
4204 if (biv1 || !basestride)
4205 continue;
4207 for (iv = bl->giv; iv; iv = iv->next_iv)
4209 rtx address;
4210 rtx temp;
4211 HOST_WIDE_INT index = 0;
4212 int add = 1;
4213 HOST_WIDE_INT stride = 0;
4214 int stride_sign = 1;
4215 struct check_store_data d;
4216 const char *ignore_reason = NULL;
4217 int size = GET_MODE_SIZE (GET_MODE (iv));
4219 /* See whether an induction variable is interesting to us and if
4220 not, report the reason. */
4221 if (iv->giv_type != DEST_ADDR)
4222 ignore_reason = "giv is not a destination address";
4224 /* We are interested only in constant stride memory references
4225 in order to be able to compute density easily. */
4226 else if (GET_CODE (iv->mult_val) != CONST_INT)
4227 ignore_reason = "stride is not constant";
4229 else
4231 stride = INTVAL (iv->mult_val) * basestride;
4232 if (stride < 0)
4234 stride = -stride;
4235 stride_sign = -1;
4238 /* On some targets, reversed order prefetches are not
4239 worthwhile. */
4240 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
4241 ignore_reason = "reversed order stride";
4243 /* Prefetch of accesses with an extreme stride might not be
4244 worthwhile, either. */
4245 else if (PREFETCH_NO_EXTREME_STRIDE
4246 && stride > PREFETCH_EXTREME_STRIDE)
4247 ignore_reason = "extreme stride";
4249 /* Ignore GIVs with varying add values; we can't predict the
4250 value for the next iteration. */
4251 else if (!loop_invariant_p (loop, iv->add_val))
4252 ignore_reason = "giv has varying add value";
4254 /* Ignore GIVs in the nested loops; they ought to have been
4255 handled already. */
4256 else if (iv->maybe_multiple)
4257 ignore_reason = "giv is in nested loop";
4260 if (ignore_reason != NULL)
4262 if (loop_dump_stream)
4263 fprintf (loop_dump_stream,
4264 "Prefetch: ignoring giv at %d: %s.\n",
4265 INSN_UID (iv->insn), ignore_reason);
4266 continue;
4269 /* Determine the pointer to the basic array we are examining. It is
4270 the sum of the BIV's initial value and the GIV's add_val. */
4271 address = copy_rtx (iv->add_val);
4272 temp = copy_rtx (bl->initial_value);
4274 address = simplify_gen_binary (PLUS, Pmode, temp, address);
4275 index = remove_constant_addition (&address);
4277 d.mem_write = 0;
4278 d.mem_address = *iv->location;
4280 /* When the GIV is not always executed, we might be better off by
4281 not dirtying the cache pages. */
4282 if (PREFETCH_CONDITIONAL || iv->always_executed)
4283 note_stores (PATTERN (iv->insn), check_store, &d);
4284 else
4286 if (loop_dump_stream)
4287 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
4288 INSN_UID (iv->insn), "in conditional code.");
4289 continue;
4292 /* Attempt to find another prefetch to the same array and see if we
4293 can merge this one. */
4294 for (i = 0; i < num_prefetches; i++)
4295 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
4296 && stride == info[i].stride)
4298 /* If both access the same array (the same location, just with
4299 a small difference in constant indexes), merge the prefetches.
4300 Just do the later one; the earlier one will be covered by a
4301 prefetch issued in a previous iteration.
4302 The artificial threshold should not be too small, but also
4303 not bigger than the small portion of memory usually traversed
4304 by a single loop. */
4305 if (index >= info[i].index
4306 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4308 info[i].write |= d.mem_write;
4309 info[i].bytes_accessed += size;
4310 info[i].index = index;
4311 info[i].giv = iv;
4312 info[i].class = bl;
4313 info[i].base_address = address;
4314 add = 0;
4315 break;
4318 if (index < info[i].index
4319 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4321 info[i].write |= d.mem_write;
4322 info[i].bytes_accessed += size;
4323 add = 0;
4324 break;
4328 /* Merging failed. */
4329 if (add)
4331 info[num_prefetches].giv = iv;
4332 info[num_prefetches].class = bl;
4333 info[num_prefetches].index = index;
4334 info[num_prefetches].stride = stride;
4335 info[num_prefetches].base_address = address;
4336 info[num_prefetches].write = d.mem_write;
4337 info[num_prefetches].bytes_accessed = size;
4338 num_prefetches++;
4339 if (num_prefetches >= MAX_PREFETCHES)
4341 if (loop_dump_stream)
4342 fprintf (loop_dump_stream,
4343 "Maximal number of prefetches exceeded.\n");
4344 return;
4350 for (i = 0; i < num_prefetches; i++)
4352 int density;
4354 /* Attempt to calculate the total number of bytes fetched by all
4355 iterations of the loop. Avoid overflow. */
4356 if (LOOP_INFO (loop)->n_iterations
4357 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4358 >= LOOP_INFO (loop)->n_iterations))
4359 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4360 else
4361 info[i].total_bytes = 0xffffffff;
4363 density = info[i].bytes_accessed * 100 / info[i].stride;
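/* Illustrative example: accessing 4 bytes per iteration with a
   16-byte stride gives density = 4 * 100 / 16 = 25, i.e. 25% of
   each stride is actually touched.  */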
4365 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4366 if (PREFETCH_ONLY_DENSE_MEM)
4367 if (density * 256 > PREFETCH_DENSE_MEM * 100
4368 && (info[i].total_bytes / PREFETCH_BLOCK
4369 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4371 info[i].prefetch_before_loop = 1;
4372 info[i].prefetch_in_loop
4373 = (info[i].total_bytes / PREFETCH_BLOCK
4374 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4376 else
4378 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4379 if (loop_dump_stream)
4380 fprintf (loop_dump_stream,
4381 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4382 INSN_UID (info[i].giv->insn), density);
4384 else
4385 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4387 /* Find how many prefetch instructions we'll use within the loop. */
4388 if (info[i].prefetch_in_loop != 0)
4390 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4391 / PREFETCH_BLOCK);
4392 num_real_prefetches += info[i].prefetch_in_loop;
4393 if (info[i].write)
4394 num_real_write_prefetches += info[i].prefetch_in_loop;
4398 /* Determine how many iterations ahead to prefetch within the loop, based
4399 on how many prefetches we currently expect to do within the loop. */
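/* E.g. with SIMULTANEOUS_PREFETCHES == 8 and three prefetch insns in
   the body, AHEAD becomes 8 / 3 = 2; if the quotient is zero we give
   up on prefetching within the loop below.  */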
4400 if (num_real_prefetches != 0)
4402 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4404 if (loop_dump_stream)
4405 fprintf (loop_dump_stream,
4406 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4407 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4408 num_real_prefetches = 0, num_real_write_prefetches = 0;
4411 /* We'll also use AHEAD to determine how many prefetch instructions to
4412 emit before a loop, so don't leave it zero. */
4413 if (ahead == 0)
4414 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4416 for (i = 0; i < num_prefetches; i++)
4418 /* Update if we've decided not to prefetch anything within the loop. */
4419 if (num_real_prefetches == 0)
4420 info[i].prefetch_in_loop = 0;
4422 /* Find how many prefetch instructions we'll use before the loop. */
4423 if (info[i].prefetch_before_loop != 0)
4425 int n = info[i].total_bytes / PREFETCH_BLOCK;
4426 if (n > ahead)
4427 n = ahead;
4428 info[i].prefetch_before_loop = n;
4429 num_prefetches_before += n;
4430 if (info[i].write)
4431 num_write_prefetches_before += n;
4434 if (loop_dump_stream)
4436 if (info[i].prefetch_in_loop == 0
4437 && info[i].prefetch_before_loop == 0)
4438 continue;
4439 fprintf (loop_dump_stream, "Prefetch insn: %d",
4440 INSN_UID (info[i].giv->insn));
4441 fprintf (loop_dump_stream,
4442 "; in loop: %d; before: %d; %s\n",
4443 info[i].prefetch_in_loop,
4444 info[i].prefetch_before_loop,
4445 info[i].write ? "read/write" : "read only");
4446 fprintf (loop_dump_stream,
4447 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4448 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4449 info[i].bytes_accessed, info[i].total_bytes);
4450 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4451 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4452 info[i].index, info[i].stride);
4453 print_rtl (loop_dump_stream, info[i].base_address);
4454 fprintf (loop_dump_stream, "\n");
4458 if (num_real_prefetches + num_prefetches_before > 0)
4460 /* Record that this loop uses prefetch instructions. */
4461 LOOP_INFO (loop)->has_prefetch = 1;
4463 if (loop_dump_stream)
4465 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4466 num_real_prefetches, num_real_write_prefetches);
4467 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4468 num_prefetches_before, num_write_prefetches_before);
4472 for (i = 0; i < num_prefetches; i++)
4474 int y;
4476 for (y = 0; y < info[i].prefetch_in_loop; y++)
4478 rtx loc = copy_rtx (*info[i].giv->location);
4479 rtx insn;
4480 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4481 rtx before_insn = info[i].giv->insn;
4482 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4483 rtx seq;
4485 /* We can save some effort by offsetting the address on
4486 architectures with offsettable memory references. */
4487 if (offsettable_address_p (0, VOIDmode, loc))
4488 loc = plus_constant (loc, bytes_ahead);
4489 else
4491 rtx reg = gen_reg_rtx (Pmode);
4492 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4493 GEN_INT (bytes_ahead), reg,
4494 0, before_insn);
4495 loc = reg;
4498 start_sequence ();
4499 /* Make sure the address operand is valid for prefetch. */
4500 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4501 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4502 loc = force_reg (Pmode, loc);
4503 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4504 GEN_INT (3)));
4505 seq = get_insns ();
4506 end_sequence ();
4507 emit_insn_before (seq, before_insn);
4509 /* Check all insns emitted and record the new GIV
4510 information. */
4511 insn = NEXT_INSN (prev_insn);
4512 while (insn != before_insn)
4514 insn = check_insn_for_givs (loop, insn,
4515 info[i].giv->always_executed,
4516 info[i].giv->maybe_multiple);
4517 insn = NEXT_INSN (insn);
4521 if (PREFETCH_BEFORE_LOOP)
4523 /* Emit insns before the loop to fetch the first cache lines or,
4524 if we're not prefetching within the loop, everything we expect
4525 to need. */
4526 for (y = 0; y < info[i].prefetch_before_loop; y++)
4528 rtx reg = gen_reg_rtx (Pmode);
4529 rtx loop_start = loop->start;
4530 rtx init_val = info[i].class->initial_value;
4531 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4532 info[i].giv->add_val,
4533 GEN_INT (y * PREFETCH_BLOCK));
4535 /* Functions called by LOOP_IV_ADD_MULT_EMIT_BEFORE expect a
4536 non-constant INIT_VAL to have the same mode as REG, which
4537 in this case we know to be Pmode. */
4538 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4540 rtx seq;
4542 start_sequence ();
4543 init_val = convert_to_mode (Pmode, init_val, 0);
4544 seq = get_insns ();
4545 end_sequence ();
4546 loop_insn_emit_before (loop, 0, loop_start, seq);
4548 loop_iv_add_mult_emit_before (loop, init_val,
4549 info[i].giv->mult_val,
4550 add_val, reg, 0, loop_start);
4551 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4552 GEN_INT (3)),
4553 loop_start);
4558 return;
4561 /* Communication with routines called via `note_stores'. */
4563 static rtx note_insn;
4565 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4567 static rtx addr_placeholder;
4569 /* ??? Unfinished optimizations, and possible future optimizations,
4570 for the strength reduction code. */
4572 /* ??? The interaction of biv elimination and recognition of 'constant'
4573 bivs may cause problems. */
4575 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4576 performance problems.
4578 Perhaps don't eliminate things that can be combined with an addressing
4579 mode. Find all givs that have the same biv, mult_val, and add_val;
4580 then for each giv, check to see if its only use dies in a following
4581 memory address. If so, generate a new memory address and check to see
4582 if it is valid. If it is valid, then store the modified memory address,
4583 otherwise, mark the giv as not done so that it will get its own iv. */
4585 /* ??? Could try to optimize branches when it is known that a biv is always
4586 positive. */
4588 /* ??? When replacing a biv in a compare insn, we should replace it with
4589 the closest giv so that an optimized branch can still be recognized by
4590 the combiner, e.g. the VAX acb insn. */
4592 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4593 was rerun in loop_optimize whenever a register was added or moved.
4594 Also, some of the optimizations could be a little less conservative. */
4596 /* Searches the insns between INSN and LOOP->END. Returns 1 if there
4597 is a backward branch in that range that branches to somewhere between
4598 LOOP->START and INSN. Returns 0 otherwise. */
4600 /* ??? This is a quadratic algorithm. It could be rewritten to be linear.
4601 In practice, this is not a problem, because this function is seldom called,
4602 and uses a negligible amount of CPU time on average. */
4604 static int
4605 back_branch_in_range_p (const struct loop *loop, rtx insn)
4607 rtx p, q, target_insn;
4608 rtx loop_start = loop->start;
4609 rtx loop_end = loop->end;
4610 rtx orig_loop_end = loop->end;
4612 /* Stop before we get to the backward branch at the end of the loop. */
4613 loop_end = prev_nonnote_insn (loop_end);
4614 if (BARRIER_P (loop_end))
4615 loop_end = PREV_INSN (loop_end);
4617 /* In case insn has been deleted, search forward for the first
4618 non-deleted insn following it. */
4619 while (INSN_DELETED_P (insn))
4620 insn = NEXT_INSN (insn);
4622 /* Check for the case where insn is the last insn in the loop. Deal
4623 with the case where INSN was a deleted loop test insn, in which case
4624 it will now be the NOTE_LOOP_END. */
4625 if (insn == loop_end || insn == orig_loop_end)
4626 return 0;
4628 for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
4630 if (JUMP_P (p))
4632 target_insn = JUMP_LABEL (p);
4634 /* Search from loop_start to insn, to see if one of them is
4635 the target_insn. We can't use INSN_LUID comparisons here,
4636 since insn may not have an LUID entry. */
4637 for (q = loop_start; q != insn; q = NEXT_INSN (q))
4638 if (q == target_insn)
4639 return 1;
4643 return 0;
4646 /* Scan the loop body and call FNCALL for each insn. In addition to the
4647 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4648 the callback.
4650 NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at
4651 least once for every loop iteration except for the last one.
4653 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
4654 loop iteration. */
4656 typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int);
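/* check_insn_for_bivs and check_insn_for_givs are the callbacks used
   with this iterator.  */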
4657 static void
4658 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4660 int not_every_iteration = 0;
4661 int maybe_multiple = 0;
4662 int past_loop_latch = 0;
4663 bool exit_test_is_entry = false;
4664 rtx p;
4666 /* If loop_scan_start points to the loop exit test, the loop body
4667 cannot be counted on running on every iteration, and we have to
4668 be wary of subversive use of gotos inside expression
4669 statements. */
4670 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4672 exit_test_is_entry = true;
4673 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4676 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4677 for (p = next_insn_in_loop (loop, loop->scan_start);
4678 p != NULL_RTX;
4679 p = next_insn_in_loop (loop, p))
4681 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4683 /* Past CODE_LABEL, we get to insns that may be executed multiple
4684 times. The only way we can be sure that they can't is if every
4685 jump insn between here and the end of the loop either
4686 returns, exits the loop, is a jump to a location that is still
4687 behind the label, or is a jump to the loop start. */
4689 if (LABEL_P (p))
4691 rtx insn = p;
4693 maybe_multiple = 0;
4695 while (1)
4697 insn = NEXT_INSN (insn);
4698 if (insn == loop->scan_start)
4699 break;
4700 if (insn == loop->end)
4702 if (loop->top != 0)
4703 insn = loop->top;
4704 else
4705 break;
4706 if (insn == loop->scan_start)
4707 break;
4710 if (JUMP_P (insn)
4711 && GET_CODE (PATTERN (insn)) != RETURN
4712 && (!any_condjump_p (insn)
4713 || (JUMP_LABEL (insn) != 0
4714 && JUMP_LABEL (insn) != loop->scan_start
4715 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4717 maybe_multiple = 1;
4718 break;
4723 /* Past a jump, we get to insns for which we can't count
4724 on whether they will be executed during each iteration. */
4725 /* This code appears twice in strength_reduce. There is also similar
4726 code in scan_loop. */
4727 if (JUMP_P (p)
4728 /* If we enter the loop in the middle, and scan around to the
4729 beginning, don't set not_every_iteration for that.
4730 This can be any kind of jump, since we want to know if insns
4731 will be executed if the loop is executed. */
4732 && (exit_test_is_entry
4733 || !(JUMP_LABEL (p) == loop->top
4734 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4735 && any_uncondjump_p (p))
4736 || (NEXT_INSN (p) == loop->end
4737 && any_condjump_p (p))))))
4739 rtx label = 0;
4741 /* If this is a jump outside the loop, then it also doesn't
4742 matter. Check to see if the target of this branch is on the
4743 loop->exits_labels list. */
4745 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4746 if (XEXP (label, 0) == JUMP_LABEL (p))
4747 break;
4749 if (!label)
4750 not_every_iteration = 1;
4753 /* Note if we pass a loop latch. If we do, then we cannot clear
4754 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4755 a loop since a jump before the last CODE_LABEL may have started
4756 a new loop iteration.
4758 Note that LOOP_TOP is only set for rotated loops and we need
4759 this check for all loops, so compare against the CODE_LABEL
4760 which immediately follows LOOP_START. */
4761 if (JUMP_P (p)
4762 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4763 past_loop_latch = 1;
4765 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4766 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4767 or not an insn is known to be executed each iteration of the
4768 loop, whether or not any iterations are known to occur.
4770 Therefore, if we have just passed a label and have no more labels
4771 between here and the test insn of the loop, and we have not passed
4772 a jump to the top of the loop, then we know these insns will be
4773 executed each iteration. */
4775 if (not_every_iteration
4776 && !past_loop_latch
4777 && LABEL_P (p)
4778 && no_labels_between_p (p, loop->end))
4779 not_every_iteration = 0;
4783 static void
4784 loop_bivs_find (struct loop *loop)
4786 struct loop_regs *regs = LOOP_REGS (loop);
4787 struct loop_ivs *ivs = LOOP_IVS (loop);
4788 /* Temporary list pointers for traversing ivs->list. */
4789 struct iv_class *bl, **backbl;
4791 ivs->list = 0;
4793 for_each_insn_in_loop (loop, check_insn_for_bivs);
4795 /* Scan ivs->list to remove all regs that proved not to be bivs.
4796 Make a sanity check against regs->n_times_set. */
4797 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4799 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4800 /* Above happens if register modified by subreg, etc. */
4801 /* Make sure it is not recognized as a basic induction var: */
4802 || regs->array[bl->regno].n_times_set != bl->biv_count
4803 /* If never incremented, it is invariant that we decided not to
4804 move. So leave it alone. */
4805 || ! bl->incremented)
4807 if (loop_dump_stream)
4808 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4809 bl->regno,
4810 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4811 ? "not induction variable"
4812 : (! bl->incremented ? "never incremented"
4813 : "count error")));
4815 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4816 *backbl = bl->next;
4818 else
4820 backbl = &bl->next;
4822 if (loop_dump_stream)
4823 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4829 /* Determine how BIVs are initialized by looking through the pre-header
4830 extended basic block. */
4831 static void
4832 loop_bivs_init_find (struct loop *loop)
4834 struct loop_ivs *ivs = LOOP_IVS (loop);
4835 /* Temporary list pointers for traversing ivs->list. */
4836 struct iv_class *bl;
4837 int call_seen;
4838 rtx p;
4840 /* Find initial value for each biv by searching backwards from loop_start,
4841 halting at first label. Also record any test condition. */
4843 call_seen = 0;
4844 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4846 rtx test;
4848 note_insn = p;
4850 if (CALL_P (p))
4851 call_seen = 1;
4853 if (INSN_P (p))
4854 note_stores (PATTERN (p), record_initial, ivs);
4856 /* Record any test of a biv that branches around the loop if there is
4857 no store between it and the start of the loop. We only care about
4858 tests with constants and registers, and only certain of those. */
4859 if (JUMP_P (p)
4860 && JUMP_LABEL (p) != 0
4861 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4862 && (test = get_condition_for_loop (loop, p)) != 0
4863 && REG_P (XEXP (test, 0))
4864 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4865 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4866 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4867 && bl->init_insn == 0)
4869 /* If an NE test, we have an initial value! */
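/* (The branch around the loop is taken when the test is true, so on
   the fall-through path into the loop the NE is false and the biv
   must equal the tested value.)  */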
4870 if (GET_CODE (test) == NE)
4872 bl->init_insn = p;
4873 bl->init_set = gen_rtx_SET (VOIDmode,
4874 XEXP (test, 0), XEXP (test, 1));
4876 else
4877 bl->initial_test = test;
4883 /* Look at each biv and see if we can say anything better about its
4884 initial value from any initializing insns set up above. (This is done
4885 in two passes to avoid missing SETs in a PARALLEL.) */
4886 static void
4887 loop_bivs_check (struct loop *loop)
4889 struct loop_ivs *ivs = LOOP_IVS (loop);
4890 /* Temporary list pointers for traversing ivs->list. */
4891 struct iv_class *bl;
4892 struct iv_class **backbl;
4894 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4896 rtx src;
4897 rtx note;
4899 if (! bl->init_insn)
4900 continue;
4902 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4903 is a constant, use the value of that. */
4904 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4905 && CONSTANT_P (XEXP (note, 0)))
4906 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4907 && CONSTANT_P (XEXP (note, 0))))
4908 src = XEXP (note, 0);
4909 else
4910 src = SET_SRC (bl->init_set);
4912 if (loop_dump_stream)
4913 fprintf (loop_dump_stream,
4914 "Biv %d: initialized at insn %d: initial value ",
4915 bl->regno, INSN_UID (bl->init_insn));
4917 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4918 || GET_MODE (src) == VOIDmode)
4919 && valid_initial_value_p (src, bl->init_insn,
4920 LOOP_INFO (loop)->pre_header_has_call,
4921 loop->start))
4923 bl->initial_value = src;
4925 if (loop_dump_stream)
4927 print_simple_rtl (loop_dump_stream, src);
4928 fputc ('\n', loop_dump_stream);
4931 /* If we can't make it a giv,
4932 let the biv keep its initial value of "itself". */
4933 else if (loop_dump_stream)
4934 fprintf (loop_dump_stream, "is complex\n");
4939 /* Search the loop for general induction variables. */
4941 static void
4942 loop_givs_find (struct loop* loop)
4944 for_each_insn_in_loop (loop, check_insn_for_givs);
4948 /* For each giv for which we still don't know whether or not it is
4949 replaceable, check to see if it is replaceable because its final value
4950 can be calculated. */
4952 static void
4953 loop_givs_check (struct loop *loop)
4955 struct loop_ivs *ivs = LOOP_IVS (loop);
4956 struct iv_class *bl;
4958 for (bl = ivs->list; bl; bl = bl->next)
4960 struct induction *v;
4962 for (v = bl->giv; v; v = v->next_iv)
4963 if (! v->replaceable && ! v->not_replaceable)
4964 check_final_value (loop, v);
4968 /* Try to generate the simplest rtx for the expression
4969 (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
4970 value of givs. */
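/* For example, with MULT1 a register, MULT2 == (const_int 2) and
   ADD1 == (const_int 3), the result is (plus (mult MULT1 2) 3);
   fully constant inputs fold down to a single constant.  */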
4972 static rtx
4973 fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode)
4975 rtx temp, mult_res;
4976 rtx result;
4978 /* The modes must all be the same. This should always be true. For now,
4979 check to make sure. */
4980 gcc_assert (GET_MODE (mult1) == mode || GET_MODE (mult1) == VOIDmode);
4981 gcc_assert (GET_MODE (mult2) == mode || GET_MODE (mult2) == VOIDmode);
4982 gcc_assert (GET_MODE (add1) == mode || GET_MODE (add1) == VOIDmode);
4984 /* Ensure that if at least one of mult1/mult2 is constant, then mult2
4985 will be the constant. */
4986 if (GET_CODE (mult1) == CONST_INT)
4988 temp = mult2;
4989 mult2 = mult1;
4990 mult1 = temp;
4993 mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
4994 if (! mult_res)
4995 mult_res = gen_rtx_MULT (mode, mult1, mult2);
4997 /* Again, put the constant second. */
4998 if (GET_CODE (add1) == CONST_INT)
5000 temp = add1;
5001 add1 = mult_res;
5002 mult_res = temp;
5005 result = simplify_binary_operation (PLUS, mode, add1, mult_res);
5006 if (! result)
5007 result = gen_rtx_PLUS (mode, add1, mult_res);
5009 return result;
5012 /* Searches the list of induction structs for the biv BL, to try to calculate
5013 the total increment value for one iteration of the loop as a constant.
5015 Returns the increment value as an rtx, simplified as much as possible,
5016 if it can be calculated. Otherwise, returns 0. */
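/* For example, a biv updated by "i = i + 4" at two always-executed
   places in the loop body has a total increment of (const_int 8).  */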
5018 static rtx
5019 biv_total_increment (const struct iv_class *bl)
5021 struct induction *v;
5022 rtx result;
5024 /* For increment, must check every instruction that sets it. Each
5025 instruction must be executed only once each time through the loop.
5026 To verify this, we check that the insn is always executed, and that
5027 there are no backward branches after the insn that branch to before it.
5028 Also, the insn must have a mult_val of one (to make sure it really is
5029 an increment). */
5031 result = const0_rtx;
5032 for (v = bl->biv; v; v = v->next_iv)
5034 if (v->always_computable && v->mult_val == const1_rtx
5035 && ! v->maybe_multiple
5036 && SCALAR_INT_MODE_P (v->mode))
5038 /* If we have already counted it, skip it. */
5039 if (v->same)
5040 continue;
5042 result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
5044 else
5045 return 0;
5048 return result;
5051 /* Try to prove that the register is dead after the loop exits. Trace every
5052 loop exit looking for an insn that will always be executed, which sets
5053 the register to some value, and appears before the first use of the register
5054 is found. If successful, then return 1, otherwise return 0. */
5056 /* ??? Could be made more intelligent in the handling of jumps, so that
5057 it can search past if statements and other similar structures. */
5059 static int
5060 reg_dead_after_loop (const struct loop *loop, rtx reg)
5062 rtx insn, label;
5063 int jump_count = 0;
5064 int label_count = 0;
5066 /* In addition to checking all exits of this loop, we must also check
5067 all exits of inner nested loops that would exit this loop. We don't
5068 have any way to identify those, so we just give up if there are any
5069 such inner loop exits. */
5071 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
5072 label_count++;
5074 if (label_count != loop->exit_count)
5075 return 0;
5077 /* HACK: Must also search the loop fall through exit, create a label_ref
5078 here which points to the loop->end, and append the loop_number_exit_labels
5079 list to it. */
5080 label = gen_rtx_LABEL_REF (Pmode, loop->end);
5081 LABEL_NEXTREF (label) = loop->exit_labels;
5083 for (; label; label = LABEL_NEXTREF (label))
5085 /* Succeed if we find an insn which sets the biv or if we reach the
5086 end of the function. Fail if we find an insn that uses the biv, or
5087 if we come to a conditional jump. */
5089 insn = NEXT_INSN (XEXP (label, 0));
5090 while (insn)
5092 if (INSN_P (insn))
5094 rtx set, note;
5096 if (reg_referenced_p (reg, PATTERN (insn)))
5097 return 0;
5099 note = find_reg_equal_equiv_note (insn);
5100 if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0)))
5101 return 0;
5103 set = single_set (insn);
5104 if (set && rtx_equal_p (SET_DEST (set), reg))
5105 break;
5107 if (JUMP_P (insn))
5109 if (GET_CODE (PATTERN (insn)) == RETURN)
5110 break;
5111 else if (!any_uncondjump_p (insn)
5112 /* Prevent infinite loop following infinite loops. */
5113 || jump_count++ > 20)
5114 return 0;
5115 else
5116 insn = JUMP_LABEL (insn);
5120 insn = NEXT_INSN (insn);
5124 /* Success, the register is dead on all loop exits. */
5125 return 1;
5128 /* Try to calculate the final value of the biv, the value it will have at
5129 the end of the loop. If we can do it, return that value. */
5131 static rtx
5132 final_biv_value (const struct loop *loop, struct iv_class *bl)
5134 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
5135 rtx increment, tem;
5137 /* ??? This only works for MODE_INT bivs. Reject all others for now. */
5139 if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
5140 return 0;
5142 /* The final value for reversed bivs must be calculated differently than
5143 for ordinary bivs. In this case, there is already an insn after the
5144 loop which sets this biv's final value (if necessary), and there are
5145 no other loop exits, so we can return any value. */
5146 if (bl->reversed)
5148 if (loop_dump_stream)
5149 fprintf (loop_dump_stream,
5150 "Final biv value for %d, reversed biv.\n", bl->regno);
5152 return const0_rtx;
5155 /* Try to calculate the final value as initial value + (number of iterations
5156 * increment). For this to work, increment must be invariant, the only
5157 exit from the loop must be the fall through at the bottom (otherwise
5158 it may not have its final value when the loop exits), and the initial
5159 value of the biv must be invariant. */
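/* E.g. a biv with initial value 0, incremented by 4 per iteration,
   in a loop known to iterate 10 times, exits with value 0 + 10 * 4
   = 40.  */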
5161 if (n_iterations != 0
5162 && ! loop->exit_count
5163 && loop_invariant_p (loop, bl->initial_value))
5165 increment = biv_total_increment (bl);
5167 if (increment && loop_invariant_p (loop, increment))
5169 /* Can calculate the loop exit value, emit insns after loop
5170 end to calculate this value into a temporary register in
5171 case it is needed later. */
5173 tem = gen_reg_rtx (bl->biv->mode);
5174 record_base_value (REGNO (tem), bl->biv->add_val, 0);
5175 loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations),
5176 bl->initial_value, tem);
5178 if (loop_dump_stream)
5179 fprintf (loop_dump_stream,
5180 "Final biv value for %d, calculated.\n", bl->regno);
5182 return tem;
5186 /* Check to see if the biv is dead at all loop exits. */
5187 if (reg_dead_after_loop (loop, bl->biv->src_reg))
5189 if (loop_dump_stream)
5190 fprintf (loop_dump_stream,
5191 "Final biv value for %d, biv dead after loop exit.\n",
5192 bl->regno);
5194 return const0_rtx;
5197 return 0;
5200 /* Return nonzero if it is possible to eliminate the biv BL provided
5201 all givs are reduced. This is possible if either the reg is not
5202 used outside the loop, or we can compute what its final value will
5203 be. */
5205 static int
5206 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
5207 int threshold, int insn_count)
5209 /* For architectures with a decrement_and_branch_until_zero insn,
5210 don't do this if we put a REG_NONNEG note on the endtest for this
5211 biv. */
5213 #ifdef HAVE_decrement_and_branch_until_zero
5214 if (bl->nonneg)
5216 if (loop_dump_stream)
5217 fprintf (loop_dump_stream,
5218 "Cannot eliminate nonneg biv %d.\n", bl->regno);
5219 return 0;
5221 #endif
5223 /* Check that biv is used outside loop or if it has a final value.
5224 Compare against bl->init_insn rather than loop->start. We aren't
5225 concerned with any uses of the biv between init_insn and
5226 loop->start since these won't be affected by the value of the biv
5227 elsewhere in the function, so long as init_insn doesn't use the
5228 biv itself. */
5230 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
5231 && bl->init_insn
5232 && INSN_UID (bl->init_insn) < max_uid_for_loop
5233 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
5234 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
5235 || (bl->final_value = final_biv_value (loop, bl)))
5236 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
5238 if (loop_dump_stream)
5240 fprintf (loop_dump_stream,
5241 "Cannot eliminate biv %d.\n",
5242 bl->regno);
5243 fprintf (loop_dump_stream,
5244 "First use: insn %d, last use: insn %d.\n",
5245 REGNO_FIRST_UID (bl->regno),
5246 REGNO_LAST_UID (bl->regno));
5248 return 0;
5252 /* Reduce each giv of BL that we have decided to reduce. */
5254 static void
5255 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
5257 struct induction *v;
5259 for (v = bl->giv; v; v = v->next_iv)
5261 struct induction *tv;
5262 if (! v->ignore && v->same == 0)
5264 int auto_inc_opt = 0;
5266 /* If the code for derived givs immediately below has already
5267 allocated a new_reg, we must keep it. */
5268 if (! v->new_reg)
5269 v->new_reg = gen_reg_rtx (v->mode);
5271 #ifdef AUTO_INC_DEC
5272 /* If the target has auto-increment addressing modes, and
5273 this is an address giv, then try to put the increment
5274 immediately after its use, so that flow can create an
5275 auto-increment addressing mode. */
5276 /* Don't do this for loops entered at the bottom, to avoid
5277 this invalid transformation:
5278 jmp L; -> jmp L;
5279 TOP: TOP:
5280 use giv use giv
5281 L: inc giv
5282 inc biv L:
5283 test biv test giv
5284 cbr TOP cbr TOP
5286 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
5287 && bl->biv->always_executed && ! bl->biv->maybe_multiple
5288 /* We don't handle reversed biv's because bl->biv->insn
5289 does not have a valid INSN_LUID. */
5290 && ! bl->reversed
5291 && v->always_executed && ! v->maybe_multiple
5292 && INSN_UID (v->insn) < max_uid_for_loop
5293 && !loop->top)
5295 /* If other giv's have been combined with this one, then
5296 this will work only if all uses of the other giv's occur
5297 before this giv's insn. This is difficult to check.
5299 We simplify this by looking for the common case where
5300 there is one DEST_REG giv, and this giv's insn is the
5301 last use of the dest_reg of that DEST_REG giv. If the
5302 increment occurs after the address giv, then we can
5303 perform the optimization. (Otherwise, the increment
5304 would have to go before other_giv, and we would not be
5305 able to combine it with the address giv to get an
5306 auto-inc address.) */
5307 if (v->combined_with)
5309 struct induction *other_giv = 0;
5311 for (tv = bl->giv; tv; tv = tv->next_iv)
5312 if (tv->same == v)
5314 if (other_giv)
5315 break;
5316 else
5317 other_giv = tv;
5319 if (! tv && other_giv
5320 && REGNO (other_giv->dest_reg) < max_reg_before_loop
5321 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
5322 == INSN_UID (v->insn))
5323 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
5324 auto_inc_opt = 1;
5326 /* Check for case where increment is before the address
5327 giv. Do this test in "loop order". */
5328 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
5329 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5330 || (INSN_LUID (bl->biv->insn)
5331 > INSN_LUID (loop->scan_start))))
5332 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5333 && (INSN_LUID (loop->scan_start)
5334 < INSN_LUID (bl->biv->insn))))
5335 auto_inc_opt = -1;
5336 else
5337 auto_inc_opt = 1;
5339 #ifdef HAVE_cc0
5341 rtx prev;
5343 /* We can't put an insn immediately after one setting
5344 cc0, or immediately before one using cc0. */
5345 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
5346 || (auto_inc_opt == -1
5347 && (prev = prev_nonnote_insn (v->insn)) != 0
5348 && INSN_P (prev)
5349 && sets_cc0_p (PATTERN (prev))))
5350 auto_inc_opt = 0;
5352 #endif
5354 if (auto_inc_opt)
5355 v->auto_inc_opt = 1;
5357 #endif
5359 /* For each place where the biv is incremented, add an insn
5360 to increment the new, reduced reg for the giv. */
5361 for (tv = bl->biv; tv; tv = tv->next_iv)
5363 rtx insert_before;
5365 /* Skip if location is the same as a previous one. */
5366 if (tv->same)
5367 continue;
5368 if (! auto_inc_opt)
5369 insert_before = NEXT_INSN (tv->insn);
5370 else if (auto_inc_opt == 1)
5371 insert_before = NEXT_INSN (v->insn);
5372 else
5373 insert_before = v->insn;
5375 if (tv->mult_val == const1_rtx)
5376 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5377 v->new_reg, v->new_reg,
5378 0, insert_before);
5379 else /* tv->mult_val == const0_rtx */
5380 /* A multiply is acceptable here
5381 since this is presumed to be seldom executed. */
5382 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5383 v->add_val, v->new_reg,
5384 0, insert_before);
5387 /* Add code at loop start to initialize giv's reduced reg. */
5389 loop_iv_add_mult_hoist (loop,
5390 extend_value_for_giv (v, bl->initial_value),
5391 v->mult_val, v->add_val, v->new_reg);
5397 /* Check for givs whose first use is their definition and whose
5398 last use is the definition of another giv. If so, it is likely
5399 dead and should not be used to derive another giv nor to
5400 eliminate a biv. */
5402 static void
5403 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
5405 struct induction *v;
5407 for (v = bl->giv; v; v = v->next_iv)
5409 if (v->ignore
5410 || (v->same && v->same->ignore))
5411 continue;
5413 if (v->giv_type == DEST_REG
5414 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
5416 struct induction *v1;
5418 for (v1 = bl->giv; v1; v1 = v1->next_iv)
5419 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
5420 v->maybe_dead = 1;
5426 static void
5427 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
5429 struct induction *v;
5431 for (v = bl->giv; v; v = v->next_iv)
5433 if (v->same && v->same->ignore)
5434 v->ignore = 1;
5436 if (v->ignore)
5437 continue;
5439 /* Update the expression if this giv was combined, in case the
5440 other giv was replaced. */
5441 if (v->same)
5442 v->new_reg = replace_rtx (v->new_reg,
5443 v->same->dest_reg, v->same->new_reg);
5445 /* See if this register is known to be a pointer to something. If
5446 so, see if we can find the alignment. First see if there is a
5447 destination register that is a pointer. If so, this shares the
5448 alignment too. Next see if we can deduce anything from the
5449 computational information. If not, and this is a DEST_ADDR
5450 giv, at least we know that it's a pointer, though we don't know
5451 the alignment. */
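/* For example, if the source pointer is known to be 8-byte aligned
   (REGNO_POINTER_ALIGN of 64 bits) and add_val is the constant 16,
   the sum is still 8-byte aligned; if add_val were 4 we conservatively
   record an alignment of 0 (unknown).  */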
5452 if (REG_P (v->new_reg)
5453 && v->giv_type == DEST_REG
5454 && REG_POINTER (v->dest_reg))
5455 mark_reg_pointer (v->new_reg,
5456 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
5457 else if (REG_P (v->new_reg)
5458 && REG_POINTER (v->src_reg))
5460 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
5462 if (align == 0
5463 || GET_CODE (v->add_val) != CONST_INT
5464 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
5465 align = 0;
5467 mark_reg_pointer (v->new_reg, align);
5469 else if (REG_P (v->new_reg)
5470 && REG_P (v->add_val)
5471 && REG_POINTER (v->add_val))
5473 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
5475 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
5476 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
5477 align = 0;
5479 mark_reg_pointer (v->new_reg, align);
5481 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
5482 mark_reg_pointer (v->new_reg, 0);
5484 if (v->giv_type == DEST_ADDR)
5486 /* Store reduced reg as the address in the memref where we found
5487 this giv. */
5488 if (validate_change_maybe_volatile (v->insn, v->location,
5489 v->new_reg))
5490 /* Yay, it worked! */;
5491 /* Not replaceable; emit an insn to set the original
5492 giv reg from the reduced giv. */
5493 else if (REG_P (*v->location))
5494 loop_insn_emit_before (loop, 0, v->insn,
5495 gen_move_insn (*v->location,
5496 v->new_reg));
5497 else if (GET_CODE (*v->location) == PLUS
5498 && REG_P (XEXP (*v->location, 0))
5499 && CONSTANT_P (XEXP (*v->location, 1)))
5501 rtx tem;
5502 start_sequence ();
5503 tem = expand_simple_binop (GET_MODE (*v->location), MINUS,
5504 v->new_reg, XEXP (*v->location, 1),
5505 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5506 emit_move_insn (XEXP (*v->location, 0), tem);
5507 tem = get_insns ();
5508 end_sequence ();
5509 loop_insn_emit_before (loop, 0, v->insn, tem);
5511 else
5513 /* If it wasn't a reg, create a pseudo and use that. */
5514 rtx reg, seq;
5515 start_sequence ();
5516 reg = force_reg (v->mode, *v->location);
5517 if (validate_change_maybe_volatile (v->insn, v->location, reg))
5519 seq = get_insns ();
5520 end_sequence ();
5521 loop_insn_emit_before (loop, 0, v->insn, seq);
5523 else
5525 end_sequence ();
5526 if (loop_dump_stream)
5527 fprintf (loop_dump_stream,
5528 "unable to reduce iv in insn %d\n",
5529 INSN_UID (v->insn));
5530 bl->all_reduced = 0;
5531 v->ignore = 1;
5532 continue;
5536 else if (v->replaceable)
5538 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5540 else
5542 rtx original_insn = v->insn;
5543 rtx note;
5545 /* Not replaceable; emit an insn to set the original giv reg from
5546 the reduced giv, same as above. */
5547 v->insn = loop_insn_emit_after (loop, 0, original_insn,
5548 gen_move_insn (v->dest_reg,
5549 v->new_reg));
5551 /* The original insn may have a REG_EQUAL note. This note is
5552 now incorrect and may result in invalid substitutions later.
5553 The original insn is dead, but may be part of a libcall
5554 sequence, which doesn't seem worth the bother of handling. */
5555 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
5556 if (note)
5557 remove_note (original_insn, note);
5560 /* When a loop is reversed, givs which depend on the reversed
5561 biv, and which are live outside the loop, must be set to their
5562 correct final value. This insn is only needed if the giv is
5563 not replaceable. The correct final value is the same as the
5564 value that the giv starts the reversed loop with. */
5565 if (bl->reversed && ! v->replaceable)
5566 loop_iv_add_mult_sink (loop,
5567 extend_value_for_giv (v, bl->initial_value),
5568 v->mult_val, v->add_val, v->dest_reg);
5569 else if (v->final_value)
5570 loop_insn_sink_or_swim (loop,
5571 gen_load_of_final_value (v->dest_reg,
5572 v->final_value));
5574 if (loop_dump_stream)
5576 fprintf (loop_dump_stream, "giv at %d reduced to ",
5577 INSN_UID (v->insn));
5578 print_simple_rtl (loop_dump_stream, v->new_reg);
5579 fprintf (loop_dump_stream, "\n");
5585 static int
5586 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
5587 struct iv_class *bl, struct induction *v,
5588 rtx test_reg)
5590 int add_cost;
5591 int benefit;
5593 benefit = v->benefit;
5594 PUT_MODE (test_reg, v->mode);
5595 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
5596 test_reg, test_reg);
5598 /* Reduce benefit if not replaceable, since we will insert a
5599 move-insn to replace the insn that calculates this giv. Don't do
5600 this unless the giv is a user variable, since it will often be
5601 marked non-replaceable because of the duplication of the exit
5602 code outside the loop. In such a case, the copies we insert are
5603 dead and will be deleted. So they don't have a cost. Similar
5604 situations exist. */
5605 /* ??? The new final_[bg]iv_value code does a much better job of
5606 finding replaceable giv's, and hence this code may no longer be
5607 necessary. */
5608 if (! v->replaceable && ! bl->eliminable
5609 && REG_USERVAR_P (v->dest_reg))
5610 benefit -= copy_cost;
5612 /* Decrease the benefit to count the add-insns that we will insert
5613 to increment the reduced reg for the giv. ??? This can
5614 overestimate the run-time cost of the additional insns, e.g. if
5615 there are multiple basic blocks that increment the biv, but only
5616 one of these blocks is executed during each iteration. There is
5617 no good way to detect cases like this with the current structure
5618 of the loop optimizer. This code is more accurate for
5619 determining code size than run-time benefits. */
5620 benefit -= add_cost * bl->biv_count;
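/* E.g. with an add_cost of 4 and a biv that is incremented in two
   places (biv_count == 2), reducing this giv is charged 4 * 2 == 8
   units of benefit for the new add insns.  */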
5622 /* Decide whether to strength-reduce this giv or to leave the code
5623 unchanged (recompute it from the biv each time it is used). This
5624 decision can be made independently for each giv. */
5626 #ifdef AUTO_INC_DEC
5627 /* Attempt to guess whether autoincrement will handle some of the
5628 new add insns; if so, increase BENEFIT (undo the subtraction of
5629 add_cost that was done above). */
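/* For instance, a DEST_ADDR giv that steps by exactly the size of
   the mode it addresses can often be folded into a post-increment
   address on targets that have one, so the add insns charged above
   may end up costing nothing.  */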
5630 if (v->giv_type == DEST_ADDR
5631 /* Increasing the benefit is risky, since this is only a guess.
5632 Avoid increasing register pressure in cases where there would
5633 be no other benefit from reducing this giv. */
5634 && benefit > 0
5635 && GET_CODE (v->mult_val) == CONST_INT)
5637 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5639 if (HAVE_POST_INCREMENT
5640 && INTVAL (v->mult_val) == size)
5641 benefit += add_cost * bl->biv_count;
5642 else if (HAVE_PRE_INCREMENT
5643 && INTVAL (v->mult_val) == size)
5644 benefit += add_cost * bl->biv_count;
5645 else if (HAVE_POST_DECREMENT
5646 && -INTVAL (v->mult_val) == size)
5647 benefit += add_cost * bl->biv_count;
5648 else if (HAVE_PRE_DECREMENT
5649 && -INTVAL (v->mult_val) == size)
5650 benefit += add_cost * bl->biv_count;
5652 #endif
5654 return benefit;
5658 /* Free IV structures for LOOP. */
5660 static void
5661 loop_ivs_free (struct loop *loop)
5663 struct loop_ivs *ivs = LOOP_IVS (loop);
5664 struct iv_class *iv = ivs->list;
5666 free (ivs->regs);
5668 while (iv)
5670 struct iv_class *next = iv->next;
5671 struct induction *induction;
5672 struct induction *next_induction;
5674 for (induction = iv->biv; induction; induction = next_induction)
5676 next_induction = induction->next_iv;
5677 free (induction);
5679 for (induction = iv->giv; induction; induction = next_induction)
5681 next_induction = induction->next_iv;
5682 free (induction);
5685 free (iv);
5686 iv = next;
5690 /* Look back before LOOP->START for the insn that sets REG and return
5691 the equivalent constant if there is a REG_EQUAL note; otherwise just
5692 the SET_SRC of REG. */
5694 static rtx
5695 loop_find_equiv_value (const struct loop *loop, rtx reg)
5697 rtx loop_start = loop->start;
5698 rtx insn, set;
5699 rtx ret;
5701 ret = reg;
5702 for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn))
5704 if (LABEL_P (insn))
5705 break;
5707 else if (INSN_P (insn) && reg_set_p (reg, insn))
5709 /* We found the last insn before the loop that sets the register.
5710 If it sets the entire register, and has a REG_EQUAL note,
5711 then use the value of the REG_EQUAL note. */
5712 if ((set = single_set (insn))
5713 && (SET_DEST (set) == reg))
5715 rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5717 /* Only use the REG_EQUAL note if it is a constant.
5718 Other things, divide in particular, will cause
5719 problems later if we use them. */
5720 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
5721 && CONSTANT_P (XEXP (note, 0)))
5722 ret = XEXP (note, 0);
5723 else
5724 ret = SET_SRC (set);
5726 /* We cannot do this if it changes between the
5727 assignment and loop start though. */
5728 if (modified_between_p (ret, insn, loop_start))
5729 ret = reg;
5731 break;
5734 return ret;
5737 /* Find and return a register term common to both expressions OP0 and
5738 OP1 or NULL_RTX if no such term exists. Each expression must be a
5739 REG or a PLUS of a REG. */
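/* E.g. (plus (reg 70) (reg 71)) and (plus (reg 70) (const_int 4))
   share the register term (reg 70).  */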
5741 static rtx
5742 find_common_reg_term (rtx op0, rtx op1)
5744 if ((REG_P (op0) || GET_CODE (op0) == PLUS)
5745 && (REG_P (op1) || GET_CODE (op1) == PLUS))
5747 rtx op00;
5748 rtx op01;
5749 rtx op10;
5750 rtx op11;
5752 if (GET_CODE (op0) == PLUS)
5753 op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
5754 else
5755 op01 = const0_rtx, op00 = op0;
5757 if (GET_CODE (op1) == PLUS)
5758 op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
5759 else
5760 op11 = const0_rtx, op10 = op1;
5762 /* Find and return common register term if present. */
5763 if (REG_P (op00) && (op00 == op10 || op00 == op11))
5764 return op00;
5765 else if (REG_P (op01) && (op01 == op10 || op01 == op11))
5766 return op01;
5769 /* No common register term found. */
5770 return NULL_RTX;
5773 /* Determine the loop iterator and calculate the number of loop
5774 iterations. Returns the exact number of loop iterations if it can
5775 be calculated, otherwise returns zero. */
5777 static unsigned HOST_WIDE_INT
5778 loop_iterations (struct loop *loop)
5780 struct loop_info *loop_info = LOOP_INFO (loop);
5781 struct loop_ivs *ivs = LOOP_IVS (loop);
5782 rtx comparison, comparison_value;
5783 rtx iteration_var, initial_value, increment, final_value;
5784 enum rtx_code comparison_code;
5785 HOST_WIDE_INT inc;
5786 unsigned HOST_WIDE_INT abs_inc;
5787 unsigned HOST_WIDE_INT abs_diff;
5788 int off_by_one;
5789 int increment_dir;
5790 int unsigned_p, compare_dir, final_larger;
5791 rtx last_loop_insn;
5792 struct iv_class *bl;
5794 loop_info->n_iterations = 0;
5795 loop_info->initial_value = 0;
5796 loop_info->initial_equiv_value = 0;
5797 loop_info->comparison_value = 0;
5798 loop_info->final_value = 0;
5799 loop_info->final_equiv_value = 0;
5800 loop_info->increment = 0;
5801 loop_info->iteration_var = 0;
5802 loop_info->iv = 0;
5804 /* We used to use prev_nonnote_insn here, but that fails because it might
5805 accidentally get the branch for a contained loop if the branch for this
5806 loop was deleted. We can only trust branches immediately before the
5807 loop_end. */
5808 last_loop_insn = PREV_INSN (loop->end);
5810 /* ??? We should probably try harder to find the jump insn
5811 at the end of the loop. The following code assumes that
5812 the last loop insn is a jump to the top of the loop. */
5813 if (!JUMP_P (last_loop_insn))
5815 if (loop_dump_stream)
5816 fprintf (loop_dump_stream,
5817 "Loop iterations: No final conditional branch found.\n");
5818 return 0;
5821 /* If there is more than a single jump to the top of the loop
5822 we cannot (easily) determine the iteration count. */
5823 if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1)
5825 if (loop_dump_stream)
5826 fprintf (loop_dump_stream,
5827 "Loop iterations: Loop has multiple back edges.\n");
5828 return 0;
5831 /* Find the iteration variable. If the last insn is a conditional
5832 branch, and the insn before tests a register value, make that the
5833 iteration variable. */
5835 comparison = get_condition_for_loop (loop, last_loop_insn);
5836 if (comparison == 0)
5838 if (loop_dump_stream)
5839 fprintf (loop_dump_stream,
5840 "Loop iterations: No final comparison found.\n");
5841 return 0;
5844 /* ??? Get_condition may switch position of induction variable and
5845 invariant register when it canonicalizes the comparison. */
5847 comparison_code = GET_CODE (comparison);
5848 iteration_var = XEXP (comparison, 0);
5849 comparison_value = XEXP (comparison, 1);
5851 if (!REG_P (iteration_var))
5853 if (loop_dump_stream)
5854 fprintf (loop_dump_stream,
5855 "Loop iterations: Comparison not against register.\n");
5856 return 0;
5859 /* The only new registers that are created before loop_iterations runs
5860 are givs made from biv increments or registers created by
5861 load_mems. In the latter case, it is possible that try_copy_prop
5862 will propagate a new pseudo into the old iteration register but
5863 this will be marked by having the REG_USERVAR_P bit set. */
5865 gcc_assert ((unsigned) REGNO (iteration_var) < ivs->n_regs
5866 || REG_USERVAR_P (iteration_var));
5868 /* Determine the initial value of the iteration variable, and the amount
5869 that it is incremented each loop. Use the tables constructed by
5870 the strength reduction pass to calculate these values. */
5872 /* Clear the result values, in case no answer can be found. */
5873 initial_value = 0;
5874 increment = 0;
5876 /* The iteration variable can be either a giv or a biv. Check to see
5877 which it is, and compute the variable's initial value and increment
5878 value if possible. */
5880 /* If this is a new register, can't handle it since we don't have any
5881 reg_iv_type entry for it. */
5882 if ((unsigned) REGNO (iteration_var) >= ivs->n_regs)
5884 if (loop_dump_stream)
5885 fprintf (loop_dump_stream,
5886 "Loop iterations: No reg_iv_type entry for iteration var.\n");
5887 return 0;
5890 /* Reject iteration variables larger than the host wide int size, since they
5891 could result in a number of iterations greater than the range of our
5892 `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
5893 else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
5894 > HOST_BITS_PER_WIDE_INT))
5896 if (loop_dump_stream)
5897 fprintf (loop_dump_stream,
5898 "Loop iterations: Iteration var rejected because mode too large.\n");
5899 return 0;
5901 else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
5903 if (loop_dump_stream)
5904 fprintf (loop_dump_stream,
5905 "Loop iterations: Iteration var not an integer.\n");
5906 return 0;
5909 /* Try swapping the comparison to identify a suitable iv. */
5910 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT
5911 && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT
5912 && REG_P (comparison_value)
5913 && REGNO (comparison_value) < ivs->n_regs)
5915 rtx temp = comparison_value;
5916 comparison_code = swap_condition (comparison_code);
5917 comparison_value = iteration_var;
5918 iteration_var = temp;
5921 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT)
5923 gcc_assert (REGNO (iteration_var) < ivs->n_regs);
5925 /* Grab the initial value; only useful if it is a constant. */
5926 bl = REG_IV_CLASS (ivs, REGNO (iteration_var));
5927 initial_value = bl->initial_value;
5928 if (!bl->biv->always_executed || bl->biv->maybe_multiple)
5930 if (loop_dump_stream)
5931 fprintf (loop_dump_stream,
5932 "Loop iterations: Basic induction var not set once in each iteration.\n");
5933 return 0;
5936 increment = biv_total_increment (bl);
5938 else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT)
5940 HOST_WIDE_INT offset = 0;
5941 struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var));
5942 rtx biv_initial_value;
5944 gcc_assert (REGNO (v->src_reg) < ivs->n_regs);
5946 if (!v->always_executed || v->maybe_multiple)
5948 if (loop_dump_stream)
5949 fprintf (loop_dump_stream,
5950 "Loop iterations: General induction var not set once in each iteration.\n");
5951 return 0;
5954 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5956 /* Increment value is mult_val times the increment value of the biv. */
5958 increment = biv_total_increment (bl);
5959 if (increment)
5961 struct induction *biv_inc;
5963 increment = fold_rtx_mult_add (v->mult_val,
5964 extend_value_for_giv (v, increment),
5965 const0_rtx, v->mode);
5966 /* The caller assumes that one full increment has occurred at the
5967 first loop test. But that's not true when the biv is incremented
5968 after the giv is set (which is the usual case), e.g.:
5969 i = 6; do {;} while (i++ < 9) .
5970 Therefore, we bias the initial value by subtracting the amount of
5971 the increment that occurs between the giv set and the giv test. */
5972 for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv)
5974 if (loop_insn_first_p (v->insn, biv_inc->insn))
5976 if (REG_P (biv_inc->add_val))
5978 if (loop_dump_stream)
5979 fprintf (loop_dump_stream,
5980 "Loop iterations: Basic induction var add_val is REG %d.\n",
5981 REGNO (biv_inc->add_val));
5982 return 0;
5985 /* If we have already counted it, skip it. */
5986 if (biv_inc->same)
5987 continue;
5989 offset -= INTVAL (biv_inc->add_val);
5993 if (loop_dump_stream)
5994 fprintf (loop_dump_stream,
5995 "Loop iterations: Giv iterator, initial value bias %ld.\n",
5996 (long) offset);
5998 /* Initial value is mult_val times the biv's initial value plus
5999 add_val. Only useful if it is a constant. */
6000 biv_initial_value = extend_value_for_giv (v, bl->initial_value);
6001 initial_value
6002 = fold_rtx_mult_add (v->mult_val,
6003 plus_constant (biv_initial_value, offset),
6004 v->add_val, v->mode);
6006 else
6008 if (loop_dump_stream)
6009 fprintf (loop_dump_stream,
6010 "Loop iterations: Not basic or general induction var.\n");
6011 return 0;
6014 if (initial_value == 0)
6015 return 0;
6017 unsigned_p = 0;
6018 off_by_one = 0;
6019 switch (comparison_code)
6021 case LEU:
6022 unsigned_p = 1;
6023 case LE:
6024 compare_dir = 1;
6025 off_by_one = 1;
6026 break;
6027 case GEU:
6028 unsigned_p = 1;
6029 case GE:
6030 compare_dir = -1;
6031 off_by_one = -1;
6032 break;
6033 case EQ:
6034 /* Cannot determine loop iterations with this case. */
6035 compare_dir = 0;
6036 break;
6037 case LTU:
6038 unsigned_p = 1;
6039 case LT:
6040 compare_dir = 1;
6041 break;
6042 case GTU:
6043 unsigned_p = 1;
6044 case GT:
6045 compare_dir = -1;
6046 break;
6047 case NE:
6048 compare_dir = 0;
6049 break;
6050 default:
6051 gcc_unreachable ();
6054 /* If the comparison value is an invariant register, then try to find
6055 its value from the insns before the start of the loop. */
6057 final_value = comparison_value;
6058 if (REG_P (comparison_value)
6059 && loop_invariant_p (loop, comparison_value))
6061 final_value = loop_find_equiv_value (loop, comparison_value);
6063 /* If we don't get an invariant final value, we are better
6064 off with the original register. */
6065 if (! loop_invariant_p (loop, final_value))
6066 final_value = comparison_value;
6069 /* Calculate the approximate final value of the induction variable
6070 (on the last successful iteration). The exact final value
6071 depends on the branch operator, and increment sign. It will be
6072 wrong if the iteration variable is not incremented by one each
6073 time through the loop and (comparison_value + off_by_one -
6074 initial_value) % increment != 0.
6075 ??? Note that the final_value may overflow and thus final_larger
6076 will be bogus. A potentially infinite loop will be classified
6077 as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */
6078 if (off_by_one)
6079 final_value = plus_constant (final_value, off_by_one);
6081 /* Save the calculated values describing this loop's bounds, in case
6082 precondition_loop_p will need them later. These values can not be
6083 recalculated inside precondition_loop_p because strength reduction
6084 optimizations may obscure the loop's structure.
6086 These values are only required by precondition_loop_p and insert_bct
6087 whenever the number of iterations cannot be computed at compile time.
6088 Only the difference between final_value and initial_value is
6089 important. Note that final_value is only approximate. */
6090 loop_info->initial_value = initial_value;
6091 loop_info->comparison_value = comparison_value;
6092 loop_info->final_value = plus_constant (comparison_value, off_by_one);
6093 loop_info->increment = increment;
6094 loop_info->iteration_var = iteration_var;
6095 loop_info->comparison_code = comparison_code;
6096 loop_info->iv = bl;
6098 /* Try to determine the iteration count for loops such
6099 as (for i = init; i < init + const; i++). When running the
6100 loop optimization twice, the first pass often converts simple
6101 loops into this form. */
6103 if (REG_P (initial_value))
6105 rtx reg1;
6106 rtx reg2;
6107 rtx const2;
6109 reg1 = initial_value;
6110 if (GET_CODE (final_value) == PLUS)
6111 reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
6112 else
6113 reg2 = final_value, const2 = const0_rtx;
6115 /* Check for initial_value = reg1, final_value = reg2 + const2,
6116 where reg1 != reg2. */
6117 if (REG_P (reg2) && reg2 != reg1)
6119 rtx temp;
6121 /* Find what reg1 is equivalent to. Hopefully it will
6122 either be reg2 or reg2 plus a constant. */
6123 temp = loop_find_equiv_value (loop, reg1);
6125 if (find_common_reg_term (temp, reg2))
6126 initial_value = temp;
6127 else if (loop_invariant_p (loop, reg2))
6129 /* Find what reg2 is equivalent to. Hopefully it will
6130 either be reg1 or reg1 plus a constant. Let's ignore
6131 the latter case for now since it is not so common. */
6132 temp = loop_find_equiv_value (loop, reg2);
6134 if (temp == loop_info->iteration_var)
6135 temp = initial_value;
6136 if (temp == reg1)
6137 final_value = (const2 == const0_rtx)
6138 ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
6143 loop_info->initial_equiv_value = initial_value;
6144 loop_info->final_equiv_value = final_value;
6146 /* For EQ comparison loops, we don't have a valid final value.
6147 Check this now so that we won't leave an invalid value if we
6148 return early for any other reason. */
6149 if (comparison_code == EQ)
6150 loop_info->final_equiv_value = loop_info->final_value = 0;
6152 if (increment == 0)
6154 if (loop_dump_stream)
6155 fprintf (loop_dump_stream,
6156 "Loop iterations: Increment value can't be calculated.\n");
6157 return 0;
6160 if (GET_CODE (increment) != CONST_INT)
6162 /* If we have a REG, check to see if REG holds a constant value. */
6163 /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't
6164 clear if it is worthwhile to try to handle such RTL. */
6165 if (REG_P (increment) || GET_CODE (increment) == SUBREG)
6166 increment = loop_find_equiv_value (loop, increment);
6168 if (GET_CODE (increment) != CONST_INT)
6170 if (loop_dump_stream)
6172 fprintf (loop_dump_stream,
6173 "Loop iterations: Increment value not constant ");
6174 print_simple_rtl (loop_dump_stream, increment);
6175 fprintf (loop_dump_stream, ".\n");
6177 return 0;
6179 loop_info->increment = increment;
6182 if (GET_CODE (initial_value) != CONST_INT)
6184 if (loop_dump_stream)
6186 fprintf (loop_dump_stream,
6187 "Loop iterations: Initial value not constant ");
6188 print_simple_rtl (loop_dump_stream, initial_value);
6189 fprintf (loop_dump_stream, ".\n");
6191 return 0;
6193 else if (GET_CODE (final_value) != CONST_INT)
6195 if (loop_dump_stream)
6197 fprintf (loop_dump_stream,
6198 "Loop iterations: Final value not constant ");
6199 print_simple_rtl (loop_dump_stream, final_value);
6200 fprintf (loop_dump_stream, ".\n");
6202 return 0;
6204 else if (comparison_code == EQ)
6206 rtx inc_once;
6208 if (loop_dump_stream)
6209 fprintf (loop_dump_stream, "Loop iterations: EQ comparison loop.\n");
6211 inc_once = gen_int_mode (INTVAL (initial_value) + INTVAL (increment),
6212 GET_MODE (iteration_var));
6214 if (inc_once == final_value)
6216 /* The iterator value once through the loop is equal to the
6217 comparison value. Either we have an infinite loop, or
6218 we'll loop twice. */
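/* For instance, "i = 0; do { ...; i += 2; } while (i == 2)" loops
   exactly twice: i is 2 after the first pass, so the EQ test succeeds
   once, and 4 after the second. With a zero increment the test would
   succeed forever.  */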
6219 if (increment == const0_rtx)
6220 return 0;
6221 loop_info->n_iterations = 2;
6223 else
6224 loop_info->n_iterations = 1;
6226 if (GET_CODE (loop_info->initial_value) == CONST_INT)
6227 loop_info->final_value
6228 = gen_int_mode ((INTVAL (loop_info->initial_value)
6229 + loop_info->n_iterations * INTVAL (increment)),
6230 GET_MODE (iteration_var));
6231 else
6232 loop_info->final_value
6233 = plus_constant (loop_info->initial_value,
6234 loop_info->n_iterations * INTVAL (increment));
6235 loop_info->final_equiv_value
6236 = gen_int_mode ((INTVAL (initial_value)
6237 + loop_info->n_iterations * INTVAL (increment)),
6238 GET_MODE (iteration_var));
6239 return loop_info->n_iterations;
6242 /* Final_larger is 1 if final larger, 0 if they are equal, otherwise -1. */
6243 if (unsigned_p)
6244 final_larger
6245 = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6246 > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
6247 - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6248 < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
6249 else
6250 final_larger = (INTVAL (final_value) > INTVAL (initial_value))
6251 - (INTVAL (final_value) < INTVAL (initial_value));
6253 if (INTVAL (increment) > 0)
6254 increment_dir = 1;
6255 else if (INTVAL (increment) == 0)
6256 increment_dir = 0;
6257 else
6258 increment_dir = -1;
6260 /* There are 27 different cases: compare_dir = -1, 0, 1;
6261 final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
6262 There are 4 normal cases, 4 reverse cases (where the iteration variable
6263 will overflow before the loop exits), 4 infinite loop cases, and 15
6264 immediate exit (0 or 1 iteration depending on loop type) cases.
6265 Only try to optimize the normal cases. */
6267 /* (compare_dir/final_larger/increment_dir)
6268 Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
6269 Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
6270 Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
6271 Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
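/* A concrete reverse case (0/-1/1): "for (i = 10; i != 5; i++)" has
   an NE test (compare_dir 0), a final value smaller than the initial
   value (final_larger -1) and a positive increment (increment_dir 1),
   so the iteration variable must wrap around before the exit test can
   ever succeed.  */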
6273 /* ?? If the meaning of reverse loops (where the iteration variable
6274 will overflow before the loop exits) is undefined, then we could
6275 eliminate all of these special checks, and just always assume
6276 the loops are normal/immediate/infinite. Note that this means
6277 the sign of increment_dir does not have to be known. Also,
6278 since it does not really hurt if immediate exit loops or infinite loops
6279 are optimized, those cases could be ignored as well, and hence all
6280 loops can be optimized.
6282 According to ANSI Spec, the reverse loop case result is undefined,
6283 because the action on overflow is undefined.
6285 See also the special test for NE loops below. */
6287 if (final_larger == increment_dir && final_larger != 0
6288 && (final_larger == compare_dir || compare_dir == 0))
6289 /* Normal case. */
6291 else
6293 if (loop_dump_stream)
6294 fprintf (loop_dump_stream, "Loop iterations: Not normal loop.\n");
6295 return 0;
6298 /* Calculate the number of iterations; final_value is only an approximation,
6299 so correct for that. Note that abs_diff and n_iterations are
6300 unsigned, because they can be as large as 2^n - 1. */
6302 inc = INTVAL (increment);
6303 gcc_assert (inc);
6304 if (inc > 0)
6306 abs_diff = INTVAL (final_value) - INTVAL (initial_value);
6307 abs_inc = inc;
6309 else
6311 abs_diff = INTVAL (initial_value) - INTVAL (final_value);
6312 abs_inc = -inc;
6315 /* Given that iteration_var is going to iterate over its own mode,
6316 not HOST_WIDE_INT, disregard higher bits that might have come
6317 into the picture due to sign extension of initial and final
6318 values. */
6319 abs_diff &= ((unsigned HOST_WIDE_INT) 1
6320 << (GET_MODE_BITSIZE (GET_MODE (iteration_var)) - 1)
6321 << 1) - 1;
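/* E.g. for an HImode iteration variable on a 64-bit host the mask
   computed above is 0xffff, so only the low 16 bits of the difference
   survive; anything above them is a sign-extension artifact.  */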
6323 /* For NE tests, make sure that the iteration variable won't miss
6324 the final value. If abs_diff mod abs_inc is not zero, then the
6325 iteration variable will overflow before the loop exits, and we
6326 can not calculate the number of iterations. */
6327 if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
6328 return 0;
6330 /* Note that the number of iterations could be calculated using
6331 (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
6332 handle potential overflow of the summation. */
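/* Worked example: "for (i = 0; i < 10; i += 3)" gives abs_diff == 10
   and abs_inc == 3, so n_iterations below is 10 / 3 + 1 == 4,
   matching the executions at i == 0, 3, 6 and 9.  */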
6333 loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
6334 return loop_info->n_iterations;
6337 /* Perform strength reduction and induction variable elimination.
6339 Pseudo registers created during this function will be beyond the
6340 last valid index in several tables including
6341 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
6342 problem here, because the added registers cannot be givs outside of
6343 their loop, and hence will never be reconsidered. But scan_loop
6344 must check regnos to make sure they are in bounds. */
6346 static void
6347 strength_reduce (struct loop *loop, int flags)
6349 struct loop_info *loop_info = LOOP_INFO (loop);
6350 struct loop_regs *regs = LOOP_REGS (loop);
6351 struct loop_ivs *ivs = LOOP_IVS (loop);
6352 rtx p;
6353 /* Temporary list pointer for traversing ivs->list. */
6354 struct iv_class *bl;
6355 /* Ratio of extra register life span we can justify
6356 for saving an instruction. More if loop doesn't call subroutines
6357 since in that case saving an insn makes more difference
6358 and more registers are available. */
6359 /* ??? could set this to last value of threshold in move_movables */
6360 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
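/* E.g. with 16 non-fixed registers this threshold is 2 * (3 + 16)
   == 38 for a loop without calls, but only 19 when the loop contains
   a call and registers are scarcer.  */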
6361 /* Map of pseudo-register replacements. */
6362 rtx *reg_map = NULL;
6363 int reg_map_size;
6364 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
6365 int insn_count = count_insns_in_loop (loop);
6367 addr_placeholder = gen_reg_rtx (Pmode);
6369 ivs->n_regs = max_reg_before_loop;
6370 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
6372 /* Find all BIVs in loop. */
6373 loop_bivs_find (loop);
6375 /* Exit if there are no bivs. */
6376 if (! ivs->list)
6378 loop_ivs_free (loop);
6379 return;
6382 /* Determine how BIVS are initialized by looking through the pre-header
6383 extended basic block. */
6384 loop_bivs_init_find (loop);
6386 /* Look at each biv and see if we can say anything better about its
6387 initial value from any initializing insns set up above. */
6388 loop_bivs_check (loop);
6390 /* Search the loop for general induction variables. */
6391 loop_givs_find (loop);
6393 /* Try to calculate and save the number of loop iterations. This is
6394 set to zero if the actual number can not be calculated. This must
6395 be called after all giv's have been identified, since otherwise it may
6396 fail if the iteration variable is a giv. */
6397 loop_iterations (loop);
6399 #ifdef HAVE_prefetch
6400 if (flags & LOOP_PREFETCH)
6401 emit_prefetch_instructions (loop);
6402 #endif
6404 /* Now for each giv for which we still don't know whether or not it is
6405 replaceable, check to see if it is replaceable because its final value
6406 can be calculated. This must be done after loop_iterations is called,
6407 so that final_giv_value will work correctly. */
6408 loop_givs_check (loop);
6410 /* Try to prove that the loop counter variable (if any) is always
6411 nonnegative; if so, record that fact with a REG_NONNEG note
6412 so that a "decrement and branch until zero" insn can be used. */
6413 check_dbra_loop (loop, insn_count);
6415 /* Create reg_map to hold substitutions for replaceable giv regs.
6416 Some givs might have been made from biv increments, so look at
6417 ivs->reg_iv_type for a suitable size. */
6418 reg_map_size = ivs->n_regs;
6419 reg_map = xcalloc (reg_map_size, sizeof (rtx));
6421 /* Examine each iv class for feasibility of strength reduction/induction
6422 variable elimination. */
6424 for (bl = ivs->list; bl; bl = bl->next)
6426 struct induction *v;
6427 int benefit;
6429 /* Test whether it will be possible to eliminate this biv
6430 provided all givs are reduced. */
6431 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
6433 /* This will be true at the end, if all givs which depend on this
6434 biv have been strength reduced.
6435 We can't (currently) eliminate the biv unless this is so. */
6436 bl->all_reduced = 1;
6438 /* Check each extension dependent giv in this class to see if its
6439 root biv is safe from wrapping in the interior mode. */
6440 check_ext_dependent_givs (loop, bl);
6442 /* Combine all giv's for this iv_class. */
6443 combine_givs (regs, bl);
6445 for (v = bl->giv; v; v = v->next_iv)
6447 struct induction *tv;
6449 if (v->ignore || v->same)
6450 continue;
6452 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
6454 /* If an insn is not to be strength reduced, then set its ignore
6455 flag, and clear bl->all_reduced. */
6457 /* A giv that depends on a reversed biv must be reduced if it is
6458 used after the loop exit; otherwise, it would have the wrong
6459 value after the loop exit. To make it simple, just reduce all
6460 such giv's whether or not we know they are used after the loop
6461 exit. */
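/* The cost test below weighs lifetime * threshold * benefit against
   the loop size: e.g. a giv with lifetime 2 and benefit 4 under a
   threshold of 38 scores 2 * 38 * 4 == 304 and is reduced unless the
   loop contains more insns than that.  */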
6463 if (v->lifetime * threshold * benefit < insn_count
6464 && ! bl->reversed)
6466 if (loop_dump_stream)
6467 fprintf (loop_dump_stream,
6468 "giv of insn %d not worth while, %d vs %d.\n",
6469 INSN_UID (v->insn),
6470 v->lifetime * threshold * benefit, insn_count);
6471 v->ignore = 1;
6472 bl->all_reduced = 0;
6474 else
6476 /* Check that we can increment the reduced giv without a
6477 multiply insn. If not, reject it. */
6479 for (tv = bl->biv; tv; tv = tv->next_iv)
6480 if (tv->mult_val == const1_rtx
6481 && ! product_cheap_p (tv->add_val, v->mult_val))
6483 if (loop_dump_stream)
6484 fprintf (loop_dump_stream,
6485 "giv of insn %d: would need a multiply.\n",
6486 INSN_UID (v->insn));
6487 v->ignore = 1;
6488 bl->all_reduced = 0;
6489 break;
6494 /* Check for givs whose first use is their definition and whose
6495 last use is the definition of another giv. If so, it is likely
6496 dead and should not be used to derive another giv nor to
6497 eliminate a biv. */
6498 loop_givs_dead_check (loop, bl);
6500 /* Reduce each giv that we decided to reduce. */
6501 loop_givs_reduce (loop, bl);
6503 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
6504 as not reduced.
6506 For each giv register that can be reduced now: if replaceable,
6507 substitute reduced reg wherever the old giv occurs;
6508 else add new move insn "giv_reg = reduced_reg". */
6509 loop_givs_rescan (loop, bl, reg_map);
6511 /* All the givs based on the biv bl have been reduced if they
6512 merit it. */
6514 /* For each giv not marked as maybe dead that has been combined with a
6515 second giv, clear any "maybe dead" mark on that second giv.
6516 v->new_reg will either be or refer to the register of the giv it
6517 combined with.
6519 Doing this clearing avoids problems in biv elimination where
6520 a giv's new_reg is a complex value that can't be put in the
6521 insn but the giv combined with (with a reg as new_reg) is
6522 marked maybe_dead. Since the register will be used in either
6523 case, we'd prefer it be used from the simpler giv. */
6525 for (v = bl->giv; v; v = v->next_iv)
6526 if (! v->maybe_dead && v->same)
6527 v->same->maybe_dead = 0;
6529 /* Try to eliminate the biv, if it is a candidate.
6530 This won't work if ! bl->all_reduced,
6531 since the givs we planned to use might not have been reduced.
6533 We have to be careful that we didn't initially think we could
6534 eliminate this biv because of a giv that we now think may be
6535 dead and shouldn't be used as a biv replacement.
6537 Also, there is the possibility that we may have a giv that looks
6538 like it can be used to eliminate a biv, but the resulting insn
6539 isn't valid. This can happen, for example, on the 88k, where a
6540 JUMP_INSN can compare a register only with zero. Attempts to
6541 replace it with a compare with a constant will fail.
6543 Note that in cases where this call fails, we may have replaced some
6544 of the occurrences of the biv with a giv, but no harm was done in
6545 doing so in the rare cases where it can occur. */
6547 if (bl->all_reduced == 1 && bl->eliminable
6548 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
6550 /* ?? If we created a new test to bypass the loop entirely,
6551 or otherwise drop straight in, based on this test, then
6552 we might want to rewrite it also. This way some later
6553 pass has more hope of removing the initialization of this
6554 biv entirely. */
6556 /* If final_value != 0, then the biv may be used after loop end
6557 and we must emit an insn to set it just in case.
6559 Reversed bivs already have an insn after the loop setting their
6560 value, so we don't need another one. We can't calculate the
6561 proper final value for such a biv here anyways. */
6562 if (bl->final_value && ! bl->reversed)
6563 loop_insn_sink_or_swim (loop,
6564 gen_load_of_final_value (bl->biv->dest_reg,
6565 bl->final_value));
6567 if (loop_dump_stream)
6568 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
6569 bl->regno);
6571 /* See above note wrt final_value. But since we couldn't eliminate
6572 the biv, we must set the value after the loop instead of before. */
6573 else if (bl->final_value && ! bl->reversed)
6574 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
6575 bl->final_value));
6578 /* Go through all the instructions in the loop, making all the
6579 register substitutions scheduled in REG_MAP. */
6581 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
6582 if (INSN_P (p))
6584 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
6585 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
6586 INSN_CODE (p) = -1;
6589 if (loop_dump_stream)
6590 fprintf (loop_dump_stream, "\n");
6592 loop_ivs_free (loop);
6593 if (reg_map)
6594 free (reg_map);
6597 /* Record all basic induction variables calculated in the insn. */
6598 static rtx
6599 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
6600 int maybe_multiple)
6602 struct loop_ivs *ivs = LOOP_IVS (loop);
6603 rtx set;
6604 rtx dest_reg;
6605 rtx inc_val;
6606 rtx mult_val;
6607 rtx *location;
6609 if (NONJUMP_INSN_P (p)
6610 && (set = single_set (p))
6611 && REG_P (SET_DEST (set)))
6613 dest_reg = SET_DEST (set);
6614 if (REGNO (dest_reg) < max_reg_before_loop
6615 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
6616 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
6618 if (basic_induction_var (loop, SET_SRC (set),
6619 GET_MODE (SET_SRC (set)),
6620 dest_reg, p, &inc_val, &mult_val,
6621 &location))
6623 /* It is a possible basic induction variable.
6624 Create and initialize an induction structure for it. */
6626 struct induction *v = xmalloc (sizeof (struct induction));
6628 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
6629 not_every_iteration, maybe_multiple);
6630 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
6632 else if (REGNO (dest_reg) < ivs->n_regs)
6633 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
6636 return p;
6639 /* Record all givs calculated in the insn.
6640 A register is a giv if: it is only set once, it is a function of a
6641 biv and a constant (or invariant), and it is not a biv. */
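/* For example, with biv i, an insn computing "x = i * 4 + 10" makes
   x a giv with src_reg i, mult_val 4 and add_val 10, provided x is
   set nowhere else in the loop.  */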
6642 static rtx
6643 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
6644 int maybe_multiple)
6646 struct loop_regs *regs = LOOP_REGS (loop);
6648 rtx set;
6649 /* Look for a general induction variable in a register. */
6650 if (NONJUMP_INSN_P (p)
6651 && (set = single_set (p))
6652 && REG_P (SET_DEST (set))
6653 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
6655 rtx src_reg;
6656 rtx dest_reg;
6657 rtx add_val;
6658 rtx mult_val;
6659 rtx ext_val;
6660 int benefit;
6661 rtx regnote = 0;
6662 rtx last_consec_insn;
6664 dest_reg = SET_DEST (set);
6665 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
6666 return p;
6668 if (/* SET_SRC is a giv. */
6669 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
6670 &mult_val, &ext_val, 0, &benefit, VOIDmode)
6671 /* Equivalent expression is a giv. */
6672 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
6673 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
6674 &add_val, &mult_val, &ext_val, 0,
6675 &benefit, VOIDmode)))
6676 /* Don't try to handle any regs made by loop optimization.
6677 We have nothing on them in regno_first_uid, etc. */
6678 && REGNO (dest_reg) < max_reg_before_loop
6679 /* Don't recognize a BASIC_INDUCT_VAR here. */
6680 && dest_reg != src_reg
6681 /* This must be the only place where the register is set. */
6682 && (regs->array[REGNO (dest_reg)].n_times_set == 1
6683 /* or all sets must be consecutive and make a giv. */
6684 || (benefit = consec_sets_giv (loop, benefit, p,
6685 src_reg, dest_reg,
6686 &add_val, &mult_val, &ext_val,
6687 &last_consec_insn))))
6689 struct induction *v = xmalloc (sizeof (struct induction));
6691 /* If this is a library call, increase benefit. */
6692 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6693 benefit += libcall_benefit (p);
6695 /* Skip the consecutive insns, if there are any. */
6696 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
6697 p = last_consec_insn;
6699 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
6700 ext_val, benefit, DEST_REG, not_every_iteration,
6701 maybe_multiple, (rtx*) 0);
6706 /* Look for givs which are memory addresses. */
6707 if (NONJUMP_INSN_P (p))
6708 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
6709 maybe_multiple);
6711 /* Update the status of whether giv can derive other givs. This can
6712 change when we pass a label or an insn that updates a biv. */
6713 if (INSN_P (p) || LABEL_P (p))
6714 update_giv_derive (loop, p);
6715 return p;
6718 /* Return 1 if X is a valid source for an initial value (or as a value being
6719 compared against in an initial test).
6721 X must be either a register or constant and must not be clobbered between
6722 the current insn and the start of the loop.
6724 INSN is the insn containing X. */
6726 static int
6727 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
6729 if (CONSTANT_P (x))
6730 return 1;
6732 /* Only consider pseudos we know about initialized in insns whose luids
6733 we know. */
6734 if (!REG_P (x)
6735 || REGNO (x) >= max_reg_before_loop)
6736 return 0;
6738 /* Don't use a call-clobbered register across a call which clobbers it. On
6739 some machines, don't use any hard registers at all. */
6740 if (REGNO (x) < FIRST_PSEUDO_REGISTER
6741 && (SMALL_REGISTER_CLASSES
6742 || (call_seen && call_used_regs[REGNO (x)])))
6743 return 0;
6745 /* Don't use registers that have been clobbered before the start of the
6746 loop. */
6747 if (reg_set_between_p (x, insn, loop_start))
6748 return 0;
6750 return 1;
6753 /* Scan X for memory refs and check each memory address
6754 as a possible giv. INSN is the insn whose pattern X comes from.
6755 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
6756 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
6757 more than once in each loop iteration. */
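/* E.g. a reference to a[i] in the loop body appears as a MEM whose
   address is "a + 4*i" for 4-byte elements; that address is recorded
   as a DEST_ADDR giv so it can later be strength-reduced or turned
   into an auto-increment.  */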
6759 static void
6760 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
6761 int not_every_iteration, int maybe_multiple)
6763 int i, j;
6764 enum rtx_code code;
6765 const char *fmt;
6767 if (x == 0)
6768 return;
6770 code = GET_CODE (x);
6771 switch (code)
6773 case REG:
6774 case CONST_INT:
6775 case CONST:
6776 case CONST_DOUBLE:
6777 case SYMBOL_REF:
6778 case LABEL_REF:
6779 case PC:
6780 case CC0:
6781 case ADDR_VEC:
6782 case ADDR_DIFF_VEC:
6783 case USE:
6784 case CLOBBER:
6785 return;
6787 case MEM:
6789 rtx src_reg;
6790 rtx add_val;
6791 rtx mult_val;
6792 rtx ext_val;
6793 int benefit;
6795 /* This code used to disable creating GIVs with mult_val == 1 and
6796 add_val == 0. However, this leads to lost optimizations when
6797 it comes time to combine a set of related DEST_ADDR GIVs, since
6798 this one would not be seen. */
6800 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
6801 &mult_val, &ext_val, 1, &benefit,
6802 GET_MODE (x)))
6804 /* Found one; record it. */
6805 struct induction *v = xmalloc (sizeof (struct induction));
6807 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
6808 add_val, ext_val, benefit, DEST_ADDR,
6809 not_every_iteration, maybe_multiple, &XEXP (x, 0));
6811 v->mem = x;
6814 return;
6816 default:
6817 break;
6820 /* Recursively scan the subexpressions for other mem refs. */
6822 fmt = GET_RTX_FORMAT (code);
6823 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6824 if (fmt[i] == 'e')
6825 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
6826 maybe_multiple);
6827 else if (fmt[i] == 'E')
6828 for (j = 0; j < XVECLEN (x, i); j++)
6829 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
6830 maybe_multiple);
6833 /* Fill in the data about one biv update.
6834 V is the `struct induction' in which we record the biv. (It is
6835 allocated by the caller, with xmalloc.)
6836 INSN is the insn that sets it.
6837 DEST_REG is the biv's reg.
6839 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
6840 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
6841 being set to INC_VAL.
6843 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
6844 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
6845 can be executed more than once per iteration. If MAYBE_MULTIPLE
6846 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
6847 executed exactly once per iteration. */
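/* Thus "i = i + 4" is recorded with MULT_VAL == const1_rtx and
   INC_VAL == 4, while a plain assignment "i = 4" arrives with
   MULT_VAL == const0_rtx and INC_VAL == 4.  */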
6849 static void
6850 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
6851 rtx inc_val, rtx mult_val, rtx *location,
6852 int not_every_iteration, int maybe_multiple)
6854 struct loop_ivs *ivs = LOOP_IVS (loop);
6855 struct iv_class *bl;
6857 v->insn = insn;
6858 v->src_reg = dest_reg;
6859 v->dest_reg = dest_reg;
6860 v->mult_val = mult_val;
6861 v->add_val = inc_val;
6862 v->ext_dependent = NULL_RTX;
6863 v->location = location;
6864 v->mode = GET_MODE (dest_reg);
6865 v->always_computable = ! not_every_iteration;
6866 v->always_executed = ! not_every_iteration;
6867 v->maybe_multiple = maybe_multiple;
6868 v->same = 0;
6870 /* Add this to the reg's iv_class, creating a class
6871 if this is the first incrementation of the reg. */
6873 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
6874 if (bl == 0)
6876 /* Create and initialize new iv_class. */
6878 bl = xmalloc (sizeof (struct iv_class));
6880 bl->regno = REGNO (dest_reg);
6881 bl->biv = 0;
6882 bl->giv = 0;
6883 bl->biv_count = 0;
6884 bl->giv_count = 0;
6886 /* Set initial value to the reg itself. */
6887 bl->initial_value = dest_reg;
6888 bl->final_value = 0;
6889 /* We haven't seen the initializing insn yet. */
6890 bl->init_insn = 0;
6891 bl->init_set = 0;
6892 bl->initial_test = 0;
6893 bl->incremented = 0;
6894 bl->eliminable = 0;
6895 bl->nonneg = 0;
6896 bl->reversed = 0;
6897 bl->total_benefit = 0;
6899 /* Add this class to ivs->list. */
6900 bl->next = ivs->list;
6901 ivs->list = bl;
6903 /* Put it in the array of biv register classes. */
6904 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
6906 else
6908 /* Check if location is the same as a previous one. */
6909 struct induction *induction;
6910 for (induction = bl->biv; induction; induction = induction->next_iv)
6911 if (location == induction->location)
6913 v->same = induction;
6914 break;
6918 /* Update IV_CLASS entry for this biv. */
6919 v->next_iv = bl->biv;
6920 bl->biv = v;
6921 bl->biv_count++;
6922 if (mult_val == const1_rtx)
6923 bl->incremented = 1;
6925 if (loop_dump_stream)
6926 loop_biv_dump (v, loop_dump_stream, 0);
6929 /* Fill in the data about one giv.
6930 V is the `struct induction' in which we record the giv. (It is
6931 allocated by the caller, with xmalloc.)
6932 INSN is the insn that sets it.
6933 BENEFIT estimates the savings from deleting this insn.
6934 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
6935 into a register or is used as a memory address.
6937 SRC_REG is the biv reg which the giv is computed from.
6938 DEST_REG is the giv's reg (if the giv is stored in a reg).
6939 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
6940 LOCATION points to the place where this giv's value appears in INSN. */
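/* The recorded giv always stands for the value
   MULT_VAL * SRC_REG + ADD_VAL, possibly wrapped in the sign or zero
   extension described by EXT_VAL.  */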
6942 static void
6943 record_giv (const struct loop *loop, struct induction *v, rtx insn,
6944 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
6945 rtx ext_val, int benefit, enum g_types type,
6946 int not_every_iteration, int maybe_multiple, rtx *location)
6948 struct loop_ivs *ivs = LOOP_IVS (loop);
6949 struct induction *b;
6950 struct iv_class *bl;
6951 rtx set = single_set (insn);
6952 rtx temp;
6954 /* Attempt to prove constantness of the values. Don't let simplify_rtx
6955 undo the MULT canonicalization that we performed earlier. */
6956 temp = simplify_rtx (add_val);
6957 if (temp
6958 && ! (GET_CODE (add_val) == MULT
6959 && GET_CODE (temp) == ASHIFT))
6960 add_val = temp;
6962 v->insn = insn;
6963 v->src_reg = src_reg;
6964 v->giv_type = type;
6965 v->dest_reg = dest_reg;
6966 v->mult_val = mult_val;
6967 v->add_val = add_val;
6968 v->ext_dependent = ext_val;
6969 v->benefit = benefit;
6970 v->location = location;
6971 v->cant_derive = 0;
6972 v->combined_with = 0;
6973 v->maybe_multiple = maybe_multiple;
6974 v->maybe_dead = 0;
6975 v->derive_adjustment = 0;
6976 v->same = 0;
6977 v->ignore = 0;
6978 v->new_reg = 0;
6979 v->final_value = 0;
6980 v->same_insn = 0;
6981 v->auto_inc_opt = 0;
6982 v->shared = 0;
6984 /* The v->always_computable field is used in update_giv_derive, to
6985 determine whether a giv can be used to derive another giv. For a
6986 DEST_REG giv, INSN computes a new value for the giv, so its value
6987 isn't computable if INSN isn't executed every iteration.
6988 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
6989 it does not compute a new value. Hence the value is always computable
6990 regardless of whether INSN is executed each iteration. */
6992 if (type == DEST_ADDR)
6993 v->always_computable = 1;
6994 else
6995 v->always_computable = ! not_every_iteration;
6997 v->always_executed = ! not_every_iteration;
6999 if (type == DEST_ADDR)
7001 v->mode = GET_MODE (*location);
7002 v->lifetime = 1;
7004 else /* type == DEST_REG */
7006 v->mode = GET_MODE (SET_DEST (set));
7008 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
7010 /* If the lifetime is zero, it means that this register is
7011 really a dead store. So mark this as a giv that can be
7012 ignored. This will not prevent the biv from being eliminated. */
7013 if (v->lifetime == 0)
7014 v->ignore = 1;
7016 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7017 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7020 /* Add the giv to the class of givs computed from one biv. */
7022 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
7023 gcc_assert (bl);
7024 v->next_iv = bl->giv;
7025 bl->giv = v;
7027 /* Don't count DEST_ADDR. This is supposed to count the number of
7028 insns that calculate givs. */
7029 if (type == DEST_REG)
7030 bl->giv_count++;
7031 bl->total_benefit += benefit;
7033 if (type == DEST_ADDR)
7035 v->replaceable = 1;
7036 v->not_replaceable = 0;
7038 else
7040 /* The giv can be replaced outright by the reduced register only if all
7041 of the following conditions are true:
7042 - the insn that sets the giv is always executed on any iteration
7043 on which the giv is used at all
7044 (there are two ways to deduce this:
7045 either the insn is executed on every iteration,
7046 or all uses follow that insn in the same basic block),
7047 - the giv is not used outside the loop
7048 - no assignments to the biv occur during the giv's lifetime. */
7050 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
7051 /* Previous line always fails if INSN was moved by loop opt. */
7052 && REGNO_LAST_LUID (REGNO (dest_reg))
7053 < INSN_LUID (loop->end)
7054 && (! not_every_iteration
7055 || last_use_this_basic_block (dest_reg, insn)))
7057 /* Now check that there are no assignments to the biv within the
7058 giv's lifetime. This requires two separate checks. */
7060 /* Check each biv update, and fail if any are between the first
7061 and last use of the giv.
7063 If this loop contains an inner loop that was unrolled, then
7064 the insn modifying the biv may have been emitted by the loop
7065 unrolling code, and hence does not have a valid luid. Just
7066 mark the giv as not replaceable in this case. The biv is not very
7067 useful as a biv, because it is used in two different loops.
7068 It is very unlikely that we would be able to optimize the giv
7069 using this biv anyways. */
7071 v->replaceable = 1;
7072 v->not_replaceable = 0;
7073 for (b = bl->biv; b; b = b->next_iv)
7075 if (INSN_UID (b->insn) >= max_uid_for_loop
7076 || ((INSN_LUID (b->insn)
7077 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
7078 && (INSN_LUID (b->insn)
7079 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
7081 v->replaceable = 0;
7082 v->not_replaceable = 1;
7083 break;
7087 /* If there are any backwards branches that go from after the
7088 biv update to before it, then this giv is not replaceable. */
7089 if (v->replaceable)
7090 for (b = bl->biv; b; b = b->next_iv)
7091 if (back_branch_in_range_p (loop, b->insn))
7093 v->replaceable = 0;
7094 v->not_replaceable = 1;
7095 break;
7098 else
7100 /* May still be replaceable; we don't have enough info here to
7101 decide. */
7102 v->replaceable = 0;
7103 v->not_replaceable = 0;
7107 /* Record whether the add_val contains a const_int, for later use by
7108 combine_givs. */
7110 rtx tem = add_val;
7112 v->no_const_addval = 1;
7113 if (tem == const0_rtx)
7115 else if (CONSTANT_P (add_val))
7116 v->no_const_addval = 0;
7117 if (GET_CODE (tem) == PLUS)
7119 while (1)
7121 if (GET_CODE (XEXP (tem, 0)) == PLUS)
7122 tem = XEXP (tem, 0);
7123 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
7124 tem = XEXP (tem, 1);
7125 else
7126 break;
7128 if (CONSTANT_P (XEXP (tem, 1)))
7129 v->no_const_addval = 0;
7133 if (loop_dump_stream)
7134 loop_giv_dump (v, loop_dump_stream, 0);
7137 /* Try to calculate the final value of the giv, the value it will have at
7138 the end of the loop. If we can do it, return that value. */
7140 static rtx
7141 final_giv_value (const struct loop *loop, struct induction *v)
7143 struct loop_ivs *ivs = LOOP_IVS (loop);
7144 struct iv_class *bl;
7145 rtx insn;
7146 rtx increment, tem;
7147 rtx seq;
7148 rtx loop_end = loop->end;
7149 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
7151 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
7153 /* The final value for givs which depend on reversed bivs must be calculated
7154 differently than for ordinary givs. In this case, there is already an
7155 insn after the loop which sets this giv's final value (if necessary),
7156 and there are no other loop exits, so we can return any value. */
7157 if (bl->reversed)
7159 if (loop_dump_stream)
7160 fprintf (loop_dump_stream,
7161 "Final giv value for %d, depends on reversed biv\n",
7162 REGNO (v->dest_reg));
7163 return const0_rtx;
7166 /* Try to calculate the final value as a function of the biv it depends
7167 upon. The only exit from the loop must be the fall through at the bottom
7168 and the insn that sets the giv must be executed on every iteration
7169 (otherwise the giv may not have its final value when the loop exits). */
7171 /* ??? Can calculate the final giv value by subtracting off the
7172 extra biv increments times the giv's mult_val. The loop must have
7173 only one exit for this to work, but the number of iterations does not need
7174 to be known. */
7176 if (n_iterations != 0
7177 && ! loop->exit_count
7178 && v->always_executed)
7180 /* ?? It is tempting to use the biv's value here since these insns will
7181 be put after the loop, and hence the biv will have its final value
7182 then. However, this fails if the biv is subsequently eliminated.
7183 Perhaps determine whether biv's are eliminable before trying to
7184 determine whether giv's are replaceable so that we can use the
7185 biv value here if it is not eliminable. */
7187 /* We are emitting code after the end of the loop, so we must make
7188 sure that bl->initial_value is still valid then. It will still
7189 be valid if it is invariant. */
7191 increment = biv_total_increment (bl);
7193 if (increment && loop_invariant_p (loop, increment)
7194 && loop_invariant_p (loop, bl->initial_value))
7196 /* Can calculate the loop exit value of its biv as
7197 (n_iterations * increment) + initial_value */
7199 /* The loop exit value of the giv is then
7200 (final_biv_value - extra increments) * mult_val + add_val.
7201 The extra increments are any increments to the biv which
7202 occur in the loop after the giv's value is calculated.
7203 We must search from the insn that sets the giv to the end
7204 of the loop to calculate this value. */
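/* Illustrative example (values are hypothetical): with initial_value 0,
a total biv increment of 4 per iteration and n_iterations == 10, the
final biv value is 10 * 4 + 0 = 40; a giv with mult_val 2 and add_val 1
then has final value 40 * 2 + 1 = 81, less mult_val times any biv
increments that occur after the giv's insn. */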
7206 /* Put the final biv value in tem. */
7207 tem = gen_reg_rtx (v->mode);
7208 record_base_value (REGNO (tem), bl->biv->add_val, 0);
7209 loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment),
7210 GEN_INT (n_iterations),
7211 extend_value_for_giv (v, bl->initial_value),
7212 tem);
7214 /* Subtract off extra increments as we find them. */
7215 for (insn = NEXT_INSN (v->insn); insn != loop_end;
7216 insn = NEXT_INSN (insn))
7218 struct induction *biv;
7220 for (biv = bl->biv; biv; biv = biv->next_iv)
7221 if (biv->insn == insn)
7223 start_sequence ();
7224 tem = expand_simple_binop (GET_MODE (tem), MINUS, tem,
7225 biv->add_val, NULL_RTX, 0,
7226 OPTAB_LIB_WIDEN);
7227 seq = get_insns ();
7228 end_sequence ();
7229 loop_insn_sink (loop, seq);
7233 /* Now calculate the giv's final value. */
7234 loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem);
7236 if (loop_dump_stream)
7237 fprintf (loop_dump_stream,
7238 "Final giv value for %d, calc from biv's value.\n",
7239 REGNO (v->dest_reg));
7241 return tem;
7245 /* Replaceable giv's should never reach here. */
7246 gcc_assert (!v->replaceable);
7248 /* Check to see if the giv is dead at all loop exits. */
7249 if (reg_dead_after_loop (loop, v->dest_reg))
7251 if (loop_dump_stream)
7252 fprintf (loop_dump_stream,
7253 "Final giv value for %d, giv dead after loop exit.\n",
7254 REGNO (v->dest_reg));
7256 return const0_rtx;
7259 return 0;
7262 /* All this does is determine whether a giv can be made replaceable because
7263 its final value can be calculated. This code cannot be part of record_giv
7264 above, because final_giv_value requires that the number of loop iterations
7265 be known, and that cannot be accurately calculated until after all givs
7266 have been identified. */
7268 static void
7269 check_final_value (const struct loop *loop, struct induction *v)
7271 rtx final_value = 0;
7273 /* DEST_ADDR givs will never reach here, because they are always marked
7274 replaceable above in record_giv. */
7276 /* The giv can be replaced outright by the reduced register only if all
7277 of the following conditions are true:
7278 - the insn that sets the giv is always executed on any iteration
7279 on which the giv is used at all
7280 (there are two ways to deduce this:
7281 either the insn is executed on every iteration,
7282 or all uses follow that insn in the same basic block),
7283 - its final value can be calculated (this condition is different
7284 from the one above in record_giv)
7285 - it's not used before it's set
7286 - no assignments to the biv occur during the giv's lifetime. */
7288 #if 0
7289 /* This is only called now when replaceable is known to be false. */
7290 /* Clear replaceable, so that it won't confuse final_giv_value. */
7291 v->replaceable = 0;
7292 #endif
7294 if ((final_value = final_giv_value (loop, v))
7295 && (v->always_executed
7296 || last_use_this_basic_block (v->dest_reg, v->insn)))
7298 int biv_increment_seen = 0, before_giv_insn = 0;
7299 rtx p = v->insn;
7300 rtx last_giv_use;
7302 v->replaceable = 1;
7303 v->not_replaceable = 0;
7305 /* When trying to determine whether a biv increment occurs
7306 during the lifetime of the giv, we can ignore uses of the variable
7307 outside the loop because final_value is nonzero. Hence we cannot
7308 use regno_last_uid and regno_first_uid as above in record_giv. */
7310 /* Search the loop to determine whether any assignments to the
7311 biv occur during the giv's lifetime. Start with the insn
7312 that sets the giv, and search around the loop until we come
7313 back to that insn again.
7315 Also fail if there is a jump within the giv's lifetime that jumps
7316 to somewhere outside the lifetime but still within the loop. This
7317 catches spaghetti code where the execution order is not linear, and
7318 hence the above test fails. Here we assume that the giv lifetime
7319 does not extend from one iteration of the loop to the next, so as
7320 to make the test easier. Since the lifetime isn't known yet,
7321 this requires two loops. See also record_giv above. */
7323 last_giv_use = v->insn;
7325 while (1)
7327 p = NEXT_INSN (p);
7328 if (p == loop->end)
7330 before_giv_insn = 1;
7331 p = NEXT_INSN (loop->start);
7333 if (p == v->insn)
7334 break;
7336 if (INSN_P (p))
7338 /* It is possible for the BIV increment to use the GIV if we
7339 have a cycle. Thus we must be sure to check each insn for
7340 both BIV and GIV uses, and we must check for BIV uses
7341 first. */
7343 if (! biv_increment_seen
7344 && reg_set_p (v->src_reg, PATTERN (p)))
7345 biv_increment_seen = 1;
7347 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
7349 if (biv_increment_seen || before_giv_insn)
7351 v->replaceable = 0;
7352 v->not_replaceable = 1;
7353 break;
7355 last_giv_use = p;
7360 /* Now that the lifetime of the giv is known, check for branches
7361 from within the lifetime to outside the lifetime if it is still
7362 replaceable. */
7364 if (v->replaceable)
7366 p = v->insn;
7367 while (1)
7369 p = NEXT_INSN (p);
7370 if (p == loop->end)
7371 p = NEXT_INSN (loop->start);
7372 if (p == last_giv_use)
7373 break;
7375 if (JUMP_P (p) && JUMP_LABEL (p)
7376 && LABEL_NAME (JUMP_LABEL (p))
7377 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
7378 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
7379 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
7380 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
7382 v->replaceable = 0;
7383 v->not_replaceable = 1;
7385 if (loop_dump_stream)
7386 fprintf (loop_dump_stream,
7387 "Found branch outside giv lifetime.\n");
7389 break;
7394 /* If it is replaceable, then save the final value. */
7395 if (v->replaceable)
7396 v->final_value = final_value;
7399 if (loop_dump_stream && v->replaceable)
7400 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
7401 INSN_UID (v->insn), REGNO (v->dest_reg));
7404 /* Update the status of whether a giv can derive other givs.
7406 We need to do something special if there is or may be an update to the biv
7407 between the time the giv is defined and the time it is used to derive
7408 another giv.
7410 In addition, a giv that is only conditionally set is not allowed to
7411 derive another giv once a label has been passed.
7413 The cases we look at are when a label or an update to a biv is passed. */
7415 static void
7416 update_giv_derive (const struct loop *loop, rtx p)
7418 struct loop_ivs *ivs = LOOP_IVS (loop);
7419 struct iv_class *bl;
7420 struct induction *biv, *giv;
7421 rtx tem;
7422 int dummy;
7424 /* Search all IV classes, then all bivs, and finally all givs.
7426 There are three cases we are concerned with. First we have the situation
7427 of a giv that is only updated conditionally. In that case, it may not
7428 derive any givs after a label is passed.
7430 The second case is when a biv update occurs, or may occur, after the
7431 definition of a giv. For certain biv updates (see below) that are
7432 known to occur between the giv definition and use, we can adjust the
7433 giv definition. For others, or when the biv update is conditional,
7434 we must prevent the giv from deriving any other givs. There are two
7435 sub-cases within this case.
7437 If this is a label, we are concerned with any biv update that is done
7438 conditionally, since it may be done after the giv is defined followed by
7439 a branch here (actually, we need to pass both a jump and a label, but
7440 this extra tracking doesn't seem worth it).
7442 If this is a jump, we are concerned about any biv update that may be
7443 executed multiple times. We are actually only concerned about
7444 backward jumps, but it is probably not worth performing the test
7445 on the jump again here.
7447 If this is a biv update, we must adjust the giv status to show that a
7448 subsequent biv update was performed. If this adjustment cannot be done,
7449 the giv cannot derive further givs. */
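/* Illustrative example (values are hypothetical): if giv g = b * 3 + 1
is computed and the biv is then updated by b = b + 2 before g is used
to derive another giv, the derivation can be compensated by recording
derive_adjustment = biv->add_val * giv->mult_val = 2 * 3 = 6, the
amount by which g now lags b * 3 + 1. */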
7451 for (bl = ivs->list; bl; bl = bl->next)
7452 for (biv = bl->biv; biv; biv = biv->next_iv)
7453 if (LABEL_P (p) || JUMP_P (p)
7454 || biv->insn == p)
7456 /* Skip if location is the same as a previous one. */
7457 if (biv->same)
7458 continue;
7460 for (giv = bl->giv; giv; giv = giv->next_iv)
7462 /* If cant_derive is already true, there is no point in
7463 checking all of these conditions again. */
7464 if (giv->cant_derive)
7465 continue;
7467 /* If this giv is conditionally set and we have passed a label,
7468 it cannot derive anything. */
7469 if (LABEL_P (p) && ! giv->always_computable)
7470 giv->cant_derive = 1;
7472 /* Skip givs that have mult_val == 0, since
7473 they are really invariants. Also skip those that are
7474 replaceable, since we know their lifetime doesn't contain
7475 any biv update. */
7476 else if (giv->mult_val == const0_rtx || giv->replaceable)
7477 continue;
7479 /* The only way we can allow this giv to derive another
7480 is if this is a biv increment and we can form the product
7481 of biv->add_val and giv->mult_val. In this case, we will
7482 be able to compute a compensation. */
7483 else if (biv->insn == p)
7485 rtx ext_val_dummy;
7487 tem = 0;
7488 if (biv->mult_val == const1_rtx)
7489 tem = simplify_giv_expr (loop,
7490 gen_rtx_MULT (giv->mode,
7491 biv->add_val,
7492 giv->mult_val),
7493 &ext_val_dummy, &dummy);
7495 if (tem && giv->derive_adjustment)
7496 tem = simplify_giv_expr
7497 (loop,
7498 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
7499 &ext_val_dummy, &dummy);
7501 if (tem)
7502 giv->derive_adjustment = tem;
7503 else
7504 giv->cant_derive = 1;
7506 else if ((LABEL_P (p) && ! biv->always_computable)
7507 || (JUMP_P (p) && biv->maybe_multiple))
7508 giv->cant_derive = 1;
7513 /* Check whether an insn is an increment legitimate for a basic induction var.
7514 X is the source of insn P, or a part of it.
7515 MODE is the mode in which X should be interpreted.
7517 DEST_REG is the putative biv, also the destination of the insn.
7518 We accept patterns of these forms:
7519 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
7520 REG = INVARIANT + REG
7522 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
7523 store the additive term into *INC_VAL, and store the place where
7524 we found the additive term into *LOCATION.
7526 If X is an assignment of an invariant into DEST_REG, we set
7527 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
7529 We also want to detect a BIV when it corresponds to a variable
7530 whose mode was promoted. In that case, an increment
7531 of the variable may be a PLUS that adds a SUBREG of that variable to
7532 an invariant and then sign- or zero-extends the result of the PLUS
7533 into the variable.
7535 Most GIVs in such cases will be in the promoted mode, since that is
7536 probably the natural computation mode (and almost certainly the mode
7537 used for addresses) on the machine. So we view the pseudo-reg containing
7538 the variable as the BIV, as if it were simply incremented.
7540 Note that treating the entire pseudo as a BIV will result in making
7541 simple increments to any GIVs based on it. However, if the variable
7542 overflows in its declared mode but not its promoted mode, the result will
7543 be incorrect. This is acceptable if the variable is signed, since
7544 overflows in such cases are undefined, but not if it is unsigned, since
7545 those overflows are defined. So we only check for SIGN_EXTEND and
7546 not ZERO_EXTEND.
7548 If we cannot find a biv, we return 0. */
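/* Illustrative example (register numbers are hypothetical): for the C
loop "for (i = 0; i < n; i += 4)", the biv increment is an insn of the
form (set (reg 100) (plus (reg 100) (const_int 4))), so the PLUS case
below matches with *inc_val = (const_int 4) and *mult_val = const1_rtx. */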
7550 static int
7551 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
7552 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
7553 rtx **location)
7555 enum rtx_code code;
7556 rtx *argp, arg;
7557 rtx insn, set = 0, last, inc;
7559 code = GET_CODE (x);
7560 *location = NULL;
7561 switch (code)
7563 case PLUS:
7564 if (rtx_equal_p (XEXP (x, 0), dest_reg)
7565 || (GET_CODE (XEXP (x, 0)) == SUBREG
7566 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
7567 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
7569 argp = &XEXP (x, 1);
7571 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
7572 || (GET_CODE (XEXP (x, 1)) == SUBREG
7573 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
7574 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
7576 argp = &XEXP (x, 0);
7578 else
7579 return 0;
7581 arg = *argp;
7582 if (loop_invariant_p (loop, arg) != 1)
7583 return 0;
7585 /* convert_modes can emit new instructions, e.g. when arg is a loop
7586 invariant MEM and dest_reg has a different mode.
7587 These instructions would be emitted after the end of the function
7588 and then *inc_val would be an uninitialized pseudo.
7589 Detect this and bail in this case.
7590 Other ways to solve this would be to introduce a convert_modes
7591 variant that is allowed to fail but not to emit new instructions;
7592 to emit these instructions before the loop start and let them be
7593 garbage collected if *inc_val is never used; or to save the
7594 *inc_val initialization sequence generated here and emit it at
7595 some suitable place when *inc_val is actually used. */
7596 last = get_last_insn ();
7597 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
7598 if (get_last_insn () != last)
7600 delete_insns_since (last);
7601 return 0;
7604 *inc_val = inc;
7605 *mult_val = const1_rtx;
7606 *location = argp;
7607 return 1;
7609 case SUBREG:
7610 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
7611 handle addition of promoted variables.
7612 ??? The comment at the start of this function is wrong: promoted
7613 variable increments don't look like it says they do. */
7614 return basic_induction_var (loop, SUBREG_REG (x),
7615 GET_MODE (SUBREG_REG (x)),
7616 dest_reg, p, inc_val, mult_val, location);
7618 case REG:
7619 /* If this register is assigned in a previous insn, look at its
7620 source, but don't go outside the loop or past a label. */
7622 /* If this sets a register to itself, we would repeat any previous
7623 biv increment if we applied this strategy blindly. */
7624 if (rtx_equal_p (dest_reg, x))
7625 return 0;
7627 insn = p;
7628 while (1)
7630 rtx dest;
7633 insn = PREV_INSN (insn);
7635 while (insn && NOTE_P (insn)
7636 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7638 if (!insn)
7639 break;
7640 set = single_set (insn);
7641 if (set == 0)
7642 break;
7643 dest = SET_DEST (set);
7644 if (dest == x
7645 || (GET_CODE (dest) == SUBREG
7646 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
7647 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
7648 && SUBREG_REG (dest) == x))
7649 return basic_induction_var (loop, SET_SRC (set),
7650 (GET_MODE (SET_SRC (set)) == VOIDmode
7651 ? GET_MODE (x)
7652 : GET_MODE (SET_SRC (set))),
7653 dest_reg, insn,
7654 inc_val, mult_val, location);
7656 while (GET_CODE (dest) == SUBREG
7657 || GET_CODE (dest) == ZERO_EXTRACT
7658 || GET_CODE (dest) == STRICT_LOW_PART)
7659 dest = XEXP (dest, 0);
7660 if (dest == x)
7661 break;
7663 /* Fall through. */
7665 /* Can accept constant setting of biv only when inside innermost loop.
7666 Otherwise, a biv of an inner loop may be incorrectly recognized
7667 as a biv of the outer loop,
7668 causing code to be moved INTO the inner loop. */
7669 case MEM:
7670 if (loop_invariant_p (loop, x) != 1)
7671 return 0;
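/* Fall through. */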
7672 case CONST_INT:
7673 case SYMBOL_REF:
7674 case CONST:
7675 /* convert_modes dies if we try to convert to or from CCmode, so just
7676 exclude that case. It is very unlikely that a condition code value
7677 would be a useful iterator anyway. convert_modes dies if we try to
7678 convert a float mode to non-float or vice versa too. */
7679 if (loop->level == 1
7680 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
7681 && GET_MODE_CLASS (mode) != MODE_CC)
7683 /* Possible bug here? Perhaps we don't know the mode of X. */
7684 last = get_last_insn ();
7685 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
7686 if (get_last_insn () != last)
7688 delete_insns_since (last);
7689 return 0;
7692 *inc_val = inc;
7693 *mult_val = const0_rtx;
7694 return 1;
7696 else
7697 return 0;
7699 case SIGN_EXTEND:
7700 /* Ignore this BIV if signed arithmetic overflow is defined. */
7701 if (flag_wrapv)
7702 return 0;
7703 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
7704 dest_reg, p, inc_val, mult_val, location);
7706 case ASHIFTRT:
7707 /* Similar, since this can be a sign extension. */
7708 for (insn = PREV_INSN (p);
7709 (insn && NOTE_P (insn)
7710 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7711 insn = PREV_INSN (insn))
7714 if (insn)
7715 set = single_set (insn);
7717 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
7718 && set && SET_DEST (set) == XEXP (x, 0)
7719 && GET_CODE (XEXP (x, 1)) == CONST_INT
7720 && INTVAL (XEXP (x, 1)) >= 0
7721 && GET_CODE (SET_SRC (set)) == ASHIFT
7722 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
7723 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
7724 GET_MODE (XEXP (x, 0)),
7725 dest_reg, insn, inc_val, mult_val,
7726 location);
7727 return 0;
7729 default:
7730 return 0;
7734 /* A general induction variable (giv) is any quantity that is a linear
7735 function of a basic induction variable,
7736 i.e. giv = biv * mult_val + add_val.
7737 The coefficients can be any loop invariant quantity.
7738 A giv need not be computed directly from the biv;
7739 it can be computed by way of other givs. */
7741 /* Determine whether X computes a giv.
7742 If it does, return a nonzero value
7743 which is the benefit from eliminating the computation of X;
7744 set *SRC_REG to the register of the biv that it is computed from;
7745 set *ADD_VAL and *MULT_VAL to the coefficients,
7746 such that the value of X is biv * mult + add. */
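/* Illustrative example (hypothetical RTL): if (reg 100) is a biv and
(reg 101) is loop invariant, then
X = (plus (mult (reg 100) (const_int 4)) (reg 101))
is recognized as a giv with *SRC_REG = (reg 100),
*MULT_VAL = (const_int 4) and *ADD_VAL = (reg 101). */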
7748 static int
7749 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
7750 rtx *add_val, rtx *mult_val, rtx *ext_val,
7751 int is_addr, int *pbenefit,
7752 enum machine_mode addr_mode)
7754 struct loop_ivs *ivs = LOOP_IVS (loop);
7755 rtx orig_x = x;
7757 /* If this is an invariant, forget it, it isn't a giv. */
7758 if (loop_invariant_p (loop, x) == 1)
7759 return 0;
7761 *pbenefit = 0;
7762 *ext_val = NULL_RTX;
7763 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
7764 if (x == 0)
7765 return 0;
7767 switch (GET_CODE (x))
7769 case USE:
7770 case CONST_INT:
7771 /* Since this is now an invariant and wasn't before, it must be a giv
7772 with MULT_VAL == 0. It doesn't matter which BIV we associate this
7773 with. */
7774 *src_reg = ivs->list->biv->dest_reg;
7775 *mult_val = const0_rtx;
7776 *add_val = x;
7777 break;
7779 case REG:
7780 /* This is equivalent to a BIV. */
7781 *src_reg = x;
7782 *mult_val = const1_rtx;
7783 *add_val = const0_rtx;
7784 break;
7786 case PLUS:
7787 /* Either (plus (biv) (invar)) or
7788 (plus (mult (biv) (invar_1)) (invar_2)). */
7789 if (GET_CODE (XEXP (x, 0)) == MULT)
7791 *src_reg = XEXP (XEXP (x, 0), 0);
7792 *mult_val = XEXP (XEXP (x, 0), 1);
7794 else
7796 *src_reg = XEXP (x, 0);
7797 *mult_val = const1_rtx;
7799 *add_val = XEXP (x, 1);
7800 break;
7802 case MULT:
7803 /* ADD_VAL is zero. */
7804 *src_reg = XEXP (x, 0);
7805 *mult_val = XEXP (x, 1);
7806 *add_val = const0_rtx;
7807 break;
7809 default:
7810 gcc_unreachable ();
7813 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
7814 unless they are CONST_INT). */
7815 if (GET_CODE (*add_val) == USE)
7816 *add_val = XEXP (*add_val, 0);
7817 if (GET_CODE (*mult_val) == USE)
7818 *mult_val = XEXP (*mult_val, 0);
7820 if (is_addr)
7821 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
7822 else
7823 *pbenefit += rtx_cost (orig_x, SET);
7825 /* Always return true if this is a giv so it will be detected as such,
7826 even if the benefit is zero or negative. This allows elimination
7827 of bivs that might otherwise not be eliminated. */
7828 return 1;
7831 /* Given an expression, X, try to form it as a linear function of a biv.
7832 We will canonicalize it to be of the form
7833 (plus (mult (BIV) (invar_1))
7834 (invar_2))
7835 with possible degeneracies.
7837 The invariant expressions must each be of a form that can be used as a
7838 machine operand. We surround them with a USE rtx (a hack, but localized
7839 and certainly unambiguous!) if not a CONST_INT for simplicity in this
7840 routine; it is the caller's responsibility to strip them.
7842 If no such canonicalization is possible (i.e., two biv's are used or an
7843 expression that is neither invariant nor a biv or giv), this routine
7844 returns 0.
7846 For a nonzero return, the result will have a code of CONST_INT, USE,
7847 REG (for a BIV), PLUS, or MULT. No other codes will occur.
7849 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
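/* Illustrative example: with b a biv, the expression (b + 3) - 1 is
first rewritten by the MINUS case below as (b + 3) + 1 * (-1); the
constants then fold, yielding the degenerate canonical form
(plus b (const_int 2)). */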
7851 static rtx sge_plus (enum machine_mode, rtx, rtx);
7852 static rtx sge_plus_constant (rtx, rtx);
7854 static rtx
7855 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
7857 struct loop_ivs *ivs = LOOP_IVS (loop);
7858 struct loop_regs *regs = LOOP_REGS (loop);
7859 enum machine_mode mode = GET_MODE (x);
7860 rtx arg0, arg1;
7861 rtx tem;
7863 /* If this is not an integer mode, or if we cannot do arithmetic in this
7864 mode, this can't be a giv. */
7865 if (mode != VOIDmode
7866 && (GET_MODE_CLASS (mode) != MODE_INT
7867 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
7868 return NULL_RTX;
7870 switch (GET_CODE (x))
7872 case PLUS:
7873 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7874 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7875 if (arg0 == 0 || arg1 == 0)
7876 return NULL_RTX;
7878 /* Put constant last, CONST_INT last if both constant. */
7879 if ((GET_CODE (arg0) == USE
7880 || GET_CODE (arg0) == CONST_INT)
7881 && ! ((GET_CODE (arg0) == USE
7882 && GET_CODE (arg1) == USE)
7883 || GET_CODE (arg1) == CONST_INT))
7884 tem = arg0, arg0 = arg1, arg1 = tem;
7886 /* Handle addition of zero, then addition of an invariant. */
7887 if (arg1 == const0_rtx)
7888 return arg0;
7889 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
7890 switch (GET_CODE (arg0))
7892 case CONST_INT:
7893 case USE:
7894 /* Adding two invariants must result in an invariant, so enclose
7895 addition operation inside a USE and return it. */
7896 if (GET_CODE (arg0) == USE)
7897 arg0 = XEXP (arg0, 0);
7898 if (GET_CODE (arg1) == USE)
7899 arg1 = XEXP (arg1, 0);
7901 if (GET_CODE (arg0) == CONST_INT)
7902 tem = arg0, arg0 = arg1, arg1 = tem;
7903 if (GET_CODE (arg1) == CONST_INT)
7904 tem = sge_plus_constant (arg0, arg1);
7905 else
7906 tem = sge_plus (mode, arg0, arg1);
7908 if (GET_CODE (tem) != CONST_INT)
7909 tem = gen_rtx_USE (mode, tem);
7910 return tem;
7912 case REG:
7913 case MULT:
7914 /* biv + invar or mult + invar. Return sum. */
7915 return gen_rtx_PLUS (mode, arg0, arg1);
7917 case PLUS:
7918 /* (a + invar_1) + invar_2. Associate. */
7919 return
7920 simplify_giv_expr (loop,
7921 gen_rtx_PLUS (mode,
7922 XEXP (arg0, 0),
7923 gen_rtx_PLUS (mode,
7924 XEXP (arg0, 1),
7925 arg1)),
7926 ext_val, benefit);
7928 default:
7929 gcc_unreachable ();
7932 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
7933 MULT to reduce cases. */
7934 if (REG_P (arg0))
7935 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
7936 if (REG_P (arg1))
7937 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
7939 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
7940 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
7941 Recurse to associate the second PLUS. */
7942 if (GET_CODE (arg1) == MULT)
7943 tem = arg0, arg0 = arg1, arg1 = tem;
7945 if (GET_CODE (arg1) == PLUS)
7946 return
7947 simplify_giv_expr (loop,
7948 gen_rtx_PLUS (mode,
7949 gen_rtx_PLUS (mode, arg0,
7950 XEXP (arg1, 0)),
7951 XEXP (arg1, 1)),
7952 ext_val, benefit);
7954 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
7955 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
7956 return NULL_RTX;
7958 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
7959 return NULL_RTX;
7961 return simplify_giv_expr (loop,
7962 gen_rtx_MULT (mode,
7963 XEXP (arg0, 0),
7964 gen_rtx_PLUS (mode,
7965 XEXP (arg0, 1),
7966 XEXP (arg1, 1))),
7967 ext_val, benefit);
7969 case MINUS:
7970 /* Handle "a - b" as "a + b * (-1)". */
7971 return simplify_giv_expr (loop,
7972 gen_rtx_PLUS (mode,
7973 XEXP (x, 0),
7974 gen_rtx_MULT (mode,
7975 XEXP (x, 1),
7976 constm1_rtx)),
7977 ext_val, benefit);
7979 case MULT:
7980 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7981 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7982 if (arg0 == 0 || arg1 == 0)
7983 return NULL_RTX;
7985 /* Put constant last, CONST_INT last if both constant. */
7986 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
7987 && GET_CODE (arg1) != CONST_INT)
7988 tem = arg0, arg0 = arg1, arg1 = tem;
7990 /* If second argument is not now constant, not giv. */
7991 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
7992 return NULL_RTX;
7994 /* Handle multiply by 0 or 1. */
7995 if (arg1 == const0_rtx)
7996 return const0_rtx;
7998 else if (arg1 == const1_rtx)
7999 return arg0;
8001 switch (GET_CODE (arg0))
8003 case REG:
8004 /* biv * invar. Done. */
8005 return gen_rtx_MULT (mode, arg0, arg1);
8007 case CONST_INT:
8008 /* Product of two constants. */
8009 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
8011 case USE:
8012 /* invar * invar is a giv, but attempt to simplify it somehow. */
8013 if (GET_CODE (arg1) != CONST_INT)
8014 return NULL_RTX;
8016 arg0 = XEXP (arg0, 0);
8017 if (GET_CODE (arg0) == MULT)
8019 /* (invar_0 * invar_1) * invar_2. Associate. */
8020 return simplify_giv_expr (loop,
8021 gen_rtx_MULT (mode,
8022 XEXP (arg0, 0),
8023 gen_rtx_MULT (mode,
8024 XEXP (arg0, 1),
8026 arg1)),
8027 ext_val, benefit);
8029 /* Propagate the MULT expressions to the innermost nodes. */
8030 else if (GET_CODE (arg0) == PLUS)
8032 /* (invar_0 + invar_1) * invar_2. Distribute. */
8033 return simplify_giv_expr (loop,
8034 gen_rtx_PLUS (mode,
8035 gen_rtx_MULT (mode,
8036 XEXP (arg0, 0),
8038 arg1),
8039 gen_rtx_MULT (mode,
8040 XEXP (arg0, 1),
8042 arg1)),
8043 ext_val, benefit);
8045 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
8047 case MULT:
8048 /* (a * invar_1) * invar_2. Associate. */
8049 return simplify_giv_expr (loop,
8050 gen_rtx_MULT (mode,
8051 XEXP (arg0, 0),
8052 gen_rtx_MULT (mode,
8053 XEXP (arg0, 1),
8054 arg1)),
8055 ext_val, benefit);
8057 case PLUS:
8058 /* (a + invar_1) * invar_2. Distribute. */
8059 return simplify_giv_expr (loop,
8060 gen_rtx_PLUS (mode,
8061 gen_rtx_MULT (mode,
8062 XEXP (arg0, 0),
8063 arg1),
8064 gen_rtx_MULT (mode,
8065 XEXP (arg0, 1),
8066 arg1)),
8067 ext_val, benefit);
8069 default:
8070 gcc_unreachable ();
8073 case ASHIFT:
8074 /* Shift by constant is multiply by power of two. */
8075 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8076 return 0;
8078 return
8079 simplify_giv_expr (loop,
8080 gen_rtx_MULT (mode,
8081 XEXP (x, 0),
8082 GEN_INT ((HOST_WIDE_INT) 1
8083 << INTVAL (XEXP (x, 1)))),
8084 ext_val, benefit);
8086 case NEG:
8087 /* "-a" is "a * (-1)" */
8088 return simplify_giv_expr (loop,
8089 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
8090 ext_val, benefit);
8092 case NOT:
8093 /* "~a" is "-a - 1". Silly, but easy. */
8094 return simplify_giv_expr (loop,
8095 gen_rtx_MINUS (mode,
8096 gen_rtx_NEG (mode, XEXP (x, 0)),
8097 const1_rtx),
8098 ext_val, benefit);
8100 case USE:
8101 /* Already in proper form for invariant. */
8102 return x;
8104 case SIGN_EXTEND:
8105 case ZERO_EXTEND:
8106 case TRUNCATE:
8107 /* Conditionally recognize extensions of simple IVs. After we've
8108 computed loop traversal counts and verified the range of the
8109 source IV, we'll reevaluate this as a GIV. */
8110 if (*ext_val == NULL_RTX)
8112 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
8113 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
8115 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
8116 return arg0;
8119 goto do_default;
8121 case REG:
8122 /* If this is a new register, we can't deal with it. */
8123 if (REGNO (x) >= max_reg_before_loop)
8124 return 0;
8126 /* Check for biv or giv. */
8127 switch (REG_IV_TYPE (ivs, REGNO (x)))
8129 case BASIC_INDUCT:
8130 return x;
8131 case GENERAL_INDUCT:
8133 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
8135 /* Form expression from giv and add benefit. Ensure this giv
8136 can derive another and subtract any needed adjustment if so. */
8138 /* Increasing the benefit here is risky. The only case in which it
8139 is arguably correct is if this is the only use of V. In other
8140 cases, this will artificially inflate the benefit of the current
8141 giv, and lead to suboptimal code. Thus, it is disabled, since
8142 potentially not reducing an only marginally beneficial giv is
8143 less harmful than reducing many givs that are not really
8144 beneficial. */
8146 rtx single_use = regs->array[REGNO (x)].single_usage;
8147 if (single_use && single_use != const0_rtx)
8148 *benefit += v->benefit;
8151 if (v->cant_derive)
8152 return 0;
8154 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
8155 v->src_reg, v->mult_val),
8156 v->add_val);
8158 if (v->derive_adjustment)
8159 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
8160 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
8161 if (*ext_val)
8163 if (!v->ext_dependent)
8164 return arg0;
8166 else
8168 *ext_val = v->ext_dependent;
8169 return arg0;
8171 return 0;
8174 default:
8175 do_default:
8176 /* If it isn't an induction variable, and it is invariant, we
8177 may be able to simplify things further by looking through
8178 the bits we just moved outside the loop. */
8179 if (loop_invariant_p (loop, x) == 1)
8181 struct movable *m;
8182 struct loop_movables *movables = LOOP_MOVABLES (loop);
8184 for (m = movables->head; m; m = m->next)
8185 if (rtx_equal_p (x, m->set_dest))
8187 /* Ok, we found a match. Substitute and simplify. */
8189 /* If we match another movable, we must use that, as
8190 this one is going away. */
8191 if (m->match)
8192 return simplify_giv_expr (loop, m->match->set_dest,
8193 ext_val, benefit);
8195 /* If consec is nonzero, this is a member of a group of
8196 instructions that were moved together. We handle this
8197 case only to the point of seeking to the last insn and
8198 looking for a REG_EQUAL. Fail if we don't find one. */
8199 if (m->consec != 0)
8201 int i = m->consec;
8202 tem = m->insn;
8205 tem = NEXT_INSN (tem);
8207 while (--i > 0);
8209 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
8210 if (tem)
8211 tem = XEXP (tem, 0);
8213 else
8215 tem = single_set (m->insn);
8216 if (tem)
8217 tem = SET_SRC (tem);
8220 if (tem)
8222 /* What we are most interested in is pointer
8223 arithmetic on invariants -- only take
8224 patterns we may be able to do something with. */
8225 if (GET_CODE (tem) == PLUS
8226 || GET_CODE (tem) == MULT
8227 || GET_CODE (tem) == ASHIFT
8228 || GET_CODE (tem) == CONST_INT
8229 || GET_CODE (tem) == SYMBOL_REF)
8231 tem = simplify_giv_expr (loop, tem, ext_val,
8232 benefit);
8233 if (tem)
8234 return tem;
8236 else if (GET_CODE (tem) == CONST
8237 && GET_CODE (XEXP (tem, 0)) == PLUS
8238 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
8239 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
8241 tem = simplify_giv_expr (loop, XEXP (tem, 0),
8242 ext_val, benefit);
8243 if (tem)
8244 return tem;
8247 break;
8250 break;
8253 /* Fall through to general case. */
8254 default:
8255 /* If invariant, return as USE (unless CONST_INT).
8256 Otherwise, not giv. */
8257 if (GET_CODE (x) == USE)
8258 x = XEXP (x, 0);
8260 if (loop_invariant_p (loop, x) == 1)
8262 if (GET_CODE (x) == CONST_INT)
8263 return x;
8264 if (GET_CODE (x) == CONST
8265 && GET_CODE (XEXP (x, 0)) == PLUS
8266 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8267 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
8268 x = XEXP (x, 0);
8269 return gen_rtx_USE (mode, x);
8271 else
8272 return 0;
8276 /* This routine folds invariants such that there is only ever one
8277 CONST_INT in the summation. It is only used by simplify_giv_expr. */
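/* Illustrative example: folding the constant 5 into
(plus (reg) (const_int 3)) yields (plus (reg) (const_int 8)), rather
than a nested sum containing two CONST_INTs. */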
8279 static rtx
8280 sge_plus_constant (rtx x, rtx c)
8282 if (GET_CODE (x) == CONST_INT)
8283 return GEN_INT (INTVAL (x) + INTVAL (c));
8284 else if (GET_CODE (x) != PLUS)
8285 return gen_rtx_PLUS (GET_MODE (x), x, c);
8286 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8288 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8289 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
8291 else if (GET_CODE (XEXP (x, 0)) == PLUS
8292 || GET_CODE (XEXP (x, 1)) != PLUS)
8294 return gen_rtx_PLUS (GET_MODE (x),
8295 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
8297 else
8299 return gen_rtx_PLUS (GET_MODE (x),
8300 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
8304 static rtx
8305 sge_plus (enum machine_mode mode, rtx x, rtx y)
8307 while (GET_CODE (y) == PLUS)
8309 rtx a = XEXP (y, 0);
8310 if (GET_CODE (a) == CONST_INT)
8311 x = sge_plus_constant (x, a);
8312 else
8313 x = gen_rtx_PLUS (mode, x, a);
8314 y = XEXP (y, 1);
8316 if (GET_CODE (y) == CONST_INT)
8317 x = sge_plus_constant (x, y);
8318 else
8319 x = gen_rtx_PLUS (mode, x, y);
8320 return x;
8323 /* Help detect a giv that is calculated by several consecutive insns;
8324 for example,
8325 giv = biv * M
8326 giv = giv + A
8327 The caller has already identified the first insn P as having a giv as dest;
8328 we check that all other insns that set the same register follow
8329 immediately after P, that they alter nothing else,
8330 and that the result of the last is still a giv.
8332 The value is 0 if the reg set in P is not really a giv.
8333 Otherwise, the value is the amount gained by eliminating
8334 all the consecutive insns that compute the value.
8336 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
8337 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
8339 The coefficients of the ultimate giv value are stored in
8340 *MULT_VAL and *ADD_VAL. */
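/* For the two-insn example above, the ultimate giv value is
biv * M + A, so *MULT_VAL is set to M and *ADD_VAL to A, and the
returned benefit accounts for both insns. */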
8342 static int
8343 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
8344 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
8345 rtx *ext_val, rtx *last_consec_insn)
8347 struct loop_ivs *ivs = LOOP_IVS (loop);
8348 struct loop_regs *regs = LOOP_REGS (loop);
8349 int count;
8350 enum rtx_code code;
8351 int benefit;
8352 rtx temp;
8353 rtx set;
8355 /* Indicate that this is a giv so that we can update the value produced in
8356 each insn of the multi-insn sequence.
8358 This induction structure will be used only by the call to
8359 general_induction_var below, so we can allocate it on our stack.
8360 If this is a giv, our caller will replace the induct var entry with
8361 a new induction structure. */
8362 struct induction *v;
8364 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
8365 return 0;
8367 v = alloca (sizeof (struct induction));
8368 v->src_reg = src_reg;
8369 v->mult_val = *mult_val;
8370 v->add_val = *add_val;
8371 v->benefit = first_benefit;
8372 v->cant_derive = 0;
8373 v->derive_adjustment = 0;
8374 v->ext_dependent = NULL_RTX;
8376 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
8377 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
8379 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
8381 while (count > 0)
8383 p = NEXT_INSN (p);
8384 code = GET_CODE (p);
8386 /* If libcall, skip to end of call sequence. */
8387 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
8388 p = XEXP (temp, 0);
8390 if (code == INSN
8391 && (set = single_set (p))
8392 && REG_P (SET_DEST (set))
8393 && SET_DEST (set) == dest_reg
8394 && (general_induction_var (loop, SET_SRC (set), &src_reg,
8395 add_val, mult_val, ext_val, 0,
8396 &benefit, VOIDmode)
8397 /* Giv created by equivalent expression. */
8398 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
8399 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
8400 add_val, mult_val, ext_val, 0,
8401 &benefit, VOIDmode)))
8402 && src_reg == v->src_reg)
8404 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
8405 benefit += libcall_benefit (p);
8407 count--;
8408 v->mult_val = *mult_val;
8409 v->add_val = *add_val;
8410 v->benefit += benefit;
8412 else if (code != NOTE)
8414 /* Allow insns that set something other than this giv to a
8415 constant. Such insns are needed on machines which cannot
8416 include long constants and should not disqualify a giv. */
8417 if (code == INSN
8418 && (set = single_set (p))
8419 && SET_DEST (set) != dest_reg
8420 && CONSTANT_P (SET_SRC (set)))
8421 continue;
8423 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8424 return 0;
8428 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8429 *last_consec_insn = p;
8430 return v->benefit;
8433 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8434 represented by G1. If no such expression can be found, or it is clear that
8435 it cannot possibly be a valid address, 0 is returned.
8437 To perform the computation, we note that
8438 G1 = x * v + a and
8439 G2 = y * v + b
8440 where `v' is the biv.
8442 So G2 = (y/x) * G1 + (b - a*y/x).
8444 Note that MULT = y/x.
8446 Update: A and B are now allowed to be additive expressions such that
8447 B contains all variables in A. That is, computing B-A will not require
8448 subtracting variables. */
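/* Worked example (values are hypothetical): with G1 = 2 * v + 1 and
G2 = 6 * v + 7, MULT = y/x = 3 and the addend is
b - a*y/x = 7 - 1*3 = 4, so G2 = 3 * G1 + 4; indeed
3 * (2*v + 1) + 4 = 6*v + 7. */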
8450 static rtx
8451 express_from_1 (rtx a, rtx b, rtx mult)
8453 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
8455 if (mult == const0_rtx)
8456 return b;
8458 /* If MULT is not 1, we cannot handle A with non-constants, since we
8459 would then be required to subtract multiples of the registers in A.
8460 This is theoretically possible, and may even apply to some Fortran
8461 constructs, but it is a lot of work and we do not attempt it here. */
8463 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
8464 return NULL_RTX;
8466 /* In general these structures are sorted top to bottom (down the PLUS
8467 chain), but not left to right across the PLUS. If B is a higher
8468 order giv than A, we can strip one level and recurse. If A is higher
8469 order, we'll eventually bail out, but won't know that until the end.
8470 If they are the same, we'll strip one level around this loop. */
8472 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
8474 rtx ra, rb, oa, ob, tmp;
8476 ra = XEXP (a, 0), oa = XEXP (a, 1);
8477 if (GET_CODE (ra) == PLUS)
8478 tmp = ra, ra = oa, oa = tmp;
8480 rb = XEXP (b, 0), ob = XEXP (b, 1);
8481 if (GET_CODE (rb) == PLUS)
8482 tmp = rb, rb = ob, ob = tmp;
8484 if (rtx_equal_p (ra, rb))
8485 /* We matched: remove one reg completely. */
8486 a = oa, b = ob;
8487 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
8488 /* An alternate match. */
8489 a = oa, b = rb;
8490 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
8491 /* An alternate match. */
8492 a = ra, b = ob;
8493 else
8495 /* Indicates an extra register in B. Strip one level from B and
8496 recurse, hoping B was the higher order expression. */
8497 ob = express_from_1 (a, ob, mult);
8498 if (ob == NULL_RTX)
8499 return NULL_RTX;
8500 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
8504 /* Here we are at the last level of A, go through the cases hoping to
8505 get rid of everything but a constant. */
8507 if (GET_CODE (a) == PLUS)
8509 rtx ra, oa;
8511 ra = XEXP (a, 0), oa = XEXP (a, 1);
8512 if (rtx_equal_p (oa, b))
8513 oa = ra;
8514 else if (!rtx_equal_p (ra, b))
8515 return NULL_RTX;
8517 if (GET_CODE (oa) != CONST_INT)
8518 return NULL_RTX;
8520 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
8522 else if (GET_CODE (a) == CONST_INT)
8524 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
8526 else if (CONSTANT_P (a))
8528 enum machine_mode mode_a = GET_MODE (a);
8529 enum machine_mode mode_b = GET_MODE (b);
8530 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
8531 return simplify_gen_binary (MINUS, mode, b, a);
8533 else if (GET_CODE (b) == PLUS)
8535 if (rtx_equal_p (a, XEXP (b, 0)))
8536 return XEXP (b, 1);
8537 else if (rtx_equal_p (a, XEXP (b, 1)))
8538 return XEXP (b, 0);
8539 else
8540 return NULL_RTX;
8542 else if (rtx_equal_p (a, b))
8543 return const0_rtx;
8545 return NULL_RTX;
8548 static rtx
8549 express_from (struct induction *g1, struct induction *g2)
8551 rtx mult, add;
8553 /* The value that G1 will be multiplied by must be a constant integer. Also,
8554 the only chance we have of getting a valid address is if y/x (see above
8555 for notation) is also an integer. */
8556 if (GET_CODE (g1->mult_val) == CONST_INT
8557 && GET_CODE (g2->mult_val) == CONST_INT)
8559 if (g1->mult_val == const0_rtx
8560 || (g1->mult_val == constm1_rtx
8561 && INTVAL (g2->mult_val)
8562 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
8563 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
8564 return NULL_RTX;
8565 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
8567 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
8568 mult = const1_rtx;
8569 else
8571 /* ??? Find out if one is a multiple of the other? */
8572 return NULL_RTX;
8575 add = express_from_1 (g1->add_val, g2->add_val, mult);
8576 if (add == NULL_RTX)
8578 /* Failed. If we've got a multiplication factor between G1 and G2,
8579 scale G1's addend and try again. */
8580 if (INTVAL (mult) > 1)
8582 rtx g1_add_val = g1->add_val;
8583 if (GET_CODE (g1_add_val) == MULT
8584 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
8586 HOST_WIDE_INT m;
8587 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
8588 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
8589 XEXP (g1_add_val, 0), GEN_INT (m));
8591 else
8593 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
8594 mult);
8597 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
8600 if (add == NULL_RTX)
8601 return NULL_RTX;
8603 /* Form simplified final result. */
8604 if (mult == const0_rtx)
8605 return add;
8606 else if (mult == const1_rtx)
8607 mult = g1->dest_reg;
8608 else
8609 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
8611 if (add == const0_rtx)
8612 return mult;
8613 else
8615 if (GET_CODE (add) == PLUS
8616 && CONSTANT_P (XEXP (add, 1)))
8618 rtx tem = XEXP (add, 1);
8619 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
8620 add = tem;
8623 return gen_rtx_PLUS (g2->mode, mult, add);
8627 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8628 represented by G1. This indicates that G2 should be combined with G1 and
8629 that G2 can use (either directly or via an address expression) a register
8630 used to represent G1. */
8632 static rtx
8633 combine_givs_p (struct induction *g1, struct induction *g2)
8635 rtx comb, ret;
8637 /* With the introduction of ext dependent givs, we must be careful about modes.
8638 G2 must not use a wider mode than G1. */
8639 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
8640 return NULL_RTX;
8642 ret = comb = express_from (g1, g2);
8643 if (comb == NULL_RTX)
8644 return NULL_RTX;
8645 if (g1->mode != g2->mode)
8646 ret = gen_lowpart (g2->mode, comb);
8648 /* If these givs are identical, they can be combined. We use the results
8649 of express_from because the addends are not in a canonical form, so
8650 rtx_equal_p is a weaker test. */
8651 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
8652 combination to be the other way round. */
8653 if (comb == g1->dest_reg
8654 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
8656 return ret;
8659 /* If G2 can be expressed as a function of G1 and that function is valid
8660 as an address and no more expensive than using a register for G2,
8661 the expression of G2 in terms of G1 can be used. */
8662 if (ret != NULL_RTX
8663 && g2->giv_type == DEST_ADDR
8664 && memory_address_p (GET_MODE (g2->mem), ret))
8665 return ret;
8667 return NULL_RTX;
8670 /* See if BL is monotonic and has a constant per-iteration increment.
8671 Return the increment if so, otherwise return 0. */
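/* Illustrative example: a biv updated by both "b += 4" and "b += 2"
within the loop has a constant total increment of 6 and is monotonic,
since each individual add_val has the same sign as the total; a mix of
"b += 4" and "b -= 1" would be rejected even though its total
increment is constant. */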
8673 static HOST_WIDE_INT
8674 get_monotonic_increment (struct iv_class *bl)
8676 struct induction *v;
8677 rtx incr;
8679 /* Get the total increment and check that it is constant. */
8680 incr = biv_total_increment (bl);
8681 if (incr == 0 || GET_CODE (incr) != CONST_INT)
8682 return 0;
8684 for (v = bl->biv; v != 0; v = v->next_iv)
8686 if (GET_CODE (v->add_val) != CONST_INT)
8687 return 0;
8689 if (INTVAL (v->add_val) < 0 && INTVAL (incr) >= 0)
8690 return 0;
8692 if (INTVAL (v->add_val) > 0 && INTVAL (incr) <= 0)
8693 return 0;
8695 return INTVAL (incr);
8699 /* Subroutine of biv_fits_mode_p. Return true if biv BL, when biased by
8700 BIAS, will never exceed the unsigned range of MODE. LOOP is the loop
8701 to which the biv belongs and INCR is its per-iteration increment. */
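/* Worked example (values are hypothetical): for a signed QImode biv
the caller passes BIAS = 0x80, mapping the signed range [-128, 127]
onto the unsigned range [0, 255]. With initial value -10, INCR = 1 and
100 iterations, initial = 0x80 - 10 = 0x76 and
span = 0x100 - 0x76 = 0x8A, so span / 100 >= 1 holds and the biv
stays in range. */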
8703 static bool
8704 biased_biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8705 HOST_WIDE_INT incr, enum machine_mode mode,
8706 unsigned HOST_WIDE_INT bias)
8708 unsigned HOST_WIDE_INT initial, maximum, span, delta;
8710 /* We need to be able to manipulate MODE-size constants. */
8711 if (HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode))
8712 return false;
8714 /* The number of loop iterations must be constant. */
8715 if (LOOP_INFO (loop)->n_iterations == 0)
8716 return false;
8718 /* So must the biv's initial value. */
8719 if (bl->initial_value == 0 || GET_CODE (bl->initial_value) != CONST_INT)
8720 return false;
8722 initial = bias + INTVAL (bl->initial_value);
8723 maximum = GET_MODE_MASK (mode);
8725 /* Make sure that the initial value is within range. */
8726 if (initial > maximum)
8727 return false;
8729 /* Set up DELTA and SPAN such that the number of iterations * DELTA
8730 (calculated to arbitrary precision) must be <= SPAN. */
8731 if (incr < 0)
8733 delta = -incr;
8734 span = initial;
8736 else
8738 delta = incr;
8739 /* Handle the special case in which MAXIMUM is the largest
8740 unsigned HOST_WIDE_INT and INITIAL is 0. */
8741 if (maximum + 1 == initial)
8742 span = LOOP_INFO (loop)->n_iterations * delta;
8743 else
8744 span = maximum + 1 - initial;
8746 return (span / LOOP_INFO (loop)->n_iterations >= delta);
8750 /* Return true if biv BL will never exceed the bounds of MODE. LOOP is
8751 the loop to which BL belongs and INCR is its per-iteration increment.
8752 UNSIGNEDP is true if the biv should be treated as unsigned. */
8754 static bool
8755 biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8756 HOST_WIDE_INT incr, enum machine_mode mode, bool unsignedp)
8758 struct loop_info *loop_info;
8759 unsigned HOST_WIDE_INT bias;
8761 /* A biv's value will always be limited to its natural mode.
8762 Larger modes will observe the same wrap-around. */
8763 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (bl->biv->src_reg)))
8764 mode = GET_MODE (bl->biv->src_reg);
8766 loop_info = LOOP_INFO (loop);
8768 bias = (unsignedp ? 0 : (GET_MODE_MASK (mode) >> 1) + 1);
8769 if (biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8770 return true;
8772 if (mode == GET_MODE (bl->biv->src_reg)
8773 && bl->biv->src_reg == loop_info->iteration_var
8774 && loop_info->comparison_value
8775 && loop_invariant_p (loop, loop_info->comparison_value))
8777 /* If the increment is +1, and the exit test is a <, the BIV
8778 cannot overflow. (For <=, we have the problematic case that
8779 the comparison value might be the maximum value of the range.) */
8780 if (incr == 1)
8782 if (loop_info->comparison_code == LT)
8783 return true;
8784 if (loop_info->comparison_code == LTU && unsignedp)
8785 return true;
8788 /* Likewise for increment -1 and exit test >. */
8789 if (incr == -1)
8791 if (loop_info->comparison_code == GT)
8792 return true;
8793 if (loop_info->comparison_code == GTU && unsignedp)
8794 return true;
8797 return false;
8801 /* Given that X is an extension or truncation of BL, return true
8802 if it is unaffected by overflow. LOOP is the loop to which
8803 BL belongs and INCR is its per-iteration increment. */
8805 static bool
8806 extension_within_bounds_p (const struct loop *loop, struct iv_class *bl,
8807 HOST_WIDE_INT incr, rtx x)
8809 enum machine_mode mode;
8810 bool signedp, unsignedp;
8812 switch (GET_CODE (x))
8814 case SIGN_EXTEND:
8815 case ZERO_EXTEND:
8816 mode = GET_MODE (XEXP (x, 0));
8817 signedp = (GET_CODE (x) == SIGN_EXTEND);
8818 unsignedp = (GET_CODE (x) == ZERO_EXTEND);
8819 break;
8821 case TRUNCATE:
8822 /* We don't know whether this value is being used as signed
8823 or unsigned, so check the conditions for both. */
8824 mode = GET_MODE (x);
8825 signedp = unsignedp = true;
8826 break;
8828 default:
8829 gcc_unreachable ();
8832 return ((!signedp || biv_fits_mode_p (loop, bl, incr, mode, false))
8833 && (!unsignedp || biv_fits_mode_p (loop, bl, incr, mode, true)));
8837 /* Check each extension dependent giv in this class to see if its
8838 root biv is safe from wrapping in the interior mode, which would
8839 make the giv illegal. */
8841 static void
8842 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
8844 struct induction *v;
8845 HOST_WIDE_INT incr;
8847 incr = get_monotonic_increment (bl);
8849 /* Invalidate givs that fail the tests. */
8850 for (v = bl->giv; v; v = v->next_iv)
8851 if (v->ext_dependent)
8853 if (incr != 0
8854 && extension_within_bounds_p (loop, bl, incr, v->ext_dependent))
8856 if (loop_dump_stream)
8857 fprintf (loop_dump_stream,
8858 "Verified ext dependent giv at %d of reg %d\n",
8859 INSN_UID (v->insn), bl->regno);
8861 else
8863 if (loop_dump_stream)
8864 fprintf (loop_dump_stream,
8865 "Failed ext dependent giv at %d\n",
8866 INSN_UID (v->insn));
8868 v->ignore = 1;
8869 bl->all_reduced = 0;
8874 /* Generate a version of VALUE in a mode appropriate for initializing V. */
8876 static rtx
8877 extend_value_for_giv (struct induction *v, rtx value)
8879 rtx ext_dep = v->ext_dependent;
8881 if (! ext_dep)
8882 return value;
8884 /* Recall that check_ext_dependent_givs verified that the known bounds
8885 of a biv did not overflow or wrap with respect to the extension for
8886 the giv. Therefore, constants need no additional adjustment. */
8887 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
8888 return value;
8890 /* Otherwise, we must adjust the value to compensate for the
8891 differing modes of the biv and the giv. */
8892 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
8895 struct combine_givs_stats
8897 int giv_number;
8898 int total_benefit;
8901 static int
8902 cmp_combine_givs_stats (const void *xp, const void *yp)
8904 const struct combine_givs_stats * const x =
8905 (const struct combine_givs_stats *) xp;
8906 const struct combine_givs_stats * const y =
8907 (const struct combine_givs_stats *) yp;
8908 int d;
8909 d = y->total_benefit - x->total_benefit;
8910 /* Stabilize the sort. */
8911 if (!d)
8912 d = x->giv_number - y->giv_number;
8913 return d;
8916 /* Check all pairs of givs for iv_class BL and see if any can be combined with
8917 any other. If so, point SAME to the giv combined with and set NEW_REG to
8918 be an expression (in terms of the other giv's DEST_REG) equivalent to the
8919 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
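/* Illustrative example (hypothetical givs): two DEST_ADDR givs
computing base + i*4 and base + i*4 + 4 can be combined;
express_from rewrites the second as the first plus the constant 4,
so only one reduced register is needed and the second giv becomes
an offset from it. */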
8921 static void
8922 combine_givs (struct loop_regs *regs, struct iv_class *bl)
8924 /* Additional benefit to add for being combined multiple times. */
8925 const int extra_benefit = 3;
8927 struct induction *g1, *g2, **giv_array;
8928 int i, j, k, giv_count;
8929 struct combine_givs_stats *stats;
8930 rtx *can_combine;
8932 /* Count givs, because bl->giv_count is incorrect here. */
8933 giv_count = 0;
8934 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8935 if (!g1->ignore)
8936 giv_count++;
8938 giv_array = alloca (giv_count * sizeof (struct induction *));
8939 i = 0;
8940 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8941 if (!g1->ignore)
8942 giv_array[i++] = g1;
8944 stats = xcalloc (giv_count, sizeof (*stats));
8945 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
8947 for (i = 0; i < giv_count; i++)
8949 int this_benefit;
8950 rtx single_use;
8952 g1 = giv_array[i];
8953 stats[i].giv_number = i;
8955 /* If a DEST_REG GIV is used only once, do not allow it to combine
8956 with anything, for in doing so we will gain nothing that cannot
8957 be had by simply letting the GIV with which we would have combined
8958 be reduced on its own. The lossage shows up in particular with
8959 DEST_ADDR targets on hosts with reg+reg addressing, though it can
8960 be seen elsewhere as well. */
8961 if (g1->giv_type == DEST_REG
8962 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
8963 && single_use != const0_rtx)
8964 continue;
8966 this_benefit = g1->benefit;
8967 /* Add an additional weight for zero addends. */
8968 if (g1->no_const_addval)
8969 this_benefit += 1;
8971 for (j = 0; j < giv_count; j++)
8973 rtx this_combine;
8975 g2 = giv_array[j];
8976 if (g1 != g2
8977 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
8979 can_combine[i * giv_count + j] = this_combine;
8980 this_benefit += g2->benefit + extra_benefit;
8983 stats[i].total_benefit = this_benefit;
8986 /* Iterate, combining until we can't. */
8987 restart:
8988 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
8990 if (loop_dump_stream)
8992 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
8993 for (k = 0; k < giv_count; k++)
8995 g1 = giv_array[stats[k].giv_number];
8996 if (!g1->combined_with && !g1->same)
8997 fprintf (loop_dump_stream, " {%d, %d}",
8998 INSN_UID (giv_array[stats[k].giv_number]->insn),
8999 stats[k].total_benefit);
9001 putc ('\n', loop_dump_stream);
9004 for (k = 0; k < giv_count; k++)
9006 int g1_add_benefit = 0;
9008 i = stats[k].giv_number;
9009 g1 = giv_array[i];
9011 /* If it has already been combined, skip. */
9012 if (g1->combined_with || g1->same)
9013 continue;
9015 for (j = 0; j < giv_count; j++)
9017 g2 = giv_array[j];
9018 if (g1 != g2 && can_combine[i * giv_count + j]
9019 /* If it has already been combined, skip. */
9020 && ! g2->same && ! g2->combined_with)
9022 int l;
9024 g2->new_reg = can_combine[i * giv_count + j];
9025 g2->same = g1;
9026 /* For the destination, we may now substitute a memory expression
9027 instead of a register. This changes the costs considerably, so
9028 add the compensation. */
9029 if (g2->giv_type == DEST_ADDR)
9030 g2->benefit = (g2->benefit + reg_address_cost
9031 - address_cost (g2->new_reg,
9032 GET_MODE (g2->mem)));
9033 g1->combined_with++;
9034 g1->lifetime += g2->lifetime;
9036 g1_add_benefit += g2->benefit;
9038 /* ??? The new final_[bg]iv_value code does a much better job
9039 of finding replaceable giv's, and hence this code may no
9040 longer be necessary. */
9041 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
9042 g1_add_benefit -= copy_cost;
9044 /* To help optimize the next set of combinations, remove
9045 this giv from the benefits of other potential mates. */
9046 for (l = 0; l < giv_count; ++l)
9048 int m = stats[l].giv_number;
9049 if (can_combine[m * giv_count + j])
9050 stats[l].total_benefit -= g2->benefit + extra_benefit;
9053 if (loop_dump_stream)
9054 fprintf (loop_dump_stream,
9055 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
9056 INSN_UID (g2->insn), INSN_UID (g1->insn),
9057 g1->benefit, g1_add_benefit, g1->lifetime);
9061 /* To help optimize the next set of combinations, remove
9062 this giv from the benefits of other potential mates. */
9063 if (g1->combined_with)
9065 for (j = 0; j < giv_count; ++j)
9067 int m = stats[j].giv_number;
9068 if (can_combine[m * giv_count + i])
9069 stats[j].total_benefit -= g1->benefit + extra_benefit;
9072 g1->benefit += g1_add_benefit;
9074 /* We've finished with this giv, and everything it touched.
9075 Restart the combination so that proper weights for the
9076 rest of the givs are properly taken into account. */
9077 /* ??? Ideally we would compact the arrays at this point, so
9078 as to not cover old ground. But sanely compacting
9079 can_combine is tricky. */
9080 goto restart;
9084 /* Clean up. */
9085 free (stats);
9086 free (can_combine);
9089 /* Generate sequence for REG = B * M + A. B is the initial value of
9090 the basic induction variable, M a multiplicative constant, A an
9091 additive constant and REG the destination register. */
9093 static rtx
9094 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
9096 rtx seq;
9097 rtx result;
9099 start_sequence ();
9100 /* Use unsigned arithmetic. */
9101 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9102 if (reg != result)
9103 emit_move_insn (reg, result);
9104 seq = get_insns ();
9105 end_sequence ();
9107 return seq;
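/* For example, a call such as

       seq = gen_add_mult (biv_init, GEN_INT (4), GEN_INT (8), reg);

   (an illustrative invocation, not taken from this file) produces an
   insn sequence computing REG = BIV_INIT * 4 + 8; expand_mult_add will
   normally synthesize the multiplication with shifts and adds when
   that is cheaper than a multiply insn on the target.  */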
9111 /* Update registers created in insn sequence SEQ. */
9113 static void
9114 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
9116 rtx insn;
9118 /* Update register info for alias analysis. */
9120 insn = seq;
9121 while (insn != NULL_RTX)
9123 rtx set = single_set (insn);
9125 if (set && REG_P (SET_DEST (set)))
9126 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
9128 insn = NEXT_INSN (insn);
9133 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
9134 is the initial value of the basic induction variable, M a
9135 multiplicative constant, A an additive constant and REG the
9136 destination register. */
9138 static void
9139 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
9140 rtx reg, basic_block before_bb, rtx before_insn)
9142 rtx seq;
9144 if (! before_insn)
9146 loop_iv_add_mult_hoist (loop, b, m, a, reg);
9147 return;
9150 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9151 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9153 /* Increase the lifetime of any invariants moved further in code. */
9154 update_reg_last_use (a, before_insn);
9155 update_reg_last_use (b, before_insn);
9156 update_reg_last_use (m, before_insn);
9158 /* It is possible that the expansion created lots of new registers.
9159 Iterate over the sequence we just created and record them all. We
9160 must do this before inserting the sequence. */
9161 loop_regs_update (loop, seq);
9163 loop_insn_emit_before (loop, before_bb, before_insn, seq);
9167 /* Emit insns at the loop exit to set REG = B * M + A. B is the
9168 initial value of the basic induction variable, M a multiplicative
9169 constant, A an additive constant and REG the destination
9170 register. */
9172 static void
9173 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9175 rtx seq;
9177 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9178 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9180 /* Increase the lifetime of any invariants moved further in code.
9181 ??? Is this really necessary? */
9182 update_reg_last_use (a, loop->sink);
9183 update_reg_last_use (b, loop->sink);
9184 update_reg_last_use (m, loop->sink);
9186 /* It is possible that the expansion created lots of new registers.
9187 Iterate over the sequence we just created and record them all. We
9188 must do this before inserting the sequence. */
9189 loop_regs_update (loop, seq);
9191 loop_insn_sink (loop, seq);
9195 /* Emit insns in the loop pre-header to set REG = B * M + A. B is the initial
9196 value of the basic induction variable, M a multiplicative constant,
9197 A an additive constant and REG the destination register. */
9199 static void
9200 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9202 rtx seq;
9204 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9205 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9207 /* It is possible that the expansion created lots of new registers.
9208 Iterate over the sequence we just created and record them all. We
9209 must do this before inserting the sequence. */
9210 loop_regs_update (loop, seq);
9212 loop_insn_hoist (loop, seq);
9217 /* Similar to gen_add_mult, but compute cost rather than generating
9218 sequence. */
9220 static int
9221 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
9223 int cost = 0;
9224 rtx last, result;
9226 start_sequence ();
9227 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9228 if (reg != result)
9229 emit_move_insn (reg, result);
9230 last = get_last_insn ();
9231 while (last)
9233 rtx t = single_set (last);
9234 if (t)
9235 cost += rtx_cost (SET_SRC (t), SET);
9236 last = PREV_INSN (last);
9238 end_sequence ();
9239 return cost;
9242 /* Test whether A * B can be computed without
9243 an actual multiply insn. Value is 1 if so.
9245 ??? This function stinks because it generates a ton of wasted RTL
9246 ??? and as a result fragments GC memory to no end. There are other
9247 ??? places in the compiler which are invoked a lot and do the same
9248 ??? thing, generate wasted RTL just to see if something is possible. */
9250 static int
9251 product_cheap_p (rtx a, rtx b)
9253 rtx tmp;
9254 int win, n_insns;
9256 /* If only one is constant, make it B. */
9257 if (GET_CODE (a) == CONST_INT)
9258 tmp = a, a = b, b = tmp;
9260 /* If A is now constant, both operands were constant, so no multiply insn is needed. */
9261 if (GET_CODE (a) == CONST_INT)
9262 return 1;
9264 /* If B is not constant, neither operand is constant, so a multiply insn would be needed. */
9265 if (GET_CODE (b) != CONST_INT)
9266 return 0;
9268 /* Exactly one operand is constant, so a multiply insn might not be
9269 needed. Generate the code for the multiply and see whether a call,
9270 a multiply insn, or a long sequence of insns is generated. */
9272 start_sequence ();
9273 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
9274 tmp = get_insns ();
9275 end_sequence ();
9277 win = 1;
9278 if (tmp == NULL_RTX)
9280 else if (INSN_P (tmp))
9282 n_insns = 0;
9283 while (tmp != NULL_RTX)
9285 rtx next = NEXT_INSN (tmp);
9287 if (++n_insns > 3
9288 || !NONJUMP_INSN_P (tmp)
9289 || (GET_CODE (PATTERN (tmp)) == SET
9290 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
9291 || (GET_CODE (PATTERN (tmp)) == PARALLEL
9292 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
9293 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
9295 win = 0;
9296 break;
9299 tmp = next;
9302 else if (GET_CODE (tmp) == SET
9303 && GET_CODE (SET_SRC (tmp)) == MULT)
9304 win = 0;
9305 else if (GET_CODE (tmp) == PARALLEL
9306 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
9307 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
9308 win = 0;
9310 return win;
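/* As an example of the heuristic above: multiplying by 5 typically
   expands to a shift and an add ((x << 2) + x), i.e. at most three
   insns and no MULT, so product_cheap_p returns 1.  A constant whose
   expansion still needs a real multiply insn, a libcall, or more than
   three insns makes it return 0.  */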
9313 /* Check to see if loop can be terminated by a "decrement and branch until
9314 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
9315 Also try reversing an increment loop to a decrement loop
9316 to see if the optimization can be performed.
9317 Value is nonzero if optimization was performed. */
9319 /* This is useful even if the architecture doesn't have such an insn,
9320 because it might change a loop which increments from 0 to n to a loop
9321 which decrements from n to 0. A loop that decrements to zero is usually
9322 faster than one that increments from zero. */
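/* For instance (a source-level sketch of the transformation, assuming
   the counter I has no use except counting):

       for (i = 0; i < n; i++)    becomes    for (i = n; --i >= 0; )
         body;                                 body;

   so the exit test compares against zero and, with the REG_NONNEG
   note, can map onto a decrement-and-branch-until-zero insn.  */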
9324 /* ??? This could be rewritten to use some of the loop unrolling procedures,
9325 such as approx_final_value, biv_total_increment, loop_iterations, and
9326 final_[bg]iv_value. */
9328 static int
9329 check_dbra_loop (struct loop *loop, int insn_count)
9331 struct loop_info *loop_info = LOOP_INFO (loop);
9332 struct loop_regs *regs = LOOP_REGS (loop);
9333 struct loop_ivs *ivs = LOOP_IVS (loop);
9334 struct iv_class *bl;
9335 rtx reg;
9336 enum machine_mode mode;
9337 rtx jump_label;
9338 rtx final_value;
9339 rtx start_value;
9340 rtx new_add_val;
9341 rtx comparison;
9342 rtx before_comparison;
9343 rtx p;
9344 rtx jump;
9345 rtx first_compare;
9346 int compare_and_branch;
9347 rtx loop_start = loop->start;
9348 rtx loop_end = loop->end;
9350 /* If last insn is a conditional branch, and the insn before tests a
9351 register value, try to optimize it. Otherwise, we can't do anything. */
9353 jump = PREV_INSN (loop_end);
9354 comparison = get_condition_for_loop (loop, jump);
9355 if (comparison == 0)
9356 return 0;
9357 if (!onlyjump_p (jump))
9358 return 0;
9360 /* Try to compute whether the compare/branch at the loop end is one or
9361 two instructions. */
9362 get_condition (jump, &first_compare, false, true);
9363 if (first_compare == jump)
9364 compare_and_branch = 1;
9365 else if (first_compare == prev_nonnote_insn (jump))
9366 compare_and_branch = 2;
9367 else
9368 return 0;
9371 /* If more than one condition is present to control the loop, then
9372 do not proceed, as this function does not know how to rewrite
9373 loop tests with more than one condition.
9375 Look backwards from the first insn in the last comparison
9376 sequence and see if we've got another comparison sequence. */
9378 rtx jump1;
9379 if ((jump1 = prev_nonnote_insn (first_compare))
9380 && JUMP_P (jump1))
9381 return 0;
9384 /* Check all of the bivs to see if the compare uses one of them.
9385 Skip biv's set more than once because we can't guarantee that
9386 it will be zero on the last iteration. Also skip if the biv is
9387 used between its update and the test insn. */
9389 for (bl = ivs->list; bl; bl = bl->next)
9391 if (bl->biv_count == 1
9392 && ! bl->biv->maybe_multiple
9393 && bl->biv->dest_reg == XEXP (comparison, 0)
9394 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9395 first_compare))
9396 break;
9399 /* Try swapping the comparison to identify a suitable biv. */
9400 if (!bl)
9401 for (bl = ivs->list; bl; bl = bl->next)
9402 if (bl->biv_count == 1
9403 && ! bl->biv->maybe_multiple
9404 && bl->biv->dest_reg == XEXP (comparison, 1)
9405 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9406 first_compare))
9408 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
9409 VOIDmode,
9410 XEXP (comparison, 1),
9411 XEXP (comparison, 0));
9412 break;
9415 if (! bl)
9416 return 0;
9418 /* Look for the case where the basic induction variable is always
9419 nonnegative, and equals zero on the last iteration.
9420 In this case, add a reg_note REG_NONNEG, which allows the
9421 m68k DBRA instruction to be used. */
9423 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
9424 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
9425 && GET_CODE (bl->biv->add_val) == CONST_INT
9426 && INTVAL (bl->biv->add_val) < 0)
9428 /* The initial value must be greater than 0, and
9429 init_val % -dec_value must be 0, to ensure that the biv equals
9430 zero on the last iteration. */
9432 if (GET_CODE (bl->initial_value) == CONST_INT
9433 && INTVAL (bl->initial_value) > 0
9434 && (INTVAL (bl->initial_value)
9435 % (-INTVAL (bl->biv->add_val))) == 0)
9437 /* Register always nonnegative, add REG_NOTE to branch. */
9438 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9439 REG_NOTES (jump)
9440 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9441 REG_NOTES (jump));
9442 bl->nonneg = 1;
9444 return 1;
9447 /* If the decrement is 1 and the value was tested as >= 0 before
9448 the loop, then we can safely optimize. */
9449 for (p = loop_start; p; p = PREV_INSN (p))
9451 if (LABEL_P (p))
9452 break;
9453 if (!JUMP_P (p))
9454 continue;
9456 before_comparison = get_condition_for_loop (loop, p);
9457 if (before_comparison
9458 && XEXP (before_comparison, 0) == bl->biv->dest_reg
9459 && (GET_CODE (before_comparison) == LT
9460 || GET_CODE (before_comparison) == LTU)
9461 && XEXP (before_comparison, 1) == const0_rtx
9462 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
9463 && INTVAL (bl->biv->add_val) == -1)
9465 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9466 REG_NOTES (jump)
9467 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9468 REG_NOTES (jump));
9469 bl->nonneg = 1;
9471 return 1;
9475 else if (GET_CODE (bl->biv->add_val) == CONST_INT
9476 && INTVAL (bl->biv->add_val) > 0)
9478 /* Try to change the increment to a decrement, so the above optimization can be applied. */
9479 /* We can do this if:
9480 all registers modified are induction variables or invariants,
9481 all memory references have non-overlapping addresses
9482 (obviously true if there is only one write),
9483 and we allow 2 insns for the compare/jump at the end of the loop. */
9484 /* Also, we must avoid any instructions which use both the reversed
9485 biv and another biv. Such instructions will fail if the loop is
9486 reversed. We meet this condition by requiring that either
9487 no_use_except_counting is true, or else that there is only
9488 one biv. */
9489 int num_nonfixed_reads = 0;
9490 /* 1 if the iteration var is used only to count iterations. */
9491 int no_use_except_counting = 0;
9492 /* 1 if the loop has no memory store, or it has a single memory store
9493 which is reversible. */
9494 int reversible_mem_store = 1;
9496 if (bl->giv_count == 0
9497 && !loop->exit_count
9498 && !loop_info->has_multiple_exit_targets)
9500 rtx bivreg = regno_reg_rtx[bl->regno];
9501 struct iv_class *blt;
9503 /* If there are no givs for this biv, and the only exit is the
9504 fall through at the end of the loop, then
9505 see if perhaps there are no uses except to count. */
9506 no_use_except_counting = 1;
9507 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9508 if (INSN_P (p))
9510 rtx set = single_set (p);
9512 if (set && REG_P (SET_DEST (set))
9513 && REGNO (SET_DEST (set)) == bl->regno)
9514 /* An insn that sets the biv is okay. */
9516 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
9517 /* An insn that doesn't mention the biv is okay. */
9519 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
9520 || p == prev_nonnote_insn (loop_end))
9522 /* If either of these insns uses the biv and sets a pseudo
9523 that has more than one usage, then the biv has uses
9524 other than counting since it's used to derive a value
9525 that is used more than one time. */
9526 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
9527 regs);
9528 if (regs->multiple_uses)
9530 no_use_except_counting = 0;
9531 break;
9534 else
9536 no_use_except_counting = 0;
9537 break;
9541 /* A biv has uses besides counting if it is used to set
9542 another biv. */
9543 for (blt = ivs->list; blt; blt = blt->next)
9544 if (blt->init_set
9545 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
9547 no_use_except_counting = 0;
9548 break;
9552 if (no_use_except_counting)
9553 /* No need to worry about MEMs. */
9555 else if (loop_info->num_mem_sets <= 1)
9557 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9558 if (INSN_P (p))
9559 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
9561 /* If the loop has a single store, and the destination address is
9562 invariant, then we can't reverse the loop, because this address
9563 might then have the wrong value at loop exit.
9564 This would work if the source was invariant also, however, in that
9565 case, the insn should have been moved out of the loop. */
9567 if (loop_info->num_mem_sets == 1)
9569 struct induction *v;
9571 /* If we could prove that each of the memory locations
9572 written to was different, then we could reverse the
9573 store -- but we don't presently have any way of
9574 knowing that. */
9575 reversible_mem_store = 0;
9577 /* If the store depends on a register that is set after the
9578 store, it depends on the initial value, and is thus not
9579 reversible. */
9580 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
9582 if (v->giv_type == DEST_REG
9583 && reg_mentioned_p (v->dest_reg,
9584 PATTERN (loop_info->first_loop_store_insn))
9585 && loop_insn_first_p (loop_info->first_loop_store_insn,
9586 v->insn))
9587 reversible_mem_store = 0;
9591 else
9592 return 0;
9594 /* This code only acts for innermost loops. Also it simplifies
9595 the memory address check by only reversing loops with
9596 zero or one memory access.
9597 Two memory accesses could involve parts of the same array,
9598 and that can't be reversed.
9599 If the biv is used only for counting, then we don't need to worry
9600 about all these things. */
9602 if ((num_nonfixed_reads <= 1
9603 && ! loop_info->has_nonconst_call
9604 && ! loop_info->has_prefetch
9605 && ! loop_info->has_volatile
9606 && reversible_mem_store
9607 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
9608 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
9609 && (bl == ivs->list && bl->next == 0))
9610 || (no_use_except_counting && ! loop_info->has_prefetch))
9612 rtx tem;
9614 /* Loop can be reversed. */
9615 if (loop_dump_stream)
9616 fprintf (loop_dump_stream, "Can reverse loop\n");
9618 /* Now check other conditions:
9620 The increment must be a constant, as must the initial value,
9621 and the comparison code must be LT.
9623 This test can probably be improved since +/- 1 in the constant
9624 can be obtained by changing LT to LE and vice versa; this is
9625 confusing. */
9627 if (comparison
9628 /* for constants, LE gets turned into LT */
9629 && (GET_CODE (comparison) == LT
9630 || (GET_CODE (comparison) == LE
9631 && no_use_except_counting)
9632 || GET_CODE (comparison) == LTU))
9634 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
9635 rtx initial_value, comparison_value;
9636 int nonneg = 0;
9637 enum rtx_code cmp_code;
9638 int comparison_const_width;
9639 unsigned HOST_WIDE_INT comparison_sign_mask;
9640 bool keep_first_compare;
9642 add_val = INTVAL (bl->biv->add_val);
9643 comparison_value = XEXP (comparison, 1);
9644 if (GET_MODE (comparison_value) == VOIDmode)
9645 comparison_const_width
9646 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
9647 else
9648 comparison_const_width
9649 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
9650 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
9651 comparison_const_width = HOST_BITS_PER_WIDE_INT;
9652 comparison_sign_mask
9653 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
9655 /* If the comparison value is not a loop invariant, then we
9656 cannot reverse this loop.
9658 ??? If the insns which initialize the comparison value as
9659 a whole compute an invariant result, then we could move
9660 them out of the loop and proceed with loop reversal. */
9661 if (! loop_invariant_p (loop, comparison_value))
9662 return 0;
9664 if (GET_CODE (comparison_value) == CONST_INT)
9665 comparison_val = INTVAL (comparison_value);
9666 initial_value = bl->initial_value;
9668 /* Normalize the initial value if it is an integer and
9669 has no other use except as a counter. This will allow
9670 a few more loops to be reversed. */
9671 if (no_use_except_counting
9672 && GET_CODE (comparison_value) == CONST_INT
9673 && GET_CODE (initial_value) == CONST_INT)
9675 comparison_val = comparison_val - INTVAL (bl->initial_value);
9676 /* The code below requires comparison_val to be a multiple
9677 of add_val in order to do the loop reversal, so
9678 round up comparison_val to a multiple of add_val.
9679 Since comparison_value is constant, we know that the
9680 current comparison code is LT. */
9681 comparison_val = comparison_val + add_val - 1;
9682 comparison_val
9683 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
9684 /* We postpone overflow checks for COMPARISON_VAL here;
9685 even if there is an overflow, we might still be able to
9686 reverse the loop, if converting the loop exit test to
9687 NE is possible. */
9688 initial_value = const0_rtx;
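/* A worked instance of the normalization above: for a loop that runs
   i = 3, 7 (initial value 3, add_val 4, exit test i < 10),
   comparison_val becomes 10 - 3 = 7, which is rounded up to a
   multiple of add_val as 7 + 4 - 1 = 10, then 10 - (10 % 4) = 8.
   The normalized loop i = 0, 4 with exit test i < 8 performs the
   same two iterations.  */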
9691 /* First check if we can do a vanilla loop reversal. */
9692 if (initial_value == const0_rtx
9693 && GET_CODE (comparison_value) == CONST_INT
9694 /* Now do postponed overflow checks on COMPARISON_VAL. */
9695 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
9696 & comparison_sign_mask))
9698 /* Register will always be nonnegative, with value
9699 0 on the last iteration. */
9700 add_adjust = add_val;
9701 nonneg = 1;
9702 cmp_code = GE;
9704 else
9705 return 0;
9707 if (GET_CODE (comparison) == LE)
9708 add_adjust -= add_val;
9710 /* If the initial value is not zero, or if the comparison
9711 value is not an exact multiple of the increment, then we
9712 cannot reverse this loop. */
9713 if (initial_value == const0_rtx
9714 && GET_CODE (comparison_value) == CONST_INT)
9716 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
9717 return 0;
9719 else
9721 if (! no_use_except_counting || add_val != 1)
9722 return 0;
9725 final_value = comparison_value;
9727 /* Reset these in case we normalized the initial value
9728 and comparison value above. */
9729 if (GET_CODE (comparison_value) == CONST_INT
9730 && GET_CODE (initial_value) == CONST_INT)
9732 comparison_value = GEN_INT (comparison_val);
9733 final_value
9734 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
9736 bl->initial_value = initial_value;
9738 /* Save some info needed to produce the new insns. */
9739 reg = bl->biv->dest_reg;
9740 mode = GET_MODE (reg);
9741 jump_label = condjump_label (PREV_INSN (loop_end));
9742 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
9744 /* Set start_value; if this is not a CONST_INT, we need
9745 to generate a SUB.
9746 Initialize biv to start_value before loop start.
9747 The old initializing insn will be deleted as a
9748 dead store by flow.c. */
9749 if (initial_value == const0_rtx
9750 && GET_CODE (comparison_value) == CONST_INT)
9752 start_value
9753 = gen_int_mode (comparison_val - add_adjust, mode);
9754 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
9756 else if (GET_CODE (initial_value) == CONST_INT)
9758 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
9759 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
9761 if (add_insn == 0)
9762 return 0;
9764 start_value
9765 = gen_rtx_PLUS (mode, comparison_value, offset);
9766 loop_insn_hoist (loop, add_insn);
9767 if (GET_CODE (comparison) == LE)
9768 final_value = gen_rtx_PLUS (mode, comparison_value,
9769 GEN_INT (add_val));
9771 else if (! add_adjust)
9773 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
9774 initial_value);
9776 if (sub_insn == 0)
9777 return 0;
9778 start_value
9779 = gen_rtx_MINUS (mode, comparison_value, initial_value);
9780 loop_insn_hoist (loop, sub_insn);
9782 else
9783 /* We could handle the other cases too, but it'll be
9784 better to have a testcase first. */
9785 return 0;
9787 /* We may not have a single insn which can increment a reg, so
9788 create a sequence to hold all the insns from expand_inc. */
9789 start_sequence ();
9790 expand_inc (reg, new_add_val);
9791 tem = get_insns ();
9792 end_sequence ();
9794 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
9795 delete_insn (bl->biv->insn);
9797 /* Update biv info to reflect its new status. */
9798 bl->biv->insn = p;
9799 bl->initial_value = start_value;
9800 bl->biv->add_val = new_add_val;
9802 /* Update loop info. */
9803 loop_info->initial_value = reg;
9804 loop_info->initial_equiv_value = reg;
9805 loop_info->final_value = const0_rtx;
9806 loop_info->final_equiv_value = const0_rtx;
9807 loop_info->comparison_value = const0_rtx;
9808 loop_info->comparison_code = cmp_code;
9809 loop_info->increment = new_add_val;
9811 /* Inc LABEL_NUSES so that delete_insn will
9812 not delete the label. */
9813 LABEL_NUSES (XEXP (jump_label, 0))++;
9815 /* If we have a separate comparison insn that does more
9816 than just set cc0, the result of the comparison might
9817 be used outside the loop. */
9818 keep_first_compare = (compare_and_branch == 2
9819 #ifdef HAVE_CC0
9820 && sets_cc0_p (first_compare) <= 0
9821 #endif
9824 /* Emit an insn after the end of the loop to set the biv's
9825 proper exit value if it is used anywhere outside the loop. */
9826 if (keep_first_compare
9827 || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
9828 || ! bl->init_insn
9829 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
9830 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
9832 if (keep_first_compare)
9833 loop_insn_sink (loop, PATTERN (first_compare));
9835 /* Delete compare/branch at end of loop. */
9836 delete_related_insns (PREV_INSN (loop_end));
9837 if (compare_and_branch == 2)
9838 delete_related_insns (first_compare);
9840 /* Add new compare/branch insn at end of loop. */
9841 start_sequence ();
9842 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
9843 mode, 0,
9844 XEXP (jump_label, 0));
9845 tem = get_insns ();
9846 end_sequence ();
9847 emit_jump_insn_before (tem, loop_end);
9849 for (tem = PREV_INSN (loop_end);
9850 tem && !JUMP_P (tem);
9851 tem = PREV_INSN (tem))
9854 if (tem)
9855 JUMP_LABEL (tem) = XEXP (jump_label, 0);
9857 if (nonneg)
9859 if (tem)
9861 /* Increment of LABEL_NUSES done above. */
9862 /* Register is now always nonnegative,
9863 so add REG_NONNEG note to the branch. */
9864 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
9865 REG_NOTES (tem));
9867 bl->nonneg = 1;
9870 /* No insn may reference both the reversed biv and another biv or it
9871 will fail (see comment near the top of the loop reversal
9872 code).
9873 Earlier on, we have verified that the biv has no use except
9874 counting, or it is the only biv in this function.
9875 However, the code that computes no_use_except_counting does
9876 not verify reg notes. It's possible to have an insn that
9877 references another biv, and has a REG_EQUAL note with an
9878 expression based on the reversed biv. To avoid this case,
9879 remove all REG_EQUAL notes based on the reversed biv
9880 here. */
9881 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9882 if (INSN_P (p))
9884 rtx *pnote;
9885 rtx set = single_set (p);
9886 /* If this is a set of a GIV based on the reversed biv, any
9887 REG_EQUAL notes should still be correct. */
9888 if (! set
9889 || !REG_P (SET_DEST (set))
9890 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
9891 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
9892 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
9893 for (pnote = &REG_NOTES (p); *pnote;)
9895 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
9896 && reg_mentioned_p (regno_reg_rtx[bl->regno],
9897 XEXP (*pnote, 0)))
9898 *pnote = XEXP (*pnote, 1);
9899 else
9900 pnote = &XEXP (*pnote, 1);
9904 /* Mark that this biv has been reversed. Each giv which depends
9905 on this biv, and which is also live past the end of the loop
9906 will have to be fixed up. */
9908 bl->reversed = 1;
9910 if (loop_dump_stream)
9912 fprintf (loop_dump_stream, "Reversed loop");
9913 if (bl->nonneg)
9914 fprintf (loop_dump_stream, " and added reg_nonneg\n");
9915 else
9916 fprintf (loop_dump_stream, "\n");
9919 return 1;
9924 return 0;
9927 /* Verify whether the biv BL appears to be eliminable,
9928 based on the insns in the loop that refer to it.
9930 If ELIMINATE_P is nonzero, actually do the elimination.
9932 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
9933 determine whether invariant insns should be placed inside or at the
9934 start of the loop. */
9936 static int
9937 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
9938 int eliminate_p, int threshold, int insn_count)
9940 struct loop_ivs *ivs = LOOP_IVS (loop);
9941 rtx reg = bl->biv->dest_reg;
9942 rtx p;
9944 /* Scan all insns in the loop, stopping if we find one that uses the
9945 biv in a way that we cannot eliminate. */
9947 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9949 enum rtx_code code = GET_CODE (p);
9950 basic_block where_bb = 0;
9951 rtx where_insn = threshold >= insn_count ? 0 : p;
9952 rtx note;
9954 /* If this is a libcall that sets a giv, skip ahead to its end. */
9955 if (INSN_P (p))
9957 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
9959 if (note)
9961 rtx last = XEXP (note, 0);
9962 rtx set = single_set (last);
9964 if (set && REG_P (SET_DEST (set)))
9966 unsigned int regno = REGNO (SET_DEST (set));
9968 if (regno < ivs->n_regs
9969 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
9970 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
9971 p = last;
9976 /* Closely examine the insn if the biv is mentioned. */
9977 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
9978 && reg_mentioned_p (reg, PATTERN (p))
9979 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
9980 eliminate_p, where_bb, where_insn))
9982 if (loop_dump_stream)
9983 fprintf (loop_dump_stream,
9984 "Cannot eliminate biv %d: biv used in insn %d.\n",
9985 bl->regno, INSN_UID (p));
9986 break;
9989 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
9990 if (eliminate_p
9991 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
9992 && reg_mentioned_p (reg, XEXP (note, 0)))
9993 remove_note (p, note);
9996 if (p == loop->end)
9998 if (loop_dump_stream)
9999 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
10000 bl->regno, eliminate_p ? "was" : "can be");
10001 return 1;
10004 return 0;
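/* As an illustration of what biv elimination achieves (a source-level
   sketch, not code from this pass): in

       for (i = 0; i < n; i++)
         p[i] = 0;

   once the address giv for &p[i] is strength-reduced to a pointer
   incremented each iteration, the exit test on I can be rewritten to
   compare that pointer against &p[n]; I then has no remaining uses,
   so its increment and initialization can be deleted.  */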
10007 /* INSN and REFERENCE are instructions in the same insn chain.
10008 Return nonzero if INSN is first. */
10010 static int
10011 loop_insn_first_p (rtx insn, rtx reference)
10013 rtx p, q;
10015 for (p = insn, q = reference;;)
10017 /* Start with test for not first so that INSN == REFERENCE yields not
10018 first. */
10019 if (q == insn || ! p)
10020 return 0;
10021 if (p == reference || ! q)
10022 return 1;
10024 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
10025 previous insn, hence the <= comparison below does not work if
10026 P is a note. */
10027 if (INSN_UID (p) < max_uid_for_loop
10028 && INSN_UID (q) < max_uid_for_loop
10029 && !NOTE_P (p))
10030 return INSN_LUID (p) <= INSN_LUID (q);
10032 if (INSN_UID (p) >= max_uid_for_loop
10033 || NOTE_P (p))
10034 p = NEXT_INSN (p);
10035 if (INSN_UID (q) >= max_uid_for_loop)
10036 q = NEXT_INSN (q);
10040 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
10041 the offset that we have to take into account due to auto-increment /
10042 giv derivation is zero. */
10043 static int
10044 biv_elimination_giv_has_0_offset (struct induction *biv,
10045 struct induction *giv, rtx insn)
10047 /* If the giv V had the auto-inc address optimization applied
10048 to it, and INSN occurs between the giv insn and the biv
10049 insn, then we'd have to adjust the value used here.
10050 This is rare, so we don't bother to make this possible. */
10051 if (giv->auto_inc_opt
10052 && ((loop_insn_first_p (giv->insn, insn)
10053 && loop_insn_first_p (insn, biv->insn))
10054 || (loop_insn_first_p (biv->insn, insn)
10055 && loop_insn_first_p (insn, giv->insn))))
10056 return 0;
10058 return 1;
10061 /* If BL appears in X (part of the pattern of INSN), see if we can
10062 eliminate its use. If so, return 1. If not, return 0.
10064 If BIV does not appear in X, return 1.
10066 If ELIMINATE_P is nonzero, actually do the elimination.
10067 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
10068 Depending on how many items have been moved out of the loop, it
10069 will either be before INSN (when WHERE_INSN is nonzero) or at the
10070 start of the loop (when WHERE_INSN is zero). */
10072 static int
10073 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
10074 struct iv_class *bl, int eliminate_p,
10075 basic_block where_bb, rtx where_insn)
10077 enum rtx_code code = GET_CODE (x);
10078 rtx reg = bl->biv->dest_reg;
10079 enum machine_mode mode = GET_MODE (reg);
10080 struct induction *v;
10081 rtx arg, tem;
10082 #ifdef HAVE_cc0
10083 rtx new;
10084 #endif
10085 int arg_operand;
10086 const char *fmt;
10087 int i, j;
10089 switch (code)
10091 case REG:
10092 /* If we haven't already been able to do something with this BIV,
10093 we can't eliminate it. */
10094 if (x == reg)
10095 return 0;
10096 return 1;
10098 case SET:
10099 /* If this sets the BIV, it is not a problem. */
10100 if (SET_DEST (x) == reg)
10101 return 1;
10103 /* If this is an insn that defines a giv, it is also ok because
10104 it will go away when the giv is reduced. */
10105 for (v = bl->giv; v; v = v->next_iv)
10106 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
10107 return 1;
10109 #ifdef HAVE_cc0
10110 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
10112 /* Can replace with any giv that was reduced and
10113 that has (MULT_VAL != 0) and (ADD_VAL == 0).
10114 Require a constant for MULT_VAL, so we know it's nonzero.
10115 ??? We disable this optimization to avoid potential
10116 overflows. */
10118 for (v = bl->giv; v; v = v->next_iv)
10119 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
10120 && v->add_val == const0_rtx
10121 && ! v->ignore && ! v->maybe_dead && v->always_computable
10122 && v->mode == mode
10123 && 0)
10125 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10126 continue;
10128 if (! eliminate_p)
10129 return 1;
10131 /* If the giv has the opposite direction of change,
10132 then reverse the comparison. */
10133 if (INTVAL (v->mult_val) < 0)
10134 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
10135 const0_rtx, v->new_reg);
10136 else
10137 new = v->new_reg;
10139 /* We can probably test that giv's reduced reg. */
10140 if (validate_change (insn, &SET_SRC (x), new, 0))
10141 return 1;
10144 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
10145 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
10146 Require a constant for MULT_VAL, so we know it's nonzero.
10147 ??? Do this only if ADD_VAL is a pointer to avoid a potential
10148 overflow problem. */
10150 for (v = bl->giv; v; v = v->next_iv)
10151 if (GET_CODE (v->mult_val) == CONST_INT
10152 && v->mult_val != const0_rtx
10153 && ! v->ignore && ! v->maybe_dead && v->always_computable
10154 && v->mode == mode
10155 && (GET_CODE (v->add_val) == SYMBOL_REF
10156 || GET_CODE (v->add_val) == LABEL_REF
10157 || GET_CODE (v->add_val) == CONST
10158 || (REG_P (v->add_val)
10159 && REG_POINTER (v->add_val))))
10161 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10162 continue;
10164 if (! eliminate_p)
10165 return 1;
10167 /* If the giv has the opposite direction of change,
10168 then reverse the comparison. */
10169 if (INTVAL (v->mult_val) < 0)
10170 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
10171 v->new_reg);
10172 else
10173 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
10174 copy_rtx (v->add_val));
10176 /* Replace biv with the giv's reduced register. */
10177 update_reg_last_use (v->add_val, insn);
10178 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10179 return 1;
10181 /* The insn doesn't support that constant or invariant. Copy it
10182 into a register (it will be a loop invariant). */
10183 tem = gen_reg_rtx (GET_MODE (v->new_reg));
10185 loop_insn_emit_before (loop, 0, where_insn,
10186 gen_move_insn (tem,
10187 copy_rtx (v->add_val)));
10189 /* Substitute the new register for its invariant value in
10190 the compare expression. */
10191 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
10192 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10193 return 1;
10196 #endif
10197 break;
10199 case COMPARE:
10200 case EQ: case NE:
10201 case GT: case GE: case GTU: case GEU:
10202 case LT: case LE: case LTU: case LEU:
10203 /* See if either argument is the biv. */
10204 if (XEXP (x, 0) == reg)
10205 arg = XEXP (x, 1), arg_operand = 1;
10206 else if (XEXP (x, 1) == reg)
10207 arg = XEXP (x, 0), arg_operand = 0;
10208 else
10209 break;
10211 if (CONSTANT_P (arg))
10213 /* First try to replace with any giv that has constant positive
10214 mult_val and constant add_val. We might be able to support
10215 negative mult_val, but it seems complex to do it in general. */
10217 for (v = bl->giv; v; v = v->next_iv)
10218 if (GET_CODE (v->mult_val) == CONST_INT
10219 && INTVAL (v->mult_val) > 0
10220 && (GET_CODE (v->add_val) == SYMBOL_REF
10221 || GET_CODE (v->add_val) == LABEL_REF
10222 || GET_CODE (v->add_val) == CONST
10223 || (REG_P (v->add_val)
10224 && REG_POINTER (v->add_val)))
10225 && ! v->ignore && ! v->maybe_dead && v->always_computable
10226 && v->mode == mode)
10228 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10229 continue;
10231 /* Don't eliminate if the linear combination that makes up
10232 the giv overflows when it is applied to ARG. */
10233 if (GET_CODE (arg) == CONST_INT)
10235 rtx add_val;
10237 if (GET_CODE (v->add_val) == CONST_INT)
10238 add_val = v->add_val;
10239 else
10240 add_val = const0_rtx;
10242 if (const_mult_add_overflow_p (arg, v->mult_val,
10243 add_val, mode, 1))
10244 continue;
10247 if (! eliminate_p)
10248 return 1;
10250 /* Replace biv with the giv's reduced reg. */
10251 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
10253 /* If all constants are actually constant integers and
10254 the derived constant can be directly placed in the COMPARE,
10255 do so. */
10256 if (GET_CODE (arg) == CONST_INT
10257 && GET_CODE (v->add_val) == CONST_INT)
10259 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
10260 v->add_val, mode, 1);
10262 else
10264 /* Otherwise, load it into a register. */
10265 tem = gen_reg_rtx (mode);
10266 loop_iv_add_mult_emit_before (loop, arg,
10267 v->mult_val, v->add_val,
10268 tem, where_bb, where_insn);
10271 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10273 if (apply_change_group ())
10274 return 1;
10277 /* Look for giv with positive constant mult_val and nonconst add_val.
10278 Insert insns to calculate new compare value.
10279 ??? Turn this off due to possible overflow. */
10281 for (v = bl->giv; v; v = v->next_iv)
10282 if (GET_CODE (v->mult_val) == CONST_INT
10283 && INTVAL (v->mult_val) > 0
10284 && ! v->ignore && ! v->maybe_dead && v->always_computable
10285 && v->mode == mode
10286 && 0)
10288 rtx tem;
10290 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10291 continue;
10293 if (! eliminate_p)
10294 return 1;
10296 tem = gen_reg_rtx (mode);
10298 /* Replace biv with giv's reduced register. */
10299 validate_change (insn, &XEXP (x, 1 - arg_operand),
10300 v->new_reg, 1);
10302 /* Compute value to compare against. */
10303 loop_iv_add_mult_emit_before (loop, arg,
10304 v->mult_val, v->add_val,
10305 tem, where_bb, where_insn);
10306 /* Use it in this insn. */
10307 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10308 if (apply_change_group ())
10309 return 1;
10312 else if (REG_P (arg) || MEM_P (arg))
10314 if (loop_invariant_p (loop, arg) == 1)
10316 /* Look for giv with constant positive mult_val and nonconst
10317 add_val. Insert insns to compute new compare value.
10318 ??? Turn this off due to possible overflow. */
10320 for (v = bl->giv; v; v = v->next_iv)
10321 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
10322 && ! v->ignore && ! v->maybe_dead && v->always_computable
10323 && v->mode == mode
10324 && 0)
10326 rtx tem;
10328 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10329 continue;
10331 if (! eliminate_p)
10332 return 1;
10334 tem = gen_reg_rtx (mode);
10336 /* Replace biv with giv's reduced register. */
10337 validate_change (insn, &XEXP (x, 1 - arg_operand),
10338 v->new_reg, 1);
10340 /* Compute value to compare against. */
10341 loop_iv_add_mult_emit_before (loop, arg,
10342 v->mult_val, v->add_val,
10343 tem, where_bb, where_insn);
10344 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10345 if (apply_change_group ())
10346 return 1;
10350 /* This code has problems. Basically, when deciding whether we
10351 will be able to eliminate BL, we can't know whether a particular
10352 giv of ARG will be reduced. If it isn't going to be reduced,
10353 we can't eliminate BL. We can try forcing it to be reduced,
10354 but that can generate poor code.
10356 The problem is that the benefit of reducing TV, below, should
10357 be increased if BL can actually be eliminated, but this means
10358 we might have to do a topological sort of the order in which
10359 we try to process bivs. It doesn't seem worthwhile to do
10360 this sort of thing now. */
10362 #if 0
10363 /* Otherwise the reg compared with had better be a biv. */
10364 if (!REG_P (arg)
10365 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
10366 return 0;
10368 /* Look for a pair of givs, one for each biv,
10369 with identical coefficients. */
10370 for (v = bl->giv; v; v = v->next_iv)
10372 struct induction *tv;
10374 if (v->ignore || v->maybe_dead || v->mode != mode)
10375 continue;
10377 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
10378 tv = tv->next_iv)
10379 if (! tv->ignore && ! tv->maybe_dead
10380 && rtx_equal_p (tv->mult_val, v->mult_val)
10381 && rtx_equal_p (tv->add_val, v->add_val)
10382 && tv->mode == mode)
10384 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10385 continue;
10387 if (! eliminate_p)
10388 return 1;
10390 /* Replace biv with its giv's reduced reg. */
10391 XEXP (x, 1 - arg_operand) = v->new_reg;
10392 /* Replace other operand with the other giv's
10393 reduced reg. */
10394 XEXP (x, arg_operand) = tv->new_reg;
10395 return 1;
10398 #endif
10401 /* If we get here, the biv can't be eliminated. */
10402 return 0;
10404 case MEM:
10405 /* If this address is a DEST_ADDR giv, it doesn't matter if the
10406 biv is used in it, since it will be replaced. */
10407 for (v = bl->giv; v; v = v->next_iv)
10408 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
10409 return 1;
10410 break;
10412 default:
10413 break;
10416 /* See if any subexpression fails elimination. */
10417 fmt = GET_RTX_FORMAT (code);
10418 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
10420 switch (fmt[i])
10422 case 'e':
10423 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
10424 eliminate_p, where_bb, where_insn))
10425 return 0;
10426 break;
10428 case 'E':
10429 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10430 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
10431 eliminate_p, where_bb, where_insn))
10432 return 0;
10433 break;
10437 return 1;
10440 /* Return nonzero if the last use of REG
10441 is in an insn following INSN in the same basic block. */
10443 static int
10444 last_use_this_basic_block (rtx reg, rtx insn)
10446 rtx n;
10447 for (n = insn;
10448 n && !LABEL_P (n) && !JUMP_P (n);
10449 n = NEXT_INSN (n))
10451 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
10452 return 1;
10454 return 0;
10457 /* Called via `note_stores' to record the initial value of a biv. Here we
10458 just record the location of the set and process it later. */
10460 static void
10461 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
10463 struct loop_ivs *ivs = (struct loop_ivs *) data;
10464 struct iv_class *bl;
10466 if (!REG_P (dest)
10467 || REGNO (dest) >= ivs->n_regs
10468 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
10469 return;
10471 bl = REG_IV_CLASS (ivs, REGNO (dest));
10473 /* If this is the first set found, record it. */
10474 if (bl->init_insn == 0)
10476 bl->init_insn = note_insn;
10477 bl->init_set = set;
10481 /* If any of the registers in X are "old" and currently have a last use earlier
10482 than INSN, update them to have a last use of INSN. Their actual last use
10483 will be the previous insn but it will not have a valid uid_luid so we can't
10484 use it. X must be a source expression only. */
10486 static void
10487 update_reg_last_use (rtx x, rtx insn)
10489 /* Check for the case where INSN does not have a valid luid. In this case,
10490 there is no need to modify the regno_last_uid, as this can only happen
10491 when code is inserted after the loop_end to set a pseudo's final value,
10492 and hence this insn will never be the last use of x.
10493 ??? This comment is not correct. See for example loop_givs_reduce.
10494 This may insert an insn before another new insn. */
10495 if (REG_P (x) && REGNO (x) < max_reg_before_loop
10496 && INSN_UID (insn) < max_uid_for_loop
10497 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
10499 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
10501 else
10503 int i, j;
10504 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
10505 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
10507 if (fmt[i] == 'e')
10508 update_reg_last_use (XEXP (x, i), insn);
10509 else if (fmt[i] == 'E')
10510 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10511 update_reg_last_use (XVECEXP (x, i, j), insn);
10516 /* Similar to rtlanal.c:get_condition, except that we also put an
10517 invariant last unless both operands are invariants. */
10519 static rtx
10520 get_condition_for_loop (const struct loop *loop, rtx x)
10522 rtx comparison = get_condition (x, (rtx*) 0, false, true);
10524 if (comparison == 0
10525 || ! loop_invariant_p (loop, XEXP (comparison, 0))
10526 || loop_invariant_p (loop, XEXP (comparison, 1)))
10527 return comparison;
10529 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
10530 XEXP (comparison, 1), XEXP (comparison, 0));
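/* For example, given a loop test of the form (lt (reg invariant)
   (reg biv)), the condition is swapped to (gt (reg biv)
   (reg invariant)), so that callers such as check_dbra_loop can
   assume any loop-variant operand appears first.  */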
10533 /* Scan the function and determine whether it has indirect (computed) jumps.
10535 This is taken mostly from flow.c; similar code exists elsewhere
10536 in the compiler. It may be useful to put this into rtlanal.c. */
10537 static int
10538 indirect_jump_in_function_p (rtx start)
10540 rtx insn;
10542 for (insn = start; insn; insn = NEXT_INSN (insn))
10543 if (computed_jump_p (insn))
10544 return 1;
10546 return 0;
10549 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
10550 documentation for LOOP_MEMS for the definition of `appropriate'.
10551 This function is called from prescan_loop via for_each_rtx. */
10553 static int
10554 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
10556 struct loop_info *loop_info = data;
10557 int i;
10558 rtx m = *mem;
10560 if (m == NULL_RTX)
10561 return 0;
10563 switch (GET_CODE (m))
10565 case MEM:
10566 break;
10568 case CLOBBER:
10569 /* We're not interested in MEMs that are only clobbered. */
10570 return -1;
10572 case CONST_DOUBLE:
10573 /* We're not interested in the MEM associated with a
10574 CONST_DOUBLE, so there's no need to traverse into this. */
10575 return -1;
10577 case EXPR_LIST:
10578 /* We're not interested in any MEMs that only appear in notes. */
10579 return -1;
10581 default:
10582 /* This is not a MEM. */
10583 return 0;
10586 /* See if we've already seen this MEM. */
10587 for (i = 0; i < loop_info->mems_idx; ++i)
10588 if (rtx_equal_p (m, loop_info->mems[i].mem))
10590 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
10591 loop_info->mems[i].mem = m;
10592 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
10593 /* The modes of the two memory accesses are different. If
10594 this happens, something tricky is going on, and we just
10595 don't optimize accesses to this MEM. */
10596 loop_info->mems[i].optimize = 0;
10598 return 0;
10601 /* Resize the array, if necessary. */
10602 if (loop_info->mems_idx == loop_info->mems_allocated)
10604 if (loop_info->mems_allocated != 0)
10605 loop_info->mems_allocated *= 2;
10606 else
10607 loop_info->mems_allocated = 32;
10609 loop_info->mems = xrealloc (loop_info->mems,
10610 loop_info->mems_allocated * sizeof (loop_mem_info));
10613 /* Actually insert the MEM. */
10614 loop_info->mems[loop_info->mems_idx].mem = m;
10615 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
10616 because we can't put it in a register. We still store it in the
10617 table, though, so that if we see the same address later, but in a
10618 non-BLK mode, we'll not think we can optimize it at that point. */
10619 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
10620 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
10621 ++loop_info->mems_idx;
10623 return 0;
10627 /* Allocate REGS->ARRAY or reallocate it if it is too small.
10629 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
10630 register that is modified by an insn between FROM and TO. If the
10631 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
10632 more, stop incrementing it, to avoid overflow.
10634 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
10635 register I is used, if it is only used once. Otherwise, it is set
10636 to 0 (for no uses) or const0_rtx for more than one use. This
10637 parameter may be zero, in which case this processing is not done.
10639 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
10640 optimize register I. */
10642 static void
10643 loop_regs_scan (const struct loop *loop, int extra_size)
10645 struct loop_regs *regs = LOOP_REGS (loop);
10646 int old_nregs;
10647 /* last_set[n] is nonzero iff reg n has been set in the current
10648 basic block. In that case, it is the insn that last set reg n. */
10649 rtx *last_set;
10650 rtx insn;
10651 int i;
10653 old_nregs = regs->num;
10654 regs->num = max_reg_num ();
10656 /* Grow the regs array if not allocated or too small. */
10657 if (regs->num >= regs->size)
10659 regs->size = regs->num + extra_size;
10661 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
10663 /* Zero the new elements. */
10664 memset (regs->array + old_nregs, 0,
10665 (regs->size - old_nregs) * sizeof (*regs->array));
10668 /* Clear previously scanned fields but do not clear n_times_set. */
10669 for (i = 0; i < old_nregs; i++)
10671 regs->array[i].set_in_loop = 0;
10672 regs->array[i].may_not_optimize = 0;
10673 regs->array[i].single_usage = NULL_RTX;
10676 last_set = xcalloc (regs->num, sizeof (rtx));
10678 /* Scan the loop, recording register usage. */
10679 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10680 insn = NEXT_INSN (insn))
10682 if (INSN_P (insn))
10684 /* Record registers that have exactly one use. */
10685 find_single_use_in_loop (regs, insn, PATTERN (insn));
10687 /* Include uses in REG_EQUAL notes. */
10688 if (REG_NOTES (insn))
10689 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
10691 if (GET_CODE (PATTERN (insn)) == SET
10692 || GET_CODE (PATTERN (insn)) == CLOBBER)
10693 count_one_set (regs, insn, PATTERN (insn), last_set);
10694 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
10696 int i;
10697 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
10698 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
10699 last_set);
10703 if (LABEL_P (insn) || JUMP_P (insn))
10704 memset (last_set, 0, regs->num * sizeof (rtx));
10706 /* Invalidate all registers used for function argument passing.
10707 We check rtx_varies_p for the same reason as below, to allow
10708 optimizing PIC calculations. */
10709 if (CALL_P (insn))
10711 rtx link;
10712 for (link = CALL_INSN_FUNCTION_USAGE (insn);
10713 link;
10714 link = XEXP (link, 1))
10716 rtx op, reg;
10718 if (GET_CODE (op = XEXP (link, 0)) == USE
10719 && REG_P (reg = XEXP (op, 0))
10720 && rtx_varies_p (reg, 1))
10721 regs->array[REGNO (reg)].may_not_optimize = 1;
10726 /* Invalidate all hard registers clobbered by calls. With one exception:
10727 a call-clobbered PIC register is still function-invariant for our
10728 purposes, since we can hoist any PIC calculations out of the loop.
10729 Thus the call to rtx_varies_p. */
10730 if (LOOP_INFO (loop)->has_call)
10731 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
10732 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
10733 && rtx_varies_p (regno_reg_rtx[i], 1))
10735 regs->array[i].may_not_optimize = 1;
10736 regs->array[i].set_in_loop = 1;
10739 #ifdef AVOID_CCMODE_COPIES
10740 /* Don't try to move insns which set CC registers if we should not
10741 create CCmode register copies. */
10742 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
10743 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
10744 regs->array[i].may_not_optimize = 1;
10745 #endif
10747 /* Set regs->array[I].n_times_set for the new registers. */
10748 for (i = old_nregs; i < regs->num; i++)
10749 regs->array[i].n_times_set = regs->array[i].set_in_loop;
10751 free (last_set);
10754 /* Returns the number of real INSNs in the LOOP. */
10756 static int
10757 count_insns_in_loop (const struct loop *loop)
10759 int count = 0;
10760 rtx insn;
10762 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10763 insn = NEXT_INSN (insn))
10764 if (INSN_P (insn))
10765 ++count;
10767 return count;
10770 /* Move MEMs into registers for the duration of the loop. */
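/* For example (an illustrative sketch): if a loop repeatedly reads
   and writes a loop-invariant location *P, load_mems allocates a
   shadow pseudo R, initializes R from *P (or from a known constant
   or register equivalent) before the loop, rewrites references to *P
   inside the loop to use R, and, if the location was written, stores
   R back to *P when the loop exits.  */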
10772 static void
10773 load_mems (const struct loop *loop)
10775 struct loop_info *loop_info = LOOP_INFO (loop);
10776 struct loop_regs *regs = LOOP_REGS (loop);
10777 int maybe_never = 0;
10778 int i;
10779 rtx p, prev_ebb_head;
10780 rtx label = NULL_RTX;
10781 rtx end_label;
10782 /* Nonzero if the next instruction may never be executed. */
10783 int next_maybe_never = 0;
10784 unsigned int last_max_reg = max_reg_num ();
10786 if (loop_info->mems_idx == 0)
10787 return;
10789 /* We cannot use next_label here because it skips over normal insns. */
10790 end_label = next_nonnote_insn (loop->end);
10791 if (end_label && !LABEL_P (end_label))
10792 end_label = NULL_RTX;
10794 /* Check to see if it's possible that some instructions in the loop are
10795 never executed. Also check if there is a goto out of the loop other
10796 than right after the end of the loop. */
10797 for (p = next_insn_in_loop (loop, loop->scan_start);
10798 p != NULL_RTX;
10799 p = next_insn_in_loop (loop, p))
10801 if (LABEL_P (p))
10802 maybe_never = 1;
10803 else if (JUMP_P (p)
10804 /* If we enter the loop in the middle, and scan
10805 around to the beginning, don't set maybe_never
10806 for that. This must be an unconditional jump,
10807 otherwise the code at the top of the loop might
10808 never be executed. Unconditional jumps are
10809 followed by a barrier and then the loop end. */
10810 && ! (JUMP_P (p)
10811 && JUMP_LABEL (p) == loop->top
10812 && NEXT_INSN (NEXT_INSN (p)) == loop->end
10813 && any_uncondjump_p (p)))
10815 /* If this is a jump outside of the loop but not right
10816 after the end of the loop, we would have to emit new fixup
10817 sequences for each such label. */
10818 if (/* If we can't tell where control might go when this
10819 JUMP_INSN is executed, we must be conservative. */
10820 !JUMP_LABEL (p)
10821 || (JUMP_LABEL (p) != end_label
10822 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
10823 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
10824 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
10825 return;
10827 if (!any_condjump_p (p))
10828 /* Something complicated. */
10829 maybe_never = 1;
10830 else
10831 /* If there are any more instructions in the loop, they
10832 might not be reached. */
10833 next_maybe_never = 1;
10835 else if (next_maybe_never)
10836 maybe_never = 1;
10839 /* Find start of the extended basic block that enters the loop. */
10840 for (p = loop->start;
10841 PREV_INSN (p) && !LABEL_P (p);
10842 p = PREV_INSN (p))
10844 prev_ebb_head = p;
10846 cselib_init (true);
10848 /* Build table of mems that get set to constant values before the
10849 loop. */
10850 for (; p != loop->start; p = NEXT_INSN (p))
10851 cselib_process_insn (p);

  /* Actually move the MEMs.  */
  for (i = 0; i < loop_info->mems_idx; ++i)
    {
      regset_head load_copies;
      regset_head store_copies;
      int written = 0;
      rtx reg;
      rtx mem = loop_info->mems[i].mem;
      rtx mem_list_entry;

      if (MEM_VOLATILE_P (mem)
	  || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
	/* There's no telling whether or not MEM is modified.  */
	loop_info->mems[i].optimize = 0;

      /* Go through the MEMs written to in the loop to see if this
	 one is aliased by one of them.  */
      mem_list_entry = loop_info->store_mems;
      while (mem_list_entry)
	{
	  if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
	    written = 1;
	  else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
				    mem, rtx_varies_p))
	    {
	      /* MEM is indeed aliased by this store.  */
	      loop_info->mems[i].optimize = 0;
	      break;
	    }
	  mem_list_entry = XEXP (mem_list_entry, 1);
	}

      if (flag_float_store && written
	  && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
	loop_info->mems[i].optimize = 0;

      /* If this MEM is written to, we must be sure that there
	 are no reads from another MEM that aliases this one.  */
      if (loop_info->mems[i].optimize && written)
	{
	  int j;

	  for (j = 0; j < loop_info->mems_idx; ++j)
	    {
	      if (j == i)
		continue;
	      else if (true_dependence (mem,
					VOIDmode,
					loop_info->mems[j].mem,
					rtx_varies_p))
		{
		  /* It's not safe to hoist loop_info->mems[i] out of
		     the loop because writes to it might not be
		     seen by reads from loop_info->mems[j].  */
		  loop_info->mems[i].optimize = 0;
		  break;
		}
	    }
	}

      if (maybe_never && may_trap_p (mem))
	/* We can't access the MEM outside the loop; it might
	   cause a trap that wouldn't have happened otherwise.  */
	loop_info->mems[i].optimize = 0;

      if (!loop_info->mems[i].optimize)
	/* We thought we were going to lift this MEM out of the
	   loop, but later discovered that we could not.  */
	continue;

      INIT_REG_SET (&load_copies);
      INIT_REG_SET (&store_copies);

      /* Allocate a pseudo for this MEM.  We set REG_USERVAR_P in
	 order to keep scan_loop from moving stores to this MEM
	 out of the loop just because this REG is neither a
	 user-variable nor used in the loop test.  */
      reg = gen_reg_rtx (GET_MODE (mem));
      REG_USERVAR_P (reg) = 1;
      loop_info->mems[i].reg = reg;

      /* Now, replace all references to the MEM with the
	 corresponding pseudos.  */
      maybe_never = 0;
      for (p = next_insn_in_loop (loop, loop->scan_start);
	   p != NULL_RTX;
	   p = next_insn_in_loop (loop, p))
	{
	  if (INSN_P (p))
	    {
	      rtx set;

	      set = single_set (p);

	      /* See if this copies the mem into a register that isn't
		 modified afterwards.  We'll try to do copy propagation
		 a little further on.  */
	      if (set
		  /* @@@ This test is _way_ too conservative.  */
		  && ! maybe_never
		  && REG_P (SET_DEST (set))
		  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
		  && REGNO (SET_DEST (set)) < last_max_reg
		  && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
		  && rtx_equal_p (SET_SRC (set), mem))
		SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));

	      /* See if this copies the mem from a register that isn't
		 modified afterwards.  We'll try to remove the
		 redundant copy later on by doing a little register
		 renaming and copy propagation.  This will help
		 to untangle things for the BIV detection code.  */
	      if (set
		  && ! maybe_never
		  && REG_P (SET_SRC (set))
		  && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
		  && REGNO (SET_SRC (set)) < last_max_reg
		  && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
		  && rtx_equal_p (SET_DEST (set), mem))
		SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));

	      /* If this is a call which uses / clobbers this memory
		 location, we must not change the interface here.  */
	      if (CALL_P (p)
		  && reg_mentioned_p (loop_info->mems[i].mem,
				      CALL_INSN_FUNCTION_USAGE (p)))
		{
		  cancel_changes (0);
		  loop_info->mems[i].optimize = 0;
		  break;
		}
	      else
		/* Replace the memory reference with the shadow register.  */
		replace_loop_mems (p, loop_info->mems[i].mem,
				   loop_info->mems[i].reg, written);
	    }

	  if (LABEL_P (p)
	      || JUMP_P (p))
	    maybe_never = 1;
	}

      if (! loop_info->mems[i].optimize)
	; /* We found we couldn't do the replacement, so do nothing.  */
      else if (! apply_change_group ())
	/* We couldn't replace all occurrences of the MEM.  */
	loop_info->mems[i].optimize = 0;
      else
	{
	  /* Load the memory immediately before LOOP->START, which is
	     the NOTE_LOOP_BEG.  */
	  cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
	  rtx set;
	  rtx best = mem;
	  unsigned j;
	  struct elt_loc_list *const_equiv = 0;
	  reg_set_iterator rsi;

	  if (e)
	    {
	      struct elt_loc_list *equiv;
	      struct elt_loc_list *best_equiv = 0;
	      for (equiv = e->locs; equiv; equiv = equiv->next)
		{
		  if (CONSTANT_P (equiv->loc))
		    const_equiv = equiv;
		  else if (REG_P (equiv->loc)
			   /* Extending hard register lifetimes causes a
			      crash on SRC targets.  Doing so on other
			      targets is probably not a good idea either,
			      since we most likely have a pseudo-register
			      equivalence as well.  */
			   && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
		    best_equiv = equiv;
		}
	      /* Use the constant equivalence if that is cheap enough.  */
	      if (! best_equiv)
		best_equiv = const_equiv;
	      else if (const_equiv
		       && (rtx_cost (const_equiv->loc, SET)
			   <= rtx_cost (best_equiv->loc, SET)))
		{
		  best_equiv = const_equiv;
		  const_equiv = 0;
		}

	      /* If best_equiv is nonzero, we know that MEM is set to a
		 constant or register before the loop.  We will use this
		 knowledge to initialize the shadow register with that
		 constant or reg rather than by loading from MEM.  */
	      if (best_equiv)
		best = copy_rtx (best_equiv->loc);
	    }

	  set = gen_move_insn (reg, best);
	  set = loop_insn_hoist (loop, set);
	  if (REG_P (best))
	    {
	      for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
		if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
		  {
		    REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
		    break;
		  }
	    }

	  if (const_equiv)
	    set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));

	  if (written)
	    {
	      if (label == NULL_RTX)
		{
		  label = gen_label_rtx ();
		  emit_label_after (label, loop->end);
		}

	      /* Store the memory immediately after END, which is
		 the NOTE_LOOP_END.  */
	      set = gen_move_insn (copy_rtx (mem), reg);
	      loop_insn_emit_after (loop, 0, label, set);
	    }

	  if (loop_dump_stream)
	    {
	      fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
		       REGNO (reg), (written ? "r/w" : "r/o"));
	      print_rtl (loop_dump_stream, mem);
	      fputc ('\n', loop_dump_stream);
	    }

	  /* Attempt a bit of copy propagation.  This helps untangle the
	     data flow, and enables {basic,general}_induction_var to find
	     more bivs/givs.  */
	  EXECUTE_IF_SET_IN_REG_SET
	    (&load_copies, FIRST_PSEUDO_REGISTER, j, rsi)
	    {
	      try_copy_prop (loop, reg, j);
	    }
	  CLEAR_REG_SET (&load_copies);

	  EXECUTE_IF_SET_IN_REG_SET
	    (&store_copies, FIRST_PSEUDO_REGISTER, j, rsi)
	    {
	      try_swap_copy_prop (loop, reg, j);
	    }
	  CLEAR_REG_SET (&store_copies);
	}
    }

  /* Now, we need to replace all references to the previous exit
     label with the new one.  */
  if (label != NULL_RTX && end_label != NULL_RTX)
    for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
      if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
	redirect_jump (p, label, false);

  cselib_finish ();
}

/* For communication between note_reg_stored and its caller.  */
struct note_reg_stored_arg
{
  int set_seen;
  rtx reg;
};

/* Called via note_stores; record in the SET_SEEN field of ARG whether X,
   which is written, is equal to the register recorded in ARG.  */
static void
note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
{
  struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
  if (t->reg == x)
    t->set_seen = 1;
}
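
/* An illustrative sketch (not from any particular testcase): after
   load_mems has replaced a hoisted MEM with its shadow register R, the
   loop body may still contain a copy such as

	(set (reg T) (reg R))
	... uses of (reg T) ...

   Rewriting the uses of T to use R directly makes the copy dead so it
   can be deleted, which in turn helps the induction variable code find
   more bivs and givs.  */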

/* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
   There must be exactly one insn that sets this pseudo; it will be
   deleted if all replacements succeed and we can prove that the register
   is not used after the loop.  */

static void
try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
{
  /* This is the reg that we are copying from.  */
  rtx reg_rtx = regno_reg_rtx[regno];
  rtx init_insn = 0;
  rtx insn;
  /* These help keep track of whether we replaced all uses of the reg.  */
  int replaced_last = 0;
  int store_is_first = 0;

  for (insn = next_insn_in_loop (loop, loop->scan_start);
       insn != NULL_RTX;
       insn = next_insn_in_loop (loop, insn))
    {
      rtx set;

      /* Only substitute within one extended basic block from the
	 initializing insn.  */
      if (LABEL_P (insn) && init_insn)
	break;

      if (! INSN_P (insn))
	continue;

      /* Is this the initializing insn?  */
      set = single_set (insn);
      if (set
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) == regno)
	{
	  gcc_assert (!init_insn);

	  init_insn = insn;
	  if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
	    store_is_first = 1;
	}

      /* Only substitute after seeing the initializing insn.  */
      if (init_insn && insn != init_insn)
	{
	  struct note_reg_stored_arg arg;

	  replace_loop_regs (insn, reg_rtx, replacement);
	  if (REGNO_LAST_UID (regno) == INSN_UID (insn))
	    replaced_last = 1;

	  /* Stop replacing when REPLACEMENT is modified.  */
	  arg.reg = replacement;
	  arg.set_seen = 0;
	  note_stores (PATTERN (insn), note_reg_stored, &arg);
	  if (arg.set_seen)
	    {
	      rtx note = find_reg_note (insn, REG_EQUAL, NULL);

	      /* We may have turned a previously valid REG_EQUAL note
		 into an invalid one: REGNO has been replaced by
		 REPLACEMENT, and since REPLACEMENT (unlike REGNO) is
		 modified here, the note no longer describes the value.  */
	      if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
		remove_note (insn, note);
	      break;
	    }
	}
    }
  gcc_assert (init_insn);
  if (apply_change_group ())
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "  Replaced reg %d", regno);
      if (store_is_first && replaced_last)
	{
	  rtx first;
	  rtx retval_note;

	  /* Assume we're just deleting INIT_INSN.  */
	  first = init_insn;
	  /* Look for a REG_RETVAL note.  If we're deleting the end of
	     the libcall sequence, the whole sequence can go.  */
	  retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
	  /* If we found a REG_RETVAL note, find the first instruction
	     in the sequence.  */
	  if (retval_note)
	    first = XEXP (retval_note, 0);

	  /* Delete the instructions.  */
	  loop_delete_insns (first, init_insn);
	}
      if (loop_dump_stream)
	fprintf (loop_dump_stream, ".\n");
    }
}

/* Replace all the instructions from FIRST up to and including LAST
   with NOTE_INSN_DELETED notes.  */

static void
loop_delete_insns (rtx first, rtx last)
{
  while (1)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, ", deleting init_insn (%d)",
		 INSN_UID (first));
      delete_insn (first);

      /* If this was the LAST instruction we're supposed to delete,
	 we're done.  */
      if (first == last)
	break;

      first = NEXT_INSN (first);
    }
}

/* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
   loop LOOP if the order of the sets of these registers can be
   swapped.  There must be exactly one insn within the loop that sets
   this pseudo, followed immediately by a move insn that copies REGNO
   into REPLACEMENT.  */
static void
try_swap_copy_prop (const struct loop *loop, rtx replacement,
		    unsigned int regno)
{
  rtx insn;
  rtx set = NULL_RTX;
  unsigned int new_regno;

  new_regno = REGNO (replacement);

  for (insn = next_insn_in_loop (loop, loop->scan_start);
       insn != NULL_RTX;
       insn = next_insn_in_loop (loop, insn))
    {
      /* Search for the insn that copies REGNO to NEW_REGNO.  */
      if (INSN_P (insn)
	  && (set = single_set (insn))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) == new_regno
	  && REG_P (SET_SRC (set))
	  && REGNO (SET_SRC (set)) == regno)
	break;
    }

  if (insn != NULL_RTX)
    {
      rtx prev_insn;
      rtx prev_set;

      /* Some DEF-USE info would come in handy here to make this
	 function more general.  For now, just check the previous insn
	 which is the most likely candidate for setting REGNO.  */

      prev_insn = PREV_INSN (insn);

      if (INSN_P (prev_insn)
	  && (prev_set = single_set (prev_insn))
	  && REG_P (SET_DEST (prev_set))
	  && REGNO (SET_DEST (prev_set)) == regno)
	{
	  /* We have:
	     (set (reg regno) (expr))
	     (set (reg new_regno) (reg regno))

	     so try converting this to:
	     (set (reg new_regno) (expr))
	     (set (reg regno) (reg new_regno))

	     The former construct is often generated when a global
	     variable used for an induction variable is shadowed by a
	     register (NEW_REGNO).  The latter construct improves the
	     chances of GIV replacement and BIV elimination.  */

	  validate_change (prev_insn, &SET_DEST (prev_set),
			   replacement, 1);
	  validate_change (insn, &SET_DEST (set),
			   SET_SRC (set), 1);
	  validate_change (insn, &SET_SRC (set),
			   replacement, 1);

	  if (apply_change_group ())
	    {
	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "  Swapped set of reg %d at %d with reg %d at %d.\n",
			 regno, INSN_UID (insn),
			 new_regno, INSN_UID (prev_insn));

	      /* Update first use of REGNO.  */
	      if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
		REGNO_FIRST_UID (regno) = INSN_UID (insn);

	      /* Now perform copy propagation to hopefully
		 remove all uses of REGNO within the loop.  */
	      try_copy_prop (loop, replacement, regno);
	    }
	}
    }
}

/* Worker function for find_mem_in_note, called via for_each_rtx.  */

static int
find_mem_in_note_1 (rtx *x, void *data)
{
  if (*x != NULL_RTX && MEM_P (*x))
    {
      rtx *res = (rtx *) data;
      *res = *x;
      return 1;
    }
  return 0;
}

/* Returns the first MEM found in NOTE by depth-first search.  */

static rtx
find_mem_in_note (rtx note)
{
  if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
    return note;
  return NULL_RTX;
}

/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually a pointer
   to a structure describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (rtx *mem, void *data)
{
  loop_replace_args *args = (loop_replace_args *) data;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  if (!rtx_equal_p (args->match, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  /* Actually replace the MEM.  */
  validate_change (args->insn, mem, args->replacement, 1);

  return 0;
}
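
/* Replace every reference to MEM in INSN with the pseudo register REG,
   queueing the changes with validate_change.  WRITTEN is nonzero if MEM
   is written somewhere in the loop; in that case, REG_EQUAL notes that
   mention a MEM aliased by it are also queued for removal.  */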
static void
replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
{
  loop_replace_args args;

  args.insn = insn;
  args.match = mem;
  args.replacement = reg;

  for_each_rtx (&insn, replace_loop_mem, &args);

  /* If we hoist a mem write out of the loop, then REG_EQUAL
     notes referring to the mem are no longer valid.  */
  if (written)
    {
      rtx note, sub;
      rtx *link;

      for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
	{
	  if (REG_NOTE_KIND (note) == REG_EQUAL
	      && (sub = find_mem_in_note (note))
	      && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
	    {
	      /* Remove the note.  */
	      validate_change (NULL_RTX, link, XEXP (note, 1), 1);
	      break;
	    }
	}
    }
}

/* Replace one register with another.  Called through for_each_rtx; PX
   points to the rtx being scanned.  DATA is actually a pointer to a
   structure of arguments.  */

static int
replace_loop_reg (rtx *px, void *data)
{
  rtx x = *px;
  loop_replace_args *args = (loop_replace_args *) data;

  if (x == NULL_RTX)
    return 0;

  if (x == args->match)
    validate_change (args->insn, px, args->replacement, 1);

  return 0;
}
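
/* Replace every occurrence of the register REG in INSN with REPLACEMENT,
   queueing the changes with validate_change.  */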
static void
replace_loop_regs (rtx insn, rtx reg, rtx replacement)
{
  loop_replace_args args;

  args.insn = insn;
  args.match = reg;
  args.replacement = replacement;

  for_each_rtx (&insn, replace_loop_reg, &args);
}

/* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
   (ignored in the interim).  */

static rtx
loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
		      basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
		      rtx pattern)
{
  return emit_insn_after (pattern, where_insn);
}

/* If WHERE_INSN is nonzero, emit insn for PATTERN before WHERE_INSN
   in basic block WHERE_BB (ignored in the interim) within the loop;
   otherwise hoist PATTERN into the loop pre-header.  */

static rtx
loop_insn_emit_before (const struct loop *loop,
		       basic_block where_bb ATTRIBUTE_UNUSED,
		       rtx where_insn, rtx pattern)
{
  if (! where_insn)
    return loop_insn_hoist (loop, pattern);
  return emit_insn_before (pattern, where_insn);
}

/* Emit call insn for PATTERN before WHERE_INSN in basic block
   WHERE_BB (ignored in the interim) within the loop.  */

static rtx
loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
			    basic_block where_bb ATTRIBUTE_UNUSED,
			    rtx where_insn, rtx pattern)
{
  return emit_call_insn_before (pattern, where_insn);
}

/* Hoist insn for PATTERN into the loop pre-header.  */

static rtx
loop_insn_hoist (const struct loop *loop, rtx pattern)
{
  return loop_insn_emit_before (loop, 0, loop->start, pattern);
}

/* Hoist call insn for PATTERN into the loop pre-header.  */

static rtx
loop_call_insn_hoist (const struct loop *loop, rtx pattern)
{
  return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
}

/* Sink insn for PATTERN after the loop end.  */

static rtx
loop_insn_sink (const struct loop *loop, rtx pattern)
{
  return loop_insn_emit_before (loop, 0, loop->sink, pattern);
}

/* BL->final_value can be either a general_operand or a PLUS of a
   general_operand and a constant.  Emit a sequence of instructions to
   load it into REG.  */
static rtx
gen_load_of_final_value (rtx reg, rtx final_value)
{
  rtx seq;
  start_sequence ();
  final_value = force_operand (final_value, reg);
  if (final_value != reg)
    emit_move_insn (reg, final_value);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* If the loop has multiple exits, emit insn for PATTERN before the
   loop to ensure that it will always be executed no matter how the
   loop exits.  Otherwise, emit the insn for PATTERN after the loop,
   since this is slightly more efficient.  */

static rtx
loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
{
  if (loop->exit_count)
    return loop_insn_hoist (loop, pattern);
  else
    return loop_insn_sink (loop, pattern);
}
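
/* Dump the induction variable classes recorded for LOOP to FILE.  */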
static void
loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
{
  struct iv_class *bl;
  int iv_num = 0;

  if (! loop || ! file)
    return;

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    iv_num++;

  fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    {
      loop_iv_class_dump (bl, file, verbose);
      fputc ('\n', file);
    }
}
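
/* Dump the bivs and givs of induction variable class BL to FILE.  */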
static void
loop_iv_class_dump (const struct iv_class *bl, FILE *file,
		    int verbose ATTRIBUTE_UNUSED)
{
  struct induction *v;
  rtx incr;
  int i;

  if (! bl || ! file)
    return;

  fprintf (file, "IV class for reg %d, benefit %d\n",
	   bl->regno, bl->total_benefit);

  fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
  if (bl->initial_value)
    {
      fprintf (file, ", init val: ");
      print_simple_rtl (file, bl->initial_value);
    }
  if (bl->initial_test)
    {
      fprintf (file, ", init test: ");
      print_simple_rtl (file, bl->initial_test);
    }
  fputc ('\n', file);

  if (bl->final_value)
    {
      fprintf (file, " Final val: ");
      print_simple_rtl (file, bl->final_value);
      fputc ('\n', file);
    }

  if ((incr = biv_total_increment (bl)))
    {
      fprintf (file, " Total increment: ");
      print_simple_rtl (file, incr);
      fputc ('\n', file);
    }

  /* List the increments.  */
  for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
      print_simple_rtl (file, v->add_val);
      fputc ('\n', file);
    }

  /* List the givs.  */
  for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Giv%d: insn %d, benefit %d, ",
	       i, INSN_UID (v->insn), v->benefit);
      if (v->giv_type == DEST_ADDR)
	print_simple_rtl (file, v->mem);
      else
	print_simple_rtl (file, single_set (v->insn));
      fputc ('\n', file);
    }
}
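
/* Dump the biv described by V to FILE.  */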
static void
loop_biv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  fprintf (file,
	   "Biv %d: insn %d",
	   REGNO (v->dest_reg), INSN_UID (v->insn));
  fprintf (file, " const ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}
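
/* Dump the giv described by V to FILE.  */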
static void
loop_giv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  if (v->giv_type == DEST_REG)
    fprintf (file, "Giv %d: insn %d",
	     REGNO (v->dest_reg), INSN_UID (v->insn));
  else
    fprintf (file, "Dest address: insn %d",
	     INSN_UID (v->insn));

  fprintf (file, " src reg %d benefit %d",
	   REGNO (v->src_reg), v->benefit);
  fprintf (file, " lifetime %d",
	   v->lifetime);

  if (v->replaceable)
    fprintf (file, " replaceable");

  if (v->no_const_addval)
    fprintf (file, " ncav");

  if (v->ext_dependent)
    {
      switch (GET_CODE (v->ext_dependent))
	{
	case SIGN_EXTEND:
	  fprintf (file, " ext se");
	  break;
	case ZERO_EXTEND:
	  fprintf (file, " ext ze");
	  break;
	case TRUNCATE:
	  fprintf (file, " ext tr");
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  fputc ('\n', file);
  fprintf (file, " mult ");
  print_simple_rtl (file, v->mult_val);

  fputc ('\n', file);
  fprintf (file, " add ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}
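
/* Call this function from the debugger to dump the induction variables
   of LOOP.  */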
void
debug_ivs (const struct loop *loop)
{
  loop_ivs_dump (loop, stderr, 1);
}
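
/* Call this function from the debugger to dump induction variable
   class BL.  */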
void
debug_iv_class (const struct iv_class *bl)
{
  loop_iv_class_dump (bl, stderr, 1);
}
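
/* Call this function from the debugger to dump the biv V.  */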
void
debug_biv (const struct induction *v)
{
  loop_biv_dump (v, stderr, 1);
}
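
/* Call this function from the debugger to dump the giv V.  */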
void
debug_giv (const struct induction *v)
{
  loop_giv_dump (v, stderr, 1);
}

#define LOOP_BLOCK_NUM_1(INSN) \
((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)

/* The notes do not have an assigned block, so look at the next insn.  */
#define LOOP_BLOCK_NUM(INSN) \
((INSN) ? (NOTE_P (INSN) \
	   ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
	   : LOOP_BLOCK_NUM_1 (INSN)) \
 : -1)

#define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
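
/* Auxiliary dump routine for flow_loop_dump: print our view of LOOP's
   boundaries, scan start and exit labels to FILE, together with
   diagnostics when the loop notes disagree.  */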
static void
loop_dump_aux (const struct loop *loop, FILE *file,
	       int verbose ATTRIBUTE_UNUSED)
{
  rtx label;

  if (! loop || ! file || !BB_HEAD (loop->first))
    return;

  /* Print diagnostics to compare our concept of a loop with
     what the loop notes say.  */
  if (! PREV_INSN (BB_HEAD (loop->first))
      || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
      || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
	 != NOTE_INSN_LOOP_BEG)
    fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
	     INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
  if (! NEXT_INSN (BB_END (loop->last))
      || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
      || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
	 != NOTE_INSN_LOOP_END)
    fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
	     INSN_UID (NEXT_INSN (BB_END (loop->last))));

  if (loop->start)
    {
      fprintf (file,
	       ";; start %d (%d), end %d (%d)\n",
	       LOOP_BLOCK_NUM (loop->start),
	       LOOP_INSN_UID (loop->start),
	       LOOP_BLOCK_NUM (loop->end),
	       LOOP_INSN_UID (loop->end));
      fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
	       LOOP_BLOCK_NUM (loop->top),
	       LOOP_INSN_UID (loop->top),
	       LOOP_BLOCK_NUM (loop->scan_start),
	       LOOP_INSN_UID (loop->scan_start));
      fprintf (file, ";; exit_count %d", loop->exit_count);
      if (loop->exit_count)
	{
	  fputs (", labels:", file);
	  for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
	    {
	      fprintf (file, " %d ",
		       LOOP_INSN_UID (XEXP (label, 0)));
	    }
	}
      fputs ("\n", file);
    }
}

/* Call this function from the debugger to dump LOOP.  */

void
debug_loop (const struct loop *loop)
{
  flow_loop_dump (loop, stderr, loop_dump_aux, 1);
}

/* Call this function from the debugger to dump LOOPS.  */

void
debug_loops (const struct loops *loops)
{
  flow_loops_dump (loops, stderr, loop_dump_aux, 1);
}
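
/* Gate routine for the pass: run the old loop optimizer only when
   optimizing and -floop-optimize is enabled.  */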
static bool
gate_handle_loop_optimize (void)
{
  return (optimize > 0 && flag_loop_optimize);
}

/* Move constant computations out of loops.  */
static void
rest_of_handle_loop_optimize (void)
{
  int do_prefetch;

  /* CFG is no longer maintained up-to-date.  */
  free_bb_for_insn ();
  profile_status = PROFILE_ABSENT;

  do_prefetch = flag_prefetch_loop_arrays ? LOOP_PREFETCH : 0;

  if (flag_rerun_loop_opt)
    {
      cleanup_barriers ();

      /* We only want to perform unrolling once.  */
      loop_optimize (get_insns (), dump_file, 0);

      /* The first call to loop_optimize makes some instructions
	 trivially dead.  We delete those instructions now in the
	 hope that doing so will make the heuristics in loop work
	 better and possibly speed up compilation.  */
      delete_trivially_dead_insns (get_insns (), max_reg_num ());

      /* The regscan pass is currently necessary as the alias
	 analysis code depends on this information.  */
      reg_scan (get_insns (), max_reg_num ());
    }

  cleanup_barriers ();
  loop_optimize (get_insns (), dump_file, do_prefetch);

  /* Loop optimization can create trivially dead instructions.  */
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  find_basic_blocks (get_insns ());
}

struct tree_opt_pass pass_loop_optimize =
{
  "old-loop",                           /* name */
  gate_handle_loop_optimize,            /* gate */
  rest_of_handle_loop_optimize,         /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_LOOP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func |
  TODO_ggc_collect,                     /* todo_flags_finish */
  'L'                                   /* letter */