/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995,
   1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.

   Basic induction variables (BIVs) are pseudo registers that are set
   within a loop only by incrementing or decrementing their value.
   General induction variables (GIVs) are pseudo registers whose value
   is a linear function of a basic induction variable.  BIVs are
   recognized by `basic_induction_var'; GIVs by `general_induction_var'.
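
   For example, in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the counter `i' is a biv (it is set only by the increment), and the
   address computation `&a[i]', a linear function of `i', is a giv.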

   Once induction variables are identified, strength reduction is
   applied to the general induction variables, and induction variable
   elimination is applied to the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.
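
   For example, a loop that repeatedly executes (in RTL form)

	(set (reg:SI r) (zero_extend:SI (mem:QI ...)))

   can instead clear R once before the loop and, within the loop, load
   only the low byte through a STRICT_LOW_PART subreg of R.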

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"
#include "ggc.h"

/* Get the loop info pointer of a loop.  */
#define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux)

/* Get a pointer to the loop movables structure.  */
#define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables)

/* Get a pointer to the loop registers structure.  */
#define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs)

/* Get a pointer to the loop induction variables structure.  */
#define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs)

/* Get the luid of an insn.  Catch the error of trying to reference the LUID
   of an insn added during loop, since these don't have LUIDs.  */

#define INSN_LUID(INSN) \
  (gcc_assert (INSN_UID (INSN) < max_uid_for_loop), uid_luid[INSN_UID (INSN)])

#define REGNO_FIRST_LUID(REGNO) \
  (REGNO_FIRST_UID (REGNO) < max_uid_for_loop \
	? uid_luid[REGNO_FIRST_UID (REGNO)] \
	: 0)
#define REGNO_LAST_LUID(REGNO) \
  (REGNO_LAST_UID (REGNO) < max_uid_for_loop \
	? uid_luid[REGNO_LAST_UID (REGNO)] \
	: INT_MAX)

/* A "basic induction variable" or biv is a pseudo reg that is set
   (within this loop) only by incrementing or decrementing it.  */
/* A "general induction variable" or giv is a pseudo reg whose
   value is a linear function of a biv.  */

/* Bivs are recognized by `basic_induction_var';
   Givs by `general_induction_var'.  */

/* An enum for the two different types of givs, those that are used
   as memory addresses and those that are calculated into registers.  */
enum g_types
{
  DEST_ADDR,
  DEST_REG
};

/* A `struct induction' is created for every instruction that sets
   an induction variable (either a biv or a giv).  */

struct induction
{
  rtx insn;			/* The insn that sets a biv or giv.  */
  rtx new_reg;			/* New register, containing strength reduced
				   version of this giv.  */
  rtx src_reg;			/* Biv from which this giv is computed.
				   (If this is a biv, then this is the biv.)  */
  enum g_types giv_type;	/* Indicate whether DEST_ADDR or DEST_REG.  */
  rtx dest_reg;			/* Destination register for insn: this is the
				   register which was the biv or giv.
				   For a biv, this equals src_reg.
				   For a DEST_ADDR type giv, this is 0.  */
  rtx *location;		/* Place in the insn where this giv occurs.
				   If GIV_TYPE is DEST_REG, this is 0.  */
				/* For a biv, this is the place where add_val
				   was found.  */
  enum machine_mode mode;	/* The mode of this biv or giv.  */
  rtx mem;			/* For DEST_ADDR, the memory object.  */
  rtx mult_val;			/* Multiplicative factor for src_reg.  */
  rtx add_val;			/* Additive constant for that product.  */
  int benefit;			/* Gain from eliminating this insn.  */
  rtx final_value;		/* If the giv is used outside the loop, and its
				   final value could be calculated, it is put
				   here, and the giv is made replaceable.  Set
				   the giv to this value before the loop.  */
  unsigned combined_with;	/* The number of givs this giv has been
				   combined with.  If nonzero, this giv
				   cannot combine with any other giv.  */
  unsigned replaceable : 1;	/* 1 if we can substitute the strength-reduced
				   variable for the original variable.
				   0 means they must be kept separate and the
				   new one must be copied into the old pseudo
				   reg each time the old one is set.  */
  unsigned not_replaceable : 1;	/* Used to prevent duplicating work.  This is
				   1 if we know that the giv definitely can
				   not be made replaceable, in which case we
				   don't bother checking the variable again
				   even if further info is available.
				   Both this and the above can be zero.  */
  unsigned ignore : 1;		/* 1 prohibits further processing of giv.  */
  unsigned always_computable : 1;/* 1 if this value is computable every
				    iteration.  */
  unsigned always_executed : 1; /* 1 if this set occurs each iteration.  */
  unsigned maybe_multiple : 1;	/* Only used for a biv and 1 if this biv
				   update may be done multiple times per
				   iteration.  */
  unsigned cant_derive : 1;	/* For giv's, 1 if this giv cannot derive
				   another giv.  This occurs in many cases
				   where a giv's lifetime spans an update to
				   a biv.  */
  unsigned maybe_dead : 1;	/* 1 if this giv might be dead.  In that case,
				   we won't use it to eliminate a biv, it
				   would probably lose.  */
  unsigned auto_inc_opt : 1;	/* 1 if this giv had its increment output next
				   to it to try to form an auto-inc address.  */
  unsigned shared : 1;
  unsigned no_const_addval : 1; /* 1 if add_val does not contain a const.  */
  int lifetime;			/* Length of life of this giv.  */
  rtx derive_adjustment;	/* If nonzero, is an adjustment to be
				   subtracted from add_val when this giv
				   derives another.  This occurs when the
				   giv spans a biv update by incrementation.  */
  rtx ext_dependent;		/* If nonzero, is a sign or zero extension
				   of a biv on which this giv is dependent.  */
  struct induction *next_iv;	/* For givs, links together all givs that are
				   based on the same biv.  For bivs, links
				   together all biv entries that refer to the
				   same biv register.  */
  struct induction *same;	/* For givs, if the giv has been combined with
				   another giv, this points to the base giv.
				   The base giv will have COMBINED_WITH nonzero.
				   For bivs, if the biv has the same LOCATION
				   as another biv, this points to the base
				   biv.  */
  struct induction *same_insn;	/* If there are multiple identical givs in
				   the same insn, then all but one have this
				   field set, and they all point to the giv
				   that doesn't have this field set.  */
  rtx last_use;			/* For a giv made from a biv increment, this is
				   a substitute for the lifetime information.  */
};
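
/* For example, a giv whose SRC_REG is the biv B, with MULT_VAL 4 and
   ADD_VAL 16, represents the value 4 * B + 16.  Strength reduction
   keeps this value in NEW_REG and, instead of recomputing the product
   each iteration, adds 4 times B's increment to NEW_REG at each biv
   update.  */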

/* A `struct iv_class' is created for each biv.  */

struct iv_class
{
  unsigned int regno;		/* Pseudo reg which is the biv.  */
  int biv_count;		/* Number of insns setting this reg.  */
  struct induction *biv;	/* List of all insns that set this reg.  */
  int giv_count;		/* Number of DEST_REG givs computed from this
				   biv.  The resulting count is only used in
				   check_dbra_loop.  */
  struct induction *giv;	/* List of all insns that compute a giv
				   from this reg.  */
  int total_benefit;		/* Sum of BENEFITs of all those givs.  */
  rtx initial_value;		/* Value of reg at loop start.  */
  rtx initial_test;		/* Test performed on BIV before loop.  */
  rtx final_value;		/* Value of reg at loop end, if known.  */
  struct iv_class *next;	/* Links all class structures together.  */
  rtx init_insn;		/* insn which initializes biv, 0 if none.  */
  rtx init_set;			/* SET of INIT_INSN, if any.  */
  unsigned incremented : 1;	/* 1 if somewhere incremented/decremented.  */
  unsigned eliminable : 1;	/* 1 if plausible candidate for
				   elimination.  */
  unsigned nonneg : 1;		/* 1 if we added a REG_NONNEG note for
				   this.  */
  unsigned reversed : 1;	/* 1 if we reversed the loop that this
				   biv controls.  */
  unsigned all_reduced : 1;	/* 1 if all givs using this biv have
				   been reduced.  */
};

/* Definitions used by the basic induction variable discovery code.  */
enum iv_mode
{
  UNKNOWN_INDUCT,
  BASIC_INDUCT,
  NOT_BASIC_INDUCT,
  GENERAL_INDUCT
};

/* A `struct iv' is created for every register.  */

struct iv
{
  enum iv_mode type;
  union
  {
    struct iv_class *class;
    struct induction *info;
  } iv;
};

#define REG_IV_TYPE(ivs, n) ivs->regs[n].type
#define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info
#define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class

struct loop_ivs
{
  /* Indexed by register number, contains pointer to `struct
     iv' if register is an induction variable.  */
  struct iv *regs;

  /* Size of regs array.  */
  unsigned int n_regs;

  /* The head of a list which links together (via the next field)
     every iv class for the current loop.  */
  struct iv_class *list;
};

typedef struct loop_mem_info
{
  rtx mem;			/* The MEM itself.  */
  rtx reg;			/* Corresponding pseudo, if any.  */
  int optimize;			/* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;

struct loop_reg
{
  /* Number of times the reg is set during the loop being scanned.
     During code motion, a negative value indicates a reg that has
     been made a candidate; in particular -2 means that it is a
     candidate that we know is equal to a constant and -1 means that
     it is a candidate not known equal to a constant.  After code
     motion, regs moved have 0 (which is accurate now) while the
     failed candidates have the original number of times set.

     Therefore, at all times, == 0 indicates an invariant register;
     < 0 a conditionally invariant one.  */
  int set_in_loop;

  /* Original value of set_in_loop; same except that this value
     is not set negative for a reg whose sets have been made candidates
     and not set to 0 for a reg that is moved.  */
  int n_times_set;

  /* Contains the insn in which a register was used if it was used
     exactly once; contains const0_rtx if it was used more than once.  */
  rtx single_usage;

  /* Nonzero indicates that the register cannot be moved or strength
     reduced.  */
  char may_not_optimize;

  /* Nonzero means reg N has already been moved out of one loop.
     This reduces the desire to move it out of another.  */
  char moved_once;
};
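
/* For example, a register set twice in the loop being scanned starts
   with set_in_loop == 2.  If code motion then makes its sets candidates,
   set_in_loop becomes -2 when the register is known equal to a constant
   and -1 otherwise, and it is reset to 0 once the sets are actually
   moved out of the loop.  */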

struct loop_regs
{
  int num;			/* Number of regs used in table.  */
  int size;			/* Size of table.  */
  struct loop_reg *array;	/* Register usage info. array.  */
  int multiple_uses;		/* Nonzero if a reg has multiple uses.  */
};

struct loop_movables
{
  /* Head of movable chain.  */
  struct movable *head;
  /* Last movable in chain.  */
  struct movable *last;
};

/* Information pertaining to a loop.  */

struct loop_info
{
  /* Nonzero if there is a subroutine call in the current loop.  */
  int has_call;
  /* Nonzero if there is a libcall in the current loop.  */
  int has_libcall;
  /* Nonzero if there is a non-constant call in the current loop.  */
  int has_nonconst_call;
  /* Nonzero if there is a prefetch instruction in the current loop.  */
  int has_prefetch;
  /* Nonzero if there is a volatile memory reference in the current
     loop.  */
  int has_volatile;
  /* Nonzero if there is a tablejump in the current loop.  */
  int has_tablejump;
  /* Nonzero if there are ways to leave the loop other than falling
     off the end.  */
  int has_multiple_exit_targets;
  /* Nonzero if there is an indirect jump in the current function.  */
  int has_indirect_jump;
  /* Register or constant initial loop value.  */
  rtx initial_value;
  /* Register or constant value used for comparison test.  */
  rtx comparison_value;
  /* Register or constant approximate final value.  */
  rtx final_value;
  /* Register or constant initial loop value with term common to
     final_value removed.  */
  rtx initial_equiv_value;
  /* Register or constant final loop value with term common to
     initial_value removed.  */
  rtx final_equiv_value;
  /* Register corresponding to iteration variable.  */
  rtx iteration_var;
  /* Constant loop increment.  */
  rtx increment;
  enum rtx_code comparison_code;
  /* Holds the number of loop iterations.  It is zero if the number
     could not be calculated.  Must be unsigned since the number of
     iterations can be as high as 2^wordsize - 1.  For loops with a
     wider iterator, this number will be zero if the number of loop
     iterations is too large for an unsigned integer to hold.  */
  unsigned HOST_WIDE_INT n_iterations;
  int used_count_register;
  /* The loop iterator induction variable.  */
  struct iv_class *iv;
  /* List of MEMs that are stored in this loop.  */
  rtx store_mems;
  /* Array of MEMs that are used (read or written) in this loop, but
     cannot be aliased by anything in this loop, except perhaps
     themselves.  In other words, if mems[i] is altered during
     the loop, it is altered by an expression that is rtx_equal_p to
     it.  */
  loop_mem_info *mems;
  /* The index of the next available slot in MEMS.  */
  int mems_idx;
  /* The number of elements allocated in MEMS.  */
  int mems_allocated;
  /* Nonzero if we don't know what MEMs were changed in the current
     loop.  This happens if the loop contains a call (in which case
     `has_call' will also be set) or if we store into more than
     NUM_STORES MEMs.  */
  int unknown_address_altered;
  /* The above doesn't count any readonly memory locations that are
     stored.  This does.  */
  int unknown_constant_address_altered;
  /* Count of memory write instructions discovered in the loop.  */
  int num_mem_sets;
  /* The insn where the first of these was found.  */
  rtx first_loop_store_insn;
  /* The chain of movable insns in loop.  */
  struct loop_movables movables;
  /* The registers used in the loop.  */
  struct loop_regs regs;
  /* The induction variable information in the loop.  */
  struct loop_ivs ivs;
  /* Nonzero if call is in pre_header extended basic block.  */
  int pre_header_has_call;
};

/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum percentage of memory references that are worth prefetching
   (the default of 220 corresponds to roughly 86%).  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
((REGNO) < FIRST_PSEUDO_REGISTER \
 ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

static int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

static struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

static int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
static unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn.  */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets.  */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;   /* Nonzero means it is a mode for
				   a low part that we should avoid changing when
				   clearing the rest of the reg.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable.  */
  unsigned int force : 1;	/* 1 means MUST move this insn.  */
  unsigned int global : 1;	/* 1 means reg is live outside this loop.  */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this.  */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1;	/* 1 means we copy to a new pseudo and replace
				   the original insn with a copy from that
				   pseudo, rather than deleting it.  */
  struct movable *match;	/* First entry for same value.  */
  struct movable *forces;	/* An insn that must be moved if this is.  */
  struct movable *next;
};

static FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static rtx find_regs_nested (rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static rtx libcall_other_reg (rtx, rtx);
static void record_excess_regs (rtx, rtx, rtx *);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static HOST_WIDE_INT get_monotonic_increment (struct iv_class *);
static bool biased_biv_fits_mode_p (const struct loop *, struct iv_class *,
				    HOST_WIDE_INT, enum machine_mode,
				    unsigned HOST_WIDE_INT);
static bool biv_fits_mode_p (const struct loop *, struct iv_class *,
			     HOST_WIDE_INT, enum machine_mode, bool);
static bool extension_within_bounds_p (const struct loop *, struct iv_class *,
				       HOST_WIDE_INT, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
static int loop_invariant_p (const struct loop *, rtx);
static rtx loop_insn_hoist (const struct loop *, rtx);
static void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx,
					  rtx, basic_block, rtx);
static rtx loop_insn_emit_before (const struct loop *, basic_block,
				  rtx, rtx);
static int loop_insn_first_p (rtx, rtx);
static rtx get_condition_for_loop (const struct loop *, rtx);
static void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx);
static void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx);
static rtx extend_value_for_giv (struct induction *, rtx);
static rtx loop_insn_sink (const struct loop *, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END) \
  (INSN_UID (INSN) < max_uid_for_loop \
   && INSN_LUID (INSN) >= INSN_LUID (START) \
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}

/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (!NOTE_P (insn)
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}

/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (NOTE_P (insn)
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  gcc_assert (get_max_uid () <= max_uid_for_loop);
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	{
	  scan_loop (loop, flags);
	  ggc_collect ();
	}
    }

  end_alias_analysis ();

  /* Clean up.  */
  for (i = 0; i < (int) loops->num; i++)
    free (loops_info[i].mems);

  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}

/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}

/* Find any register references hidden inside X and add them to
   the dependency list DEPS.  This is used to look inside CLOBBER (MEM ...)
   when checking whether a PARALLEL can be pulled out of a loop.  */

static rtx
find_regs_nested (rtx deps, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  if (code == REG)
    deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
  else
    {
      const char *fmt = GET_RTX_FORMAT (code);
      int i, j;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    deps = find_regs_nested (deps, XEXP (x, i));
	  else if (fmt[i] == 'E')
	    for (j = 0; j < XVECLEN (x, i); j++)
	      deps = find_regs_nested (deps, XVECEXP (x, i, j));
	}
    }
  return deps;
}

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && !LABEL_P (p) && ! INSN_P (p)
	 && (!NOTE_P (p)
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (JUMP_P (p)
      /* Loop entry must be unconditional jump (and not a RETURN).  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
	 jumps out of the loop (meaning it's no loop).
	 This case can happen for things like
	 do {..} while (0).  If this label was generated previously
	 by loop, we can't tell anything about it and have to reject
	 the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || !LABEL_P (loop->scan_start))
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	     INSN_UID (loop_start), INSN_UID (loop_end), insn_count);

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall--;
      if (NONJUMP_INSN_P (p))
	{
	  /* Do not scan past an optimization barrier.  */
	  if (GET_CODE (PATTERN (p)) == ASM_INPUT)
	    break;
	  temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
	  if (temp)
	    in_libcall++;
	  if (! in_libcall
	      && (set = single_set (p))
	      && REG_P (SET_DEST (set))
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	      && SET_DEST (set) != pic_offset_table_rtx
#endif
	      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	    {
	      int tem1 = 0;
	      int tem2 = 0;
	      int move_insn = 0;
	      int insert_temp = 0;
	      rtx src = SET_SRC (set);
	      rtx dependencies = 0;

	      /* Figure out what to use as a source of this insn.  If a
		 REG_EQUIV note is given or if a REG_EQUAL note with a
		 constant operand is specified, use it as the source and
		 mark that we should move this insn by calling
		 emit_move_insn rather than duplicating the insn.

		 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
		 note is present.  */
	      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	      if (temp)
		src = XEXP (temp, 0), move_insn = 1;
	      else
		{
		  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		  if (temp && CONSTANT_P (XEXP (temp, 0)))
		    src = XEXP (temp, 0), move_insn = 1;
		  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		    {
		      src = XEXP (temp, 0);
		      /* A libcall block can use regs that don't appear in
			 the equivalent expression.  To move the libcall,
			 we must move those regs too.  */
		      dependencies = libcall_other_reg (p, src);
		    }
		}

	      /* For parallels, add any possible uses to the dependencies, as
		 we can't move the insn without resolving them first.
		 MEMs inside CLOBBERs may also reference registers; these
		 count as implicit uses.  */
	      if (GET_CODE (PATTERN (p)) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		    {
		      rtx x = XVECEXP (PATTERN (p), 0, i);
		      if (GET_CODE (x) == USE)
			dependencies
			  = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					       dependencies);
		      else if (GET_CODE (x) == CLOBBER
			       && MEM_P (XEXP (x, 0)))
			dependencies = find_regs_nested (dependencies,
						XEXP (XEXP (x, 0), 0));
		    }
		}

	      if (/* The register is used in basic blocks other
		     than the one where it is set (meaning that
		     something after this point in the loop might
		     depend on its value before the set).  */
		  ! reg_in_basic_block_p (p, SET_DEST (set))
		  /* And the set is not guaranteed to be executed once
		     the loop starts, or the value before the set is
		     needed before the set occurs...

		     ??? Note we have quadratic behavior here, mitigated
		     by the fact that the previous test will often fail for
		     large loops.  Rather than re-scanning the entire loop
		     each time for register usage, we should build tables
		     of the register usage and use them here instead.  */
		  && (maybe_never
		      || loop_reg_used_before_p (loop, set, p)))
		/* It is unsafe to move the set.  However, it may be OK to
		   move the source into a new pseudo, and substitute a
		   reg-to-reg copy for the original insn.

		   This code used to consider it OK to move a set of a variable
		   which was not created by the user and not used in an exit
		   test.
		   That behavior is incorrect and was removed.  */
		insert_temp = 1;

	      /* Don't try to optimize a MODE_CC set with a constant
		 source.  It probably will be combined with a conditional
		 jump.  */
	      if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
		  && CONSTANT_P (src))
		;
	      /* Don't try to optimize a register that was made
		 by loop-optimization for an inner loop.
		 We don't know its life-span, so we can't compute
		 the benefit.  */
	      else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
		;
	      /* Don't move the source and add a reg-to-reg copy:
		 - with -Os (this certainly increases size),
		 - if the mode doesn't support copy operations (obviously),
		 - if the source is already a reg (the motion will gain nothing),
		 - if the source is a legitimate constant (likewise).  */
	      else if (insert_temp
		       && (optimize_size
			   || ! can_copy_p (GET_MODE (SET_SRC (set)))
			   || REG_P (SET_SRC (set))
			   || (CONSTANT_P (SET_SRC (set))
			       && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
		;
	      else if ((tem = loop_invariant_p (loop, src))
		       && (dependencies == 0
			   || (tem2
			       = loop_invariant_p (loop, dependencies)) != 0)
		       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
			   || (tem1
			       = consec_sets_invariant_p
			       (loop, SET_DEST (set),
				regs->array[REGNO (SET_DEST (set))].set_in_loop,
				p)))
		       /* If the insn can cause a trap (such as divide by zero),
			  can't move it unless it's guaranteed to be executed
			  once loop is entered.  Even a function call might
			  prevent the trap insn from being reached
			  (since it might exit!)  */
		       && ! ((maybe_never || call_passed)
			     && may_trap_p (src)))
		{
		  struct movable *m;
		  int regno = REGNO (SET_DEST (set));

		  /* A potential lossage is where we have a case where two insns
		     can be combined as long as they are both in the loop, but
		     we move one of them outside the loop.  For large loops,
		     this can lose.  The most common case of this is the address
		     of a function being called.

		     Therefore, if this register is marked as being used
		     exactly once if we are in a loop with calls
		     (a "large loop"), see if we can replace the usage of
		     this register with the source of this SET.  If we can,
		     delete this insn.

		     Don't do this if P has a REG_RETVAL note or if we have
		     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

		  if (loop_info->has_call
		      && regs->array[regno].single_usage != 0
		      && regs->array[regno].single_usage != const0_rtx
		      && REGNO_FIRST_UID (regno) == INSN_UID (p)
		      && (REGNO_LAST_UID (regno)
			  == INSN_UID (regs->array[regno].single_usage))
		      && regs->array[regno].set_in_loop == 1
		      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		      && ! side_effects_p (SET_SRC (set))
		      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		      && (! SMALL_REGISTER_CLASSES
			  || (! (REG_P (SET_SRC (set))
				 && (REGNO (SET_SRC (set))
				     < FIRST_PSEUDO_REGISTER))))
		      && regno >= FIRST_PSEUDO_REGISTER
		      /* This test is not redundant; SET_SRC (set) might be
			 a call-clobbered register and the life of REGNO
			 might span a call.  */
		      && ! modified_between_p (SET_SRC (set), p,
					       regs->array[regno].single_usage)
		      && no_labels_between_p (p,
					      regs->array[regno].single_usage)
		      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					       regs->array[regno].single_usage))
		    {
		      /* Replace any usage in a REG_EQUAL note.  Must copy
			 the new source, so that we don't get rtx sharing
			 between the SET_SOURCE and REG_NOTES of insn p.  */
		      REG_NOTES (regs->array[regno].single_usage)
			= (replace_rtx
			   (REG_NOTES (regs->array[regno].single_usage),
			    SET_DEST (set), copy_rtx (SET_SRC (set))));

		      delete_insn (p);
		      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = 0;
		      continue;
		    }

		  m = xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_src = src;
		  m->dependencies = dependencies;
		  m->set_dest = SET_DEST (set);
		  m->force = 0;
		  m->consec
		    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
		  m->done = 0;
		  m->forces = 0;
		  m->partial = 0;
		  m->move_insn = move_insn;
		  m->move_insn_first = 0;
		  m->insert_temp = insert_temp;
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		  m->savemode = VOIDmode;
		  m->regno = regno;
		  /* Set M->cond if either loop_invariant_p
		     or consec_sets_invariant_p returned 2
		     (only conditionally invariant).  */
		  m->cond = ((tem | tem1 | tem2) > 1);
		  m->global = LOOP_REG_GLOBAL_P (loop, regno);
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = regs->array[regno].n_times_set;
		  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		    m->savings += libcall_benefit (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);

		  if (m->consec > 0)
		    {
		      /* It is possible for the first instruction to have a
			 REG_EQUAL note but a non-invariant SET_SRC, so we must
			 remember the status of the first instruction in case
			 the last instruction doesn't have a REG_EQUAL note.  */
		      m->move_insn_first = m->move_insn;

		      /* Skip this insn, not checking REG_LIBCALL notes.  */
		      p = next_nonnote_insn (p);
		      /* Skip the consecutive insns, if there are any.  */
		      p = skip_consec_insns (p, m->consec);
		      /* Back up to the last insn of the consecutive group.  */
		      p = prev_nonnote_insn (p);

		      /* We must now reset m->move_insn, m->is_equiv, and
			 possibly m->set_src to correspond to the effects of
			 all the insns.  */
		      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		      if (temp)
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			{
			  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
			  if (temp && CONSTANT_P (XEXP (temp, 0)))
			    m->set_src = XEXP (temp, 0), m->move_insn = 1;
			  else
			    m->move_insn = 0;
			}
		      m->is_equiv
			= (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		    }
		}
	      /* If this register is always set within a STRICT_LOW_PART
		 or set to zero, then its high bytes are constant.
		 So clear them outside the loop and within the loop
		 just load the low bytes.
		 We must check that the machine has an instruction to do so.
		 Also, if the value loaded into the register
		 depends on the same register, this cannot be done.  */
	      else if (SET_SRC (set) == const0_rtx
		       && NONJUMP_INSN_P (NEXT_INSN (p))
		       && (set1 = single_set (NEXT_INSN (p)))
		       && GET_CODE (set1) == SET
		       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
			   == SET_DEST (set))
		       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
		{
		  int regno = REGNO (SET_DEST (set));
		  if (regs->array[regno].set_in_loop == 2)
		    {
		      struct movable *m;
		      m = xmalloc (sizeof (struct movable));
		      m->next = 0;
		      m->insn = p;
		      m->set_dest = SET_DEST (set);
		      m->dependencies = 0;
		      m->force = 0;
		      m->consec = 0;
		      m->done = 0;
		      m->forces = 0;
		      m->move_insn = 0;
		      m->move_insn_first = 0;
		      m->insert_temp = insert_temp;
		      m->partial = 1;
		      /* If the insn may not be executed on some cycles,
			 we can't clear the whole reg; clear just high part.
			 Not even if the reg is used only within this loop.
			 Consider this:
			 while (1)
			   while (s != t) {
			     if (foo ()) x = *s;
			     use (x);
			   }
			 Clearing x before the inner loop could clobber a value
			 being saved from the last time around the outer loop.
			 However, if the reg is not used outside this loop
			 and all uses of the register are in the same
			 basic block as the store, there is no problem.

			 If this insn was made by loop, we don't know its
			 INSN_LUID and hence must make a conservative
			 assumption.  */
		      m->global = (INSN_UID (p) >= max_uid_for_loop
				   || LOOP_REG_GLOBAL_P (loop, regno)
				   || (labels_in_range_p
				       (p, REGNO_FIRST_LUID (regno))));
		      if (maybe_never && m->global)
			m->savemode = GET_MODE (SET_SRC (set1));
		      else
			m->savemode = VOIDmode;
		      m->regno = regno;
		      m->cond = 0;
		      m->match = 0;
		      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		      m->savings = 1;
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = -1;
		      /* Add M to the end of the chain MOVABLES.  */
		      loop_movables_add (movables, m);
		    }
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((LABEL_P (p) || JUMP_P (p))
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop_end.  */
	       && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
    }
1487 /* If one movable subsumes another, ignore that other. */
1489 ignore_some_movables (movables);
1491 /* For each movable insn, see if the reg that it loads
1492 leads when it dies right into another conditionally movable insn.
1493 If so, record that the second insn "forces" the first one,
1494 since the second can be moved only if the first is. */
1496 force_movables (movables);
1498 /* See if there are multiple movable insns that load the same value.
1499 If there are, make all but the first point at the first one
1500 through the `match' field, and add the priorities of them
1501 all together as the priority of the first. */
1503 combine_movables (movables, regs);
1505 /* Now consider each movable insn to decide whether it is worth moving.
1506 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1508 For machines with few registers this increases code size, so do not
1509 move movables when optimizing for code size on such machines.
1510 (The 18 below is the value for i386.) */
1512 if (!optimize_size
1513 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1515 move_movables (loop, movables, threshold, insn_count);
1517 /* Recalculate regs->array if move_movables has created new
1518 registers. */
1519 if (max_reg_num () > regs->num)
1521 loop_regs_scan (loop, 0);
1522 for (update_start = loop_start;
1523 PREV_INSN (update_start)
1524 && !LABEL_P (PREV_INSN (update_start));
1525 update_start = PREV_INSN (update_start))
1527 update_end = NEXT_INSN (loop_end);
1529 reg_scan_update (update_start, update_end, loop_max_reg);
1530 loop_max_reg = max_reg_num ();
1534 /* Now candidates whose set_in_loop value is still negative are those not moved.
1535 Change regs->array[I].set_in_loop to indicate that those are not actually
1536 invariant. */
1537 for (i = 0; i < regs->num; i++)
1538 if (regs->array[i].set_in_loop < 0)
1539 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1541 /* Now that we've moved some things out of the loop, we might be able to
1542 hoist even more memory references. */
1543 load_mems (loop);
1545 /* Recalculate regs->array if load_mems has created new registers. */
1546 if (max_reg_num () > regs->num)
1547 loop_regs_scan (loop, 0);
1549 for (update_start = loop_start;
1550 PREV_INSN (update_start)
1551 && !LABEL_P (PREV_INSN (update_start));
1552 update_start = PREV_INSN (update_start))
1554 update_end = NEXT_INSN (loop_end);
1556 reg_scan_update (update_start, update_end, loop_max_reg);
1557 loop_max_reg = max_reg_num ();
1559 if (flag_strength_reduce)
1561 if (update_end && LABEL_P (update_end))
1562 /* Ensure our label doesn't go away. */
1563 LABEL_NUSES (update_end)++;
1565 strength_reduce (loop, flags);
1567 reg_scan_update (update_start, update_end, loop_max_reg);
1568 loop_max_reg = max_reg_num ();
1570 if (update_end && LABEL_P (update_end)
1571 && --LABEL_NUSES (update_end) == 0)
1572 delete_related_insns (update_end);
1576 /* The movable information was needed for strength reduction; it is no longer needed, so free it. */
1577 loop_movables_free (movables);
1579 free (regs->array);
1580 regs->array = 0;
1581 regs->num = 0;
1584 /* Add elements to *OUTPUT to record all the pseudo-regs
1585 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1587 static void
1588 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1590 enum rtx_code code;
1591 const char *fmt;
1592 int i;
1594 code = GET_CODE (in_this);
1596 switch (code)
1598 case PC:
1599 case CC0:
1600 case CONST_INT:
1601 case CONST_DOUBLE:
1602 case CONST:
1603 case SYMBOL_REF:
1604 case LABEL_REF:
1605 return;
1607 case REG:
1608 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1609 && ! reg_mentioned_p (in_this, not_in_this))
1610 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1611 return;
1613 default:
1614 break;
1617 fmt = GET_RTX_FORMAT (code);
1618 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1620 int j;
1622 switch (fmt[i])
1624 case 'E':
1625 for (j = 0; j < XVECLEN (in_this, i); j++)
1626 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1627 break;
1629 case 'e':
1630 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1631 break;
1636 /* Check what regs are referred to in the libcall block ending with INSN,
1637 aside from those mentioned in the equivalent value.
1638 If there are none, return 0.
1639 If there are one or more, return an EXPR_LIST containing all of them. */
1641 static rtx
1642 libcall_other_reg (rtx insn, rtx equiv)
1644 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1645 rtx p = XEXP (note, 0);
1646 rtx output = 0;
1648 /* First, find all the regs used in the libcall block
1649 that are not mentioned as inputs to the result. */
1651 while (p != insn)
1653 if (INSN_P (p))
1654 record_excess_regs (PATTERN (p), equiv, &output);
1655 p = NEXT_INSN (p);
1658 return output;
1661 /* Return 1 if all uses of REG
1662 are between INSN and the end of the basic block. */
1664 static int
1665 reg_in_basic_block_p (rtx insn, rtx reg)
1667 int regno = REGNO (reg);
1668 rtx p;
1670 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1671 return 0;
1673 /* Search this basic block for the already recorded last use of the reg. */
1674 for (p = insn; p; p = NEXT_INSN (p))
1676 switch (GET_CODE (p))
1678 case NOTE:
1679 break;
1681 case INSN:
1682 case CALL_INSN:
1683 /* Ordinary insn: if this is the last use, we win. */
1684 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1685 return 1;
1686 break;
1688 case JUMP_INSN:
1689 /* Jump insn: if this is the last use, we win. */
1690 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1691 return 1;
1692 /* Otherwise, it's the end of the basic block, so we lose. */
1693 return 0;
1695 case CODE_LABEL:
1696 case BARRIER:
1697 /* It's the end of the basic block, so we lose. */
1698 return 0;
1700 default:
1701 break;
1705 /* The "last use" that was recorded can't be found after the first
1706 use. This can happen when the last use was deleted while
1707 processing an inner loop, this inner loop was then completely
1708 unrolled, and the outer loop is always exited after the inner loop,
1709 so that everything after the first use becomes a single basic block. */
1710 return 1;
1713 /* Compute the benefit of eliminating the insns in the block whose
1714 last insn is LAST. This may be a group of insns used to compute a
1715 value directly, or it may contain a library call. */
1717 static int
1718 libcall_benefit (rtx last)
1720 rtx insn;
1721 int benefit = 0;
1723 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1724 insn != last; insn = NEXT_INSN (insn))
1726 if (CALL_P (insn))
1727 benefit += 10; /* Assume at least this many insns in a library
1728 routine. */
1729 else if (NONJUMP_INSN_P (insn)
1730 && GET_CODE (PATTERN (insn)) != USE
1731 && GET_CODE (PATTERN (insn)) != CLOBBER)
1732 benefit++;
1735 return benefit;
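/* Worked example with made-up numbers: a libcall block whose insns
   strictly before LAST are two ordinary setup insns followed by one
   CALL_INSN yields a benefit of 1 + 1 + 10 = 12.  */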
1738 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1740 static rtx
1741 skip_consec_insns (rtx insn, int count)
1743 for (; count > 0; count--)
1745 rtx temp;
1747 /* If first insn of libcall sequence, skip to end. */
1748 /* Do this at start of loop, since INSN is guaranteed to
1749 be an insn here. */
1750 if (!NOTE_P (insn)
1751 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1752 insn = XEXP (temp, 0);
1754 do
1755 insn = NEXT_INSN (insn);
1756 while (NOTE_P (insn));
1759 return insn;
1762 /* Ignore any movable whose insn falls within a libcall
1763 which is part of another movable.
1764 We make use of the fact that the movable for the libcall value
1765 was made later and so appears later on the chain. */
1767 static void
1768 ignore_some_movables (struct loop_movables *movables)
1770 struct movable *m, *m1;
1772 for (m = movables->head; m; m = m->next)
1774 /* Is this a movable for the value of a libcall? */
1775 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1776 if (note)
1778 rtx insn;
1779 /* Check for earlier movables inside that range,
1780 and mark them invalid. We cannot use LUIDs here because
1781 insns created by loop.c for prior loops don't have LUIDs.
1782 Rather than reject all such insns from movables, we just
1783 explicitly check each insn in the libcall (since invariant
1784 libcalls aren't that common). */
1785 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1786 for (m1 = movables->head; m1 != m; m1 = m1->next)
1787 if (m1->insn == insn)
1788 m1->done = 1;
1793 /* For each movable insn, see if the reg that it loads
1794 dies in another conditionally movable insn.
1795 If so, record that the second insn "forces" the first one,
1796 since the second can be moved only if the first is. */
1798 static void
1799 force_movables (struct loop_movables *movables)
1801 struct movable *m, *m1;
1803 for (m1 = movables->head; m1; m1 = m1->next)
1804 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1805 if (!m1->partial && !m1->done)
1807 int regno = m1->regno;
1808 for (m = m1->next; m; m = m->next)
1809 /* ??? Could this be a bug? What if CSE caused the
1810 register of M1 to be used after this insn?
1811 Since CSE does not update regno_last_uid,
1812 this insn M->insn might not be where it dies.
1813 But very likely this doesn't matter; what matters is
1814 that M's reg is computed from M1's reg. */
1815 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1816 && !m->done)
1817 break;
1818 if (m != 0 && m->set_src == m1->set_dest
1819 /* If m->consec, m->set_src isn't valid. */
1820 && m->consec == 0)
1821 m = 0;
1823 /* Increase the priority of moving the first insn
1824 since it permits the second to be moved as well.
1825 Likewise for insns already forced by the first insn. */
1826 if (m != 0)
1828 struct movable *m2;
1830 m->forces = m1;
1831 for (m2 = m1; m2; m2 = m2->forces)
1833 m2->lifetime += m->lifetime;
1834 m2->savings += m->savings;
1840 /* Find invariant expressions that are equal and can be combined into
1841 one register. */
1843 static void
1844 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1846 struct movable *m;
1847 char *matched_regs = xmalloc (regs->num);
1848 enum machine_mode mode;
1850 /* Regs that are set more than once are not allowed to match
1851 or be matched. I'm no longer sure why not. */
1852 /* Only pseudo registers are allowed to match or be matched,
1853 since move_movables does not validate the change. */
1854 /* Perhaps testing m->consec_sets would be more appropriate here? */
1856 for (m = movables->head; m; m = m->next)
1857 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1858 && m->regno >= FIRST_PSEUDO_REGISTER
1859 && !m->insert_temp
1860 && !m->partial)
1862 struct movable *m1;
1863 int regno = m->regno;
1865 memset (matched_regs, 0, regs->num);
1866 matched_regs[regno] = 1;
1868 /* We want later insns to match the first one. Don't make the first
1869 one match any later ones. So start this loop at m->next. */
1870 for (m1 = m->next; m1; m1 = m1->next)
1871 if (m != m1 && m1->match == 0
1872 && !m1->insert_temp
1873 && regs->array[m1->regno].n_times_set == 1
1874 && m1->regno >= FIRST_PSEUDO_REGISTER
1875 /* A reg used outside the loop mustn't be eliminated. */
1876 && !m1->global
1877 /* A reg used for zero-extending mustn't be eliminated. */
1878 && !m1->partial
1879 && (matched_regs[m1->regno]
1880 ||
1882 /* Can combine regs with different modes loaded from the
1883 same constant only if the modes are the same or
1884 if both are integer modes with M wider or the same
1885 width as M1. The check for integer is redundant, but
1886 safe, since the only case of differing destination
1887 modes with equal sources is when both sources are
1888 VOIDmode, i.e., CONST_INT. */
1889 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1890 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1891 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1892 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1893 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1894 /* See if the source of M1 says it matches M. */
1895 && ((REG_P (m1->set_src)
1896 && matched_regs[REGNO (m1->set_src)])
1897 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1898 movables, regs))))
1899 && ((m->dependencies == m1->dependencies)
1900 || rtx_equal_p (m->dependencies, m1->dependencies)))
1902 m->lifetime += m1->lifetime;
1903 m->savings += m1->savings;
1904 m1->done = 1;
1905 m1->match = m;
1906 matched_regs[m1->regno] = 1;
1910 /* Now combine the regs used for zero-extension.
1911 This can be done for those not marked `global'
1912 provided their lives don't overlap. */
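/* Illustration (hypothetical luids): live ranges [first1, last1] and
   [first2, last2] are disjoint exactly when first1 > last2 or
   last1 < first2; the overlap test below rejects a combination
   whenever neither of those holds.  */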
1914 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1915 mode = GET_MODE_WIDER_MODE (mode))
1917 struct movable *m0 = 0;
1919 /* Combine all the registers for extension from mode MODE.
1920 Don't combine any that are used outside this loop. */
1921 for (m = movables->head; m; m = m->next)
1922 if (m->partial && ! m->global
1923 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1925 struct movable *m1;
1927 int first = REGNO_FIRST_LUID (m->regno);
1928 int last = REGNO_LAST_LUID (m->regno);
1930 if (m0 == 0)
1932 /* First one: don't check for overlap, just record it. */
1933 m0 = m;
1934 continue;
1937 /* Make sure they extend to the same mode.
1938 (Almost always true.) */
1939 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1940 continue;
1942 /* We already have one: check for overlap with those
1943 already combined together. */
1944 for (m1 = movables->head; m1 != m; m1 = m1->next)
1945 if (m1 == m0 || (m1->partial && m1->match == m0))
1946 if (! (REGNO_FIRST_LUID (m1->regno) > last
1947 || REGNO_LAST_LUID (m1->regno) < first))
1948 goto overlap;
1950 /* No overlap: we can combine this with the others. */
1951 m0->lifetime += m->lifetime;
1952 m0->savings += m->savings;
1953 m->done = 1;
1954 m->match = m0;
1956 overlap:
1961 /* Clean up. */
1962 free (matched_regs);
1965 /* Returns the number of movable instructions in LOOP that were not
1966 moved outside the loop. */
1968 static int
1969 num_unmoved_movables (const struct loop *loop)
1971 int num = 0;
1972 struct movable *m;
1974 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1975 if (!m->done)
1976 ++num;
1978 return num;
1982 /* Return 1 if regs X and Y will become the same if moved. */
1984 static int
1985 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1987 unsigned int xn = REGNO (x);
1988 unsigned int yn = REGNO (y);
1989 struct movable *mx, *my;
1991 for (mx = movables->head; mx; mx = mx->next)
1992 if (mx->regno == xn)
1993 break;
1995 for (my = movables->head; my; my = my->next)
1996 if (my->regno == yn)
1997 break;
1999 return (mx && my
2000 && ((mx->match == my->match && mx->match != 0)
2001 || mx->match == my
2002 || mx == my->match));
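/* Illustration (hypothetical movables): X and Y are reported as
   matching when both MX->match and MY->match name the same movable,
   or when one movable is the recorded match of the other, since in
   either case both registers end up replaced by one register once
   the movables are moved.  */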
2005 /* Return 1 if X and Y are identical-looking rtx's.
2006 This is the Lisp function EQUAL for rtx arguments.
2008 If two registers are matching movables or a movable register and an
2009 equivalent constant, consider them equal. */
2011 static int
2012 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
2013 struct loop_regs *regs)
2015 int i;
2016 int j;
2017 struct movable *m;
2018 enum rtx_code code;
2019 const char *fmt;
2021 if (x == y)
2022 return 1;
2023 if (x == 0 || y == 0)
2024 return 0;
2026 code = GET_CODE (x);
2028 /* If we have a register and a constant, they may sometimes be
2029 equal. */
2030 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
2031 && CONSTANT_P (y))
2033 for (m = movables->head; m; m = m->next)
2034 if (m->move_insn && m->regno == REGNO (x)
2035 && rtx_equal_p (m->set_src, y))
2036 return 1;
2038 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
2039 && CONSTANT_P (x))
2041 for (m = movables->head; m; m = m->next)
2042 if (m->move_insn && m->regno == REGNO (y)
2043 && rtx_equal_p (m->set_src, x))
2044 return 1;
2047 /* Otherwise, rtx's of different codes cannot be equal. */
2048 if (code != GET_CODE (y))
2049 return 0;
2051 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
2052 (REG:SI x) and (REG:HI x) are NOT equivalent. */
2054 if (GET_MODE (x) != GET_MODE (y))
2055 return 0;
2057 /* These three types of rtx's can be compared nonrecursively. */
2058 if (code == REG)
2059 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
2061 if (code == LABEL_REF)
2062 return XEXP (x, 0) == XEXP (y, 0);
2063 if (code == SYMBOL_REF)
2064 return XSTR (x, 0) == XSTR (y, 0);
2066 /* Compare the elements. If any pair of corresponding elements
2067 fails to match, return 0 for the whole thing. */
2069 fmt = GET_RTX_FORMAT (code);
2070 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2072 switch (fmt[i])
2074 case 'w':
2075 if (XWINT (x, i) != XWINT (y, i))
2076 return 0;
2077 break;
2079 case 'i':
2080 if (XINT (x, i) != XINT (y, i))
2081 return 0;
2082 break;
2084 case 'E':
2085 /* Two vectors must have the same length. */
2086 if (XVECLEN (x, i) != XVECLEN (y, i))
2087 return 0;
2089 /* And the corresponding elements must match. */
2090 for (j = 0; j < XVECLEN (x, i); j++)
2091 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2092 movables, regs) == 0)
2093 return 0;
2094 break;
2096 case 'e':
2097 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
2098 == 0)
2099 return 0;
2100 break;
2102 case 's':
2103 if (strcmp (XSTR (x, i), XSTR (y, i)))
2104 return 0;
2105 break;
2107 case 'u':
2108 /* These are just backpointers, so they don't matter. */
2109 break;
2111 case '0':
2112 break;
2114 /* It is believed that rtx's at this level will never
2115 contain anything but integers and other rtx's,
2116 except for within LABEL_REFs and SYMBOL_REFs. */
2117 default:
2118 gcc_unreachable ();
2121 return 1;
2124 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
2125 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
2126 references is incremented once for each added note. */
2128 static void
2129 add_label_notes (rtx x, rtx insns)
2131 enum rtx_code code = GET_CODE (x);
2132 int i, j;
2133 const char *fmt;
2134 rtx insn;
2136 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2138 /* This code used to ignore labels that referred to dispatch tables to
2139 avoid flow generating (slightly) worse code.
2141 We no longer ignore such label references (see LABEL_REF handling in
2142 mark_jump_label for additional information). */
2143 for (insn = insns; insn; insn = NEXT_INSN (insn))
2144 if (reg_mentioned_p (XEXP (x, 0), insn))
2146 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
2147 REG_NOTES (insn));
2148 if (LABEL_P (XEXP (x, 0)))
2149 LABEL_NUSES (XEXP (x, 0))++;
2153 fmt = GET_RTX_FORMAT (code);
2154 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2156 if (fmt[i] == 'e')
2157 add_label_notes (XEXP (x, i), insns);
2158 else if (fmt[i] == 'E')
2159 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2160 add_label_notes (XVECEXP (x, i, j), insns);
2164 /* Scan MOVABLES, and move the insns that deserve to be moved.
2165 If two matching movables are combined, replace one reg with the
2166 other throughout. */
2168 static void
2169 move_movables (struct loop *loop, struct loop_movables *movables,
2170 int threshold, int insn_count)
2172 struct loop_regs *regs = LOOP_REGS (loop);
2173 int nregs = regs->num;
2174 rtx new_start = 0;
2175 struct movable *m;
2176 rtx p;
2177 rtx loop_start = loop->start;
2178 rtx loop_end = loop->end;
2179 /* Map of pseudo-register replacements to handle combining
2180 when we move several insns that load the same value
2181 into different pseudo-registers. */
2182 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
2183 char *already_moved = xcalloc (nregs, sizeof (char));
2185 for (m = movables->head; m; m = m->next)
2187 /* Describe this movable insn. */
2189 if (loop_dump_stream)
2191 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
2192 INSN_UID (m->insn), m->regno, m->lifetime);
2193 if (m->consec > 0)
2194 fprintf (loop_dump_stream, "consec %d, ", m->consec);
2195 if (m->cond)
2196 fprintf (loop_dump_stream, "cond ");
2197 if (m->force)
2198 fprintf (loop_dump_stream, "force ");
2199 if (m->global)
2200 fprintf (loop_dump_stream, "global ");
2201 if (m->done)
2202 fprintf (loop_dump_stream, "done ");
2203 if (m->move_insn)
2204 fprintf (loop_dump_stream, "move-insn ");
2205 if (m->match)
2206 fprintf (loop_dump_stream, "matches %d ",
2207 INSN_UID (m->match->insn));
2208 if (m->forces)
2209 fprintf (loop_dump_stream, "forces %d ",
2210 INSN_UID (m->forces->insn));
2213 /* Ignore the insn if it's already done (it matched something else).
2214 Otherwise, see if it is now safe to move. */
2216 if (!m->done
2217 && (! m->cond
2218 || (1 == loop_invariant_p (loop, m->set_src)
2219 && (m->dependencies == 0
2220 || 1 == loop_invariant_p (loop, m->dependencies))
2221 && (m->consec == 0
2222 || 1 == consec_sets_invariant_p (loop, m->set_dest,
2223 m->consec + 1,
2224 m->insn))))
2225 && (! m->forces || m->forces->done))
2227 int regno;
2228 rtx p;
2229 int savings = m->savings;
2231 /* We have an insn that is safe to move.
2232 Compute its desirability. */
2234 p = m->insn;
2235 regno = m->regno;
2237 if (loop_dump_stream)
2238 fprintf (loop_dump_stream, "savings %d ", savings);
2240 if (regs->array[regno].moved_once && loop_dump_stream)
2241 fprintf (loop_dump_stream, "halved since already moved ");
2243 /* An insn MUST be moved if we already moved something else
2244 which is safe only if this one is moved too: that is,
2245 if already_moved[REGNO] is nonzero. */
2247 /* An insn is desirable to move if the new lifetime of the
2248 register is no more than THRESHOLD times the old lifetime.
2249 If it's not desirable, it means the loop is so big
2250 that moving won't speed things up much,
2251 and it is liable to make register usage worse. */
2253 /* It is also desirable to move if it can be moved at no
2254 extra cost because something else was already moved. */
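/* Worked example with made-up numbers for the test below: with
   THRESHOLD 6, SAVINGS 2 and a register lifetime of 4, the product is
   6 * 2 * 4 = 48, so the insn is moved when the loop has at most 48
   insns, or at most 24 if this register was already moved out of
   another loop (the product must then reach insn_count * 2).  */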
2256 if (already_moved[regno]
2257 || (threshold * savings * m->lifetime) >=
2258 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
2259 || (m->forces && m->forces->done
2260 && regs->array[m->forces->regno].n_times_set == 1))
2262 int count;
2263 struct movable *m1;
2264 rtx first = NULL_RTX;
2265 rtx newreg = NULL_RTX;
2267 if (m->insert_temp)
2268 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
2270 /* Now move the insns that set the reg. */
2272 if (m->partial && m->match)
2274 rtx newpat, i1;
2275 rtx r1, r2;
2276 /* Find the end of this chain of matching regs.
2277 Thus, we load each reg in the chain from that one reg.
2278 And that reg is loaded with 0 directly,
2279 since it has ->match == 0. */
2280 for (m1 = m; m1->match; m1 = m1->match);
2281 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
2282 SET_DEST (PATTERN (m1->insn)));
2283 i1 = loop_insn_hoist (loop, newpat);
2285 /* Mark the moved, invariant reg as being allowed to
2286 share a hard reg with the other matching invariant. */
2287 REG_NOTES (i1) = REG_NOTES (m->insn);
2288 r1 = SET_DEST (PATTERN (m->insn));
2289 r2 = SET_DEST (PATTERN (m1->insn));
2290 regs_may_share
2291 = gen_rtx_EXPR_LIST (VOIDmode, r1,
2292 gen_rtx_EXPR_LIST (VOIDmode, r2,
2293 regs_may_share));
2294 delete_insn (m->insn);
2296 if (new_start == 0)
2297 new_start = i1;
2299 if (loop_dump_stream)
2300 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2302 /* If we are to re-generate the item being moved with a
2303 new move insn, first delete what we have and then emit
2304 the move insn before the loop. */
2305 else if (m->move_insn)
2307 rtx i1, temp, seq;
2309 for (count = m->consec; count >= 0; count--)
2311 if (!NOTE_P (p))
2313 /* If this is the first insn of a library
2314 call sequence, something is very
2315 wrong. */
2316 gcc_assert (!find_reg_note
2317 (p, REG_LIBCALL, NULL_RTX));
2319 /* If this is the last insn of a libcall
2320 sequence, then delete every insn in the
2321 sequence except the last. The last insn
2322 is handled in the normal manner. */
2323 temp = find_reg_note (p, REG_RETVAL, NULL_RTX);
2325 if (temp)
2327 temp = XEXP (temp, 0);
2328 while (temp != p)
2329 temp = delete_insn (temp);
2333 temp = p;
2334 p = delete_insn (p);
2336 /* simplify_giv_expr expects that it can walk the insns
2337 at m->insn forwards and see this old sequence we are
2338 tossing here. delete_insn does preserve the next
2339 pointers, but when we skip over a NOTE we must fix
2340 it up. Otherwise that code walks into the non-deleted
2341 insn stream. */
2342 while (p && NOTE_P (p))
2343 p = NEXT_INSN (temp) = NEXT_INSN (p);
2345 if (m->insert_temp)
2347 /* Replace the original insn with a move from
2348 our newly created temp. */
2349 start_sequence ();
2350 emit_move_insn (m->set_dest, newreg);
2351 seq = get_insns ();
2352 end_sequence ();
2353 emit_insn_before (seq, p);
2357 start_sequence ();
2358 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2359 m->set_src);
2360 seq = get_insns ();
2361 end_sequence ();
2363 add_label_notes (m->set_src, seq);
2365 i1 = loop_insn_hoist (loop, seq);
2366 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2367 set_unique_reg_note (i1,
2368 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2369 m->set_src);
2371 if (loop_dump_stream)
2372 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2374 /* The more regs we move, the less we like moving them. */
2375 threshold -= 3;
2377 else
2379 for (count = m->consec; count >= 0; count--)
2381 rtx i1, temp;
2383 /* If first insn of libcall sequence, skip to end. */
2384 /* Do this at start of loop, since p is guaranteed to
2385 be an insn here. */
2386 if (!NOTE_P (p)
2387 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2388 p = XEXP (temp, 0);
2390 /* If last insn of libcall sequence, move all
2391 insns except the last before the loop. The last
2392 insn is handled in the normal manner. */
2393 if (!NOTE_P (p)
2394 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2396 rtx fn_address = 0;
2397 rtx fn_reg = 0;
2398 rtx fn_address_insn = 0;
2400 first = 0;
2401 for (temp = XEXP (temp, 0); temp != p;
2402 temp = NEXT_INSN (temp))
2404 rtx body;
2405 rtx n;
2406 rtx next;
2408 if (NOTE_P (temp))
2409 continue;
2411 body = PATTERN (temp);
2413 /* Find the next insn after TEMP,
2414 not counting USE or NOTE insns. */
2415 for (next = NEXT_INSN (temp); next != p;
2416 next = NEXT_INSN (next))
2417 if (! (NONJUMP_INSN_P (next)
2418 && GET_CODE (PATTERN (next)) == USE)
2419 && !NOTE_P (next))
2420 break;
2422 /* If that is the call, this may be the insn
2423 that loads the function address.
2425 Extract the function address from the insn
2426 that loads it into a register.
2427 If this insn was cse'd, we get incorrect code.
2429 So emit a new move insn that copies the
2430 function address into the register that the
2431 call insn will use. flow.c will delete any
2432 redundant stores that we have created. */
2433 if (CALL_P (next)
2434 && GET_CODE (body) == SET
2435 && REG_P (SET_DEST (body))
2436 && (n = find_reg_note (temp, REG_EQUAL,
2437 NULL_RTX)))
2439 fn_reg = SET_SRC (body);
2440 if (!REG_P (fn_reg))
2441 fn_reg = SET_DEST (body);
2442 fn_address = XEXP (n, 0);
2443 fn_address_insn = temp;
2445 /* We have the call insn.
2446 If it uses the register we suspect it might,
2447 load it with the correct address directly. */
2448 if (CALL_P (temp)
2449 && fn_address != 0
2450 && reg_referenced_p (fn_reg, body))
2451 loop_insn_emit_after (loop, 0, fn_address_insn,
2452 gen_move_insn
2453 (fn_reg, fn_address));
2455 if (CALL_P (temp))
2457 i1 = loop_call_insn_hoist (loop, body);
2458 /* Because the USAGE information potentially
2459 contains objects other than hard registers,
2460 we need to copy it. */
2461 if (CALL_INSN_FUNCTION_USAGE (temp))
2462 CALL_INSN_FUNCTION_USAGE (i1)
2463 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2465 else
2466 i1 = loop_insn_hoist (loop, body);
2467 if (first == 0)
2468 first = i1;
2469 if (temp == fn_address_insn)
2470 fn_address_insn = i1;
2471 REG_NOTES (i1) = REG_NOTES (temp);
2472 REG_NOTES (temp) = NULL;
2473 delete_insn (temp);
2475 if (new_start == 0)
2476 new_start = first;
2478 if (m->savemode != VOIDmode)
2480 /* P sets REG to zero; but we should clear only
2481 the bits that are not covered by the mode
2482 m->savemode. */
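/* Example with an assumed 16-bit savemode (HImode): the mask computed
   below is ((HOST_WIDE_INT) 1 << 16) - 1 = 0xffff, so the emitted AND
   amounts to reg &= 0xffff, clearing only the bits above the part
   covered by m->savemode.  */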
2483 rtx reg = m->set_dest;
2484 rtx sequence;
2485 rtx tem;
2487 start_sequence ();
2488 tem = expand_simple_binop
2489 (GET_MODE (reg), AND, reg,
2490 GEN_INT ((((HOST_WIDE_INT) 1
2491 << GET_MODE_BITSIZE (m->savemode)))
2492 - 1),
2493 reg, 1, OPTAB_LIB_WIDEN);
2494 gcc_assert (tem);
2495 if (tem != reg)
2496 emit_move_insn (reg, tem);
2497 sequence = get_insns ();
2498 end_sequence ();
2499 i1 = loop_insn_hoist (loop, sequence);
2501 else if (CALL_P (p))
2503 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2504 /* Because the USAGE information potentially
2505 contains objects other than hard registers,
2506 we need to copy it. */
2507 if (CALL_INSN_FUNCTION_USAGE (p))
2508 CALL_INSN_FUNCTION_USAGE (i1)
2509 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2511 else if (count == m->consec && m->move_insn_first)
2513 rtx seq;
2514 /* The SET_SRC might not be invariant, so we must
2515 use the REG_EQUAL note. */
2516 start_sequence ();
2517 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2518 m->set_src);
2519 seq = get_insns ();
2520 end_sequence ();
2522 add_label_notes (m->set_src, seq);
2524 i1 = loop_insn_hoist (loop, seq);
2525 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2526 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2527 : REG_EQUAL, m->set_src);
2529 else if (m->insert_temp)
2531 rtx *reg_map2 = xcalloc (REGNO (newreg),
2532 sizeof(rtx));
2533 reg_map2 [m->regno] = newreg;
2535 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2536 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2537 free (reg_map2);
2539 else
2540 i1 = loop_insn_hoist (loop, PATTERN (p));
2542 if (REG_NOTES (i1) == 0)
2544 REG_NOTES (i1) = REG_NOTES (p);
2545 REG_NOTES (p) = NULL;
2547 /* If there is a REG_EQUAL note present whose value
2548 is not loop invariant, then delete it, since it
2549 may cause problems with later optimization passes.
2550 It is possible for cse to create notes
2551 like this as a result of record_jump_cond. */
2553 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2554 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2555 remove_note (i1, temp);
2558 if (new_start == 0)
2559 new_start = i1;
2561 if (loop_dump_stream)
2562 fprintf (loop_dump_stream, " moved to %d",
2563 INSN_UID (i1));
2565 /* If library call, now fix the REG_NOTES that contain
2566 insn pointers, namely REG_LIBCALL on FIRST
2567 and REG_RETVAL on I1. */
2568 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2570 XEXP (temp, 0) = first;
2571 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2572 XEXP (temp, 0) = i1;
2575 temp = p;
2576 delete_insn (p);
2577 p = NEXT_INSN (p);
2579 /* simplify_giv_expr expects that it can walk the insns
2580 at m->insn forwards and see this old sequence we are
2581 tossing here. delete_insn does preserve the next
2582 pointers, but when we skip over a NOTE we must fix
2583 it up. Otherwise that code walks into the non-deleted
2584 insn stream. */
2585 while (p && NOTE_P (p))
2586 p = NEXT_INSN (temp) = NEXT_INSN (p);
2588 if (m->insert_temp)
2590 rtx seq;
2591 /* Replace the original insn with a move from
2592 our newly created temp. */
2593 start_sequence ();
2594 emit_move_insn (m->set_dest, newreg);
2595 seq = get_insns ();
2596 end_sequence ();
2597 emit_insn_before (seq, p);
2601 /* The more regs we move, the less we like moving them. */
2602 threshold -= 3;
2605 m->done = 1;
2607 if (!m->insert_temp)
2609 /* Any other movable that loads the same register
2610 MUST be moved. */
2611 already_moved[regno] = 1;
2613 /* This reg has been moved out of one loop. */
2614 regs->array[regno].moved_once = 1;
2616 /* The reg set here is now invariant. */
2617 if (! m->partial)
2619 int i;
2620 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2621 regs->array[regno+i].set_in_loop = 0;
2624 /* Change the length-of-life info for the register
2625 to say it lives at least the full length of this loop.
2626 This will help guide optimizations in outer loops. */
2628 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2629 /* This is the old insn before all the moved insns.
2630 We can't use the moved insn because it is out of range
2631 in uid_luid. Only the old insns have luids. */
2632 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2633 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2634 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2637 /* Combine with this moved insn any other matching movables. */
2639 if (! m->partial)
2640 for (m1 = movables->head; m1; m1 = m1->next)
2641 if (m1->match == m)
2643 rtx temp;
2645 /* Schedule the reg loaded by M1
2646 for replacement so that it shares the reg of M.
2647 If the modes differ (only possible in restricted
2648 circumstances), make a SUBREG.
2650 Note this assumes that the target dependent files
2651 treat REG and SUBREG equally, including within
2652 GO_IF_LEGITIMATE_ADDRESS and in all the
2653 predicates since we never verify that replacing the
2654 original register with a SUBREG results in a
2655 recognizable insn. */
2656 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2657 reg_map[m1->regno] = m->set_dest;
2658 else
2659 reg_map[m1->regno]
2660 = gen_lowpart_common (GET_MODE (m1->set_dest),
2661 m->set_dest);
2663 /* Get rid of the matching insn
2664 and prevent further processing of it. */
2665 m1->done = 1;
2667 /* If library call, delete all insns. */
2668 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2669 NULL_RTX)))
2670 delete_insn_chain (XEXP (temp, 0), m1->insn);
2671 else
2672 delete_insn (m1->insn);
2674 /* Any other movable that loads the same register
2675 MUST be moved. */
2676 already_moved[m1->regno] = 1;
2678 /* The reg merged here is now invariant,
2679 if the reg it matches is invariant. */
2680 if (! m->partial)
2682 int i;
2683 for (i = 0;
2684 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2685 i++)
2686 regs->array[m1->regno+i].set_in_loop = 0;
2690 else if (loop_dump_stream)
2691 fprintf (loop_dump_stream, "not desirable");
2693 else if (loop_dump_stream && !m->match)
2694 fprintf (loop_dump_stream, "not safe");
2696 if (loop_dump_stream)
2697 fprintf (loop_dump_stream, "\n");
2700 if (new_start == 0)
2701 new_start = loop_start;
2703 /* Go through all the instructions in the loop, making
2704 all the register substitutions scheduled in REG_MAP. */
2705 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2706 if (INSN_P (p))
2708 replace_regs (PATTERN (p), reg_map, nregs, 0);
2709 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2710 INSN_CODE (p) = -1;
2713 /* Clean up. */
2714 free (reg_map);
2715 free (already_moved);
2719 static void
2720 loop_movables_add (struct loop_movables *movables, struct movable *m)
2722 if (movables->head == 0)
2723 movables->head = m;
2724 else
2725 movables->last->next = m;
2726 movables->last = m;
2730 static void
2731 loop_movables_free (struct loop_movables *movables)
2733 struct movable *m;
2734 struct movable *m_next;
2736 for (m = movables->head; m; m = m_next)
2738 m_next = m->next;
2739 free (m);
2743 #if 0
2744 /* Scan X and replace the address of any MEM in it with ADDR.
2745 REG is the address that MEM should have before the replacement. */
2747 static void
2748 replace_call_address (rtx x, rtx reg, rtx addr)
2750 enum rtx_code code;
2751 int i;
2752 const char *fmt;
2754 if (x == 0)
2755 return;
2756 code = GET_CODE (x);
2757 switch (code)
2759 case PC:
2760 case CC0:
2761 case CONST_INT:
2762 case CONST_DOUBLE:
2763 case CONST:
2764 case SYMBOL_REF:
2765 case LABEL_REF:
2766 case REG:
2767 return;
2769 case SET:
2770 /* Short cut for very common case. */
2771 replace_call_address (XEXP (x, 1), reg, addr);
2772 return;
2774 case CALL:
2775 /* Short cut for very common case. */
2776 replace_call_address (XEXP (x, 0), reg, addr);
2777 return;
2779 case MEM:
2780 /* If this MEM uses a reg other than the one we expected,
2781 something is wrong. */
2782 gcc_assert (XEXP (x, 0) == reg);
2783 XEXP (x, 0) = addr;
2784 return;
2786 default:
2787 break;
2790 fmt = GET_RTX_FORMAT (code);
2791 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2793 if (fmt[i] == 'e')
2794 replace_call_address (XEXP (x, i), reg, addr);
2795 else if (fmt[i] == 'E')
2797 int j;
2798 for (j = 0; j < XVECLEN (x, i); j++)
2799 replace_call_address (XVECEXP (x, i, j), reg, addr);
2803 #endif
2805 /* Return the number of memory refs to addresses that vary
2806 in the rtx X. */
2808 static int
2809 count_nonfixed_reads (const struct loop *loop, rtx x)
2811 enum rtx_code code;
2812 int i;
2813 const char *fmt;
2814 int value;
2816 if (x == 0)
2817 return 0;
2819 code = GET_CODE (x);
2820 switch (code)
2822 case PC:
2823 case CC0:
2824 case CONST_INT:
2825 case CONST_DOUBLE:
2826 case CONST:
2827 case SYMBOL_REF:
2828 case LABEL_REF:
2829 case REG:
2830 return 0;
2832 case MEM:
2833 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2834 + count_nonfixed_reads (loop, XEXP (x, 0)));
2836 default:
2837 break;
2840 value = 0;
2841 fmt = GET_RTX_FORMAT (code);
2842 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2844 if (fmt[i] == 'e')
2845 value += count_nonfixed_reads (loop, XEXP (x, i));
2846 if (fmt[i] == 'E')
2848 int j;
2849 for (j = 0; j < XVECLEN (x, i); j++)
2850 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2853 return value;
2856 /* Scan a loop setting the elements `loops_enclosed',
2857 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2858 `unknown_address_altered', `unknown_constant_address_altered', and
2859 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2860 list `store_mems' in LOOP. */
2862 static void
2863 prescan_loop (struct loop *loop)
2865 int level = 1;
2866 rtx insn;
2867 struct loop_info *loop_info = LOOP_INFO (loop);
2868 rtx start = loop->start;
2869 rtx end = loop->end;
2870 /* The label after END. Jumping here is just like falling off the
2871 end of the loop. We use next_nonnote_insn instead of next_label
2872 as a hedge against the (pathological) case where some actual insn
2873 might end up between the two. */
2874 rtx exit_target = next_nonnote_insn (end);
2876 loop_info->has_indirect_jump = indirect_jump_in_function;
2877 loop_info->pre_header_has_call = 0;
2878 loop_info->has_call = 0;
2879 loop_info->has_nonconst_call = 0;
2880 loop_info->has_prefetch = 0;
2881 loop_info->has_volatile = 0;
2882 loop_info->has_tablejump = 0;
2883 loop_info->has_multiple_exit_targets = 0;
2884 loop->level = 1;
2886 loop_info->unknown_address_altered = 0;
2887 loop_info->unknown_constant_address_altered = 0;
2888 loop_info->store_mems = NULL_RTX;
2889 loop_info->first_loop_store_insn = NULL_RTX;
2890 loop_info->mems_idx = 0;
2891 loop_info->num_mem_sets = 0;
2893 for (insn = start; insn && !LABEL_P (insn);
2894 insn = PREV_INSN (insn))
2896 if (CALL_P (insn))
2898 loop_info->pre_header_has_call = 1;
2899 break;
2903 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2904 insn = NEXT_INSN (insn))
2906 switch (GET_CODE (insn))
2908 case NOTE:
2909 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2911 ++level;
2912 /* Count number of loops contained in this one. */
2913 loop->level++;
2915 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2916 --level;
2917 break;
2919 case CALL_INSN:
2920 if (! CONST_OR_PURE_CALL_P (insn))
2922 loop_info->unknown_address_altered = 1;
2923 loop_info->has_nonconst_call = 1;
2925 else if (pure_call_p (insn))
2926 loop_info->has_nonconst_call = 1;
2927 loop_info->has_call = 1;
2928 if (can_throw_internal (insn))
2929 loop_info->has_multiple_exit_targets = 1;
2930 break;
2932 case JUMP_INSN:
2933 if (! loop_info->has_multiple_exit_targets)
2935 rtx set = pc_set (insn);
2937 if (set)
2939 rtx src = SET_SRC (set);
2940 rtx label1, label2;
2942 if (GET_CODE (src) == IF_THEN_ELSE)
2944 label1 = XEXP (src, 1);
2945 label2 = XEXP (src, 2);
2947 else
2949 label1 = src;
2950 label2 = NULL_RTX;
2953 do
2955 if (label1 && label1 != pc_rtx)
2957 if (GET_CODE (label1) != LABEL_REF)
2959 /* Something tricky. */
2960 loop_info->has_multiple_exit_targets = 1;
2961 break;
2963 else if (XEXP (label1, 0) != exit_target
2964 && LABEL_OUTSIDE_LOOP_P (label1))
2966 /* A jump outside the current loop. */
2967 loop_info->has_multiple_exit_targets = 1;
2968 break;
2972 label1 = label2;
2973 label2 = NULL_RTX;
2975 while (label1);
2977 else
2979 /* A return, or something tricky. */
2980 loop_info->has_multiple_exit_targets = 1;
2983 /* Fall through. */
2985 case INSN:
2986 if (volatile_refs_p (PATTERN (insn)))
2987 loop_info->has_volatile = 1;
2989 if (JUMP_P (insn)
2990 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2991 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2992 loop_info->has_tablejump = 1;
2994 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2995 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2996 loop_info->first_loop_store_insn = insn;
2998 if (flag_non_call_exceptions && can_throw_internal (insn))
2999 loop_info->has_multiple_exit_targets = 1;
3000 break;
3002 default:
3003 break;
3007 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
3008 if (/* An exception thrown by a called function might land us
3009 anywhere. */
3010 ! loop_info->has_nonconst_call
3011 /* We don't want loads for MEMs moved to a location before the
3012 one at which their stack memory becomes allocated. (Note
3013 that this is not a problem for malloc, etc., since those
3014 require actual function calls.) */
3015 && ! current_function_calls_alloca
3016 /* There are ways to leave the loop other than falling off the
3017 end. */
3018 && ! loop_info->has_multiple_exit_targets)
3019 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
3020 insn = NEXT_INSN (insn))
3021 for_each_rtx (&insn, insert_loop_mem, loop_info);
3023 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
3024 that loop_invariant_p and load_mems can use true_dependence
3025 to determine what is really clobbered. */
3026 if (loop_info->unknown_address_altered)
3028 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3030 loop_info->store_mems
3031 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3033 if (loop_info->unknown_constant_address_altered)
3035 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3036 MEM_READONLY_P (mem) = 1;
3037 loop_info->store_mems
3038 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3042 /* Invalidate all loops containing LABEL. */
3044 static void
3045 invalidate_loops_containing_label (rtx label)
3047 struct loop *loop;
3048 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
3049 loop->invalid = 1;
3052 /* Scan the function looking for loops. Record the start and end of each loop.
3053 Also mark as invalid any loops that contain a setjmp or are branched
3054 to from outside the loop. */
3056 static void
3057 find_and_verify_loops (rtx f, struct loops *loops)
3059 rtx insn;
3060 rtx label;
3061 int num_loops;
3062 struct loop *current_loop;
3063 struct loop *next_loop;
3064 struct loop *loop;
3066 num_loops = loops->num;
3068 compute_luids (f, NULL_RTX, 0);
3070 /* If there are jumps to undefined labels,
3071 treat them as jumps out of any/all loops.
3072 This also avoids writing past end of tables when there are no loops. */
3073 uid_loop[0] = NULL;
3075 /* Find boundaries of loops, mark which loops are contained within
3076 loops, and invalidate loops that have setjmp. */
3078 num_loops = 0;
3079 current_loop = NULL;
3080 for (insn = f; insn; insn = NEXT_INSN (insn))
3082 if (NOTE_P (insn))
3083 switch (NOTE_LINE_NUMBER (insn))
3085 case NOTE_INSN_LOOP_BEG:
3086 next_loop = loops->array + num_loops;
3087 next_loop->num = num_loops;
3088 num_loops++;
3089 next_loop->start = insn;
3090 next_loop->outer = current_loop;
3091 current_loop = next_loop;
3092 break;
3094 case NOTE_INSN_LOOP_END:
3095 gcc_assert (current_loop);
3097 current_loop->end = insn;
3098 current_loop = current_loop->outer;
3099 break;
3101 default:
3102 break;
3105 if (CALL_P (insn)
3106 && find_reg_note (insn, REG_SETJMP, NULL))
3108 /* In this case, we must invalidate our current loop and any
3109 enclosing loop. */
3110 for (loop = current_loop; loop; loop = loop->outer)
3112 loop->invalid = 1;
3113 if (loop_dump_stream)
3114 fprintf (loop_dump_stream,
3115 "\nLoop at %d ignored due to setjmp.\n",
3116 INSN_UID (loop->start));
3120 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
3121 enclosing loop, but this doesn't matter. */
3122 uid_loop[INSN_UID (insn)] = current_loop;
3125 /* Any loop containing a label used in an initializer must be invalidated,
3126 because it can be jumped into from anywhere. */
3127 for (label = forced_labels; label; label = XEXP (label, 1))
3128 invalidate_loops_containing_label (XEXP (label, 0));
3130 /* Any loop containing a label used for an exception handler must be
3131 invalidated, because it can be jumped into from anywhere. */
3132 for_each_eh_label (invalidate_loops_containing_label);
3134 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
3135 loop that it is not contained within, that loop is marked invalid.
3136 If any INSN or CALL_INSN uses a label's address, then the loop containing
3137 that label is marked invalid, because it could be jumped into from
3138 anywhere.
3140 Also look for blocks of code ending in an unconditional branch that
3141 exits the loop. If such a block is guarded by a conditional
3142 branch around it, move the block elsewhere (see below) and
3143 invert the jump to point to the code block. This may eliminate a
3144 label in our loop and will simplify processing by both us and a
3145 possible second cse pass. */
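/* Illustrative sketch of that transformation (hypothetical labels,
   not from the sources): a loop body containing

       if (cond) goto L1;
       ...block...
       goto L_exit;
     L1:

   has the conditional jump inverted to target a new label heading the
   block, while the block itself (ending in the jump out of the loop)
   is moved after a BARRIER outside the loop.  */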
3147 for (insn = f; insn; insn = NEXT_INSN (insn))
3148 if (INSN_P (insn))
3150 struct loop *this_loop = uid_loop[INSN_UID (insn)];
3152 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
3154 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
3155 if (note)
3156 invalidate_loops_containing_label (XEXP (note, 0));
3159 if (!JUMP_P (insn))
3160 continue;
3162 mark_loop_jump (PATTERN (insn), this_loop);
3164 /* See if this is an unconditional branch outside the loop. */
3165 if (this_loop
3166 && (GET_CODE (PATTERN (insn)) == RETURN
3167 || (any_uncondjump_p (insn)
3168 && onlyjump_p (insn)
3169 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
3170 != this_loop)))
3171 && get_max_uid () < max_uid_for_loop)
3173 rtx p;
3174 rtx our_next = next_real_insn (insn);
3175 rtx last_insn_to_move = NEXT_INSN (insn);
3176 struct loop *dest_loop;
3177 struct loop *outer_loop = NULL;
3179 /* Go backwards until we reach the start of the loop, a label,
3180 or a JUMP_INSN. */
3181 for (p = PREV_INSN (insn);
3182 !LABEL_P (p)
3183 && ! (NOTE_P (p)
3184 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3185 && !JUMP_P (p);
3186 p = PREV_INSN (p))
3189 /* Check for the case where we have a jump to an inner nested
3190 loop, and do not perform the optimization in that case. */
3192 if (JUMP_LABEL (insn))
3194 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
3195 if (dest_loop)
3197 for (outer_loop = dest_loop; outer_loop;
3198 outer_loop = outer_loop->outer)
3199 if (outer_loop == this_loop)
3200 break;
3204 /* Make sure that the target of P is within the current loop. */
3206 if (JUMP_P (p) && JUMP_LABEL (p)
3207 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
3208 outer_loop = this_loop;
3210 /* If we stopped on a JUMP_INSN to the next insn after INSN,
3211 we have a block of code to try to move.
3213 We look backward and then forward from the target of INSN
3214 to find a BARRIER at the same loop depth as the target.
3215 If we find such a BARRIER, we make a new label for the start
3216 of the block, invert the jump in P and point it to that label,
3217 and move the block of code to the spot we found. */
3219 if (! outer_loop
3220 && JUMP_P (p)
3221 && JUMP_LABEL (p) != 0
3222 /* Just ignore jumps to labels that were never emitted.
3223 These always indicate compilation errors. */
3224 && INSN_UID (JUMP_LABEL (p)) != 0
3225 && any_condjump_p (p) && onlyjump_p (p)
3226 && next_real_insn (JUMP_LABEL (p)) == our_next
3227 /* If it's not safe to move the sequence, then we
3228 mustn't try. */
3229 && insns_safe_to_move_p (p, NEXT_INSN (insn),
3230 &last_insn_to_move))
3232 rtx target
3233 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
3234 struct loop *target_loop = uid_loop[INSN_UID (target)];
3235 rtx loc, loc2;
3236 rtx tmp;
3238 /* Search for possible garbage past the conditional jumps
3239 and look for the last barrier. */
3240 for (tmp = last_insn_to_move;
3241 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
3242 if (BARRIER_P (tmp))
3243 last_insn_to_move = tmp;
3245 for (loc = target; loc; loc = PREV_INSN (loc))
3246 if (BARRIER_P (loc)
3247 /* Don't move things inside a tablejump. */
3248 && ((loc2 = next_nonnote_insn (loc)) == 0
3249 || !LABEL_P (loc2)
3250 || (loc2 = next_nonnote_insn (loc2)) == 0
3251 || !JUMP_P (loc2)
3252 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3253 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3254 && uid_loop[INSN_UID (loc)] == target_loop)
3255 break;
3257 if (loc == 0)
3258 for (loc = target; loc; loc = NEXT_INSN (loc))
3259 if (BARRIER_P (loc)
3260 /* Don't move things inside a tablejump. */
3261 && ((loc2 = next_nonnote_insn (loc)) == 0
3262 || !LABEL_P (loc2)
3263 || (loc2 = next_nonnote_insn (loc2)) == 0
3264 || !JUMP_P (loc2)
3265 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3266 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3267 && uid_loop[INSN_UID (loc)] == target_loop)
3268 break;
3270 if (loc)
3272 rtx cond_label = JUMP_LABEL (p);
3273 rtx new_label = get_label_after (p);
3275 /* Ensure our label doesn't go away. */
3276 LABEL_NUSES (cond_label)++;
3278 /* Verify that uid_loop is large enough and that
3279 we can invert P. */
3280 if (invert_jump (p, new_label, 1))
3282 rtx q, r;
3284 /* If no suitable BARRIER was found, create a suitable
3285 one before TARGET. Since TARGET is a fall through
3286 path, we'll need to insert a jump around our block
3287 and add a BARRIER before TARGET.
3289 This creates an extra unconditional jump outside
3290 the loop. However, the benefits of removing rarely
3291 executed instructions from inside the loop usually
3292 outweighs the cost of the extra unconditional jump
3293 outside the loop. */
3294 if (loc == 0)
3296 rtx temp;
3298 temp = gen_jump (JUMP_LABEL (insn));
3299 temp = emit_jump_insn_before (temp, target);
3300 JUMP_LABEL (temp) = JUMP_LABEL (insn);
3301 LABEL_NUSES (JUMP_LABEL (insn))++;
3302 loc = emit_barrier_before (target);
3305 /* Include the BARRIER after INSN and copy the
3306 block after LOC. */
3307 if (squeeze_notes (&new_label, &last_insn_to_move))
3308 abort ();
3309 reorder_insns (new_label, last_insn_to_move, loc);
3311 /* All those insns are now in TARGET_LOOP. */
3312 for (q = new_label;
3313 q != NEXT_INSN (last_insn_to_move);
3314 q = NEXT_INSN (q))
3315 uid_loop[INSN_UID (q)] = target_loop;
3317 /* The label jumped to by INSN is no longer a loop
3318 exit. Unless INSN does not have a label (e.g.,
3319 it is a RETURN insn), search loop->exit_labels
3320 to find its label_ref, and remove it. Also turn
3321 off LABEL_OUTSIDE_LOOP_P bit. */
3322 if (JUMP_LABEL (insn))
3324 for (q = 0, r = this_loop->exit_labels;
3325 r;
3326 q = r, r = LABEL_NEXTREF (r))
3327 if (XEXP (r, 0) == JUMP_LABEL (insn))
3329 LABEL_OUTSIDE_LOOP_P (r) = 0;
3330 if (q)
3331 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3332 else
3333 this_loop->exit_labels = LABEL_NEXTREF (r);
3334 break;
3337 for (loop = this_loop; loop && loop != target_loop;
3338 loop = loop->outer)
3339 loop->exit_count--;
3341 /* If we didn't find it, then something is
3342 wrong. */
3343 gcc_assert (r);
3346 /* P is now a jump outside the loop, so it must be put
3347 in loop->exit_labels, and marked as such.
3348 The easiest way to do this is to just call
3349 mark_loop_jump again for P. */
3350 mark_loop_jump (PATTERN (p), this_loop);
3352 /* If INSN now jumps to the insn after it,
3353 delete INSN. */
3354 if (JUMP_LABEL (insn) != 0
3355 && (next_real_insn (JUMP_LABEL (insn))
3356 == next_real_insn (insn)))
3357 delete_related_insns (insn);
3360 /* Continue the loop after where the conditional
3361 branch used to jump, since the only branch insn
3362 in the block (if it still remains) is an inter-loop
3363 branch and hence needs no processing. */
3364 insn = NEXT_INSN (cond_label);
3366 if (--LABEL_NUSES (cond_label) == 0)
3367 delete_related_insns (cond_label);
3369 /* This loop will be continued with NEXT_INSN (insn). */
3370 insn = PREV_INSN (insn);
3377 /* If any label in X jumps to a loop different from LOOP and any of the
3378 loops it is contained in, mark the target loop invalid.
3380 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3382 static void
3383 mark_loop_jump (rtx x, struct loop *loop)
3385 struct loop *dest_loop;
3386 struct loop *outer_loop;
3387 int i;
3389 switch (GET_CODE (x))
3391 case PC:
3392 case USE:
3393 case CLOBBER:
3394 case REG:
3395 case MEM:
3396 case CONST_INT:
3397 case CONST_DOUBLE:
3398 case RETURN:
3399 return;
3401 case CONST:
3402 /* There could be a label reference in here. */
3403 mark_loop_jump (XEXP (x, 0), loop);
3404 return;
3406 case PLUS:
3407 case MINUS:
3408 case MULT:
3409 mark_loop_jump (XEXP (x, 0), loop);
3410 mark_loop_jump (XEXP (x, 1), loop);
3411 return;
3413 case LO_SUM:
3414 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3415 mark_loop_jump (XEXP (x, 1), loop);
3416 return;
3418 case SIGN_EXTEND:
3419 case ZERO_EXTEND:
3420 mark_loop_jump (XEXP (x, 0), loop);
3421 return;
3423 case LABEL_REF:
3424 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3426 /* Link together all labels that branch outside the loop. This
3427 is used by final_[bg]iv_value and the loop unrolling code. Also
3428 mark this LABEL_REF so we know that this branch should predict
3429 false. */
3431 /* A check to make sure the label is not in an inner nested loop,
3432 since this does not count as a loop exit. */
3433 if (dest_loop)
3435 for (outer_loop = dest_loop; outer_loop;
3436 outer_loop = outer_loop->outer)
3437 if (outer_loop == loop)
3438 break;
3440 else
3441 outer_loop = NULL;
3443 if (loop && ! outer_loop)
3445 LABEL_OUTSIDE_LOOP_P (x) = 1;
3446 LABEL_NEXTREF (x) = loop->exit_labels;
3447 loop->exit_labels = x;
3449 for (outer_loop = loop;
3450 outer_loop && outer_loop != dest_loop;
3451 outer_loop = outer_loop->outer)
3452 outer_loop->exit_count++;
3455 /* If this is inside a loop, but not in the current loop or one enclosed
3456 by it, it invalidates at least one loop. */
3458 if (! dest_loop)
3459 return;
3461 /* We must invalidate every nested loop containing the target of this
3462 label, except those that also contain the jump insn. */
3464 for (; dest_loop; dest_loop = dest_loop->outer)
3466 /* Stop when we reach a loop that also contains the jump insn. */
3467 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3468 if (dest_loop == outer_loop)
3469 return;
3471 /* If we get here, we know we need to invalidate a loop. */
3472 if (loop_dump_stream && ! dest_loop->invalid)
3473 fprintf (loop_dump_stream,
3474 "\nLoop at %d ignored due to multiple entry points.\n",
3475 INSN_UID (dest_loop->start));
3477 dest_loop->invalid = 1;
3479 return;
3481 case SET:
3482 /* If this is not setting pc, ignore. */
3483 if (SET_DEST (x) == pc_rtx)
3484 mark_loop_jump (SET_SRC (x), loop);
3485 return;
3487 case IF_THEN_ELSE:
3488 mark_loop_jump (XEXP (x, 1), loop);
3489 mark_loop_jump (XEXP (x, 2), loop);
3490 return;
3492 case PARALLEL:
3493 case ADDR_VEC:
3494 for (i = 0; i < XVECLEN (x, 0); i++)
3495 mark_loop_jump (XVECEXP (x, 0, i), loop);
3496 return;
3498 case ADDR_DIFF_VEC:
3499 for (i = 0; i < XVECLEN (x, 1); i++)
3500 mark_loop_jump (XVECEXP (x, 1, i), loop);
3501 return;
3503 default:
3504 /* Strictly speaking this is not a jump into the loop, only a possible
3505 jump out of the loop. However, we have no way to link the destination
3506 of this jump onto the list of exit labels. To be safe we mark this
3507 loop and any containing loops as invalid. */
3508 if (loop)
3510 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3512 if (loop_dump_stream && ! outer_loop->invalid)
3513 fprintf (loop_dump_stream,
3514 "\nLoop at %d ignored due to unknown exit jump.\n",
3515 INSN_UID (outer_loop->start));
3516 outer_loop->invalid = 1;
3519 return;
3523 /* Return nonzero if there is a label in the range from
3524 insn INSN to and including the insn whose luid is END.
3525 INSN must have an assigned luid (i.e., it must not have
3526 been previously created by loop.c). */
3528 static int
3529 labels_in_range_p (rtx insn, int end)
3531 while (insn && INSN_LUID (insn) <= end)
3533 if (LABEL_P (insn))
3534 return 1;
3535 insn = NEXT_INSN (insn);
3538 return 0;
3541 /* Record that a memory reference X is being set. */
3543 static void
3544 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3545 void *data ATTRIBUTE_UNUSED)
3547 struct loop_info *loop_info = data;
3549 if (x == 0 || !MEM_P (x))
3550 return;
3552 /* Count number of memory writes.
3553 This affects heuristics in strength_reduce. */
3554 loop_info->num_mem_sets++;
3556 /* BLKmode MEM means all memory is clobbered. */
3557 if (GET_MODE (x) == BLKmode)
3559 if (MEM_READONLY_P (x))
3560 loop_info->unknown_constant_address_altered = 1;
3561 else
3562 loop_info->unknown_address_altered = 1;
3564 return;
3567 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3568 loop_info->store_mems);
3571 /* X is a value modified by an INSN that references a biv inside a loop
3572 exit test (i.e., X is somehow related to the value of the biv). If X
3573 is a pseudo that is used more than once, then the biv is (effectively)
3574 used more than once. DATA is a pointer to a loop_regs structure. */
3576 static void
3577 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3579 struct loop_regs *regs = (struct loop_regs *) data;
3581 if (x == 0)
3582 return;
3584 while (GET_CODE (x) == STRICT_LOW_PART
3585 || GET_CODE (x) == SIGN_EXTRACT
3586 || GET_CODE (x) == ZERO_EXTRACT
3587 || GET_CODE (x) == SUBREG)
3588 x = XEXP (x, 0);
3590 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3591 return;
3593 /* If we do not have usage information, or if we know the register
3594 is used more than once, note that fact for check_dbra_loop. */
3595 if (REGNO (x) >= max_reg_before_loop
3596 || ! regs->array[REGNO (x)].single_usage
3597 || regs->array[REGNO (x)].single_usage == const0_rtx)
3598 regs->multiple_uses = 1;
3601 /* Return nonzero if the rtx X is invariant over the current loop.
3603 The value is 2 if we refer to something only conditionally invariant.
3605 A memory ref is invariant if it is not volatile and does not conflict
3606 with anything stored in `loop_info->store_mems'. */
3608 static int
3609 loop_invariant_p (const struct loop *loop, rtx x)
3611 struct loop_info *loop_info = LOOP_INFO (loop);
3612 struct loop_regs *regs = LOOP_REGS (loop);
3613 int i;
3614 enum rtx_code code;
3615 const char *fmt;
3616 int conditional = 0;
3617 rtx mem_list_entry;
3619 if (x == 0)
3620 return 1;
3621 code = GET_CODE (x);
3622 switch (code)
3624 case CONST_INT:
3625 case CONST_DOUBLE:
3626 case SYMBOL_REF:
3627 case CONST:
3628 return 1;
3630 case LABEL_REF:
3631 return 1;
3633 case PC:
3634 case CC0:
3635 case UNSPEC_VOLATILE:
3636 return 0;
3638 case REG:
3639 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3640 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3641 && ! current_function_has_nonlocal_goto)
3642 return 1;
3644 if (LOOP_INFO (loop)->has_call
3645 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3646 return 0;
3648 /* Out-of-range regs can occur when we are called from unrolling.
3649 These registers created by the unroller are set in the loop,
3650 hence are never invariant.
3651 Other out-of-range regs can be generated by load_mems; those that
3652 are written to in the loop are not invariant, while those that are
3653 not written to are invariant. It would be easy for load_mems
3654 to set n_times_set correctly for these registers; however, there
3655 is no easy way to distinguish them from registers created by the
3656 unroller. */
3658 if (REGNO (x) >= (unsigned) regs->num)
3659 return 0;
3661 if (regs->array[REGNO (x)].set_in_loop < 0)
3662 return 2;
3664 return regs->array[REGNO (x)].set_in_loop == 0;
3666 case MEM:
3667 /* Volatile memory references must be rejected. Do this before
3668 checking for read-only items, so that volatile read-only items
3669 will be rejected also. */
3670 if (MEM_VOLATILE_P (x))
3671 return 0;
3673 /* See if there is any dependence between a store and this load. */
3674 mem_list_entry = loop_info->store_mems;
3675 while (mem_list_entry)
3677 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3678 x, rtx_varies_p))
3679 return 0;
3681 mem_list_entry = XEXP (mem_list_entry, 1);
3684 /* It's not invalidated by a store in memory
3685 but we must still verify the address is invariant. */
3686 break;
3688 case ASM_OPERANDS:
3689 /* Don't mess with insns declared volatile. */
3690 if (MEM_VOLATILE_P (x))
3691 return 0;
3692 break;
3694 default:
3695 break;
3698 fmt = GET_RTX_FORMAT (code);
3699 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3701 if (fmt[i] == 'e')
3703 int tem = loop_invariant_p (loop, XEXP (x, i));
3704 if (tem == 0)
3705 return 0;
3706 if (tem == 2)
3707 conditional = 1;
3709 else if (fmt[i] == 'E')
3711 int j;
3712 for (j = 0; j < XVECLEN (x, i); j++)
3714 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3715 if (tem == 0)
3716 return 0;
3717 if (tem == 2)
3718 conditional = 1;
3724 return 1 + conditional;
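/* An illustrative sketch of the above (assuming reg 100 is a pseudo that
   is never set inside the loop):

     (plus:SI (reg:SI 100) (const_int 4))

   is invariant, so loop_invariant_p returns 1. Had reg 100 instead been
   marked with a negative set_in_loop count (a candidate being moved by
   the code motion pass), the result would be 2: only conditionally
   invariant. */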
3727 /* Return nonzero if all the insns in the loop that set REG
3728 are INSN and the immediately following insns,
3729 and if each of those insns sets REG in an invariant way
3730 (not counting uses of REG in them).
3732 The value is 2 if some of these insns are only conditionally invariant.
3734 We assume that INSN itself is the first set of REG
3735 and that its source is invariant. */
3737 static int
3738 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3739 rtx insn)
3741 struct loop_regs *regs = LOOP_REGS (loop);
3742 rtx p = insn;
3743 unsigned int regno = REGNO (reg);
3744 rtx temp;
3745 /* Number of sets we have to insist on finding after INSN. */
3746 int count = n_sets - 1;
3747 int old = regs->array[regno].set_in_loop;
3748 int value = 0;
3749 int this;
3751 /* If N_SETS hit the limit, we can't rely on its value. */
3752 if (n_sets == 127)
3753 return 0;
3755 regs->array[regno].set_in_loop = 0;
3757 while (count > 0)
3759 enum rtx_code code;
3760 rtx set;
3762 p = NEXT_INSN (p);
3763 code = GET_CODE (p);
3765 /* If library call, skip to end of it. */
3766 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3767 p = XEXP (temp, 0);
3769 this = 0;
3770 if (code == INSN
3771 && (set = single_set (p))
3772 && REG_P (SET_DEST (set))
3773 && REGNO (SET_DEST (set)) == regno)
3775 this = loop_invariant_p (loop, SET_SRC (set));
3776 if (this != 0)
3777 value |= this;
3778 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3780 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3781 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3782 notes are OK. */
3783 this = (CONSTANT_P (XEXP (temp, 0))
3784 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3785 && loop_invariant_p (loop, XEXP (temp, 0))));
3786 if (this != 0)
3787 value |= this;
3790 if (this != 0)
3791 count--;
3792 else if (code != NOTE)
3794 regs->array[regno].set_in_loop = old;
3795 return 0;
3799 regs->array[regno].set_in_loop = old;
3800 /* If loop_invariant_p ever returned 2, we return 2. */
3801 return 1 + (value & 2);
3804 /* Look at all uses (not sets) of registers in X. For each, if it is
3805 the single use, set REGS->array[REGNO].single_usage to INSN; if there
3806 was a previous use in a different insn, set it to const0_rtx. */
3808 static void
3809 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3811 enum rtx_code code = GET_CODE (x);
3812 const char *fmt = GET_RTX_FORMAT (code);
3813 int i, j;
3815 if (code == REG)
3816 regs->array[REGNO (x)].single_usage
3817 = (regs->array[REGNO (x)].single_usage != 0
3818 && regs->array[REGNO (x)].single_usage != insn)
3819 ? const0_rtx : insn;
3821 else if (code == SET)
3823 /* Don't count SET_DEST if it is a REG; otherwise count things
3824 in SET_DEST because if a register is partially modified, it won't
3825 show up as a potential movable so we don't care how USAGE is set
3826 for it. */
3827 if (!REG_P (SET_DEST (x)))
3828 find_single_use_in_loop (regs, insn, SET_DEST (x));
3829 find_single_use_in_loop (regs, insn, SET_SRC (x));
3831 else
3832 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3834 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3835 find_single_use_in_loop (regs, insn, XEXP (x, i));
3836 else if (fmt[i] == 'E')
3837 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3838 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3842 /* Count and record any set in X which is contained in INSN. Update
3843 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3844 in X. */
3846 static void
3847 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3849 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3850 /* Don't move a reg that has an explicit clobber.
3851 It's not worth the pain to try to do it correctly. */
3852 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3854 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3856 rtx dest = SET_DEST (x);
3857 while (GET_CODE (dest) == SUBREG
3858 || GET_CODE (dest) == ZERO_EXTRACT
3859 || GET_CODE (dest) == STRICT_LOW_PART)
3860 dest = XEXP (dest, 0);
3861 if (REG_P (dest))
3863 int i;
3864 int regno = REGNO (dest);
3865 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3867 /* If this is the first setting of this reg
3868 in current basic block, and it was set before,
3869 it must be set in two basic blocks, so it cannot
3870 be moved out of the loop. */
3871 if (regs->array[regno].set_in_loop > 0
3872 && last_set[regno] == 0)
3873 regs->array[regno+i].may_not_optimize = 1;
3874 /* If this is not first setting in current basic block,
3875 see if reg was used in between previous one and this.
3876 If so, neither one can be moved. */
3877 if (last_set[regno] != 0
3878 && reg_used_between_p (dest, last_set[regno], insn))
3879 regs->array[regno+i].may_not_optimize = 1;
3880 if (regs->array[regno+i].set_in_loop < 127)
3881 ++regs->array[regno+i].set_in_loop;
3882 last_set[regno+i] = insn;
3888 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3889 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3890 contained in insn INSN is used by any insn that precedes INSN in
3891 cyclic order starting from the loop entry point.
3893 We don't want to use INSN_LUID here because if we restrict INSN to those
3894 that have a valid INSN_LUID, it means we cannot move an invariant out
3895 from an inner loop past two loops. */
3897 static int
3898 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3900 rtx reg = SET_DEST (set);
3901 rtx p;
3903 /* Scan forward checking for register usage. If we hit INSN, we
3904 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3905 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3907 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3908 return 1;
3910 if (p == loop->end)
3911 p = loop->start;
3914 return 0;
3918 /* Information we collect about arrays that we might want to prefetch. */
3919 struct prefetch_info
3921 struct iv_class *class; /* Class this prefetch is based on. */
3922 struct induction *giv; /* GIV this prefetch is based on. */
3923 rtx base_address; /* Start prefetching from this address plus
3924 index. */
3925 HOST_WIDE_INT index;
3926 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3927 iteration. */
3928 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3929 prefetch area in one iteration. */
3930 unsigned int total_bytes; /* Total bytes loop will access in this block.
3931 This is set only for loops with known
3932 iteration counts and is 0xffffffff
3933 otherwise. */
3934 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3935 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3936 unsigned int write : 1; /* 1 for read/write prefetches. */
3939 /* Data used by check_store function. */
3940 struct check_store_data
3942 rtx mem_address;
3943 int mem_write;
3946 static void check_store (rtx, rtx, void *);
3947 static void emit_prefetch_instructions (struct loop *);
3948 static int rtx_equal_for_prefetch_p (rtx, rtx);
3950 /* Set mem_write when mem_address is found. Used as callback to
3951 note_stores. */
3952 static void
3953 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3955 struct check_store_data *d = (struct check_store_data *) data;
3957 if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3958 d->mem_write = 1;
3961 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3962 important for getting some addresses combined. More sophisticated
3963 transformations can be added later when necessary.
3965 ??? The same trick of swapping operands is done at several other places.
3966 It would be nice to develop some common way to handle this. */
3968 static int
3969 rtx_equal_for_prefetch_p (rtx x, rtx y)
3971 int i;
3972 int j;
3973 enum rtx_code code = GET_CODE (x);
3974 const char *fmt;
3976 if (x == y)
3977 return 1;
3978 if (code != GET_CODE (y))
3979 return 0;
3981 if (COMMUTATIVE_ARITH_P (x))
3983 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3984 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3985 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3986 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3989 /* Compare the elements. If any pair of corresponding elements fails to
3990 match, return 0 for the whole thing. */
3992 fmt = GET_RTX_FORMAT (code);
3993 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3995 switch (fmt[i])
3997 case 'w':
3998 if (XWINT (x, i) != XWINT (y, i))
3999 return 0;
4000 break;
4002 case 'i':
4003 if (XINT (x, i) != XINT (y, i))
4004 return 0;
4005 break;
4007 case 'E':
4008 /* Two vectors must have the same length. */
4009 if (XVECLEN (x, i) != XVECLEN (y, i))
4010 return 0;
4012 /* And the corresponding elements must match. */
4013 for (j = 0; j < XVECLEN (x, i); j++)
4014 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
4015 XVECEXP (y, i, j)) == 0)
4016 return 0;
4017 break;
4019 case 'e':
4020 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
4021 return 0;
4022 break;
4024 case 's':
4025 if (strcmp (XSTR (x, i), XSTR (y, i)))
4026 return 0;
4027 break;
4029 case 'u':
4030 /* These are just backpointers, so they don't matter. */
4031 break;
4033 case '0':
4034 break;
4036 /* It is believed that rtx's at this level will never
4037 contain anything but integers and other rtx's,
4038 except for within LABEL_REFs and SYMBOL_REFs. */
4039 default:
4040 gcc_unreachable ();
4043 return 1;
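/* For example (illustrative), the two addresses

     (plus:SI (reg:SI 100) (const_int 8))
     (plus:SI (const_int 8) (reg:SI 100))

   compare equal under this function thanks to the commutative-operand
   swap above, whereas plain rtx_equal_p would treat them as different. */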
4046 /* Remove constant addition value from the expression X (when present)
4047 and return it. */
4049 static HOST_WIDE_INT
4050 remove_constant_addition (rtx *x)
4052 HOST_WIDE_INT addval = 0;
4053 rtx exp = *x;
4055 /* Avoid clobbering a shared CONST expression. */
4056 if (GET_CODE (exp) == CONST)
4058 if (GET_CODE (XEXP (exp, 0)) == PLUS
4059 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
4060 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
4062 *x = XEXP (XEXP (exp, 0), 0);
4063 return INTVAL (XEXP (XEXP (exp, 0), 1));
4065 return 0;
4068 if (GET_CODE (exp) == CONST_INT)
4070 addval = INTVAL (exp);
4071 *x = const0_rtx;
4074 /* For a PLUS expression, recurse on both operands. */
4075 else if (GET_CODE (exp) == PLUS)
4077 addval += remove_constant_addition (&XEXP (exp, 0));
4078 addval += remove_constant_addition (&XEXP (exp, 1));
4080 /* If an operand was constant, it is now const0_rtx; remove the
4081 extra zero from the expression. */
4082 if (XEXP (exp, 0) == const0_rtx)
4083 *x = XEXP (exp, 1);
4084 else if (XEXP (exp, 1) == const0_rtx)
4085 *x = XEXP (exp, 0);
4088 return addval;
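/* Illustration: given *X == (plus:SI (reg:SI 100) (const_int 16)), the
   function returns 16 and rewrites *X to (reg:SI 100). A bare
   (const_int 16) becomes const0_rtx with 16 returned, and the
   (const (plus (symbol_ref ...) (const_int ...))) form is split without
   modifying the shared CONST rtx itself. */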
4091 /* Attempt to identify accesses to arrays that are most likely to cause cache
4092 misses, and emit prefetch instructions a few prefetch blocks forward.
4094 To detect the arrays we use the GIV information that was collected by the
4095 strength reduction pass.
4097 The prefetch instructions are generated after the GIV information has been
4098 gathered and before the strength reduction process. The new GIVs are injected into
4099 the strength reduction tables, so the prefetch addresses are optimized as
4100 well.
4102 GIVs are split into base address, stride, and constant addition values.
4103 GIVs with the same address, stride, and close addition values are combined
4104 into a single prefetch. Writes to GIVs are also detected, so that a write
4105 prefetch can be used for the block we write to, on machines that support
4106 write prefetches.
4108 Several heuristics are used to determine when to prefetch. They are
4109 controlled by defined symbols that can be overridden for each target. */
4111 static void
4112 emit_prefetch_instructions (struct loop *loop)
4114 int num_prefetches = 0;
4115 int num_real_prefetches = 0;
4116 int num_real_write_prefetches = 0;
4117 int num_prefetches_before = 0;
4118 int num_write_prefetches_before = 0;
4119 int ahead = 0;
4120 int i;
4121 struct iv_class *bl;
4122 struct induction *iv;
4123 struct prefetch_info info[MAX_PREFETCHES];
4124 struct loop_ivs *ivs = LOOP_IVS (loop);
4126 if (!HAVE_prefetch || PREFETCH_BLOCK == 0)
4127 return;
4129 /* Consider only loops w/o calls. When a call is made, the loop is probably
4130 slow enough to hide the memory latency anyway. */
4131 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
4133 if (loop_dump_stream)
4134 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
4136 return;
4139 /* Don't prefetch in loops known to have few iterations. */
4140 if (PREFETCH_NO_LOW_LOOPCNT
4141 && LOOP_INFO (loop)->n_iterations
4142 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
4144 if (loop_dump_stream)
4145 fprintf (loop_dump_stream,
4146 "Prefetch: ignoring loop: not enough iterations.\n");
4147 return;
4150 /* Search all induction variables and pick those interesting for the prefetch
4151 machinery. */
4152 for (bl = ivs->list; bl; bl = bl->next)
4154 struct induction *biv = bl->biv, *biv1;
4155 int basestride = 0;
4157 biv1 = biv;
4159 /* Expect all BIVs to be executed in each iteration. This makes our
4160 analysis more conservative. */
4161 while (biv1)
4163 /* Discard non-constant additions that we can't handle well yet, and
4164 BIVs that are executed multiple times; such BIVs ought to be
4165 handled in the nested loop. We accept not_every_iteration BIVs,
4166 since these only result in larger strides and make our
4167 heuristics more conservative. */
4168 if (GET_CODE (biv->add_val) != CONST_INT)
4170 if (loop_dump_stream)
4172 fprintf (loop_dump_stream,
4173 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
4174 REGNO (biv->src_reg), INSN_UID (biv->insn));
4175 print_rtl (loop_dump_stream, biv->add_val);
4176 fprintf (loop_dump_stream, "\n");
4178 break;
4181 if (biv->maybe_multiple)
4183 if (loop_dump_stream)
4185 fprintf (loop_dump_stream,
4186 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
4187 REGNO (biv->src_reg), INSN_UID (biv->insn));
4188 print_rtl (loop_dump_stream, biv->add_val);
4189 fprintf (loop_dump_stream, "\n");
4191 break;
4194 basestride += INTVAL (biv1->add_val);
4195 biv1 = biv1->next_iv;
4198 if (biv1 || !basestride)
4199 continue;
4201 for (iv = bl->giv; iv; iv = iv->next_iv)
4203 rtx address;
4204 rtx temp;
4205 HOST_WIDE_INT index = 0;
4206 int add = 1;
4207 HOST_WIDE_INT stride = 0;
4208 int stride_sign = 1;
4209 struct check_store_data d;
4210 const char *ignore_reason = NULL;
4211 int size = GET_MODE_SIZE (GET_MODE (iv));
4213 /* See whether an induction variable is interesting to us and if
4214 not, report the reason. */
4215 if (iv->giv_type != DEST_ADDR)
4216 ignore_reason = "giv is not a destination address";
4218 /* We are interested only in constant stride memory references
4219 in order to be able to compute density easily. */
4220 else if (GET_CODE (iv->mult_val) != CONST_INT)
4221 ignore_reason = "stride is not constant";
4223 else
4225 stride = INTVAL (iv->mult_val) * basestride;
4226 if (stride < 0)
4228 stride = -stride;
4229 stride_sign = -1;
4232 /* On some targets, reversed order prefetches are not
4233 worthwhile. */
4234 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
4235 ignore_reason = "reversed order stride";
4237 /* Prefetch of accesses with an extreme stride might not be
4238 worthwhile, either. */
4239 else if (PREFETCH_NO_EXTREME_STRIDE
4240 && stride > PREFETCH_EXTREME_STRIDE)
4241 ignore_reason = "extreme stride";
4243 /* Ignore GIVs with varying add values; we can't predict the
4244 value for the next iteration. */
4245 else if (!loop_invariant_p (loop, iv->add_val))
4246 ignore_reason = "giv has varying add value";
4248 /* Ignore GIVs in the nested loops; they ought to have been
4249 handled already. */
4250 else if (iv->maybe_multiple)
4251 ignore_reason = "giv is in nested loop";
4254 if (ignore_reason != NULL)
4256 if (loop_dump_stream)
4257 fprintf (loop_dump_stream,
4258 "Prefetch: ignoring giv at %d: %s.\n",
4259 INSN_UID (iv->insn), ignore_reason);
4260 continue;
4263 /* Determine the pointer to the basic array we are examining. It is
4264 the sum of the BIV's initial value and the GIV's add_val. */
4265 address = copy_rtx (iv->add_val);
4266 temp = copy_rtx (bl->initial_value);
4268 address = simplify_gen_binary (PLUS, Pmode, temp, address);
4269 index = remove_constant_addition (&address);
4271 d.mem_write = 0;
4272 d.mem_address = *iv->location;
4274 /* When the GIV is not always executed, we might be better off
4275 not dirtying the cache pages. */
4276 if (PREFETCH_CONDITIONAL || iv->always_executed)
4277 note_stores (PATTERN (iv->insn), check_store, &d);
4278 else
4280 if (loop_dump_stream)
4281 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
4282 INSN_UID (iv->insn), "in conditional code.");
4283 continue;
4286 /* Attempt to find another prefetch to the same array and see if we
4287 can merge this one. */
4288 for (i = 0; i < num_prefetches; i++)
4289 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
4290 && stride == info[i].stride)
4292 /* If both access the same array (the same location,
4293 differing only by a small constant index), merge
4294 the prefetches. Just do the later one; the earlier one will
4295 get prefetched from the previous iteration.
4296 The artificial threshold should not be too small,
4297 but also no bigger than the small portion of memory usually
4298 traversed by a single loop. */
4299 if (index >= info[i].index
4300 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4302 info[i].write |= d.mem_write;
4303 info[i].bytes_accessed += size;
4304 info[i].index = index;
4305 info[i].giv = iv;
4306 info[i].class = bl;
4307 info[num_prefetches].base_address = address;
4308 add = 0;
4309 break;
4312 if (index < info[i].index
4313 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4315 info[i].write |= d.mem_write;
4316 info[i].bytes_accessed += size;
4317 add = 0;
4318 break;
4322 /* Merging failed. */
4323 if (add)
4325 info[num_prefetches].giv = iv;
4326 info[num_prefetches].class = bl;
4327 info[num_prefetches].index = index;
4328 info[num_prefetches].stride = stride;
4329 info[num_prefetches].base_address = address;
4330 info[num_prefetches].write = d.mem_write;
4331 info[num_prefetches].bytes_accessed = size;
4332 num_prefetches++;
4333 if (num_prefetches >= MAX_PREFETCHES)
4335 if (loop_dump_stream)
4336 fprintf (loop_dump_stream,
4337 "Maximal number of prefetches exceeded.\n");
4338 return;
4344 for (i = 0; i < num_prefetches; i++)
4346 int density;
4348 /* Attempt to calculate the total number of bytes fetched by all
4349 iterations of the loop. Avoid overflow. */
4350 if (LOOP_INFO (loop)->n_iterations
4351 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4352 >= LOOP_INFO (loop)->n_iterations))
4353 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4354 else
4355 info[i].total_bytes = 0xffffffff;
4357 density = info[i].bytes_accessed * 100 / info[i].stride;
4359 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4360 if (PREFETCH_ONLY_DENSE_MEM)
4361 if (density * 256 > PREFETCH_DENSE_MEM * 100
4362 && (info[i].total_bytes / PREFETCH_BLOCK
4363 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4365 info[i].prefetch_before_loop = 1;
4366 info[i].prefetch_in_loop
4367 = (info[i].total_bytes / PREFETCH_BLOCK
4368 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4370 else
4372 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4373 if (loop_dump_stream)
4374 fprintf (loop_dump_stream,
4375 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4376 INSN_UID (info[i].giv->insn), density);
4378 else
4379 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4381 /* Find how many prefetch instructions we'll use within the loop. */
4382 if (info[i].prefetch_in_loop != 0)
4384 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4385 / PREFETCH_BLOCK);
4386 num_real_prefetches += info[i].prefetch_in_loop;
4387 if (info[i].write)
4388 num_real_write_prefetches += info[i].prefetch_in_loop;
4392 /* Determine how many iterations ahead to prefetch within the loop, based
4393 on how many prefetches we currently expect to do within the loop. */
4394 if (num_real_prefetches != 0)
4396 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4398 if (loop_dump_stream)
4399 fprintf (loop_dump_stream,
4400 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4401 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4402 num_real_prefetches = 0, num_real_write_prefetches = 0;
4405 /* We'll also use AHEAD to determine how many prefetch instructions to
4406 emit before a loop, so don't leave it zero. */
4407 if (ahead == 0)
4408 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4410 for (i = 0; i < num_prefetches; i++)
4412 /* Update if we've decided not to prefetch anything within the loop. */
4413 if (num_real_prefetches == 0)
4414 info[i].prefetch_in_loop = 0;
4416 /* Find how many prefetch instructions we'll use before the loop. */
4417 if (info[i].prefetch_before_loop != 0)
4419 int n = info[i].total_bytes / PREFETCH_BLOCK;
4420 if (n > ahead)
4421 n = ahead;
4422 info[i].prefetch_before_loop = n;
4423 num_prefetches_before += n;
4424 if (info[i].write)
4425 num_write_prefetches_before += n;
4428 if (loop_dump_stream)
4430 if (info[i].prefetch_in_loop == 0
4431 && info[i].prefetch_before_loop == 0)
4432 continue;
4433 fprintf (loop_dump_stream, "Prefetch insn: %d",
4434 INSN_UID (info[i].giv->insn));
4435 fprintf (loop_dump_stream,
4436 "; in loop: %d; before: %d; %s\n",
4437 info[i].prefetch_in_loop,
4438 info[i].prefetch_before_loop,
4439 info[i].write ? "read/write" : "read only");
4440 fprintf (loop_dump_stream,
4441 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4442 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4443 info[i].bytes_accessed, info[i].total_bytes);
4444 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4445 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4446 info[i].index, info[i].stride);
4447 print_rtl (loop_dump_stream, info[i].base_address);
4448 fprintf (loop_dump_stream, "\n");
4452 if (num_real_prefetches + num_prefetches_before > 0)
4454 /* Record that this loop uses prefetch instructions. */
4455 LOOP_INFO (loop)->has_prefetch = 1;
4457 if (loop_dump_stream)
4459 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4460 num_real_prefetches, num_real_write_prefetches);
4461 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4462 num_prefetches_before, num_write_prefetches_before);
4466 for (i = 0; i < num_prefetches; i++)
4468 int y;
4470 for (y = 0; y < info[i].prefetch_in_loop; y++)
4472 rtx loc = copy_rtx (*info[i].giv->location);
4473 rtx insn;
4474 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4475 rtx before_insn = info[i].giv->insn;
4476 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4477 rtx seq;
4479 /* We can save some effort by offsetting the address on
4480 architectures with offsettable memory references. */
4481 if (offsettable_address_p (0, VOIDmode, loc))
4482 loc = plus_constant (loc, bytes_ahead);
4483 else
4485 rtx reg = gen_reg_rtx (Pmode);
4486 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4487 GEN_INT (bytes_ahead), reg,
4488 0, before_insn);
4489 loc = reg;
4492 start_sequence ();
4493 /* Make sure the address operand is valid for prefetch. */
4494 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4495 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4496 loc = force_reg (Pmode, loc);
4497 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4498 GEN_INT (3)));
4499 seq = get_insns ();
4500 end_sequence ();
4501 emit_insn_before (seq, before_insn);
4503 /* Check all insns emitted and record the new GIV
4504 information. */
4505 insn = NEXT_INSN (prev_insn);
4506 while (insn != before_insn)
4508 insn = check_insn_for_givs (loop, insn,
4509 info[i].giv->always_executed,
4510 info[i].giv->maybe_multiple);
4511 insn = NEXT_INSN (insn);
4515 if (PREFETCH_BEFORE_LOOP)
4517 /* Emit insns before the loop to fetch the first cache lines or,
4518 if we're not prefetching within the loop, everything we expect
4519 to need. */
4520 for (y = 0; y < info[i].prefetch_before_loop; y++)
4522 rtx reg = gen_reg_rtx (Pmode);
4523 rtx loop_start = loop->start;
4524 rtx init_val = info[i].class->initial_value;
4525 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4526 info[i].giv->add_val,
4527 GEN_INT (y * PREFETCH_BLOCK));
4529 /* Functions called by LOOP_IV_ADD_MULT_EMIT_BEFORE expect a
4530 non-constant INIT_VAL to have the same mode as REG, which
4531 in this case we know to be Pmode. */
4532 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4534 rtx seq;
4536 start_sequence ();
4537 init_val = convert_to_mode (Pmode, init_val, 0);
4538 seq = get_insns ();
4539 end_sequence ();
4540 loop_insn_emit_before (loop, 0, loop_start, seq);
4542 loop_iv_add_mult_emit_before (loop, init_val,
4543 info[i].giv->mult_val,
4544 add_val, reg, 0, loop_start);
4545 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4546 GEN_INT (3)),
4547 loop_start);
4552 return;
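/* The net effect, sketched at the source level (illustrative only; the
   insns are really emitted at the RTL level, and the distance depends on
   PREFETCH_BLOCK and the AHEAD computation above):

     for (i = 0; i < n; i++)          for (i = 0; i < n; i++)
       sum += a[i];             ==>     {
                                          prefetch (&a[i] + AHEAD_BYTES);
                                          sum += a[i];
                                        }

   plus, when PREFETCH_BEFORE_LOOP, a few prefetches of the first cache
   lines emitted ahead of the loop. */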
4555 /* Communication with routines called via `note_stores'. */
4557 static rtx note_insn;
4559 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4561 static rtx addr_placeholder;
4563 /* ??? Unfinished optimizations, and possible future optimizations,
4564 for the strength reduction code. */
4566 /* ??? The interaction of biv elimination, and recognition of 'constant'
4567 bivs, may cause problems. */
4569 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4570 performance problems.
4572 Perhaps don't eliminate things that can be combined with an addressing
4573 mode. Find all givs that have the same biv, mult_val, and add_val;
4574 then for each giv, check to see if its only use dies in a following
4575 memory address. If so, generate a new memory address and check to see
4576 if it is valid. If it is valid, then store the modified memory address,
4577 otherwise, mark the giv as not done so that it will get its own iv. */
4579 /* ??? Could try to optimize branches when it is known that a biv is always
4580 positive. */
4582 /* ??? When replacing a biv in a compare insn, we should replace it with
4583 the closest giv so that an optimized branch can still be recognized by
4584 the combiner, e.g. the VAX acb insn. */
4586 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4587 was rerun in loop_optimize whenever a register was added or moved.
4588 Also, some of the optimizations could be a little less conservative. */
4590 /* Searches the insns between INSN and LOOP->END. Returns 1 if there
4591 is a backward branch in that range that branches to somewhere between
4592 LOOP->START and INSN. Returns 0 otherwise. */
4594 /* ??? This is a quadratic algorithm. It could be rewritten to be linear.
4595 In practice, this is not a problem, because this function is seldom called,
4596 and uses a negligible amount of CPU time on average. */
4598 static int
4599 back_branch_in_range_p (const struct loop *loop, rtx insn)
4601 rtx p, q, target_insn;
4602 rtx loop_start = loop->start;
4603 rtx loop_end = loop->end;
4604 rtx orig_loop_end = loop->end;
4606 /* Stop before we get to the backward branch at the end of the loop. */
4607 loop_end = prev_nonnote_insn (loop_end);
4608 if (BARRIER_P (loop_end))
4609 loop_end = PREV_INSN (loop_end);
4611 /* In case INSN has been deleted, search forward for the first
4612 non-deleted insn following it. */
4613 while (INSN_DELETED_P (insn))
4614 insn = NEXT_INSN (insn);
4616 /* Check for the case where insn is the last insn in the loop. Deal
4617 with the case where INSN was a deleted loop test insn, in which case
4618 it will now be the NOTE_LOOP_END. */
4619 if (insn == loop_end || insn == orig_loop_end)
4620 return 0;
4622 for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
4624 if (JUMP_P (p))
4626 target_insn = JUMP_LABEL (p);
4628 /* Search from loop_start to insn, to see if one of them is
4629 the target_insn. We can't use INSN_LUID comparisons here,
4630 since insn may not have an LUID entry. */
4631 for (q = loop_start; q != insn; q = NEXT_INSN (q))
4632 if (q == target_insn)
4633 return 1;
4637 return 0;
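/* Illustration of the insn ordering being checked (sketch):

     LOOP->START
       ...
     L1:                <- somewhere between LOOP->START and INSN
       ...
     INSN
       ...
       jump L1          <- backward branch in range: return 1
     LOOP->END */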
4640 /* Scan the loop body and call FNCALL for each insn. In addition to the
4641 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4642 the callback.
4644 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed at
4645 least once for every loop iteration except for the last one.
4647 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4648 every loop iteration. */
4650 typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int);
4651 static void
4652 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4654 int not_every_iteration = 0;
4655 int maybe_multiple = 0;
4656 int past_loop_latch = 0;
4657 bool exit_test_is_entry = false;
4658 rtx p;
4660 /* If loop_scan_start points to the loop exit test, the loop body
4661 cannot be counted on to run on every iteration, and we have to
4662 be wary of subversive use of gotos inside expression
4663 statements. */
4664 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4666 exit_test_is_entry = true;
4667 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4670 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4671 for (p = next_insn_in_loop (loop, loop->scan_start);
4672 p != NULL_RTX;
4673 p = next_insn_in_loop (loop, p))
4675 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4677 /* Past CODE_LABEL, we get to insns that may be executed multiple
4678 times. The only way we can be sure that they can't is if every
4679 jump insn between here and the end of the loop either
4680 returns, exits the loop, is a jump to a location that is still
4681 behind the label, or is a jump to the loop start. */
4683 if (LABEL_P (p))
4685 rtx insn = p;
4687 maybe_multiple = 0;
4689 while (1)
4691 insn = NEXT_INSN (insn);
4692 if (insn == loop->scan_start)
4693 break;
4694 if (insn == loop->end)
4696 if (loop->top != 0)
4697 insn = loop->top;
4698 else
4699 break;
4700 if (insn == loop->scan_start)
4701 break;
4704 if (JUMP_P (insn)
4705 && GET_CODE (PATTERN (insn)) != RETURN
4706 && (!any_condjump_p (insn)
4707 || (JUMP_LABEL (insn) != 0
4708 && JUMP_LABEL (insn) != loop->scan_start
4709 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4711 maybe_multiple = 1;
4712 break;
4717 /* Past a jump, we get to insns for which we can't count
4718 on whether they will be executed during each iteration. */
4719 /* This code appears twice in strength_reduce. There is also similar
4720 code in scan_loop. */
4721 if (JUMP_P (p)
4722 /* If we enter the loop in the middle, and scan around to the
4723 beginning, don't set not_every_iteration for that.
4724 This can be any kind of jump, since we want to know if insns
4725 will be executed if the loop is executed. */
4726 && (exit_test_is_entry
4727 || !(JUMP_LABEL (p) == loop->top
4728 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4729 && any_uncondjump_p (p))
4730 || (NEXT_INSN (p) == loop->end
4731 && any_condjump_p (p))))))
4733 rtx label = 0;
4735 /* If this is a jump outside the loop, then it also doesn't
4736 matter. Check to see if the target of this branch is on the
4737 loop->exit_labels list. */
4739 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4740 if (XEXP (label, 0) == JUMP_LABEL (p))
4741 break;
4743 if (!label)
4744 not_every_iteration = 1;
4747 /* Note if we pass a loop latch. If we do, then we cannot clear
4748 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4749 a loop since a jump before the last CODE_LABEL may have started
4750 a new loop iteration.
4752 Note that LOOP_TOP is only set for rotated loops and we need
4753 this check for all loops, so compare against the CODE_LABEL
4754 which immediately follows LOOP_START. */
4755 if (JUMP_P (p)
4756 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4757 past_loop_latch = 1;
4759 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4760 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4761 or not an insn is known to be executed each iteration of the
4762 loop, whether or not any iterations are known to occur.
4764 Therefore, if we have just passed a label and have no more labels
4765 between here and the test insn of the loop, and we have not passed
4766 a jump to the top of the loop, then we know these insns will be
4767 executed each iteration. */
4769 if (not_every_iteration
4770 && !past_loop_latch
4771 && LABEL_P (p)
4772 && no_labels_between_p (p, loop->end))
4773 not_every_iteration = 0;
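/* A sketch of how the two flags evolve in a simple body (illustrative):

     top:  insn A              ; NOT_EVERY_ITERATION == 0
           beq skip            ; conditional jump staying in the loop
           insn B              ; NOT_EVERY_ITERATION == 1
     skip:                     ; last label before the exit test:
           insn C              ; NOT_EVERY_ITERATION back to 0
           bne top

   MAYBE_MULTIPLE, by contrast, becomes 1 after a label that some later
   backward branch inside the loop can reach, since insns after such a
   label may run more than once per iteration. */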
4777 static void
4778 loop_bivs_find (struct loop *loop)
4780 struct loop_regs *regs = LOOP_REGS (loop);
4781 struct loop_ivs *ivs = LOOP_IVS (loop);
4782 /* Temporary list pointers for traversing ivs->list. */
4783 struct iv_class *bl, **backbl;
4785 ivs->list = 0;
4787 for_each_insn_in_loop (loop, check_insn_for_bivs);
4789 /* Scan ivs->list to remove all regs that proved not to be bivs.
4790 Make a sanity check against regs->n_times_set. */
4791 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4793 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4794 /* The above happens if the register was modified by a subreg, etc. */
4795 /* Make sure it is not recognized as a basic induction var: */
4796 || regs->array[bl->regno].n_times_set != bl->biv_count
4797 /* If never incremented, it is an invariant that we decided not to
4798 move. So leave it alone. */
4799 || ! bl->incremented)
4801 if (loop_dump_stream)
4802 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4803 bl->regno,
4804 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4805 ? "not induction variable"
4806 : (! bl->incremented ? "never incremented"
4807 : "count error")));
4809 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4810 *backbl = bl->next;
4812 else
4814 backbl = &bl->next;
4816 if (loop_dump_stream)
4817 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4823 /* Determine how BIVs are initialized by looking through the pre-header
4824 extended basic block. */
4825 static void
4826 loop_bivs_init_find (struct loop *loop)
4828 struct loop_ivs *ivs = LOOP_IVS (loop);
4829 /* Temporary list pointers for traversing ivs->list. */
4830 struct iv_class *bl;
4831 int call_seen;
4832 rtx p;
4834 /* Find initial value for each biv by searching backwards from loop_start,
4835 halting at first label. Also record any test condition. */
4837 call_seen = 0;
4838 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4840 rtx test;
4842 note_insn = p;
4844 if (CALL_P (p))
4845 call_seen = 1;
4847 if (INSN_P (p))
4848 note_stores (PATTERN (p), record_initial, ivs);
4850 /* Record any test of a biv that branches around the loop if there is no
4851 store between it and the start of the loop. We only care about tests
4852 with constants and registers, and only certain of those. */
4853 if (JUMP_P (p)
4854 && JUMP_LABEL (p) != 0
4855 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4856 && (test = get_condition_for_loop (loop, p)) != 0
4857 && REG_P (XEXP (test, 0))
4858 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4859 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4860 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4861 && bl->init_insn == 0)
4863 /* If an NE test, we have an initial value! */
4864 if (GET_CODE (test) == NE)
4866 bl->init_insn = p;
4867 bl->init_set = gen_rtx_SET (VOIDmode,
4868 XEXP (test, 0), XEXP (test, 1));
4870 else
4871 bl->initial_test = test;
4877 /* Look at each biv and see if we can say anything better about its
4878 initial value from any initializing insns set up above. (This is done
4879 in two passes to avoid missing SETs in a PARALLEL.) */
4880 static void
4881 loop_bivs_check (struct loop *loop)
4883 struct loop_ivs *ivs = LOOP_IVS (loop);
4884 /* Temporary list pointers for traversing ivs->list. */
4885 struct iv_class *bl;
4886 struct iv_class **backbl;
4888 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4890 rtx src;
4891 rtx note;
4893 if (! bl->init_insn)
4894 continue;
4896 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4897 is a constant, use the value of that. */
4898 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4899 && CONSTANT_P (XEXP (note, 0)))
4900 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4901 && CONSTANT_P (XEXP (note, 0))))
4902 src = XEXP (note, 0);
4903 else
4904 src = SET_SRC (bl->init_set);
4906 if (loop_dump_stream)
4907 fprintf (loop_dump_stream,
4908 "Biv %d: initialized at insn %d: initial value ",
4909 bl->regno, INSN_UID (bl->init_insn));
4911 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4912 || GET_MODE (src) == VOIDmode)
4913 && valid_initial_value_p (src, bl->init_insn,
4914 LOOP_INFO (loop)->pre_header_has_call,
4915 loop->start))
4917 bl->initial_value = src;
4919 if (loop_dump_stream)
4921 print_simple_rtl (loop_dump_stream, src);
4922 fputc ('\n', loop_dump_stream);
4925 /* If we can't make it a giv,
4926 let the biv keep its initial value of "itself". */
4927 else if (loop_dump_stream)
4928 fprintf (loop_dump_stream, "is complex\n");
4933 /* Search the loop for general induction variables. */
4935 static void
4936 loop_givs_find (struct loop* loop)
4938 for_each_insn_in_loop (loop, check_insn_for_givs);
4942 /* For each giv for which we still don't know whether or not it is
4943 replaceable, check to see if it is replaceable because its final value
4944 can be calculated. */
4946 static void
4947 loop_givs_check (struct loop *loop)
4949 struct loop_ivs *ivs = LOOP_IVS (loop);
4950 struct iv_class *bl;
4952 for (bl = ivs->list; bl; bl = bl->next)
4954 struct induction *v;
4956 for (v = bl->giv; v; v = v->next_iv)
4957 if (! v->replaceable && ! v->not_replaceable)
4958 check_final_value (loop, v);
4962 /* Try to generate the simplest rtx for the expression
4963 (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
4964 value of giv's. */
4966 static rtx
4967 fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode)
4969 rtx temp, mult_res;
4970 rtx result;
4972 /* The modes must all be the same. This should always be true. For now,
4973 check to make sure. */
4974 gcc_assert (GET_MODE (mult1) == mode || GET_MODE (mult1) == VOIDmode);
4975 gcc_assert (GET_MODE (mult2) == mode || GET_MODE (mult2) == VOIDmode);
4976 gcc_assert (GET_MODE (add1) == mode || GET_MODE (add1) == VOIDmode);
4978 /* Ensure that if at least one of mult1/mult2 is constant, then mult2
4979 will be a constant. */
4980 if (GET_CODE (mult1) == CONST_INT)
4982 temp = mult2;
4983 mult2 = mult1;
4984 mult1 = temp;
4987 mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
4988 if (! mult_res)
4989 mult_res = gen_rtx_MULT (mode, mult1, mult2);
4991 /* Again, put the constant second. */
4992 if (GET_CODE (add1) == CONST_INT)
4994 temp = add1;
4995 add1 = mult_res;
4996 mult_res = temp;
4999 result = simplify_binary_operation (PLUS, mode, add1, mult_res);
5000 if (! result)
5001 result = gen_rtx_PLUS (mode, add1, mult_res);
5003 return result;
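/* Illustration: with mult1 == (reg:SI 100), mult2 == (const_int 4) and
   add1 == (const_int 8), no simplification applies and the result is

     (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (const_int 8))

   whereas fully constant operands fold all the way to a single
   CONST_INT. */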
5006 /* Searches the list of induction struct's for the biv BL, to try to calculate
5007 the total increment value for one iteration of the loop as a constant.
5009 Returns the increment value as an rtx, simplified as much as possible,
5010 if it can be calculated. Otherwise, returns 0. */
5012 static rtx
5013 biv_total_increment (const struct iv_class *bl)
5015 struct induction *v;
5016 rtx result;
5018 /* For increment, must check every instruction that sets it. Each
5019 instruction must be executed only once each time through the loop.
5020 To verify this, we check that the insn is always executed, and that
5021 there are no backward branches after the insn that branch to before it.
5022 Also, the insn must have a mult_val of one (to make sure it really is
5023 an increment). */
5025 result = const0_rtx;
5026 for (v = bl->biv; v; v = v->next_iv)
5028 if (v->always_computable && v->mult_val == const1_rtx
5029 && ! v->maybe_multiple
5030 && SCALAR_INT_MODE_P (v->mode))
5032 /* If we have already counted it, skip it. */
5033 if (v->same)
5034 continue;
5036 result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
5038 else
5039 return 0;
5042 return result;
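/* Illustration: a biv incremented by (const_int 4) in each of two
   always-executed insns per iteration yields (const_int 8). If any
   increment is not always computable, may execute more than once
   (maybe_multiple), or has a mult_val other than 1, we give up and
   return 0. */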
5045 /* Try to prove that the register is dead after the loop exits. Trace every
5046 loop exit looking for an insn that will always be executed, which sets
5047 the register to some value, and appears before the first use of the register
5048 is found. If successful, then return 1, otherwise return 0. */
5050 /* ??? Could be made more intelligent in the handling of jumps, so that
5051 it can search past if statements and other similar structures. */
5053 static int
5054 reg_dead_after_loop (const struct loop *loop, rtx reg)
5056 rtx insn, label;
5057 int jump_count = 0;
5058 int label_count = 0;
5060 /* In addition to checking all exits of this loop, we must also check
5061 all exits of inner nested loops that would exit this loop. We don't
5062 have any way to identify those, so we just give up if there are any
5063 such inner loop exits. */
5065 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
5066 label_count++;
5068 if (label_count != loop->exit_count)
5069 return 0;
5071 /* HACK: Must also search the loop fall through exit, create a label_ref
5072 here which points to the loop->end, and append the loop_number_exit_labels
5073 list to it. */
5074 label = gen_rtx_LABEL_REF (VOIDmode, loop->end);
5075 LABEL_NEXTREF (label) = loop->exit_labels;
5077 for (; label; label = LABEL_NEXTREF (label))
5079 /* Succeed if we find an insn which sets the biv, or if we reach the end
5080 of the function. Fail if we find an insn that uses the biv, or if we
5081 come to a conditional jump. */
5083 insn = NEXT_INSN (XEXP (label, 0));
5084 while (insn)
5086 if (INSN_P (insn))
5088 rtx set, note;
5090 if (reg_referenced_p (reg, PATTERN (insn)))
5091 return 0;
5093 note = find_reg_equal_equiv_note (insn);
5094 if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0)))
5095 return 0;
5097 set = single_set (insn);
5098 if (set && rtx_equal_p (SET_DEST (set), reg))
5099 break;
5101 if (JUMP_P (insn))
5103 if (GET_CODE (PATTERN (insn)) == RETURN)
5104 break;
5105 else if (!any_uncondjump_p (insn)
5106 /* Prevent infinite loop following infinite loops. */
5107 || jump_count++ > 20)
5108 return 0;
5109 else
5110 insn = JUMP_LABEL (insn);
5114 insn = NEXT_INSN (insn);
5118 /* Success, the register is dead on all loop exits. */
5119 return 1;
5122 /* Try to calculate the final value of the biv, the value it will have at
5123 the end of the loop. If we can do it, return that value. */
5125 static rtx
5126 final_biv_value (const struct loop *loop, struct iv_class *bl)
5128 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
5129 rtx increment, tem;
5131 /* ??? This only works for MODE_INT biv's. Reject all others for now. */
5133 if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
5134 return 0;
5136 /* The final value for reversed bivs must be calculated differently than
5137 for ordinary bivs. In this case, there is already an insn after the
5138 loop which sets this biv's final value (if necessary), and there are
5139 no other loop exits, so we can return any value. */
5140 if (bl->reversed)
5142 if (loop_dump_stream)
5143 fprintf (loop_dump_stream,
5144 "Final biv value for %d, reversed biv.\n", bl->regno);
5146 return const0_rtx;
5149 /* Try to calculate the final value as initial value + (number of iterations
5150 * increment). For this to work, increment must be invariant, the only
5151 exit from the loop must be the fall through at the bottom (otherwise
5152 it may not have its final value when the loop exits), and the initial
5153 value of the biv must be invariant. */
5155 if (n_iterations != 0
5156 && ! loop->exit_count
5157 && loop_invariant_p (loop, bl->initial_value))
5159 increment = biv_total_increment (bl);
5161 if (increment && loop_invariant_p (loop, increment))
5163 /* Can calculate the loop exit value, emit insns after loop
5164 end to calculate this value into a temporary register in
5165 case it is needed later. */
5167 tem = gen_reg_rtx (bl->biv->mode);
5168 record_base_value (REGNO (tem), bl->biv->add_val, 0);
5169 loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations),
5170 bl->initial_value, tem);
5172 if (loop_dump_stream)
5173 fprintf (loop_dump_stream,
5174 "Final biv value for %d, calculated.\n", bl->regno);
5176 return tem;
5180 /* Check to see if the biv is dead at all loop exits. */
5181 if (reg_dead_after_loop (loop, bl->biv->src_reg))
5183 if (loop_dump_stream)
5184 fprintf (loop_dump_stream,
5185 "Final biv value for %d, biv dead after loop exit.\n",
5186 bl->regno);
5188 return const0_rtx;
5191 return 0;
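/* Illustration: for a biv with invariant initial value 0, a total
   increment of 4 per iteration, and a known trip count of 10, insns
   computing 0 + 4 * 10 == 40 are emitted after the loop, and the
   temporary register holding that value is returned. */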
5194 /* Return nonzero if it is possible to eliminate the biv BL provided
5195 all givs are reduced. This is possible if either the reg is not
5196 used outside the loop, or we can compute what its final value will
5197 be. */
5199 static int
5200 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
5201 int threshold, int insn_count)
5203 /* For architectures with a decrement_and_branch_until_zero insn,
5204 don't do this if we put a REG_NONNEG note on the endtest for this
5205 biv. */
5207 #ifdef HAVE_decrement_and_branch_until_zero
5208 if (bl->nonneg)
5210 if (loop_dump_stream)
5211 fprintf (loop_dump_stream,
5212 "Cannot eliminate nonneg biv %d.\n", bl->regno);
5213 return 0;
5215 #endif
5217 /* Check that biv is used outside loop or if it has a final value.
5218 Compare against bl->init_insn rather than loop->start. We aren't
5219 concerned with any uses of the biv between init_insn and
5220 loop->start since these won't be affected by the value of the biv
5221 elsewhere in the function, so long as init_insn doesn't use the
5222 biv itself. */
5224 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
5225 && bl->init_insn
5226 && INSN_UID (bl->init_insn) < max_uid_for_loop
5227 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
5228 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
5229 || (bl->final_value = final_biv_value (loop, bl)))
5230 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
5232 if (loop_dump_stream)
5234 fprintf (loop_dump_stream,
5235 "Cannot eliminate biv %d.\n",
5236 bl->regno);
5237 fprintf (loop_dump_stream,
5238 "First use: insn %d, last use: insn %d.\n",
5239 REGNO_FIRST_UID (bl->regno),
5240 REGNO_LAST_UID (bl->regno));
5242 return 0;
5246 /* Reduce each giv of BL that we have decided to reduce. */
5248 static void
5249 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
5251 struct induction *v;
5253 for (v = bl->giv; v; v = v->next_iv)
5255 struct induction *tv;
5256 if (! v->ignore && v->same == 0)
5258 int auto_inc_opt = 0;
5260 /* If the code for derived givs immediately below has already
5261 allocated a new_reg, we must keep it. */
5262 if (! v->new_reg)
5263 v->new_reg = gen_reg_rtx (v->mode);
5265 #ifdef AUTO_INC_DEC
5266 /* If the target has auto-increment addressing modes, and
5267 this is an address giv, then try to put the increment
5268 immediately after its use, so that flow can create an
5269 auto-increment addressing mode. */
5270 /* Don't do this for loops entered at the bottom, to avoid
5271 this invalid transformation:
5272 jmp L; -> jmp L;
5273 TOP: TOP:
5274 use giv use giv
5275 L: inc giv
5276 inc biv L:
5277 test biv test giv
5278 cbr TOP cbr TOP
5280 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
5281 && bl->biv->always_executed && ! bl->biv->maybe_multiple
5282 /* We don't handle reversed biv's because bl->biv->insn
5283 does not have a valid INSN_LUID. */
5284 && ! bl->reversed
5285 && v->always_executed && ! v->maybe_multiple
5286 && INSN_UID (v->insn) < max_uid_for_loop
5287 && !loop->top)
5289 /* If other giv's have been combined with this one, then
5290 this will work only if all uses of the other giv's occur
5291 before this giv's insn. This is difficult to check.
5293 We simplify this by looking for the common case where
5294 there is one DEST_REG giv, and this giv's insn is the
5295 last use of the dest_reg of that DEST_REG giv. If the
5296 increment occurs after the address giv, then we can
5297 perform the optimization. (Otherwise, the increment
5298 would have to go before other_giv, and we would not be
5299 able to combine it with the address giv to get an
5300 auto-inc address.) */
5301 if (v->combined_with)
5303 struct induction *other_giv = 0;
5305 for (tv = bl->giv; tv; tv = tv->next_iv)
5306 if (tv->same == v)
5308 if (other_giv)
5309 break;
5310 else
5311 other_giv = tv;
5313 if (! tv && other_giv
5314 && REGNO (other_giv->dest_reg) < max_reg_before_loop
5315 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
5316 == INSN_UID (v->insn))
5317 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
5318 auto_inc_opt = 1;
5320 /* Check for case where increment is before the address
5321 giv. Do this test in "loop order". */
5322 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
5323 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5324 || (INSN_LUID (bl->biv->insn)
5325 > INSN_LUID (loop->scan_start))))
5326 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5327 && (INSN_LUID (loop->scan_start)
5328 < INSN_LUID (bl->biv->insn))))
5329 auto_inc_opt = -1;
5330 else
5331 auto_inc_opt = 1;
5333 #ifdef HAVE_cc0
5335 rtx prev;
5337 /* We can't put an insn immediately after one setting
5338 cc0, or immediately before one using cc0. */
5339 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
5340 || (auto_inc_opt == -1
5341 && (prev = prev_nonnote_insn (v->insn)) != 0
5342 && INSN_P (prev)
5343 && sets_cc0_p (PATTERN (prev))))
5344 auto_inc_opt = 0;
5346 #endif
5348 if (auto_inc_opt)
5349 v->auto_inc_opt = 1;
5351 #endif
5353 /* For each place where the biv is incremented, add an insn
5354 to increment the new, reduced reg for the giv. */
5355 for (tv = bl->biv; tv; tv = tv->next_iv)
5357 rtx insert_before;
5359 /* Skip if location is the same as a previous one. */
5360 if (tv->same)
5361 continue;
5362 if (! auto_inc_opt)
5363 insert_before = NEXT_INSN (tv->insn);
5364 else if (auto_inc_opt == 1)
5365 insert_before = NEXT_INSN (v->insn);
5366 else
5367 insert_before = v->insn;
5369 if (tv->mult_val == const1_rtx)
5370 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5371 v->new_reg, v->new_reg,
5372 0, insert_before);
5373 else /* tv->mult_val == const0_rtx */
5374 /* A multiply is acceptable here
5375 since this is presumed to be seldom executed. */
5376 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5377 v->add_val, v->new_reg,
5378 0, insert_before);
5381 /* Add code at loop start to initialize giv's reduced reg. */
5383 loop_iv_add_mult_hoist (loop,
5384 extend_value_for_giv (v, bl->initial_value),
5385 v->mult_val, v->add_val, v->new_reg);
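/* The overall effect of the reduction above, sketched at the source level
   (illustrative only): a DEST_ADDR giv such as the address of a[i],
   computed as base + i * 4, stops being derived from the biv i and gets
   its own register p, initialized before the loop and bumped wherever
   the biv is incremented:

     for (i = 0; i < n; i++)          p = base;
       ... a[i] ...              ==>  for (i = 0; i < n; i++, p += 4)
                                        ... *p ... */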
5391 /* Check for givs whose first use is their definition and whose
5392 last use is the definition of another giv. If so, it is likely
5393 dead and should not be used to derive another giv nor to
5394 eliminate a biv. */
5396 static void
5397 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
5399 struct induction *v;
5401 for (v = bl->giv; v; v = v->next_iv)
5403 if (v->ignore
5404 || (v->same && v->same->ignore))
5405 continue;
5407 if (v->giv_type == DEST_REG
5408 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
5410 struct induction *v1;
5412 for (v1 = bl->giv; v1; v1 = v1->next_iv)
5413 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
5414 v->maybe_dead = 1;
5420 static void
5421 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
5423 struct induction *v;
5425 for (v = bl->giv; v; v = v->next_iv)
5427 if (v->same && v->same->ignore)
5428 v->ignore = 1;
5430 if (v->ignore)
5431 continue;
5433 /* Update expression if this was combined, in case other giv was
5434 replaced. */
5435 if (v->same)
5436 v->new_reg = replace_rtx (v->new_reg,
5437 v->same->dest_reg, v->same->new_reg);
5439 /* See if this register is known to be a pointer to something. If
5440 so, see if we can find the alignment. First see if there is a
5441 destination register that is a pointer. If so, this shares the
5442 alignment too. Next see if we can deduce anything from the
5443 computational information. If not, and this is a DEST_ADDR
5444 giv, at least we know that it's a pointer, though we don't know
5445 the alignment. */
5446 if (REG_P (v->new_reg)
5447 && v->giv_type == DEST_REG
5448 && REG_POINTER (v->dest_reg))
5449 mark_reg_pointer (v->new_reg,
5450 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
5451 else if (REG_P (v->new_reg)
5452 && REG_POINTER (v->src_reg))
5454 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
5456 if (align == 0
5457 || GET_CODE (v->add_val) != CONST_INT
5458 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
5459 align = 0;
5461 mark_reg_pointer (v->new_reg, align);
5463 else if (REG_P (v->new_reg)
5464 && REG_P (v->add_val)
5465 && REG_POINTER (v->add_val))
5467 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
5469 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
5470 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
5471 align = 0;
5473 mark_reg_pointer (v->new_reg, align);
5475 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
5476 mark_reg_pointer (v->new_reg, 0);
5478 if (v->giv_type == DEST_ADDR)
5479 /* Store reduced reg as the address in the memref where we found
5480 this giv. */
5481 validate_change (v->insn, v->location, v->new_reg, 0);
5482 else if (v->replaceable)
5484 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5486 else
5488 rtx original_insn = v->insn;
5489 rtx note;
5491 /* Not replaceable; emit an insn to set the original giv reg from
5492 the reduced giv, same as above. */
5493 v->insn = loop_insn_emit_after (loop, 0, original_insn,
5494 gen_move_insn (v->dest_reg,
5495 v->new_reg));
5497 /* The original insn may have a REG_EQUAL note. This note is
5498 now incorrect and may result in invalid substitutions later.
5499 The original insn is dead, but may be part of a libcall
5500 sequence, which doesn't seem worth the bother of handling. */
5501 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
5502 if (note)
5503 remove_note (original_insn, note);
5506 /* When a loop is reversed, givs which depend on the reversed
5507 biv, and which are live outside the loop, must be set to their
5508 correct final value. This insn is only needed if the giv is
5509 not replaceable. The correct final value is the same as the
5510 value that the giv starts the reversed loop with. */
5511 if (bl->reversed && ! v->replaceable)
5512 loop_iv_add_mult_sink (loop,
5513 extend_value_for_giv (v, bl->initial_value),
5514 v->mult_val, v->add_val, v->dest_reg);
5515 else if (v->final_value)
5516 loop_insn_sink_or_swim (loop,
5517 gen_load_of_final_value (v->dest_reg,
5518 v->final_value));
5520 if (loop_dump_stream)
5522 fprintf (loop_dump_stream, "giv at %d reduced to ",
5523 INSN_UID (v->insn));
5524 print_simple_rtl (loop_dump_stream, v->new_reg);
5525 fprintf (loop_dump_stream, "\n");
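/* Estimate the benefit of strength reducing giv V of class BL in
   LOOP.  TEST_REG is a scratch register used to cost the add insns
   that reducing V would require.  Returns the adjusted benefit.  */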
5531 static int
5532 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
5533 struct iv_class *bl, struct induction *v,
5534 rtx test_reg)
5536 int add_cost;
5537 int benefit;
5539 benefit = v->benefit;
5540 PUT_MODE (test_reg, v->mode);
5541 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
5542 test_reg, test_reg);
5544 /* Reduce benefit if not replaceable, since we will insert a
5545 move-insn to replace the insn that calculates this giv. Don't do
5546 this unless the giv is a user variable, since it will often be
5547 marked non-replaceable because of the duplication of the exit
5548 code outside the loop. In such a case, the copies we insert are
5549 dead and will be deleted. So they don't have a cost. Similar
5550 situations exist. */
5551 /* ??? The new final_[bg]iv_value code does a much better job of
5552 finding replaceable giv's, and hence this code may no longer be
5553 necessary. */
5554 if (! v->replaceable && ! bl->eliminable
5555 && REG_USERVAR_P (v->dest_reg))
5556 benefit -= copy_cost;
5558 /* Decrease the benefit to count the add-insns that we will insert
5559 to increment the reduced reg for the giv. ??? This can
5560 overestimate the run-time cost of the additional insns, e.g. if
5561 there are multiple basic blocks that increment the biv, but only
5562 one of these blocks is executed during each iteration. There is
5563 no good way to detect cases like this with the current structure
5564 of the loop optimizer. This code is more accurate for
5565 determining code size than run-time benefits. */
5566 benefit -= add_cost * bl->biv_count;
5568 /* Decide whether to strength-reduce this giv or to leave the code
5569 unchanged (recompute it from the biv each time it is used). This
5570 decision can be made independently for each giv. */
5572 #ifdef AUTO_INC_DEC
5573 /* Attempt to guess whether autoincrement will handle some of the
5574 new add insns; if so, increase BENEFIT (undo the subtraction of
5575 add_cost that was done above). */
5576 if (v->giv_type == DEST_ADDR
5577 /* Increasing the benefit is risky, since this is only a guess.
5578 Avoid increasing register pressure in cases where there would
5579 be no other benefit from reducing this giv. */
5580 && benefit > 0
5581 && GET_CODE (v->mult_val) == CONST_INT)
5583 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5585 if (HAVE_POST_INCREMENT
5586 && INTVAL (v->mult_val) == size)
5587 benefit += add_cost * bl->biv_count;
5588 else if (HAVE_PRE_INCREMENT
5589 && INTVAL (v->mult_val) == size)
5590 benefit += add_cost * bl->biv_count;
5591 else if (HAVE_POST_DECREMENT
5592 && -INTVAL (v->mult_val) == size)
5593 benefit += add_cost * bl->biv_count;
5594 else if (HAVE_PRE_DECREMENT
5595 && -INTVAL (v->mult_val) == size)
5596 benefit += add_cost * bl->biv_count;
5598 #endif
5600 return benefit;
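/* Illustrative sketch (not part of the pass): the guess above targets
   address givs that step by exactly the access size.  With 4-byte
   ints, the loop below advances its address giv by 4 per iteration,
   so a (post_inc ...) address can absorb the add insn on targets with
   HAVE_POST_INCREMENT.  (Function and names are hypothetical.)  */
#if 0
static void
example_post_inc (int *p, int n, int x)
{
  while (n-- > 0)
    *p++ = x;			/* address giv with mult_val == 4 */
}
#endif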
5604 /* Free IV structures for LOOP. */
5606 static void
5607 loop_ivs_free (struct loop *loop)
5609 struct loop_ivs *ivs = LOOP_IVS (loop);
5610 struct iv_class *iv = ivs->list;
5612 free (ivs->regs);
5614 while (iv)
5616 struct iv_class *next = iv->next;
5617 struct induction *induction;
5618 struct induction *next_induction;
5620 for (induction = iv->biv; induction; induction = next_induction)
5622 next_induction = induction->next_iv;
5623 free (induction);
5625 for (induction = iv->giv; induction; induction = next_induction)
5627 next_induction = induction->next_iv;
5628 free (induction);
5631 free (iv);
5632 iv = next;
5636 /* Look back before LOOP->START for the insn that sets REG and return
5637 the equivalent constant if there is a REG_EQUAL note, otherwise just
5638 the SET_SRC of REG. */
5640 static rtx
5641 loop_find_equiv_value (const struct loop *loop, rtx reg)
5643 rtx loop_start = loop->start;
5644 rtx insn, set;
5645 rtx ret;
5647 ret = reg;
5648 for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn))
5650 if (LABEL_P (insn))
5651 break;
5653 else if (INSN_P (insn) && reg_set_p (reg, insn))
5655 /* We found the last insn before the loop that sets the register.
5656 If it sets the entire register, and has a REG_EQUAL note,
5657 then use the value of the REG_EQUAL note. */
5658 if ((set = single_set (insn))
5659 && (SET_DEST (set) == reg))
5661 rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5663 /* Only use the REG_EQUAL note if it is a constant.
5664 Other things, divide in particular, will cause
5665 problems later if we use them. */
5666 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
5667 && CONSTANT_P (XEXP (note, 0)))
5668 ret = XEXP (note, 0);
5669 else
5670 ret = SET_SRC (set);
5672 /* We cannot do this if it changes between the
5673 assignment and loop start though. */
5674 if (modified_between_p (ret, insn, loop_start))
5675 ret = reg;
5677 break;
5680 return ret;
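/* Illustrative sketch (not part of the pass): given a hypothetical
   pre-header sequence

	(insn ... (set (reg 100) (mult (reg 101) (reg 102)))
	   ... (expr_list:REG_EQUAL (const_int 20) ...))
	... loop start ...

   this returns (const_int 20) for (reg 100), since the REG_EQUAL
   value is constant and nothing modifies it before the loop starts.  */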
5683 /* Find and return register term common to both expressions OP0 and
5684 OP1 or NULL_RTX if no such term exists. Each expression must be a
5685 REG or a PLUS of a REG. */
5687 static rtx
5688 find_common_reg_term (rtx op0, rtx op1)
5690 if ((REG_P (op0) || GET_CODE (op0) == PLUS)
5691 && (REG_P (op1) || GET_CODE (op1) == PLUS))
5693 rtx op00;
5694 rtx op01;
5695 rtx op10;
5696 rtx op11;
5698 if (GET_CODE (op0) == PLUS)
5699 op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
5700 else
5701 op01 = const0_rtx, op00 = op0;
5703 if (GET_CODE (op1) == PLUS)
5704 op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
5705 else
5706 op11 = const0_rtx, op10 = op1;
5708 /* Find and return common register term if present. */
5709 if (REG_P (op00) && (op00 == op10 || op00 == op11))
5710 return op00;
5711 else if (REG_P (op01) && (op01 == op10 || op01 == op11))
5712 return op01;
5715 /* No common register term found. */
5716 return NULL_RTX;
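/* For example, with OP0 = (plus (reg 70) (const_int 4)) and
   OP1 = (reg 70), the common term is (reg 70); for OP0 = (reg 70)
   and OP1 = (reg 71) there is none.  (Register numbers are
   hypothetical.)  */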
5719 /* Determine the loop iterator and calculate the number of loop
5720 iterations. Returns the exact number of loop iterations if it can
5721 be calculated, otherwise returns zero. */
5723 static unsigned HOST_WIDE_INT
5724 loop_iterations (struct loop *loop)
5726 struct loop_info *loop_info = LOOP_INFO (loop);
5727 struct loop_ivs *ivs = LOOP_IVS (loop);
5728 rtx comparison, comparison_value;
5729 rtx iteration_var, initial_value, increment, final_value;
5730 enum rtx_code comparison_code;
5731 HOST_WIDE_INT inc;
5732 unsigned HOST_WIDE_INT abs_inc;
5733 unsigned HOST_WIDE_INT abs_diff;
5734 int off_by_one;
5735 int increment_dir;
5736 int unsigned_p, compare_dir, final_larger;
5737 rtx last_loop_insn;
5738 struct iv_class *bl;
5740 loop_info->n_iterations = 0;
5741 loop_info->initial_value = 0;
5742 loop_info->initial_equiv_value = 0;
5743 loop_info->comparison_value = 0;
5744 loop_info->final_value = 0;
5745 loop_info->final_equiv_value = 0;
5746 loop_info->increment = 0;
5747 loop_info->iteration_var = 0;
5748 loop_info->iv = 0;
5750 /* We used to use prev_nonnote_insn here, but that fails because it might
5751 accidentally get the branch for a contained loop if the branch for this
5752 loop was deleted. We can only trust branches immediately before the
5753 loop_end. */
5754 last_loop_insn = PREV_INSN (loop->end);
5756 /* ??? We should probably try harder to find the jump insn
5757 at the end of the loop. The following code assumes that
5758 the last loop insn is a jump to the top of the loop. */
5759 if (!JUMP_P (last_loop_insn))
5761 if (loop_dump_stream)
5762 fprintf (loop_dump_stream,
5763 "Loop iterations: No final conditional branch found.\n");
5764 return 0;
5767 /* If there is more than a single jump to the top of the loop
5768 we cannot (easily) determine the iteration count. */
5769 if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1)
5771 if (loop_dump_stream)
5772 fprintf (loop_dump_stream,
5773 "Loop iterations: Loop has multiple back edges.\n");
5774 return 0;
5777 /* Find the iteration variable. If the last insn is a conditional
5778 branch, and the insn before tests a register value, make that the
5779 iteration variable. */
5781 comparison = get_condition_for_loop (loop, last_loop_insn);
5782 if (comparison == 0)
5784 if (loop_dump_stream)
5785 fprintf (loop_dump_stream,
5786 "Loop iterations: No final comparison found.\n");
5787 return 0;
5790 /* ??? Get_condition may switch position of induction variable and
5791 invariant register when it canonicalizes the comparison. */
5793 comparison_code = GET_CODE (comparison);
5794 iteration_var = XEXP (comparison, 0);
5795 comparison_value = XEXP (comparison, 1);
5797 if (!REG_P (iteration_var))
5799 if (loop_dump_stream)
5800 fprintf (loop_dump_stream,
5801 "Loop iterations: Comparison not against register.\n");
5802 return 0;
5805 /* The only new registers that are created before loop iterations
5806 are givs made from biv increments or registers created by
5807 load_mems. In the latter case, it is possible that try_copy_prop
5808 will propagate a new pseudo into the old iteration register but
5809 this will be marked by having the REG_USERVAR_P bit set. */
5811 gcc_assert ((unsigned) REGNO (iteration_var) < ivs->n_regs
5812 || REG_USERVAR_P (iteration_var));
5814 /* Determine the initial value of the iteration variable, and the amount
5815 that it is incremented each loop. Use the tables constructed by
5816 the strength reduction pass to calculate these values. */
5818 /* Clear the result values, in case no answer can be found. */
5819 initial_value = 0;
5820 increment = 0;
5822 /* The iteration variable can be either a giv or a biv. Check to see
5823 which it is, and compute the variable's initial value, and increment
5824 value if possible. */
5826 /* If this is a new register, we can't handle it since we don't have any
5827 reg_iv_type entry for it. */
5828 if ((unsigned) REGNO (iteration_var) >= ivs->n_regs)
5830 if (loop_dump_stream)
5831 fprintf (loop_dump_stream,
5832 "Loop iterations: No reg_iv_type entry for iteration var.\n");
5833 return 0;
5836 /* Reject iteration variables larger than the host wide int size, since they
5837 could result in a number of iterations greater than the range of our
5838 `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
5839 else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
5840 > HOST_BITS_PER_WIDE_INT))
5842 if (loop_dump_stream)
5843 fprintf (loop_dump_stream,
5844 "Loop iterations: Iteration var rejected because mode too large.\n");
5845 return 0;
5847 else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
5849 if (loop_dump_stream)
5850 fprintf (loop_dump_stream,
5851 "Loop iterations: Iteration var not an integer.\n");
5852 return 0;
5855 /* Try swapping the comparison to identify a suitable iv. */
5856 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT
5857 && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT
5858 && REG_P (comparison_value)
5859 && REGNO (comparison_value) < ivs->n_regs)
5861 rtx temp = comparison_value;
5862 comparison_code = swap_condition (comparison_code);
5863 comparison_value = iteration_var;
5864 iteration_var = temp;
5867 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT)
5869 gcc_assert (REGNO (iteration_var) < ivs->n_regs);
5871 /* Grab initial value, only useful if it is a constant. */
5872 bl = REG_IV_CLASS (ivs, REGNO (iteration_var));
5873 initial_value = bl->initial_value;
5874 if (!bl->biv->always_executed || bl->biv->maybe_multiple)
5876 if (loop_dump_stream)
5877 fprintf (loop_dump_stream,
5878 "Loop iterations: Basic induction var not set once in each iteration.\n");
5879 return 0;
5882 increment = biv_total_increment (bl);
5884 else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT)
5886 HOST_WIDE_INT offset = 0;
5887 struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var));
5888 rtx biv_initial_value;
5890 gcc_assert (REGNO (v->src_reg) < ivs->n_regs);
5892 if (!v->always_executed || v->maybe_multiple)
5894 if (loop_dump_stream)
5895 fprintf (loop_dump_stream,
5896 "Loop iterations: General induction var not set once in each iteration.\n");
5897 return 0;
5900 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5902 /* Increment value is mult_val times the increment value of the biv. */
5904 increment = biv_total_increment (bl);
5905 if (increment)
5907 struct induction *biv_inc;
5909 increment = fold_rtx_mult_add (v->mult_val,
5910 extend_value_for_giv (v, increment),
5911 const0_rtx, v->mode);
5912 /* The caller assumes that one full increment has occurred at the
5913 first loop test. But that's not true when the biv is incremented
5914 after the giv is set (which is the usual case), e.g.:
5915 i = 6; do {;} while (i++ < 9).
5916 Therefore, we bias the initial value by subtracting the amount of
5917 the increment that occurs between the giv set and the giv test. */
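/* In the example above the tested giv still holds 6 at the first
   test, so OFFSET becomes -1 and the initial value is biased to 5;
   the iteration count then comes out as (9 - 5) / 1 = 4, matching
   the four executions of the body (i = 6, 7, 8, 9).  */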
5918 for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv)
5920 if (loop_insn_first_p (v->insn, biv_inc->insn))
5922 if (REG_P (biv_inc->add_val))
5924 if (loop_dump_stream)
5925 fprintf (loop_dump_stream,
5926 "Loop iterations: Basic induction var add_val is REG %d.\n",
5927 REGNO (biv_inc->add_val));
5928 return 0;
5931 /* If we have already counted it, skip it. */
5932 if (biv_inc->same)
5933 continue;
5935 offset -= INTVAL (biv_inc->add_val);
5939 if (loop_dump_stream)
5940 fprintf (loop_dump_stream,
5941 "Loop iterations: Giv iterator, initial value bias %ld.\n",
5942 (long) offset);
5944 /* Initial value is mult_val times the biv's initial value plus
5945 add_val. Only useful if it is a constant. */
5946 biv_initial_value = extend_value_for_giv (v, bl->initial_value);
5947 initial_value
5948 = fold_rtx_mult_add (v->mult_val,
5949 plus_constant (biv_initial_value, offset),
5950 v->add_val, v->mode);
5952 else
5954 if (loop_dump_stream)
5955 fprintf (loop_dump_stream,
5956 "Loop iterations: Not basic or general induction var.\n");
5957 return 0;
5960 if (initial_value == 0)
5961 return 0;
5963 unsigned_p = 0;
5964 off_by_one = 0;
5965 switch (comparison_code)
5967 case LEU:
5968 unsigned_p = 1;
5969 case LE:
5970 compare_dir = 1;
5971 off_by_one = 1;
5972 break;
5973 case GEU:
5974 unsigned_p = 1;
5975 case GE:
5976 compare_dir = -1;
5977 off_by_one = -1;
5978 break;
5979 case EQ:
5980 /* Cannot determine loop iterations with this case. */
5981 compare_dir = 0;
5982 break;
5983 case LTU:
5984 unsigned_p = 1;
5985 case LT:
5986 compare_dir = 1;
5987 break;
5988 case GTU:
5989 unsigned_p = 1;
5990 case GT:
5991 compare_dir = -1;
5992 break;
5993 case NE:
5994 compare_dir = 0;
5995 break;
5996 default:
5997 gcc_unreachable ();
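/* The missing breaks above are deliberate fall throughs: LEU, GEU,
   LTU and GTU set UNSIGNED_P and then share the COMPARE_DIR and
   OFF_BY_ONE settings of their signed counterparts.  For example, an
   "i <= n" exit test yields compare_dir = 1 and off_by_one = 1, so
   the approximate final value computed below becomes n + 1.  */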
6000 /* If the comparison value is an invariant register, then try to find
6001 its value from the insns before the start of the loop. */
6003 final_value = comparison_value;
6004 if (REG_P (comparison_value)
6005 && loop_invariant_p (loop, comparison_value))
6007 final_value = loop_find_equiv_value (loop, comparison_value);
6009 /* If we don't get an invariant final value, we are better
6010 off with the original register. */
6011 if (! loop_invariant_p (loop, final_value))
6012 final_value = comparison_value;
6015 /* Calculate the approximate final value of the induction variable
6016 (on the last successful iteration). The exact final value
6017 depends on the branch operator, and increment sign. It will be
6018 wrong if the iteration variable is not incremented by one each
6019 time through the loop and (comparison_value + off_by_one -
6020 initial_value) % increment != 0.
6021 ??? Note that the final_value may overflow and thus final_larger
6022 will be bogus. A potentially infinite loop will be classified
6023 as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */
6024 if (off_by_one)
6025 final_value = plus_constant (final_value, off_by_one);
6027 /* Save the calculated values describing this loop's bounds, in case
6028 precondition_loop_p will need them later. These values can not be
6029 recalculated inside precondition_loop_p because strength reduction
6030 optimizations may obscure the loop's structure.
6032 These values are only required by precondition_loop_p and insert_bct
6033 whenever the number of iterations cannot be computed at compile time.
6034 Only the difference between final_value and initial_value is
6035 important. Note that final_value is only approximate. */
6036 loop_info->initial_value = initial_value;
6037 loop_info->comparison_value = comparison_value;
6038 loop_info->final_value = plus_constant (comparison_value, off_by_one);
6039 loop_info->increment = increment;
6040 loop_info->iteration_var = iteration_var;
6041 loop_info->comparison_code = comparison_code;
6042 loop_info->iv = bl;
6044 /* Try to determine the iteration count for loops such
6045 as (for i = init; i < init + const; i++). When running the
6046 loop optimization twice, the first pass often converts simple
6047 loops into this form. */
6049 if (REG_P (initial_value))
6051 rtx reg1;
6052 rtx reg2;
6053 rtx const2;
6055 reg1 = initial_value;
6056 if (GET_CODE (final_value) == PLUS)
6057 reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
6058 else
6059 reg2 = final_value, const2 = const0_rtx;
6061 /* Check for initial_value = reg1, final_value = reg2 + const2,
6062 where reg1 != reg2. */
6063 if (REG_P (reg2) && reg2 != reg1)
6065 rtx temp;
6067 /* Find what reg1 is equivalent to. Hopefully it will
6068 either be reg2 or reg2 plus a constant. */
6069 temp = loop_find_equiv_value (loop, reg1);
6071 if (find_common_reg_term (temp, reg2))
6072 initial_value = temp;
6073 else if (loop_invariant_p (loop, reg2))
6075 /* Find what reg2 is equivalent to. Hopefully it will
6076 either be reg1 or reg1 plus a constant. Let's ignore
6077 the latter case for now since it is not so common. */
6078 temp = loop_find_equiv_value (loop, reg2);
6080 if (temp == loop_info->iteration_var)
6081 temp = initial_value;
6082 if (temp == reg1)
6083 final_value = (const2 == const0_rtx)
6084 ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
6089 loop_info->initial_equiv_value = initial_value;
6090 loop_info->final_equiv_value = final_value;
6092 /* For EQ comparison loops, we don't have a valid final value.
6093 Check this now so that we won't leave an invalid value if we
6094 return early for any other reason. */
6095 if (comparison_code == EQ)
6096 loop_info->final_equiv_value = loop_info->final_value = 0;
6098 if (increment == 0)
6100 if (loop_dump_stream)
6101 fprintf (loop_dump_stream,
6102 "Loop iterations: Increment value can't be calculated.\n");
6103 return 0;
6106 if (GET_CODE (increment) != CONST_INT)
6108 /* If we have a REG, check to see if REG holds a constant value. */
6109 /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't
6110 clear if it is worthwhile to try to handle such RTL. */
6111 if (REG_P (increment) || GET_CODE (increment) == SUBREG)
6112 increment = loop_find_equiv_value (loop, increment);
6114 if (GET_CODE (increment) != CONST_INT)
6116 if (loop_dump_stream)
6118 fprintf (loop_dump_stream,
6119 "Loop iterations: Increment value not constant ");
6120 print_simple_rtl (loop_dump_stream, increment);
6121 fprintf (loop_dump_stream, ".\n");
6123 return 0;
6125 loop_info->increment = increment;
6128 if (GET_CODE (initial_value) != CONST_INT)
6130 if (loop_dump_stream)
6132 fprintf (loop_dump_stream,
6133 "Loop iterations: Initial value not constant ");
6134 print_simple_rtl (loop_dump_stream, initial_value);
6135 fprintf (loop_dump_stream, ".\n");
6137 return 0;
6139 else if (GET_CODE (final_value) != CONST_INT)
6141 if (loop_dump_stream)
6143 fprintf (loop_dump_stream,
6144 "Loop iterations: Final value not constant ");
6145 print_simple_rtl (loop_dump_stream, final_value);
6146 fprintf (loop_dump_stream, ".\n");
6148 return 0;
6150 else if (comparison_code == EQ)
6152 rtx inc_once;
6154 if (loop_dump_stream)
6155 fprintf (loop_dump_stream, "Loop iterations: EQ comparison loop.\n");
6157 inc_once = gen_int_mode (INTVAL (initial_value) + INTVAL (increment),
6158 GET_MODE (iteration_var));
6160 if (inc_once == final_value)
6162 /* The iterator value once through the loop is equal to the
6163 comparison value. Either we have an infinite loop, or
6164 we'll loop twice. */
6165 if (increment == const0_rtx)
6166 return 0;
6167 loop_info->n_iterations = 2;
6169 else
6170 loop_info->n_iterations = 1;
6172 if (GET_CODE (loop_info->initial_value) == CONST_INT)
6173 loop_info->final_value
6174 = gen_int_mode ((INTVAL (loop_info->initial_value)
6175 + loop_info->n_iterations * INTVAL (increment)),
6176 GET_MODE (iteration_var));
6177 else
6178 loop_info->final_value
6179 = plus_constant (loop_info->initial_value,
6180 loop_info->n_iterations * INTVAL (increment));
6181 loop_info->final_equiv_value
6182 = gen_int_mode ((INTVAL (initial_value)
6183 + loop_info->n_iterations * INTVAL (increment)),
6184 GET_MODE (iteration_var));
6185 return loop_info->n_iterations;
6188 /* Final_larger is 1 if final larger, 0 if they are equal, otherwise -1. */
6189 if (unsigned_p)
6190 final_larger
6191 = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6192 > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
6193 - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6194 < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
6195 else
6196 final_larger = (INTVAL (final_value) > INTVAL (initial_value))
6197 - (INTVAL (final_value) < INTVAL (initial_value));
6199 if (INTVAL (increment) > 0)
6200 increment_dir = 1;
6201 else if (INTVAL (increment) == 0)
6202 increment_dir = 0;
6203 else
6204 increment_dir = -1;
6206 /* There are 27 different cases: compare_dir = -1, 0, 1;
6207 final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
6208 There are 4 normal cases, 4 reverse cases (where the iteration variable
6209 will overflow before the loop exits), 4 infinite loop cases, and 15
6210 immediate exit (0 or 1 iteration depending on loop type) cases.
6211 Only try to optimize the normal cases. */
6213 /* (compare_dir/final_larger/increment_dir)
6214 Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
6215 Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
6216 Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
6217 Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
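/* For instance, "for (i = 0; i < n; i++)" with n known positive is
   (1/1/1), a normal case; "for (i = n; i > 0; i--)" is (-1/-1/-1);
   and "for (i = 10; i != 1; i += 2)" is a reverse case, since the
   iterator steps past the final value and must wrap to terminate.  */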
6219 /* ?? If the meaning of reverse loops (where the iteration variable
6220 will overflow before the loop exits) is undefined, then we could
6221 eliminate all of these special checks, and just always assume
6222 the loops are normal/immediate/infinite. Note that this means
6223 the sign of increment_dir would not have to be known. Also,
6224 since it does not really hurt if immediate exit loops or infinite
6225 loops are optimized, those cases could be ignored as well, and
6226 hence all loops could be optimized.
6228 According to ANSI Spec, the reverse loop case result is undefined,
6229 because the action on overflow is undefined.
6231 See also the special test for NE loops below. */
6233 if (final_larger == increment_dir && final_larger != 0
6234 && (final_larger == compare_dir || compare_dir == 0))
6235 /* Normal case. */
6237 else
6239 if (loop_dump_stream)
6240 fprintf (loop_dump_stream, "Loop iterations: Not normal loop.\n");
6241 return 0;
6244 /* Calculate the number of iterations, final_value is only an approximation,
6245 so correct for that. Note that abs_diff and n_iterations are
6246 unsigned, because they can be as large as 2^n - 1. */
6248 inc = INTVAL (increment);
6249 gcc_assert (inc);
6250 if (inc > 0)
6252 abs_diff = INTVAL (final_value) - INTVAL (initial_value);
6253 abs_inc = inc;
6255 else
6257 abs_diff = INTVAL (initial_value) - INTVAL (final_value);
6258 abs_inc = -inc;
6261 /* Given that iteration_var is going to iterate over its own mode,
6262 not HOST_WIDE_INT, disregard higher bits that might have come
6263 into the picture due to sign extension of initial and final
6264 values. */
6265 abs_diff &= ((unsigned HOST_WIDE_INT) 1
6266 << (GET_MODE_BITSIZE (GET_MODE (iteration_var)) - 1)
6267 << 1) - 1;
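/* The two-step shift builds the mode mask without ever shifting by a
   full host word (which C leaves undefined): for 32-bit modes on a
   64-bit host it gives ((HOST_WIDE_INT) 1 << 31 << 1) - 1 = 0xffffffff,
   and for a mode as wide as HOST_WIDE_INT the top bit shifts out and
   the subtraction leaves the all-ones mask, as intended.  */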
6269 /* For NE tests, make sure that the iteration variable won't miss
6270 the final value. If abs_diff mod abs_incr is not zero, then the
6271 iteration variable will overflow before the loop exits, and we
6272 can not calculate the number of iterations. */
6273 if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
6274 return 0;
6276 /* Note that the number of iterations could be calculated using
6277 (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
6278 handle potential overflow of the summation. */
6279 loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
6280 return loop_info->n_iterations;
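/* Illustrative sketch (not part of the pass): the closed form used
   above, on plain host integers; values are hypothetical.  */
#if 0
static unsigned HOST_WIDE_INT
example_iteration_count (HOST_WIDE_INT initial, HOST_WIDE_INT final,
			 HOST_WIDE_INT inc)
{
  /* Assumes a "normal" loop: inc > 0 and final >= initial.  */
  unsigned HOST_WIDE_INT abs_diff = final - initial;
  unsigned HOST_WIDE_INT abs_inc = inc;

  /* Ceiling division: e.g. initial = 0, final = 10, inc = 3 gives
     10 / 3 + 1 = 4 iterations (i = 0, 3, 6, 9).  */
  return abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
}
#endif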
6283 /* Perform strength reduction and induction variable elimination.
6285 Pseudo registers created during this function will be beyond the
6286 last valid index in several tables including
6287 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
6288 problem here, because the added registers cannot be givs outside of
6289 their loop, and hence will never be reconsidered. But scan_loop
6290 must check regnos to make sure they are in bounds. */
6292 static void
6293 strength_reduce (struct loop *loop, int flags)
6295 struct loop_info *loop_info = LOOP_INFO (loop);
6296 struct loop_regs *regs = LOOP_REGS (loop);
6297 struct loop_ivs *ivs = LOOP_IVS (loop);
6298 rtx p;
6299 /* Temporary list pointer for traversing ivs->list. */
6300 struct iv_class *bl;
6301 /* Ratio of extra register life span we can justify
6302 for saving an instruction. More if loop doesn't call subroutines
6303 since in that case saving an insn makes more difference
6304 and more registers are available. */
6305 /* ??? Could set this to the last value of threshold in move_movables. */
6306 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
6307 /* Map of pseudo-register replacements. */
6308 rtx *reg_map = NULL;
6309 int reg_map_size;
6310 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
6311 int insn_count = count_insns_in_loop (loop);
6313 addr_placeholder = gen_reg_rtx (Pmode);
6315 ivs->n_regs = max_reg_before_loop;
6316 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
6318 /* Find all BIVs in loop. */
6319 loop_bivs_find (loop);
6321 /* Exit if there are no bivs. */
6322 if (! ivs->list)
6324 loop_ivs_free (loop);
6325 return;
6328 /* Determine how BIVS are initialized by looking through pre-header
6329 extended basic block. */
6330 loop_bivs_init_find (loop);
6332 /* Look at each biv and see if we can say anything better about its
6333 initial value from any initializing insns set up above. */
6334 loop_bivs_check (loop);
6336 /* Search the loop for general induction variables. */
6337 loop_givs_find (loop);
6339 /* Try to calculate and save the number of loop iterations. This is
6340 set to zero if the actual number can not be calculated. This must
6341 be called after all giv's have been identified, since otherwise it may
6342 fail if the iteration variable is a giv. */
6343 loop_iterations (loop);
6345 #ifdef HAVE_prefetch
6346 if (flags & LOOP_PREFETCH)
6347 emit_prefetch_instructions (loop);
6348 #endif
6350 /* Now for each giv for which we still don't know whether or not it is
6351 replaceable, check to see if it is replaceable because its final value
6352 can be calculated. This must be done after loop_iterations is called,
6353 so that final_giv_value will work correctly. */
6354 loop_givs_check (loop);
6356 /* Try to prove that the loop counter variable (if any) is always
6357 nonnegative; if so, record that fact with a REG_NONNEG note
6358 so that "decrement and branch until zero" insn can be used. */
6359 check_dbra_loop (loop, insn_count);
6361 /* Create reg_map to hold substitutions for replaceable giv regs.
6362 Some givs might have been made from biv increments, so look at
6363 ivs->reg_iv_type for a suitable size. */
6364 reg_map_size = ivs->n_regs;
6365 reg_map = xcalloc (reg_map_size, sizeof (rtx));
6367 /* Examine each iv class for feasibility of strength reduction/induction
6368 variable elimination. */
6370 for (bl = ivs->list; bl; bl = bl->next)
6372 struct induction *v;
6373 int benefit;
6375 /* Test whether it will be possible to eliminate this biv
6376 provided all givs are reduced. */
6377 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
6379 /* This will be true at the end, if all givs which depend on this
6380 biv have been strength reduced.
6381 We can't (currently) eliminate the biv unless this is so. */
6382 bl->all_reduced = 1;
6384 /* Check each extension dependent giv in this class to see if its
6385 root biv is safe from wrapping in the interior mode. */
6386 check_ext_dependent_givs (loop, bl);
6388 /* Combine all giv's for this iv_class. */
6389 combine_givs (regs, bl);
6391 for (v = bl->giv; v; v = v->next_iv)
6393 struct induction *tv;
6395 if (v->ignore || v->same)
6396 continue;
6398 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
6400 /* If an insn is not to be strength reduced, then set its ignore
6401 flag, and clear bl->all_reduced. */
6403 /* A giv that depends on a reversed biv must be reduced if it is
6404 used after the loop exit, otherwise, it would have the wrong
6405 value after the loop exit. To make it simple, just reduce all
6406 of such giv's whether or not we know they are used after the loop
6407 exit. */
6409 if (v->lifetime * threshold * benefit < insn_count
6410 && ! bl->reversed)
6412 if (loop_dump_stream)
6413 fprintf (loop_dump_stream,
6414 "giv of insn %d not worth while, %d vs %d.\n",
6415 INSN_UID (v->insn),
6416 v->lifetime * threshold * benefit, insn_count);
6417 v->ignore = 1;
6418 bl->all_reduced = 0;
6420 else
6422 /* Check that we can increment the reduced giv without a
6423 multiply insn. If not, reject it. */
6425 for (tv = bl->biv; tv; tv = tv->next_iv)
6426 if (tv->mult_val == const1_rtx
6427 && ! product_cheap_p (tv->add_val, v->mult_val))
6429 if (loop_dump_stream)
6430 fprintf (loop_dump_stream,
6431 "giv of insn %d: would need a multiply.\n",
6432 INSN_UID (v->insn));
6433 v->ignore = 1;
6434 bl->all_reduced = 0;
6435 break;
6440 /* Check for givs whose first use is their definition and whose
6441 last use is the definition of another giv. If so, it is likely
6442 dead and should not be used to derive another giv nor to
6443 eliminate a biv. */
6444 loop_givs_dead_check (loop, bl);
6446 /* Reduce each giv that we decided to reduce. */
6447 loop_givs_reduce (loop, bl);
6449 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
6450 as not reduced.
6452 For each giv register that can be reduced now: if replaceable,
6453 substitute reduced reg wherever the old giv occurs;
6454 else add new move insn "giv_reg = reduced_reg". */
6455 loop_givs_rescan (loop, bl, reg_map);
6457 /* All the givs based on the biv bl have been reduced if they
6458 merit it. */
6460 /* For each giv not marked as maybe dead that has been combined with a
6461 second giv, clear any "maybe dead" mark on that second giv.
6462 v->new_reg will either be or refer to the register of the giv it
6463 combined with.
6465 Doing this clearing avoids problems in biv elimination where
6466 a giv's new_reg is a complex value that can't be put in the
6467 insn but the giv combined with (with a reg as new_reg) is
6468 marked maybe_dead. Since the register will be used in either
6469 case, we'd prefer it be used from the simpler giv. */
6471 for (v = bl->giv; v; v = v->next_iv)
6472 if (! v->maybe_dead && v->same)
6473 v->same->maybe_dead = 0;
6475 /* Try to eliminate the biv, if it is a candidate.
6476 This won't work if ! bl->all_reduced,
6477 since the givs we planned to use might not have been reduced.
6479 We have to be careful that we didn't initially think we could
6480 eliminate this biv because of a giv that we now think may be
6481 dead and shouldn't be used as a biv replacement.
6483 Also, there is the possibility that we may have a giv that looks
6484 like it can be used to eliminate a biv, but the resulting insn
6485 isn't valid. This can happen, for example, on the 88k, where a
6486 JUMP_INSN can compare a register only with zero. Attempts to
6487 replace it with a compare with a constant will fail.
6489 Note that in cases where this call fails, we may have replaced some
6490 of the occurrences of the biv with a giv, but no harm was done in
6491 doing so in the rare cases where it can occur. */
6493 if (bl->all_reduced == 1 && bl->eliminable
6494 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
6496 /* ?? If we created a new test to bypass the loop entirely,
6497 or otherwise drop straight in, based on this test, then
6498 we might want to rewrite it also. This way some later
6499 pass has more hope of removing the initialization of this
6500 biv entirely. */
6502 /* If final_value != 0, then the biv may be used after loop end
6503 and we must emit an insn to set it just in case.
6505 Reversed bivs already have an insn after the loop setting their
6506 value, so we don't need another one. We can't calculate the
6507 proper final value for such a biv here anyways. */
6508 if (bl->final_value && ! bl->reversed)
6509 loop_insn_sink_or_swim (loop,
6510 gen_load_of_final_value (bl->biv->dest_reg,
6511 bl->final_value));
6513 if (loop_dump_stream)
6514 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
6515 bl->regno);
6517 /* See above note wrt final_value. But since we couldn't eliminate
6518 the biv, we must set the value after the loop instead of before. */
6519 else if (bl->final_value && ! bl->reversed)
6520 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
6521 bl->final_value));
6524 /* Go through all the instructions in the loop, making all the
6525 register substitutions scheduled in REG_MAP. */
6527 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
6528 if (INSN_P (p))
6530 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
6531 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
6532 INSN_CODE (p) = -1;
6535 if (loop_dump_stream)
6536 fprintf (loop_dump_stream, "\n");
6538 loop_ivs_free (loop);
6539 if (reg_map)
6540 free (reg_map);
6543 /* Record all basic induction variables calculated in the insn. */
6544 static rtx
6545 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
6546 int maybe_multiple)
6548 struct loop_ivs *ivs = LOOP_IVS (loop);
6549 rtx set;
6550 rtx dest_reg;
6551 rtx inc_val;
6552 rtx mult_val;
6553 rtx *location;
6555 if (NONJUMP_INSN_P (p)
6556 && (set = single_set (p))
6557 && REG_P (SET_DEST (set)))
6559 dest_reg = SET_DEST (set);
6560 if (REGNO (dest_reg) < max_reg_before_loop
6561 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
6562 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
6564 if (basic_induction_var (loop, SET_SRC (set),
6565 GET_MODE (SET_SRC (set)),
6566 dest_reg, p, &inc_val, &mult_val,
6567 &location))
6569 /* It is a possible basic induction variable.
6570 Create and initialize an induction structure for it. */
6572 struct induction *v = xmalloc (sizeof (struct induction));
6574 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
6575 not_every_iteration, maybe_multiple);
6576 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
6578 else if (REGNO (dest_reg) < ivs->n_regs)
6579 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
6582 return p;
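/* Illustrative sketch (not part of the pass): in a source loop such as

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the insn setting I to (plus (reg I) (const_int 1)) is accepted by
   basic_induction_var, and I is recorded as a biv with inc_val 1 and
   mult_val const1_rtx.  */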
6585 /* Record all givs calculated in the insn.
6586 A register is a giv if: it is only set once, it is a function of a
6587 biv and a constant (or invariant), and it is not a biv. */
6588 static rtx
6589 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
6590 int maybe_multiple)
6592 struct loop_regs *regs = LOOP_REGS (loop);
6594 rtx set;
6595 /* Look for a general induction variable in a register. */
6596 if (NONJUMP_INSN_P (p)
6597 && (set = single_set (p))
6598 && REG_P (SET_DEST (set))
6599 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
6601 rtx src_reg;
6602 rtx dest_reg;
6603 rtx add_val;
6604 rtx mult_val;
6605 rtx ext_val;
6606 int benefit;
6607 rtx regnote = 0;
6608 rtx last_consec_insn;
6610 dest_reg = SET_DEST (set);
6611 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
6612 return p;
6614 if (/* SET_SRC is a giv. */
6615 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
6616 &mult_val, &ext_val, 0, &benefit, VOIDmode)
6617 /* Equivalent expression is a giv. */
6618 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
6619 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
6620 &add_val, &mult_val, &ext_val, 0,
6621 &benefit, VOIDmode)))
6622 /* Don't try to handle any regs made by loop optimization.
6623 We have nothing on them in regno_first_uid, etc. */
6624 && REGNO (dest_reg) < max_reg_before_loop
6625 /* Don't recognize a BASIC_INDUCT_VAR here. */
6626 && dest_reg != src_reg
6627 /* This must be the only place where the register is set. */
6628 && (regs->array[REGNO (dest_reg)].n_times_set == 1
6629 /* or all sets must be consecutive and make a giv. */
6630 || (benefit = consec_sets_giv (loop, benefit, p,
6631 src_reg, dest_reg,
6632 &add_val, &mult_val, &ext_val,
6633 &last_consec_insn))))
6635 struct induction *v = xmalloc (sizeof (struct induction));
6637 /* If this is a library call, increase benefit. */
6638 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6639 benefit += libcall_benefit (p);
6641 /* Skip the consecutive insns, if there are any. */
6642 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
6643 p = last_consec_insn;
6645 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
6646 ext_val, benefit, DEST_REG, not_every_iteration,
6647 maybe_multiple, (rtx*) 0);
6652 /* Look for givs which are memory addresses. */
6653 if (NONJUMP_INSN_P (p))
6654 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
6655 maybe_multiple);
6657 /* Update the status of whether giv can derive other givs. This can
6658 change when we pass a label or an insn that updates a biv. */
6659 if (INSN_P (p) || LABEL_P (p))
6660 update_giv_derive (loop, p);
6661 return p;
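/* Illustrative sketch (not part of the pass): with biv I as above, a
   hypothetical insn

	(set (reg J) (plus (mult (reg I) (const_int 4)) (reg BASE)))

   makes J a DEST_REG giv of I with mult_val 4 and add_val BASE, while
   the address inside a (mem ...) computing &a[i] would be recorded by
   find_mem_givs below as a DEST_ADDR giv.  */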
6664 /* Return 1 if X is a valid source for an initial value (or as value being
6665 compared against in an initial test).
6667 X must be either a register or constant and must not be clobbered between
6668 the current insn and the start of the loop.
6670 INSN is the insn containing X. */
6672 static int
6673 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
6675 if (CONSTANT_P (x))
6676 return 1;
6678 /* Only consider pseudos we know about initialized in insns whose luids
6679 we know. */
6680 if (!REG_P (x)
6681 || REGNO (x) >= max_reg_before_loop)
6682 return 0;
6684 /* Don't use call-clobbered registers across a call which clobbers it. On
6685 some machines, don't use any hard registers at all. */
6686 if (REGNO (x) < FIRST_PSEUDO_REGISTER
6687 && (SMALL_REGISTER_CLASSES
6688 || (call_used_regs[REGNO (x)] && call_seen)))
6689 return 0;
6691 /* Don't use registers that have been clobbered before the start of the
6692 loop. */
6693 if (reg_set_between_p (x, insn, loop_start))
6694 return 0;
6696 return 1;
6699 /* Scan X for memory refs and check each memory address
6700 as a possible giv. INSN is the insn whose pattern X comes from.
6701 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
6702 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
6703 more than once in each loop iteration. */
6705 static void
6706 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
6707 int not_every_iteration, int maybe_multiple)
6709 int i, j;
6710 enum rtx_code code;
6711 const char *fmt;
6713 if (x == 0)
6714 return;
6716 code = GET_CODE (x);
6717 switch (code)
6719 case REG:
6720 case CONST_INT:
6721 case CONST:
6722 case CONST_DOUBLE:
6723 case SYMBOL_REF:
6724 case LABEL_REF:
6725 case PC:
6726 case CC0:
6727 case ADDR_VEC:
6728 case ADDR_DIFF_VEC:
6729 case USE:
6730 case CLOBBER:
6731 return;
6733 case MEM:
6735 rtx src_reg;
6736 rtx add_val;
6737 rtx mult_val;
6738 rtx ext_val;
6739 int benefit;
6741 /* This code used to disable creating GIVs with mult_val == 1 and
6742 add_val == 0. However, this leads to lost optimizations when
6743 it comes time to combine a set of related DEST_ADDR GIVs, since
6744 this one would not be seen. */
6746 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
6747 &mult_val, &ext_val, 1, &benefit,
6748 GET_MODE (x)))
6750 /* Found one; record it. */
6751 struct induction *v = xmalloc (sizeof (struct induction));
6753 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
6754 add_val, ext_val, benefit, DEST_ADDR,
6755 not_every_iteration, maybe_multiple, &XEXP (x, 0));
6757 v->mem = x;
6760 return;
6762 default:
6763 break;
6766 /* Recursively scan the subexpressions for other mem refs. */
6768 fmt = GET_RTX_FORMAT (code);
6769 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6770 if (fmt[i] == 'e')
6771 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
6772 maybe_multiple);
6773 else if (fmt[i] == 'E')
6774 for (j = 0; j < XVECLEN (x, i); j++)
6775 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
6776 maybe_multiple);
6779 /* Fill in the data about one biv update.
6780 V is the `struct induction' in which we record the biv. (It is
6781 allocated by the caller, with xmalloc.)
6782 INSN is the insn that sets it.
6783 DEST_REG is the biv's reg.
6785 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
6786 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
6787 being set to INC_VAL.
6789 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
6790 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
6791 can be executed more than once per iteration. If MAYBE_MULTIPLE
6792 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
6793 executed exactly once per iteration. */
6795 static void
6796 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
6797 rtx inc_val, rtx mult_val, rtx *location,
6798 int not_every_iteration, int maybe_multiple)
6800 struct loop_ivs *ivs = LOOP_IVS (loop);
6801 struct iv_class *bl;
6803 v->insn = insn;
6804 v->src_reg = dest_reg;
6805 v->dest_reg = dest_reg;
6806 v->mult_val = mult_val;
6807 v->add_val = inc_val;
6808 v->ext_dependent = NULL_RTX;
6809 v->location = location;
6810 v->mode = GET_MODE (dest_reg);
6811 v->always_computable = ! not_every_iteration;
6812 v->always_executed = ! not_every_iteration;
6813 v->maybe_multiple = maybe_multiple;
6814 v->same = 0;
6816 /* Add this to the reg's iv_class, creating a class
6817 if this is the first incrementation of the reg. */
6819 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
6820 if (bl == 0)
6822 /* Create and initialize new iv_class. */
6824 bl = xmalloc (sizeof (struct iv_class));
6826 bl->regno = REGNO (dest_reg);
6827 bl->biv = 0;
6828 bl->giv = 0;
6829 bl->biv_count = 0;
6830 bl->giv_count = 0;
6832 /* Set initial value to the reg itself. */
6833 bl->initial_value = dest_reg;
6834 bl->final_value = 0;
6835 /* We haven't seen the initializing insn yet. */
6836 bl->init_insn = 0;
6837 bl->init_set = 0;
6838 bl->initial_test = 0;
6839 bl->incremented = 0;
6840 bl->eliminable = 0;
6841 bl->nonneg = 0;
6842 bl->reversed = 0;
6843 bl->total_benefit = 0;
6845 /* Add this class to ivs->list. */
6846 bl->next = ivs->list;
6847 ivs->list = bl;
6849 /* Put it in the array of biv register classes. */
6850 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
6852 else
6854 /* Check if location is the same as a previous one. */
6855 struct induction *induction;
6856 for (induction = bl->biv; induction; induction = induction->next_iv)
6857 if (location == induction->location)
6859 v->same = induction;
6860 break;
6864 /* Update IV_CLASS entry for this biv. */
6865 v->next_iv = bl->biv;
6866 bl->biv = v;
6867 bl->biv_count++;
6868 if (mult_val == const1_rtx)
6869 bl->incremented = 1;
6871 if (loop_dump_stream)
6872 loop_biv_dump (v, loop_dump_stream, 0);
6875 /* Fill in the data about one giv.
6876 V is the `struct induction' in which we record the giv. (It is
6877 allocated by the caller, with xmalloc.)
6878 INSN is the insn that sets it.
6879 BENEFIT estimates the savings from deleting this insn.
6880 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
6881 into a register or is used as a memory address.
6883 SRC_REG is the biv reg which the giv is computed from.
6884 DEST_REG is the giv's reg (if the giv is stored in a reg).
6885 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
6886 LOCATION points to the place where this giv's value appears in INSN. */
6888 static void
6889 record_giv (const struct loop *loop, struct induction *v, rtx insn,
6890 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
6891 rtx ext_val, int benefit, enum g_types type,
6892 int not_every_iteration, int maybe_multiple, rtx *location)
6894 struct loop_ivs *ivs = LOOP_IVS (loop);
6895 struct induction *b;
6896 struct iv_class *bl;
6897 rtx set = single_set (insn);
6898 rtx temp;
6900 /* Attempt to prove constantness of the values. Don't let simplify_rtx
6901 undo the MULT canonicalization that we performed earlier. */
6902 temp = simplify_rtx (add_val);
6903 if (temp
6904 && ! (GET_CODE (add_val) == MULT
6905 && GET_CODE (temp) == ASHIFT))
6906 add_val = temp;
6908 v->insn = insn;
6909 v->src_reg = src_reg;
6910 v->giv_type = type;
6911 v->dest_reg = dest_reg;
6912 v->mult_val = mult_val;
6913 v->add_val = add_val;
6914 v->ext_dependent = ext_val;
6915 v->benefit = benefit;
6916 v->location = location;
6917 v->cant_derive = 0;
6918 v->combined_with = 0;
6919 v->maybe_multiple = maybe_multiple;
6920 v->maybe_dead = 0;
6921 v->derive_adjustment = 0;
6922 v->same = 0;
6923 v->ignore = 0;
6924 v->new_reg = 0;
6925 v->final_value = 0;
6926 v->same_insn = 0;
6927 v->auto_inc_opt = 0;
6928 v->shared = 0;
6930 /* The v->always_computable field is used in update_giv_derive, to
6931 determine whether a giv can be used to derive another giv. For a
6932 DEST_REG giv, INSN computes a new value for the giv, so its value
6933 isn't computable if INSN isn't executed every iteration.
6934 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
6935 it does not compute a new value. Hence the value is always computable
6936 regardless of whether INSN is executed each iteration. */
6938 if (type == DEST_ADDR)
6939 v->always_computable = 1;
6940 else
6941 v->always_computable = ! not_every_iteration;
6943 v->always_executed = ! not_every_iteration;
6945 if (type == DEST_ADDR)
6947 v->mode = GET_MODE (*location);
6948 v->lifetime = 1;
6950 else /* type == DEST_REG */
6952 v->mode = GET_MODE (SET_DEST (set));
6954 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
6956 /* If the lifetime is zero, it means that this register is
6957 really a dead store. So mark this as a giv that can be
6958 ignored. This will not prevent the biv from being eliminated. */
6959 if (v->lifetime == 0)
6960 v->ignore = 1;
6962 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6963 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6966 /* Add the giv to the class of givs computed from one biv. */
6968 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
6969 gcc_assert (bl);
6970 v->next_iv = bl->giv;
6971 bl->giv = v;
6973 /* Don't count DEST_ADDR. This is supposed to count the number of
6974 insns that calculate givs. */
6975 if (type == DEST_REG)
6976 bl->giv_count++;
6977 bl->total_benefit += benefit;
6979 if (type == DEST_ADDR)
6981 v->replaceable = 1;
6982 v->not_replaceable = 0;
6984 else
6986 /* The giv can be replaced outright by the reduced register only if all
6987 of the following conditions are true:
6988 - the insn that sets the giv is always executed on any iteration
6989 on which the giv is used at all
6990 (there are two ways to deduce this:
6991 either the insn is executed on every iteration,
6992 or all uses follow that insn in the same basic block),
6993 - the giv is not used outside the loop
6994 - no assignments to the biv occur during the giv's lifetime. */
6996 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
6997 /* Previous line always fails if INSN was moved by loop opt. */
6998 && REGNO_LAST_LUID (REGNO (dest_reg))
6999 < INSN_LUID (loop->end)
7000 && (! not_every_iteration
7001 || last_use_this_basic_block (dest_reg, insn)))
7003 /* Now check that there are no assignments to the biv within the
7004 giv's lifetime. This requires two separate checks. */
7006 /* Check each biv update, and fail if any are between the first
7007 and last use of the giv.
7009 If this loop contains an inner loop that was unrolled, then
7010 the insn modifying the biv may have been emitted by the loop
7011 unrolling code, and hence does not have a valid luid. Just
7012 mark the giv as not replaceable in this case. Such a biv is not very
7013 useful anyway, because it is used in two different loops.
7014 It is very unlikely that we would be able to optimize the giv
7015 using this biv anyways. */
7017 v->replaceable = 1;
7018 v->not_replaceable = 0;
7019 for (b = bl->biv; b; b = b->next_iv)
7021 if (INSN_UID (b->insn) >= max_uid_for_loop
7022 || ((INSN_LUID (b->insn)
7023 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
7024 && (INSN_LUID (b->insn)
7025 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
7027 v->replaceable = 0;
7028 v->not_replaceable = 1;
7029 break;
7033 /* If there are any backwards branches that go from after the
7034 biv update to before it, then this giv is not replaceable. */
7035 if (v->replaceable)
7036 for (b = bl->biv; b; b = b->next_iv)
7037 if (back_branch_in_range_p (loop, b->insn))
7039 v->replaceable = 0;
7040 v->not_replaceable = 1;
7041 break;
7044 else
7046 /* May still be replaceable, we don't have enough info here to
7047 decide. */
7048 v->replaceable = 0;
7049 v->not_replaceable = 0;
7053 /* Record whether the add_val contains a const_int, for later use by
7054 combine_givs. */
7056 rtx tem = add_val;
7058 v->no_const_addval = 1;
7059 if (tem == const0_rtx)
7061 else if (CONSTANT_P (add_val))
7062 v->no_const_addval = 0;
7063 if (GET_CODE (tem) == PLUS)
7065 while (1)
7067 if (GET_CODE (XEXP (tem, 0)) == PLUS)
7068 tem = XEXP (tem, 0);
7069 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
7070 tem = XEXP (tem, 1);
7071 else
7072 break;
7074 if (CONSTANT_P (XEXP (tem, 1)))
7075 v->no_const_addval = 0;
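/* For example, ADD_VAL = (plus (reg 70) (const_int 4)) clears
   no_const_addval, as does (plus (plus (reg 70) (const_int 4))
   (reg 71)) once the walk reaches the inner PLUS; a bare (reg 70)
   leaves it set.  (Register numbers are hypothetical.)  */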
7079 if (loop_dump_stream)
7080 loop_giv_dump (v, loop_dump_stream, 0);
7083 /* Try to calculate the final value of the giv, the value it will have at
7084 the end of the loop. If we can do it, return that value. */
7086 static rtx
7087 final_giv_value (const struct loop *loop, struct induction *v)
7089 struct loop_ivs *ivs = LOOP_IVS (loop);
7090 struct iv_class *bl;
7091 rtx insn;
7092 rtx increment, tem;
7093 rtx seq;
7094 rtx loop_end = loop->end;
7095 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
7097 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
7099 /* The final value for givs which depend on reversed bivs must be calculated
7100 differently than for ordinary givs. In this case, there is already an
7101 insn after the loop which sets this giv's final value (if necessary),
7102 and there are no other loop exits, so we can return any value. */
7103 if (bl->reversed)
7105 if (loop_dump_stream)
7106 fprintf (loop_dump_stream,
7107 "Final giv value for %d, depends on reversed biv\n",
7108 REGNO (v->dest_reg));
7109 return const0_rtx;
7112 /* Try to calculate the final value as a function of the biv it depends
7113 upon. The only exit from the loop must be the fall through at the bottom
7114 and the insn that sets the giv must be executed on every iteration
7115 (otherwise the giv may not have its final value when the loop exits). */
7117 /* ??? We could calculate the final giv value by subtracting off the
7118 extra biv increments times the giv's mult_val. The loop must have
7119 only one exit for this to work, but the number of loop iterations
7120 does not need to be known. */
7122 if (n_iterations != 0
7123 && ! loop->exit_count
7124 && v->always_executed)
7126 /* ?? It is tempting to use the biv's value here since these insns will
7127 be put after the loop, and hence the biv will have its final value
7128 then. However, this fails if the biv is subsequently eliminated.
7129 Perhaps determine whether biv's are eliminable before trying to
7130 determine whether giv's are replaceable so that we can use the
7131 biv value here if it is not eliminable. */
7133 /* We are emitting code after the end of the loop, so we must make
7134 sure that bl->initial_value is still valid then. It will still
7135 be valid if it is invariant. */
7137 increment = biv_total_increment (bl);
7139 if (increment && loop_invariant_p (loop, increment)
7140 && loop_invariant_p (loop, bl->initial_value))
7142 /* Can calculate the loop exit value of its biv as
7143 (n_iterations * increment) + initial_value */
7145 /* The loop exit value of the giv is then
7146 (final_biv_value - extra increments) * mult_val + add_val.
7147 The extra increments are any increments to the biv which
7148 occur in the loop after the giv's value is calculated.
7149 We must search from the insn that sets the giv to the end
7150 of the loop to calculate this value. */
7152 /* Put the final biv value in tem. */
7153 tem = gen_reg_rtx (v->mode);
7154 record_base_value (REGNO (tem), bl->biv->add_val, 0);
7155 loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment),
7156 GEN_INT (n_iterations),
7157 extend_value_for_giv (v, bl->initial_value),
7158 tem);
7160 /* Subtract off extra increments as we find them. */
7161 for (insn = NEXT_INSN (v->insn); insn != loop_end;
7162 insn = NEXT_INSN (insn))
7164 struct induction *biv;
7166 for (biv = bl->biv; biv; biv = biv->next_iv)
7167 if (biv->insn == insn)
7169 start_sequence ();
7170 tem = expand_simple_binop (GET_MODE (tem), MINUS, tem,
7171 biv->add_val, NULL_RTX, 0,
7172 OPTAB_LIB_WIDEN);
7173 seq = get_insns ();
7174 end_sequence ();
7175 loop_insn_sink (loop, seq);
7179 /* Now calculate the giv's final value. */
7180 loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem);
7182 if (loop_dump_stream)
7183 fprintf (loop_dump_stream,
7184 "Final giv value for %d, calc from biv's value.\n",
7185 REGNO (v->dest_reg));
7187 return tem;
7191 /* Replaceable giv's should never reach here. */
7192 gcc_assert (!v->replaceable);
7194 /* Check to see if the biv is dead at all loop exits. */
7195 if (reg_dead_after_loop (loop, v->dest_reg))
7197 if (loop_dump_stream)
7198 fprintf (loop_dump_stream,
7199 "Final giv value for %d, giv dead after loop exit.\n",
7200 REGNO (v->dest_reg));
7202 return const0_rtx;
7205 return 0;
7208 /* All this does is determine whether a giv can be made replaceable because
7209 its final value can be calculated. This code cannot be part of record_giv
7210 above, because final_giv_value requires that the number of loop iterations
7211 be known, and that cannot be accurately calculated until after all givs
7212 have been identified. */
7214 static void
7215 check_final_value (const struct loop *loop, struct induction *v)
7217 rtx final_value = 0;
7219 /* DEST_ADDR givs will never reach here, because they are always marked
7220 replaceable above in record_giv. */
7222 /* The giv can be replaced outright by the reduced register only if all
7223 of the following conditions are true:
7224 - the insn that sets the giv is always executed on any iteration
7225 on which the giv is used at all
7226 (there are two ways to deduce this:
7227 either the insn is executed on every iteration,
7228 or all uses follow that insn in the same basic block),
7229 - its final value can be calculated (this condition is different
7230 than the one above in record_giv)
7231 - it's not used before it's set
7232 - no assignments to the biv occur during the giv's lifetime. */
7234 #if 0
7235 /* This is only called now when replaceable is known to be false. */
7236 /* Clear replaceable, so that it won't confuse final_giv_value. */
7237 v->replaceable = 0;
7238 #endif
7240 if ((final_value = final_giv_value (loop, v))
7241 && (v->always_executed
7242 || last_use_this_basic_block (v->dest_reg, v->insn)))
7244 int biv_increment_seen = 0, before_giv_insn = 0;
7245 rtx p = v->insn;
7246 rtx last_giv_use;
7248 v->replaceable = 1;
7249 v->not_replaceable = 0;
7251 /* When trying to determine whether or not a biv increment occurs
7252 during the lifetime of the giv, we can ignore uses of the variable
7253 outside the loop because final_value is true. Hence we cannot
7254 use regno_last_uid and regno_first_uid as above in record_giv. */
7256 /* Search the loop to determine whether any assignments to the
7257 biv occur during the giv's lifetime. Start with the insn
7258 that sets the giv, and search around the loop until we come
7259 back to that insn again.
7261 Also fail if there is a jump within the giv's lifetime that jumps
7262 to somewhere outside the lifetime but still within the loop. This
7263 catches spaghetti code where the execution order is not linear, and
7264 hence the above test fails. Here we assume that the giv lifetime
7265 does not extend from one iteration of the loop to the next, so as
7266 to make the test easier. Since the lifetime isn't known yet,
7267 this requires two loops. See also record_giv above. */
7269 last_giv_use = v->insn;
7271 while (1)
7273 p = NEXT_INSN (p);
7274 if (p == loop->end)
7276 before_giv_insn = 1;
7277 p = NEXT_INSN (loop->start);
7279 if (p == v->insn)
7280 break;
7282 if (INSN_P (p))
7284 /* It is possible for the BIV increment to use the GIV if we
7285 have a cycle. Thus we must be sure to check each insn for
7286 both BIV and GIV uses, and we must check for BIV uses
7287 first. */
7289 if (! biv_increment_seen
7290 && reg_set_p (v->src_reg, PATTERN (p)))
7291 biv_increment_seen = 1;
7293 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
7295 if (biv_increment_seen || before_giv_insn)
7297 v->replaceable = 0;
7298 v->not_replaceable = 1;
7299 break;
7301 last_giv_use = p;
7306 /* Now that the lifetime of the giv is known, check for branches
7307 from within the lifetime to outside the lifetime if it is still
7308 replaceable. */
7310 if (v->replaceable)
7312 p = v->insn;
7313 while (1)
7315 p = NEXT_INSN (p);
7316 if (p == loop->end)
7317 p = NEXT_INSN (loop->start);
7318 if (p == last_giv_use)
7319 break;
7321 if (JUMP_P (p) && JUMP_LABEL (p)
7322 && LABEL_NAME (JUMP_LABEL (p))
7323 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
7324 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
7325 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
7326 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
7328 v->replaceable = 0;
7329 v->not_replaceable = 1;
7331 if (loop_dump_stream)
7332 fprintf (loop_dump_stream,
7333 "Found branch outside giv lifetime.\n");
7335 break;
7340 /* If it is replaceable, then save the final value. */
7341 if (v->replaceable)
7342 v->final_value = final_value;
7345 if (loop_dump_stream && v->replaceable)
7346 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
7347 INSN_UID (v->insn), REGNO (v->dest_reg));
7350 /* Update the status of whether a giv can derive other givs.
7352 We need to do something special if there is or may be an update to the biv
7353 between the time the giv is defined and the time it is used to derive
7354 another giv.
7356 In addition, a giv that is only conditionally set is not allowed to
7357 derive another giv once a label has been passed.
7359 The cases we look at are when a label or an update to a biv is passed. */
7361 static void
7362 update_giv_derive (const struct loop *loop, rtx p)
7364 struct loop_ivs *ivs = LOOP_IVS (loop);
7365 struct iv_class *bl;
7366 struct induction *biv, *giv;
7367 rtx tem;
7368 int dummy;
7370 /* Search all IV classes, then all bivs, and finally all givs.
7372 There are three cases we are concerned with. First we have the situation
7373 of a giv that is only updated conditionally. In that case, it may not
7374 derive any givs after a label is passed.
7376 The second case is when a biv update occurs, or may occur, after the
7377 definition of a giv. For certain biv updates (see below) that are
7378 known to occur between the giv definition and use, we can adjust the
7379 giv definition. For others, or when the biv update is conditional,
7380 we must prevent the giv from deriving any other givs. There are two
7381 sub-cases within this case.
7383 If this is a label, we are concerned with any biv update that is done
7384 conditionally, since it may be done after the giv is defined followed by
7385 a branch here (actually, we need to pass both a jump and a label, but
7386 this extra tracking doesn't seem worth it).
7388 If this is a jump, we are concerned about any biv update that may be
7389 executed multiple times. We are actually only concerned about
7390 backward jumps, but it is probably not worth performing the test
7391 on the jump again here.
7393 If this is a biv update, we must adjust the giv status to show that a
7394 subsequent biv update was performed. If this adjustment cannot be done,
7395 the giv cannot derive further givs. */
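/* A hedged example of the biv-update case (illustrative only):
   suppose the update "B = B + 2" occurs at insn P after a giv
   G = 3*B + 5 was defined.  Any giv later derived from G must
   compensate for the missed update, so we record
   derive_adjustment = add_val * mult_val = 2*3 = 6.  If that
   product (or its sum with an earlier adjustment) cannot be
   simplified, cant_derive is set instead.  */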
7397 for (bl = ivs->list; bl; bl = bl->next)
7398 for (biv = bl->biv; biv; biv = biv->next_iv)
7399 if (LABEL_P (p) || JUMP_P (p)
7400 || biv->insn == p)
7402 /* Skip if location is the same as a previous one. */
7403 if (biv->same)
7404 continue;
7406 for (giv = bl->giv; giv; giv = giv->next_iv)
7408 /* If cant_derive is already true, there is no point in
7409 checking all of these conditions again. */
7410 if (giv->cant_derive)
7411 continue;
7413 /* If this giv is conditionally set and we have passed a label,
7414 it cannot derive anything. */
7415 if (LABEL_P (p) && ! giv->always_computable)
7416 giv->cant_derive = 1;
7418 /* Skip givs that have mult_val == 0, since
7419 they are really invariants. Also skip those that are
7420 replaceable, since we know their lifetime doesn't contain
7421 any biv update. */
7422 else if (giv->mult_val == const0_rtx || giv->replaceable)
7423 continue;
7425 /* The only way we can allow this giv to derive another
7426 is if this is a biv increment and we can form the product
7427 of biv->add_val and giv->mult_val. In this case, we will
7428 be able to compute a compensation. */
7429 else if (biv->insn == p)
7431 rtx ext_val_dummy;
7433 tem = 0;
7434 if (biv->mult_val == const1_rtx)
7435 tem = simplify_giv_expr (loop,
7436 gen_rtx_MULT (giv->mode,
7437 biv->add_val,
7438 giv->mult_val),
7439 &ext_val_dummy, &dummy);
7441 if (tem && giv->derive_adjustment)
7442 tem = simplify_giv_expr
7443 (loop,
7444 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
7445 &ext_val_dummy, &dummy);
7447 if (tem)
7448 giv->derive_adjustment = tem;
7449 else
7450 giv->cant_derive = 1;
7452 else if ((LABEL_P (p) && ! biv->always_computable)
7453 || (JUMP_P (p) && biv->maybe_multiple))
7454 giv->cant_derive = 1;
7459 /* Check whether an insn is an increment legitimate for a basic induction var.
7460 X is the source of insn P, or a part of it.
7461 MODE is the mode in which X should be interpreted.
7463 DEST_REG is the putative biv, also the destination of the insn.
7464 We accept patterns of these forms:
7465 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
7466 REG = INVARIANT + REG
7468 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
7469 store the additive term into *INC_VAL, and store the place where
7470 we found the additive term into *LOCATION.
7472 If X is an assignment of an invariant into DEST_REG, we set
7473 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
7475 We also want to detect a BIV when it corresponds to a variable
7476 whose mode was promoted. In that case, an increment
7477 of the variable may be a PLUS that adds a SUBREG of that variable to
7478 an invariant and then sign- or zero-extends the result of the PLUS
7479 into the variable.
7481 Most GIVs in such cases will be in the promoted mode, since that is
7482 probably the natural computation mode (and almost certainly the mode
7483 used for addresses) on the machine. So we view the pseudo-reg containing
7484 the variable as the BIV, as if it were simply incremented.
7486 Note that treating the entire pseudo as a BIV will result in making
7487 simple increments to any GIVs based on it. However, if the variable
7488 overflows in its declared mode but not its promoted mode, the result will
7489 be incorrect. This is acceptable if the variable is signed, since
7490 overflows in such cases are undefined, but not if it is unsigned, since
7491 those overflows are defined. So we only check for SIGN_EXTEND and
7492 not ZERO_EXTEND.
7494 If we cannot find a biv, we return 0. */
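/* Hedged examples of patterns this function accepts (illustrative
   only; the register numbers are made up):

       (set (reg 100) (plus (reg 100) (const_int 4)))
           a biv: *inc_val = (const_int 4), *mult_val = const1_rtx

       (set (reg 101) (const_int 0))
           accepted in the innermost loop only:
           *inc_val = (const_int 0), *mult_val = const0_rtx  */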
7496 static int
7497 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
7498 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
7499 rtx **location)
7501 enum rtx_code code;
7502 rtx *argp, arg;
7503 rtx insn, set = 0, last, inc;
7505 code = GET_CODE (x);
7506 *location = NULL;
7507 switch (code)
7509 case PLUS:
7510 if (rtx_equal_p (XEXP (x, 0), dest_reg)
7511 || (GET_CODE (XEXP (x, 0)) == SUBREG
7512 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
7513 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
7515 argp = &XEXP (x, 1);
7517 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
7518 || (GET_CODE (XEXP (x, 1)) == SUBREG
7519 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
7520 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
7522 argp = &XEXP (x, 0);
7524 else
7525 return 0;
7527 arg = *argp;
7528 if (loop_invariant_p (loop, arg) != 1)
7529 return 0;
7531 /* convert_modes can emit new instructions, e.g. when arg is a loop
7532 invariant MEM and dest_reg has a different mode.
7533 These instructions would be emitted after the end of the function
7534 and then *inc_val would be an uninitialized pseudo.
7535 Detect this and bail in this case.
7536 Other alternatives would be: introduce a convert_modes variant
7537 which is allowed to fail but not allowed to emit new instructions;
7538 emit these instructions before the loop start and let them be
7539 garbage collected if *inc_val is never used; or save the *inc_val
7540 initialization sequence generated here and emit it at some suitable
7541 place when *inc_val is actually used. */
7542 last = get_last_insn ();
7543 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
7544 if (get_last_insn () != last)
7546 delete_insns_since (last);
7547 return 0;
7550 *inc_val = inc;
7551 *mult_val = const1_rtx;
7552 *location = argp;
7553 return 1;
7555 case SUBREG:
7556 /* If what's inside the SUBREG is a BIV, then the SUBREG is one too.
7557 This will handle addition of promoted variables.
7558 ??? The comment at the start of this function is wrong: promoted
7559 variable increments don't look like it says they do. */
7560 return basic_induction_var (loop, SUBREG_REG (x),
7561 GET_MODE (SUBREG_REG (x)),
7562 dest_reg, p, inc_val, mult_val, location);
7564 case REG:
7565 /* If this register is assigned in a previous insn, look at its
7566 source, but don't go outside the loop or past a label. */
7568 /* If this sets a register to itself, we would repeat any previous
7569 biv increment if we applied this strategy blindly. */
7570 if (rtx_equal_p (dest_reg, x))
7571 return 0;
7573 insn = p;
7574 while (1)
7576 rtx dest;
7579 insn = PREV_INSN (insn);
7581 while (insn && NOTE_P (insn)
7582 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7584 if (!insn)
7585 break;
7586 set = single_set (insn);
7587 if (set == 0)
7588 break;
7589 dest = SET_DEST (set);
7590 if (dest == x
7591 || (GET_CODE (dest) == SUBREG
7592 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
7593 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
7594 && SUBREG_REG (dest) == x))
7595 return basic_induction_var (loop, SET_SRC (set),
7596 (GET_MODE (SET_SRC (set)) == VOIDmode
7597 ? GET_MODE (x)
7598 : GET_MODE (SET_SRC (set))),
7599 dest_reg, insn,
7600 inc_val, mult_val, location);
7602 while (GET_CODE (dest) == SUBREG
7603 || GET_CODE (dest) == ZERO_EXTRACT
7604 || GET_CODE (dest) == STRICT_LOW_PART)
7605 dest = XEXP (dest, 0);
7606 if (dest == x)
7607 break;
7609 /* Fall through. */
7611 /* Can accept constant setting of biv only when inside innermost loop.
7612 Otherwise, a biv of an inner loop may be incorrectly recognized
7613 as a biv of the outer loop,
7614 causing code to be moved INTO the inner loop. */
7615 case MEM:
7616 if (loop_invariant_p (loop, x) != 1)
7617 return 0;
7618 case CONST_INT:
7619 case SYMBOL_REF:
7620 case CONST:
7621 /* convert_modes aborts if we try to convert to or from CCmode, so just
7622 exclude that case. It is very unlikely that a condition code value
7623 would be a useful iterator anyway. convert_modes also aborts if we try
7624 to convert a float mode to non-float or vice versa, so exclude that too. */
7625 if (loop->level == 1
7626 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
7627 && GET_MODE_CLASS (mode) != MODE_CC)
7629 /* Possible bug here? Perhaps we don't know the mode of X. */
7630 last = get_last_insn ();
7631 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
7632 if (get_last_insn () != last)
7634 delete_insns_since (last);
7635 return 0;
7638 *inc_val = inc;
7639 *mult_val = const0_rtx;
7640 return 1;
7642 else
7643 return 0;
7645 case SIGN_EXTEND:
7646 /* Ignore this BIV if signed arithmetic overflow is defined. */
7647 if (flag_wrapv)
7648 return 0;
7649 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
7650 dest_reg, p, inc_val, mult_val, location);
7652 case ASHIFTRT:
7653 /* Similar, since this can be a sign extension. */
7654 for (insn = PREV_INSN (p);
7655 (insn && NOTE_P (insn)
7656 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7657 insn = PREV_INSN (insn))
7660 if (insn)
7661 set = single_set (insn);
7663 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
7664 && set && SET_DEST (set) == XEXP (x, 0)
7665 && GET_CODE (XEXP (x, 1)) == CONST_INT
7666 && INTVAL (XEXP (x, 1)) >= 0
7667 && GET_CODE (SET_SRC (set)) == ASHIFT
7668 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
7669 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
7670 GET_MODE (XEXP (x, 0)),
7671 dest_reg, insn, inc_val, mult_val,
7672 location);
7673 return 0;
7675 default:
7676 return 0;
7680 /* A general induction variable (giv) is any quantity that is a linear
7681 function of a basic induction variable,
7682 i.e. giv = biv * mult_val + add_val.
7683 The coefficients can be any loop invariant quantity.
7684 A giv need not be computed directly from the biv;
7685 it can be computed by way of other givs. */
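/* A concrete C-level illustration (not from the original sources) of
   the two kinds of induction variables:

       for (i = 0; i < n; i++)   -- i is a biv: i = i + 1
         p[i] = 0;               -- the address p + i*4 is a giv

   The address computation is a linear function of the biv i, with
   mult_val 4 and add_val p (assuming 4-byte array elements).  */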
7687 /* Determine whether X computes a giv.
7688 If it does, return a nonzero value
7689 which is the benefit from eliminating the computation of X;
7690 set *SRC_REG to the register of the biv that it is computed from;
7691 set *ADD_VAL and *MULT_VAL to the coefficients,
7692 such that the value of X is biv * mult + add. */
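/* For example (illustrative only), if X is
   (plus (mult (reg B) (const_int 4)) (const_int 8)) where B is a
   biv, this returns nonzero and sets *SRC_REG = (reg B),
   *MULT_VAL = (const_int 4) and *ADD_VAL = (const_int 8).  */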
7694 static int
7695 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
7696 rtx *add_val, rtx *mult_val, rtx *ext_val,
7697 int is_addr, int *pbenefit,
7698 enum machine_mode addr_mode)
7700 struct loop_ivs *ivs = LOOP_IVS (loop);
7701 rtx orig_x = x;
7703 /* If this is an invariant, forget it, it isn't a giv. */
7704 if (loop_invariant_p (loop, x) == 1)
7705 return 0;
7707 *pbenefit = 0;
7708 *ext_val = NULL_RTX;
7709 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
7710 if (x == 0)
7711 return 0;
7713 switch (GET_CODE (x))
7715 case USE:
7716 case CONST_INT:
7717 /* Since this is now an invariant and wasn't before, it must be a giv
7718 with MULT_VAL == 0. It doesn't matter which BIV we associate this
7719 with. */
7720 *src_reg = ivs->list->biv->dest_reg;
7721 *mult_val = const0_rtx;
7722 *add_val = x;
7723 break;
7725 case REG:
7726 /* This is equivalent to a BIV. */
7727 *src_reg = x;
7728 *mult_val = const1_rtx;
7729 *add_val = const0_rtx;
7730 break;
7732 case PLUS:
7733 /* Either (plus (biv) (invar)) or
7734 (plus (mult (biv) (invar_1)) (invar_2)). */
7735 if (GET_CODE (XEXP (x, 0)) == MULT)
7737 *src_reg = XEXP (XEXP (x, 0), 0);
7738 *mult_val = XEXP (XEXP (x, 0), 1);
7740 else
7742 *src_reg = XEXP (x, 0);
7743 *mult_val = const1_rtx;
7745 *add_val = XEXP (x, 1);
7746 break;
7748 case MULT:
7749 /* ADD_VAL is zero. */
7750 *src_reg = XEXP (x, 0);
7751 *mult_val = XEXP (x, 1);
7752 *add_val = const0_rtx;
7753 break;
7755 default:
7756 gcc_unreachable ();
7759 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
7760 one unless they are CONST_INT). */
7761 if (GET_CODE (*add_val) == USE)
7762 *add_val = XEXP (*add_val, 0);
7763 if (GET_CODE (*mult_val) == USE)
7764 *mult_val = XEXP (*mult_val, 0);
7766 if (is_addr)
7767 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
7768 else
7769 *pbenefit += rtx_cost (orig_x, SET);
7771 /* Always return true if this is a giv so it will be detected as such,
7772 even if the benefit is zero or negative. This allows elimination
7773 of bivs that might otherwise not be eliminated. */
7774 return 1;
7777 /* Given an expression, X, try to form it as a linear function of a biv.
7778 We will canonicalize it to be of the form
7779 (plus (mult (BIV) (invar_1))
7780 (invar_2))
7781 with possible degeneracies.
7783 The invariant expressions must each be of a form that can be used as a
7784 machine operand. We surround them with a USE rtx (a hack, but localized
7785 and certainly unambiguous!) if not a CONST_INT for simplicity in this
7786 routine; it is the caller's responsibility to strip them.
7788 If no such canonicalization is possible (i.e., two biv's are used or an
7789 expression that is neither invariant nor a biv or giv), this routine
7790 returns 0.
7792 For a nonzero return, the result will have a code of CONST_INT, USE,
7793 REG (for a BIV), PLUS, or MULT. No other codes will occur.
7795 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
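/* An illustrative canonicalization (not from the original sources):
   given a biv B, the expression (B + 2) * 4 - B, i.e.

       (minus (mult (plus (reg B) (const_int 2)) (const_int 4))
              (reg B))

   is rewritten step by step (MINUS becomes a PLUS of a MULT by -1,
   the MULT distributes over the PLUS, and the two MULTs of B
   combine) into

       (plus (mult (reg B) (const_int 3)) (const_int 8))  */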
7797 static rtx sge_plus (enum machine_mode, rtx, rtx);
7798 static rtx sge_plus_constant (rtx, rtx);
7800 static rtx
7801 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
7803 struct loop_ivs *ivs = LOOP_IVS (loop);
7804 struct loop_regs *regs = LOOP_REGS (loop);
7805 enum machine_mode mode = GET_MODE (x);
7806 rtx arg0, arg1;
7807 rtx tem;
7809 /* If this is not an integer mode, or if we cannot do arithmetic in this
7810 mode, this can't be a giv. */
7811 if (mode != VOIDmode
7812 && (GET_MODE_CLASS (mode) != MODE_INT
7813 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
7814 return NULL_RTX;
7816 switch (GET_CODE (x))
7818 case PLUS:
7819 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7820 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7821 if (arg0 == 0 || arg1 == 0)
7822 return NULL_RTX;
7824 /* Put constant last, CONST_INT last if both constant. */
7825 if ((GET_CODE (arg0) == USE
7826 || GET_CODE (arg0) == CONST_INT)
7827 && ! ((GET_CODE (arg0) == USE
7828 && GET_CODE (arg1) == USE)
7829 || GET_CODE (arg1) == CONST_INT))
7830 tem = arg0, arg0 = arg1, arg1 = tem;
7832 /* Handle addition of zero, then addition of an invariant. */
7833 if (arg1 == const0_rtx)
7834 return arg0;
7835 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
7836 switch (GET_CODE (arg0))
7838 case CONST_INT:
7839 case USE:
7840 /* Adding two invariants must result in an invariant, so enclose
7841 addition operation inside a USE and return it. */
7842 if (GET_CODE (arg0) == USE)
7843 arg0 = XEXP (arg0, 0);
7844 if (GET_CODE (arg1) == USE)
7845 arg1 = XEXP (arg1, 0);
7847 if (GET_CODE (arg0) == CONST_INT)
7848 tem = arg0, arg0 = arg1, arg1 = tem;
7849 if (GET_CODE (arg1) == CONST_INT)
7850 tem = sge_plus_constant (arg0, arg1);
7851 else
7852 tem = sge_plus (mode, arg0, arg1);
7854 if (GET_CODE (tem) != CONST_INT)
7855 tem = gen_rtx_USE (mode, tem);
7856 return tem;
7858 case REG:
7859 case MULT:
7860 /* biv + invar or mult + invar. Return sum. */
7861 return gen_rtx_PLUS (mode, arg0, arg1);
7863 case PLUS:
7864 /* (a + invar_1) + invar_2. Associate. */
7865 return
7866 simplify_giv_expr (loop,
7867 gen_rtx_PLUS (mode,
7868 XEXP (arg0, 0),
7869 gen_rtx_PLUS (mode,
7870 XEXP (arg0, 1),
7871 arg1)),
7872 ext_val, benefit);
7874 default:
7875 gcc_unreachable ();
7878 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
7879 MULT to reduce cases. */
7880 if (REG_P (arg0))
7881 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
7882 if (REG_P (arg1))
7883 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
7885 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
7886 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
7887 Recurse to associate the second PLUS. */
7888 if (GET_CODE (arg1) == MULT)
7889 tem = arg0, arg0 = arg1, arg1 = tem;
7891 if (GET_CODE (arg1) == PLUS)
7892 return
7893 simplify_giv_expr (loop,
7894 gen_rtx_PLUS (mode,
7895 gen_rtx_PLUS (mode, arg0,
7896 XEXP (arg1, 0)),
7897 XEXP (arg1, 1)),
7898 ext_val, benefit);
7900 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
7901 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
7902 return NULL_RTX;
7904 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
7905 return NULL_RTX;
7907 return simplify_giv_expr (loop,
7908 gen_rtx_MULT (mode,
7909 XEXP (arg0, 0),
7910 gen_rtx_PLUS (mode,
7911 XEXP (arg0, 1),
7912 XEXP (arg1, 1))),
7913 ext_val, benefit);
7915 case MINUS:
7916 /* Handle "a - b" as "a + b * (-1)". */
7917 return simplify_giv_expr (loop,
7918 gen_rtx_PLUS (mode,
7919 XEXP (x, 0),
7920 gen_rtx_MULT (mode,
7921 XEXP (x, 1),
7922 constm1_rtx)),
7923 ext_val, benefit);
7925 case MULT:
7926 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7927 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7928 if (arg0 == 0 || arg1 == 0)
7929 return NULL_RTX;
7931 /* Put constant last, CONST_INT last if both constant. */
7932 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
7933 && GET_CODE (arg1) != CONST_INT)
7934 tem = arg0, arg0 = arg1, arg1 = tem;
7936 /* If second argument is not now constant, not giv. */
7937 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
7938 return NULL_RTX;
7940 /* Handle multiply by 0 or 1. */
7941 if (arg1 == const0_rtx)
7942 return const0_rtx;
7944 else if (arg1 == const1_rtx)
7945 return arg0;
7947 switch (GET_CODE (arg0))
7949 case REG:
7950 /* biv * invar. Done. */
7951 return gen_rtx_MULT (mode, arg0, arg1);
7953 case CONST_INT:
7954 /* Product of two constants. */
7955 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
7957 case USE:
7958 /* invar * invar is a giv, but attempt to simplify it somehow. */
7959 if (GET_CODE (arg1) != CONST_INT)
7960 return NULL_RTX;
7962 arg0 = XEXP (arg0, 0);
7963 if (GET_CODE (arg0) == MULT)
7965 /* (invar_0 * invar_1) * invar_2. Associate. */
7966 return simplify_giv_expr (loop,
7967 gen_rtx_MULT (mode,
7968 XEXP (arg0, 0),
7969 gen_rtx_MULT (mode,
7970 XEXP (arg0, 1),
7972 arg1)),
7973 ext_val, benefit);
7975 /* Propagate the MULT expressions to the innermost nodes. */
7976 else if (GET_CODE (arg0) == PLUS)
7978 /* (invar_0 + invar_1) * invar_2. Distribute. */
7979 return simplify_giv_expr (loop,
7980 gen_rtx_PLUS (mode,
7981 gen_rtx_MULT (mode,
7982 XEXP (arg0, 0),
7984 arg1),
7985 gen_rtx_MULT (mode,
7986 XEXP (arg0, 1),
7988 arg1)),
7989 ext_val, benefit);
7991 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
7993 case MULT:
7994 /* (a * invar_1) * invar_2. Associate. */
7995 return simplify_giv_expr (loop,
7996 gen_rtx_MULT (mode,
7997 XEXP (arg0, 0),
7998 gen_rtx_MULT (mode,
7999 XEXP (arg0, 1),
8000 arg1)),
8001 ext_val, benefit);
8003 case PLUS:
8004 /* (a + invar_1) * invar_2. Distribute. */
8005 return simplify_giv_expr (loop,
8006 gen_rtx_PLUS (mode,
8007 gen_rtx_MULT (mode,
8008 XEXP (arg0, 0),
8009 arg1),
8010 gen_rtx_MULT (mode,
8011 XEXP (arg0, 1),
8012 arg1)),
8013 ext_val, benefit);
8015 default:
8016 gcc_unreachable ();
8019 case ASHIFT:
8020 /* Shift by constant is multiply by power of two. */
8021 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8022 return 0;
8024 return
8025 simplify_giv_expr (loop,
8026 gen_rtx_MULT (mode,
8027 XEXP (x, 0),
8028 GEN_INT ((HOST_WIDE_INT) 1
8029 << INTVAL (XEXP (x, 1)))),
8030 ext_val, benefit);
8032 case NEG:
8033 /* "-a" is "a * (-1)" */
8034 return simplify_giv_expr (loop,
8035 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
8036 ext_val, benefit);
8038 case NOT:
8039 /* "~a" is "-a - 1". Silly, but easy. */
8040 return simplify_giv_expr (loop,
8041 gen_rtx_MINUS (mode,
8042 gen_rtx_NEG (mode, XEXP (x, 0)),
8043 const1_rtx),
8044 ext_val, benefit);
8046 case USE:
8047 /* Already in proper form for invariant. */
8048 return x;
8050 case SIGN_EXTEND:
8051 case ZERO_EXTEND:
8052 case TRUNCATE:
8053 /* Conditionally recognize extensions of simple IVs. After we've
8054 computed loop traversal counts and verified the range of the
8055 source IV, we'll reevaluate this as a GIV. */
8056 if (*ext_val == NULL_RTX)
8058 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
8059 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
8061 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
8062 return arg0;
8065 goto do_default;
8067 case REG:
8068 /* If this is a new register, we can't deal with it. */
8069 if (REGNO (x) >= max_reg_before_loop)
8070 return 0;
8072 /* Check for biv or giv. */
8073 switch (REG_IV_TYPE (ivs, REGNO (x)))
8075 case BASIC_INDUCT:
8076 return x;
8077 case GENERAL_INDUCT:
8079 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
8081 /* Form expression from giv and add benefit. Ensure this giv
8082 can derive another and subtract any needed adjustment if so. */
8084 /* Increasing the benefit here is risky. The only case in which it
8085 is arguably correct is if this is the only use of V. In other
8086 cases, this will artificially inflate the benefit of the current
8087 giv, and lead to suboptimal code. Thus, it is disabled, since
8088 potentially not reducing an only marginally beneficial giv is
8089 less harmful than reducing many givs that are not really
8090 beneficial. */
8092 rtx single_use = regs->array[REGNO (x)].single_usage;
8093 if (single_use && single_use != const0_rtx)
8094 *benefit += v->benefit;
8097 if (v->cant_derive)
8098 return 0;
8100 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
8101 v->src_reg, v->mult_val),
8102 v->add_val);
8104 if (v->derive_adjustment)
8105 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
8106 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
8107 if (*ext_val)
8109 if (!v->ext_dependent)
8110 return arg0;
8112 else
8114 *ext_val = v->ext_dependent;
8115 return arg0;
8117 return 0;
8120 default:
8121 do_default:
8122 /* If it isn't an induction variable, and it is invariant, we
8123 may be able to simplify things further by looking through
8124 the bits we just moved outside the loop. */
8125 if (loop_invariant_p (loop, x) == 1)
8127 struct movable *m;
8128 struct loop_movables *movables = LOOP_MOVABLES (loop);
8130 for (m = movables->head; m; m = m->next)
8131 if (rtx_equal_p (x, m->set_dest))
8133 /* Ok, we found a match. Substitute and simplify. */
8135 /* If we match another movable, we must use that, as
8136 this one is going away. */
8137 if (m->match)
8138 return simplify_giv_expr (loop, m->match->set_dest,
8139 ext_val, benefit);
8141 /* If consec is nonzero, this is a member of a group of
8142 instructions that were moved together. We handle this
8143 case only to the point of seeking to the last insn and
8144 looking for a REG_EQUAL. Fail if we don't find one. */
8145 if (m->consec != 0)
8147 int i = m->consec;
8148 tem = m->insn;
8151 tem = NEXT_INSN (tem);
8153 while (--i > 0);
8155 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
8156 if (tem)
8157 tem = XEXP (tem, 0);
8159 else
8161 tem = single_set (m->insn);
8162 if (tem)
8163 tem = SET_SRC (tem);
8166 if (tem)
8168 /* What we are most interested in is pointer
8169 arithmetic on invariants -- only take
8170 patterns we may be able to do something with. */
8171 if (GET_CODE (tem) == PLUS
8172 || GET_CODE (tem) == MULT
8173 || GET_CODE (tem) == ASHIFT
8174 || GET_CODE (tem) == CONST_INT
8175 || GET_CODE (tem) == SYMBOL_REF)
8177 tem = simplify_giv_expr (loop, tem, ext_val,
8178 benefit);
8179 if (tem)
8180 return tem;
8182 else if (GET_CODE (tem) == CONST
8183 && GET_CODE (XEXP (tem, 0)) == PLUS
8184 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
8185 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
8187 tem = simplify_giv_expr (loop, XEXP (tem, 0),
8188 ext_val, benefit);
8189 if (tem)
8190 return tem;
8193 break;
8196 break;
8199 /* Fall through to general case. */
8200 default:
8201 /* If invariant, return as USE (unless CONST_INT).
8202 Otherwise, not giv. */
8203 if (GET_CODE (x) == USE)
8204 x = XEXP (x, 0);
8206 if (loop_invariant_p (loop, x) == 1)
8208 if (GET_CODE (x) == CONST_INT)
8209 return x;
8210 if (GET_CODE (x) == CONST
8211 && GET_CODE (XEXP (x, 0)) == PLUS
8212 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8213 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
8214 x = XEXP (x, 0);
8215 return gen_rtx_USE (mode, x);
8217 else
8218 return 0;
8222 /* This routine folds invariants such that there is only ever one
8223 CONST_INT in the summation. It is only used by simplify_giv_expr. */
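/* For instance (illustrative only), folding the CONST_INT 4 into
   (plus (reg X) (const_int 3)) yields (plus (reg X) (const_int 7)),
   so the sum never carries more than one CONST_INT.  */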
8225 static rtx
8226 sge_plus_constant (rtx x, rtx c)
8228 if (GET_CODE (x) == CONST_INT)
8229 return GEN_INT (INTVAL (x) + INTVAL (c));
8230 else if (GET_CODE (x) != PLUS)
8231 return gen_rtx_PLUS (GET_MODE (x), x, c);
8232 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8234 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8235 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
8237 else if (GET_CODE (XEXP (x, 0)) == PLUS
8238 || GET_CODE (XEXP (x, 1)) != PLUS)
8240 return gen_rtx_PLUS (GET_MODE (x),
8241 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
8243 else
8245 return gen_rtx_PLUS (GET_MODE (x),
8246 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
8250 static rtx
8251 sge_plus (enum machine_mode mode, rtx x, rtx y)
8253 while (GET_CODE (y) == PLUS)
8255 rtx a = XEXP (y, 0);
8256 if (GET_CODE (a) == CONST_INT)
8257 x = sge_plus_constant (x, a);
8258 else
8259 x = gen_rtx_PLUS (mode, x, a);
8260 y = XEXP (y, 1);
8262 if (GET_CODE (y) == CONST_INT)
8263 x = sge_plus_constant (x, y);
8264 else
8265 x = gen_rtx_PLUS (mode, x, y);
8266 return x;
8269 /* Help detect a giv that is calculated by several consecutive insns;
8270 for example,
8271 giv = biv * M
8272 giv = giv + A
8273 The caller has already identified the first insn P as having a giv as dest;
8274 we check that all other insns that set the same register follow
8275 immediately after P, that they alter nothing else,
8276 and that the result of the last is still a giv.
8278 The value is 0 if the reg set in P is not really a giv.
8279 Otherwise, the value is the amount gained by eliminating
8280 all the consecutive insns that compute the value.
8282 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
8283 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
8285 The coefficients of the ultimate giv value are stored in
8286 *MULT_VAL and *ADD_VAL. */
8288 static int
8289 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
8290 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
8291 rtx *ext_val, rtx *last_consec_insn)
8293 struct loop_ivs *ivs = LOOP_IVS (loop);
8294 struct loop_regs *regs = LOOP_REGS (loop);
8295 int count;
8296 enum rtx_code code;
8297 int benefit;
8298 rtx temp;
8299 rtx set;
8301 /* Indicate that this is a giv so that we can update the value produced in
8302 each insn of the multi-insn sequence.
8304 This induction structure will be used only by the call to
8305 general_induction_var below, so we can allocate it on our stack.
8306 If this is a giv, our caller will replace the induct var entry with
8307 a new induction structure. */
8308 struct induction *v;
8310 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
8311 return 0;
8313 v = alloca (sizeof (struct induction));
8314 v->src_reg = src_reg;
8315 v->mult_val = *mult_val;
8316 v->add_val = *add_val;
8317 v->benefit = first_benefit;
8318 v->cant_derive = 0;
8319 v->derive_adjustment = 0;
8320 v->ext_dependent = NULL_RTX;
8322 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
8323 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
8325 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
8327 while (count > 0)
8329 p = NEXT_INSN (p);
8330 code = GET_CODE (p);
8332 /* If libcall, skip to end of call sequence. */
8333 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
8334 p = XEXP (temp, 0);
8336 if (code == INSN
8337 && (set = single_set (p))
8338 && REG_P (SET_DEST (set))
8339 && SET_DEST (set) == dest_reg
8340 && (general_induction_var (loop, SET_SRC (set), &src_reg,
8341 add_val, mult_val, ext_val, 0,
8342 &benefit, VOIDmode)
8343 /* Giv created by equivalent expression. */
8344 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
8345 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
8346 add_val, mult_val, ext_val, 0,
8347 &benefit, VOIDmode)))
8348 && src_reg == v->src_reg)
8350 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
8351 benefit += libcall_benefit (p);
8353 count--;
8354 v->mult_val = *mult_val;
8355 v->add_val = *add_val;
8356 v->benefit += benefit;
8358 else if (code != NOTE)
8360 /* Allow insns that set something other than this giv to a
8361 constant. Such insns are needed on machines which cannot
8362 include long constants and should not disqualify a giv. */
8363 if (code == INSN
8364 && (set = single_set (p))
8365 && SET_DEST (set) != dest_reg
8366 && CONSTANT_P (SET_SRC (set)))
8367 continue;
8369 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8370 return 0;
8374 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8375 *last_consec_insn = p;
8376 return v->benefit;
8379 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8380 represented by G1. If no such expression can be found, or it is clear that
8381 it cannot possibly be a valid address, 0 is returned.
8383 To perform the computation, we note that
8384 G1 = x * v + a and
8385 G2 = y * v + b
8386 where `v' is the biv.
8388 So G2 = (y/x) * G1 + (b - a*y/x).
8390 Note that MULT = y/x.
8392 Update: A and B are now allowed to be additive expressions such that
8393 B contains all variables in A. That is, computing B-A will not require
8394 subtracting variables. */
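/* A hedged numeric instance of the algebra above (illustrative
   only): with G1 = 2*v + 1 and G2 = 6*v + 7, MULT = y/x = 6/2 = 3
   and G2 = 3*G1 + (7 - 1*3) = 3*G1 + 4.  */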
8396 static rtx
8397 express_from_1 (rtx a, rtx b, rtx mult)
8399 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
8401 if (mult == const0_rtx)
8402 return b;
8404 /* If MULT is not 1, we cannot handle A with non-constants, since we
8405 would then be required to subtract multiples of the registers in A.
8406 This is theoretically possible, and may even apply to some Fortran
8407 constructs, but it is a lot of work and we do not attempt it here. */
8409 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
8410 return NULL_RTX;
8412 /* In general these structures are sorted top to bottom (down the PLUS
8413 chain), but not left to right across the PLUS. If B is a higher
8414 order giv than A, we can strip one level and recurse. If A is higher
8415 order, we'll eventually bail out, but won't know that until the end.
8416 If they are the same, we'll strip one level around this loop. */
8418 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
8420 rtx ra, rb, oa, ob, tmp;
8422 ra = XEXP (a, 0), oa = XEXP (a, 1);
8423 if (GET_CODE (ra) == PLUS)
8424 tmp = ra, ra = oa, oa = tmp;
8426 rb = XEXP (b, 0), ob = XEXP (b, 1);
8427 if (GET_CODE (rb) == PLUS)
8428 tmp = rb, rb = ob, ob = tmp;
8430 if (rtx_equal_p (ra, rb))
8431 /* We matched: remove one reg completely. */
8432 a = oa, b = ob;
8433 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
8434 /* An alternate match. */
8435 a = oa, b = rb;
8436 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
8437 /* An alternate match. */
8438 a = ra, b = ob;
8439 else
8441 /* Indicates an extra register in B. Strip one level from B and
8442 recurse, hoping B was the higher order expression. */
8443 ob = express_from_1 (a, ob, mult);
8444 if (ob == NULL_RTX)
8445 return NULL_RTX;
8446 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
8450 /* Here we are at the last level of A, go through the cases hoping to
8451 get rid of everything but a constant. */
8453 if (GET_CODE (a) == PLUS)
8455 rtx ra, oa;
8457 ra = XEXP (a, 0), oa = XEXP (a, 1);
8458 if (rtx_equal_p (oa, b))
8459 oa = ra;
8460 else if (!rtx_equal_p (ra, b))
8461 return NULL_RTX;
8463 if (GET_CODE (oa) != CONST_INT)
8464 return NULL_RTX;
8466 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
8468 else if (GET_CODE (a) == CONST_INT)
8470 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
8472 else if (CONSTANT_P (a))
8474 enum machine_mode mode_a = GET_MODE (a);
8475 enum machine_mode mode_b = GET_MODE (b);
8476 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
8477 return simplify_gen_binary (MINUS, mode, b, a);
8479 else if (GET_CODE (b) == PLUS)
8481 if (rtx_equal_p (a, XEXP (b, 0)))
8482 return XEXP (b, 1);
8483 else if (rtx_equal_p (a, XEXP (b, 1)))
8484 return XEXP (b, 0);
8485 else
8486 return NULL_RTX;
8488 else if (rtx_equal_p (a, b))
8489 return const0_rtx;
8491 return NULL_RTX;
8494 static rtx
8495 express_from (struct induction *g1, struct induction *g2)
8497 rtx mult, add;
8499 /* The value that G1 will be multiplied by must be a constant integer. Also,
8500 the only chance we have of getting a valid address is if y/x (see above
8501 for notation) is also an integer. */
8502 if (GET_CODE (g1->mult_val) == CONST_INT
8503 && GET_CODE (g2->mult_val) == CONST_INT)
8505 if (g1->mult_val == const0_rtx
8506 || (g1->mult_val == constm1_rtx
8507 && INTVAL (g2->mult_val)
8508 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
8509 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
8510 return NULL_RTX;
8511 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
8513 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
8514 mult = const1_rtx;
8515 else
8517 /* ??? Find out if the one is a multiple of the other? */
8518 return NULL_RTX;
8521 add = express_from_1 (g1->add_val, g2->add_val, mult);
8522 if (add == NULL_RTX)
8524 /* Failed. If we've got a multiplication factor between G1 and G2,
8525 scale G1's addend and try again. */
8526 if (INTVAL (mult) > 1)
8528 rtx g1_add_val = g1->add_val;
8529 if (GET_CODE (g1_add_val) == MULT
8530 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
8532 HOST_WIDE_INT m;
8533 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
8534 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
8535 XEXP (g1_add_val, 0), GEN_INT (m));
8537 else
8539 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
8540 mult);
8543 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
8546 if (add == NULL_RTX)
8547 return NULL_RTX;
8549 /* Form simplified final result. */
8550 if (mult == const0_rtx)
8551 return add;
8552 else if (mult == const1_rtx)
8553 mult = g1->dest_reg;
8554 else
8555 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
8557 if (add == const0_rtx)
8558 return mult;
8559 else
8561 if (GET_CODE (add) == PLUS
8562 && CONSTANT_P (XEXP (add, 1)))
8564 rtx tem = XEXP (add, 1);
8565 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
8566 add = tem;
8569 return gen_rtx_PLUS (g2->mode, mult, add);
8573 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8574 represented by G1. This indicates that G2 should be combined with G1 and
8575 that G2 can use (either directly or via an address expression) a register
8576 used to represent G1. */
8578 static rtx
8579 combine_givs_p (struct induction *g1, struct induction *g2)
8581 rtx comb, ret;
8583 /* With the introduction of ext dependent givs, we must take care with modes.
8584 G2 must not use a wider mode than G1. */
8585 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
8586 return NULL_RTX;
8588 ret = comb = express_from (g1, g2);
8589 if (comb == NULL_RTX)
8590 return NULL_RTX;
8591 if (g1->mode != g2->mode)
8592 ret = gen_lowpart (g2->mode, comb);
8594 /* If these givs are identical, they can be combined. We use the results
8595 of express_from because the addends are not in a canonical form, so
8596 rtx_equal_p is a weaker test. */
8597 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
8598 combination to be the other way round. */
8599 if (comb == g1->dest_reg
8600 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
8602 return ret;
8605 /* If G2 can be expressed as a function of G1 and that function is valid
8606 as an address and no more expensive than using a register for G2,
8607 the expression of G2 in terms of G1 can be used. */
8608 if (ret != NULL_RTX
8609 && g2->giv_type == DEST_ADDR
8610 && memory_address_p (GET_MODE (g2->mem), ret))
8611 return ret;
8613 return NULL_RTX;
8616 /* See if BL is monotonic and has a constant per-iteration increment.
8617 Return the increment if so, otherwise return 0. */
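/* For example (illustrative only), a biv with two updates of +1 and
   +3 per iteration has a constant total increment of 4 and is
   monotonic; with updates of +5 and -1 the mixed signs make us
   return 0 even though the total would still be +4.  */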
8619 static HOST_WIDE_INT
8620 get_monotonic_increment (struct iv_class *bl)
8622 struct induction *v;
8623 rtx incr;
8625 /* Get the total increment and check that it is constant. */
8626 incr = biv_total_increment (bl);
8627 if (incr == 0 || GET_CODE (incr) != CONST_INT)
8628 return 0;
8630 for (v = bl->biv; v != 0; v = v->next_iv)
8632 if (GET_CODE (v->add_val) != CONST_INT)
8633 return 0;
8635 if (INTVAL (v->add_val) < 0 && INTVAL (incr) >= 0)
8636 return 0;
8638 if (INTVAL (v->add_val) > 0 && INTVAL (incr) <= 0)
8639 return 0;
8641 return INTVAL (incr);
8645 /* Subroutine of biv_fits_mode_p. Return true if biv BL, when biased by
8646 BIAS, will never exceed the unsigned range of MODE. LOOP is the loop
8647 to which the biv belongs and INCR is its per-iteration increment. */
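/* A worked instance (illustrative only): for QImode, MAXIMUM is 255.
   With BIAS 0, initial value 10, INCR 5 and 40 iterations, we get
   span = 255 + 1 - 10 = 246 and delta = 5; since 246 / 40 = 6 >= 5,
   the biased biv fits (it peaks at 10 + 40*5 = 210).  */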
8649 static bool
8650 biased_biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8651 HOST_WIDE_INT incr, enum machine_mode mode,
8652 unsigned HOST_WIDE_INT bias)
8654 unsigned HOST_WIDE_INT initial, maximum, span, delta;
8656 /* We need to be able to manipulate MODE-size constants. */
8657 if (HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode))
8658 return false;
8660 /* The number of loop iterations must be constant. */
8661 if (LOOP_INFO (loop)->n_iterations == 0)
8662 return false;
8664 /* So must the biv's initial value. */
8665 if (bl->initial_value == 0 || GET_CODE (bl->initial_value) != CONST_INT)
8666 return false;
8668 initial = bias + INTVAL (bl->initial_value);
8669 maximum = GET_MODE_MASK (mode);
8671 /* Make sure that the initial value is within range. */
8672 if (initial > maximum)
8673 return false;
8675 /* Set up DELTA and SPAN such that the number of iterations * DELTA
8676 (calculated to arbitrary precision) must be <= SPAN. */
8677 if (incr < 0)
8679 delta = -incr;
8680 span = initial;
8682 else
8684 delta = incr;
8685 /* Handle the special case in which MAXIMUM is the largest
8686 unsigned HOST_WIDE_INT and INITIAL is 0. */
8687 if (maximum + 1 == initial)
8688 span = LOOP_INFO (loop)->n_iterations * delta;
8689 else
8690 span = maximum + 1 - initial;
8692 return (span / LOOP_INFO (loop)->n_iterations >= delta);
8696 /* Return true if biv BL will never exceed the bounds of MODE. LOOP is
8697 the loop to which BL belongs and INCR is its per-iteration increment.
8698 UNSIGNEDP is true if the biv should be treated as unsigned. */
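/* The bias maps the signed range onto the unsigned one (an
   illustrative sketch): for QImode treated as signed, the bias
   (GET_MODE_MASK (QImode) >> 1) + 1 = 0x80 shifts [-128, 127] onto
   [0, 255], so the unsigned check in biased_biv_fits_mode_p also
   covers signed wrap-around.  */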
8700 static bool
8701 biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8702 HOST_WIDE_INT incr, enum machine_mode mode, bool unsignedp)
8704 struct loop_info *loop_info;
8705 unsigned HOST_WIDE_INT bias;
8707 /* A biv's value will always be limited to its natural mode.
8708 Larger modes will observe the same wrap-around. */
8709 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (bl->biv->src_reg)))
8710 mode = GET_MODE (bl->biv->src_reg);
8712 loop_info = LOOP_INFO (loop);
8714 bias = (unsignedp ? 0 : (GET_MODE_MASK (mode) >> 1) + 1);
8715 if (biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8716 return true;
8718 if (mode == GET_MODE (bl->biv->src_reg)
8719 && bl->biv->src_reg == loop_info->iteration_var
8720 && loop_info->comparison_value
8721 && loop_invariant_p (loop, loop_info->comparison_value))
8723 /* If the increment is +1, and the exit test is a <, the BIV
8724 cannot overflow. (For <=, we have the problematic case that
8725 the comparison value might be the maximum value of the range.) */
8726 if (incr == 1)
8728 if (loop_info->comparison_code == LT)
8729 return true;
8730 if (loop_info->comparison_code == LTU && unsignedp)
8731 return true;
8734 /* Likewise for increment -1 and exit test >. */
8735 if (incr == -1)
8737 if (loop_info->comparison_code == GT)
8738 return true;
8739 if (loop_info->comparison_code == GTU && unsignedp)
8740 return true;
8743 return false;
8747 /* Given that X is an extension or truncation of BL, return true
8748 if it is unaffected by overflow. LOOP is the loop to which
8749 BL belongs and INCR is its per-iteration increment. */
8751 static bool
8752 extension_within_bounds_p (const struct loop *loop, struct iv_class *bl,
8753 HOST_WIDE_INT incr, rtx x)
8755 enum machine_mode mode;
8756 bool signedp, unsignedp;
8758 switch (GET_CODE (x))
8760 case SIGN_EXTEND:
8761 case ZERO_EXTEND:
8762 mode = GET_MODE (XEXP (x, 0));
8763 signedp = (GET_CODE (x) == SIGN_EXTEND);
8764 unsignedp = (GET_CODE (x) == ZERO_EXTEND);
8765 break;
8767 case TRUNCATE:
8768 /* We don't know whether this value is being used as signed
8769 or unsigned, so check the conditions for both. */
8770 mode = GET_MODE (x);
8771 signedp = unsignedp = true;
8772 break;
8774 default:
8775 gcc_unreachable ();
8778 return ((!signedp || biv_fits_mode_p (loop, bl, incr, mode, false))
8779 && (!unsignedp || biv_fits_mode_p (loop, bl, incr, mode, true)));
8783 /* Check each extension dependent giv in this class to see if its
8784 root biv is safe from wrapping in the interior mode, which would
8785 make the giv illegal. */
8787 static void
8788 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
8790 struct induction *v;
8791 HOST_WIDE_INT incr;
8793 incr = get_monotonic_increment (bl);
8795 /* Invalidate givs that fail the tests. */
8796 for (v = bl->giv; v; v = v->next_iv)
8797 if (v->ext_dependent)
8799 if (incr != 0
8800 && extension_within_bounds_p (loop, bl, incr, v->ext_dependent))
8802 if (loop_dump_stream)
8803 fprintf (loop_dump_stream,
8804 "Verified ext dependent giv at %d of reg %d\n",
8805 INSN_UID (v->insn), bl->regno);
8807 else
8809 if (loop_dump_stream)
8810 fprintf (loop_dump_stream,
8811 "Failed ext dependent giv at %d\n",
8812 INSN_UID (v->insn));
8814 v->ignore = 1;
8815 bl->all_reduced = 0;
8820 /* Generate a version of VALUE in a mode appropriate for initializing V. */
8822 static rtx
8823 extend_value_for_giv (struct induction *v, rtx value)
8825 rtx ext_dep = v->ext_dependent;
8827 if (! ext_dep)
8828 return value;
8830 /* Recall that check_ext_dependent_givs verified that the known bounds
8831 of a biv did not overflow or wrap with respect to the extension for
8832 the giv. Therefore, constants need no additional adjustment. */
8833 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
8834 return value;
8836 /* Otherwise, we must adjust the value to compensate for the
8837 differing modes of the biv and the giv. */
8838 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
8841 struct combine_givs_stats
8843 int giv_number;
8844 int total_benefit;
8847 static int
8848 cmp_combine_givs_stats (const void *xp, const void *yp)
8850 const struct combine_givs_stats * const x =
8851 (const struct combine_givs_stats *) xp;
8852 const struct combine_givs_stats * const y =
8853 (const struct combine_givs_stats *) yp;
8854 int d;
8855 d = y->total_benefit - x->total_benefit;
8856 /* Stabilize the sort. */
8857 if (!d)
8858 d = x->giv_number - y->giv_number;
8859 return d;
8862 /* Check all pairs of givs for iv_class BL and see if any can be combined with
8863 any other. If so, point SAME to the giv combined with and set NEW_REG to
8864 be an expression (in terms of the other giv's DEST_REG) equivalent to the
8865 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
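/* An illustrative combination (not from the original sources): for a
   biv I, the givs G1 = 4*I and G2 = 4*I + 16 can be combined; G2 is
   rewritten as G1 + 16, the expression combine_givs_p returns and
   which is stored in g2->new_reg below, so only one reduced register
   is needed.  */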
8867 static void
8868 combine_givs (struct loop_regs *regs, struct iv_class *bl)
8870 /* Additional benefit to add for being combined multiple times. */
8871 const int extra_benefit = 3;
8873 struct induction *g1, *g2, **giv_array;
8874 int i, j, k, giv_count;
8875 struct combine_givs_stats *stats;
8876 rtx *can_combine;
8878 /* Count givs, because bl->giv_count is incorrect here. */
8879 giv_count = 0;
8880 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8881 if (!g1->ignore)
8882 giv_count++;
8884 giv_array = alloca (giv_count * sizeof (struct induction *));
8885 i = 0;
8886 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8887 if (!g1->ignore)
8888 giv_array[i++] = g1;
8890 stats = xcalloc (giv_count, sizeof (*stats));
8891 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
8893 for (i = 0; i < giv_count; i++)
8895 int this_benefit;
8896 rtx single_use;
8898 g1 = giv_array[i];
8899 stats[i].giv_number = i;
8901 /* If a DEST_REG GIV is used only once, do not allow it to combine
8902 with anything, for in doing so we will gain nothing that cannot
8903 be had by simply letting the GIV with which we would have combined
8904 to be reduced on its own. The lossage shows up in particular with
8905 DEST_ADDR targets on hosts with reg+reg addressing, though it can
8906 be seen elsewhere as well. */
8907 if (g1->giv_type == DEST_REG
8908 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
8909 && single_use != const0_rtx)
8910 continue;
8912 this_benefit = g1->benefit;
8913 /* Add an additional weight for zero addends. */
8914 if (g1->no_const_addval)
8915 this_benefit += 1;
8917 for (j = 0; j < giv_count; j++)
8919 rtx this_combine;
8921 g2 = giv_array[j];
8922 if (g1 != g2
8923 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
8925 can_combine[i * giv_count + j] = this_combine;
8926 this_benefit += g2->benefit + extra_benefit;
8929 stats[i].total_benefit = this_benefit;
8932 /* Iterate, combining until we can't. */
8933 restart:
8934 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
8936 if (loop_dump_stream)
8938 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
8939 for (k = 0; k < giv_count; k++)
8941 g1 = giv_array[stats[k].giv_number];
8942 if (!g1->combined_with && !g1->same)
8943 fprintf (loop_dump_stream, " {%d, %d}",
8944 INSN_UID (giv_array[stats[k].giv_number]->insn),
8945 stats[k].total_benefit);
8947 putc ('\n', loop_dump_stream);
8950 for (k = 0; k < giv_count; k++)
8952 int g1_add_benefit = 0;
8954 i = stats[k].giv_number;
8955 g1 = giv_array[i];
8957 /* If it has already been combined, skip. */
8958 if (g1->combined_with || g1->same)
8959 continue;
8961 for (j = 0; j < giv_count; j++)
8963 g2 = giv_array[j];
8964 if (g1 != g2 && can_combine[i * giv_count + j]
8965 /* If it has already been combined, skip. */
8966 && ! g2->same && ! g2->combined_with)
8968 int l;
8970 g2->new_reg = can_combine[i * giv_count + j];
8971 g2->same = g1;
8972 /* For the destination, we may now substitute a mem expression for
8973 a register. This changes the costs considerably, so add the
8974 compensation. */
8975 if (g2->giv_type == DEST_ADDR)
8976 g2->benefit = (g2->benefit + reg_address_cost
8977 - address_cost (g2->new_reg,
8978 GET_MODE (g2->mem)));
8979 g1->combined_with++;
8980 g1->lifetime += g2->lifetime;
8982 g1_add_benefit += g2->benefit;
8984 /* ??? The new final_[bg]iv_value code does a much better job
8985 of finding replaceable giv's, and hence this code may no
8986 longer be necessary. */
8987 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
8988 g1_add_benefit -= copy_cost;
8990 /* To help optimize the next set of combinations, remove
8991 this giv from the benefits of other potential mates. */
8992 for (l = 0; l < giv_count; ++l)
8994 int m = stats[l].giv_number;
8995 if (can_combine[m * giv_count + j])
8996 stats[l].total_benefit -= g2->benefit + extra_benefit;
8999 if (loop_dump_stream)
9000 fprintf (loop_dump_stream,
9001 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
9002 INSN_UID (g2->insn), INSN_UID (g1->insn),
9003 g1->benefit, g1_add_benefit, g1->lifetime);
9007 /* To help optimize the next set of combinations, remove
9008 this giv from the benefits of other potential mates. */
9009 if (g1->combined_with)
9011 for (j = 0; j < giv_count; ++j)
9013 int m = stats[j].giv_number;
9014 if (can_combine[m * giv_count + i])
9015 stats[j].total_benefit -= g1->benefit + extra_benefit;
9018 g1->benefit += g1_add_benefit;
9020 /* We've finished with this giv, and everything it touched.
9021 Restart the combination so that the weights for the
9022 rest of the givs are properly taken into account. */
9023 /* ??? Ideally we would compact the arrays at this point, so
9024 as to not cover old ground. But sanely compacting
9025 can_combine is tricky. */
9026 goto restart;
9030 /* Clean up. */
9031 free (stats);
9032 free (can_combine);
9035 /* Generate sequence for REG = B * M + A. B is the initial value of
9036 the basic induction variable, M a multiplicative constant, A an
9037 additive constant and REG the destination register. */
9039 static rtx
9040 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
9042 rtx seq;
9043 rtx result;
9045 start_sequence ();
9046 /* Use unsigned arithmetic. */
9047 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9048 if (reg != result)
9049 emit_move_insn (reg, result);
9050 seq = get_insns ();
9051 end_sequence ();
9053 return seq;
9057 /* Update registers created in insn sequence SEQ. */
9059 static void
9060 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
9062 rtx insn;
9064 /* Update register info for alias analysis. */
9066 insn = seq;
9067 while (insn != NULL_RTX)
9069 rtx set = single_set (insn);
9071 if (set && REG_P (SET_DEST (set)))
9072 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
9074 insn = NEXT_INSN (insn);
9079 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
9080 is the initial value of the basic induction variable, M a
9081 multiplicative constant, A an additive constant and REG the
9082 destination register. */
9084 static void
9085 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
9086 rtx reg, basic_block before_bb, rtx before_insn)
9088 rtx seq;
9090 if (! before_insn)
9092 loop_iv_add_mult_hoist (loop, b, m, a, reg);
9093 return;
9096 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9097 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9099 /* Increase the lifetime of any invariants moved further in code. */
9100 update_reg_last_use (a, before_insn);
9101 update_reg_last_use (b, before_insn);
9102 update_reg_last_use (m, before_insn);
9104 /* It is possible that the expansion created lots of new registers.
9105 Iterate over the sequence we just created and record them all. We
9106 must do this before inserting the sequence. */
9107 loop_regs_update (loop, seq);
9109 loop_insn_emit_before (loop, before_bb, before_insn, seq);
9113 /* Emit insns after the loop to set REG = B * M + A. B is the
9114 initial value of the basic induction variable, M a multiplicative
9115 constant, A an additive constant and REG the destination
9116 register. */
9118 static void
9119 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9121 rtx seq;
9123 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9124 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9126 /* Increase the lifetime of any invariants moved further in code.
9127 ???? Is this really necessary? */
9128 update_reg_last_use (a, loop->sink);
9129 update_reg_last_use (b, loop->sink);
9130 update_reg_last_use (m, loop->sink);
9132 /* It is possible that the expansion created lots of new registers.
9133 Iterate over the sequence we just created and record them all. We
9134 must do this before inserting the sequence. */
9135 loop_regs_update (loop, seq);
9137 loop_insn_sink (loop, seq);
9141 /* Emit insns in the loop pre-header to set REG = B * M + A. B is the
9142 initial value of the basic induction variable, M a multiplicative constant,
9143 A an additive constant and REG the destination register. */
9145 static void
9146 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9148 rtx seq;
9150 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9151 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9153 /* It is possible that the expansion created lots of new registers.
9154 Iterate over the sequence we just created and record them all. We
9155 must do this before inserting the sequence. */
9156 loop_regs_update (loop, seq);
9158 loop_insn_hoist (loop, seq);
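/* A schematic view (a sketch, not literal RTL) of where the three
   emitters above place the computation REG = B * M + A:

       reg = b*m + a;      <- loop_iv_add_mult_hoist: loop pre-header
       NOTE_LOOP_BEG
         ...
         reg = b*m + a;    <- loop_iv_add_mult_emit_before: just before
         before_insn          a particular insn in the loop body
         ...
       NOTE_LOOP_END
       reg = b*m + a;      <- loop_iv_add_mult_sink: after the loop, for
                              values needed only at loop exit  */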
9163 /* Similar to gen_add_mult, but compute the cost rather than generating
9164 the sequence. */
9166 static int
9167 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
9169 int cost = 0;
9170 rtx last, result;
9172 start_sequence ();
9173 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9174 if (reg != result)
9175 emit_move_insn (reg, result);
9176 last = get_last_insn ();
9177 while (last)
9179 rtx t = single_set (last);
9180 if (t)
9181 cost += rtx_cost (SET_SRC (t), SET);
9182 last = PREV_INSN (last);
9184 end_sequence ();
9185 return cost;
9188 /* Test whether A * B can be computed without
9189 an actual multiply insn. Value is 1 if so.
9191 ??? This function stinks because it generates a ton of wasted RTL
9192 ??? and as a result fragments GC memory to no end. There are other
9193 ??? places in the compiler which are invoked a lot and do the same
9194 ??? thing, generate wasted RTL just to see if something is possible. */
9196 static int
9197 product_cheap_p (rtx a, rtx b)
9199 rtx tmp;
9200 int win, n_insns;
9202 /* If only one is constant, make it B. */
9203 if (GET_CODE (a) == CONST_INT)
9204 tmp = a, a = b, b = tmp;
9206 /* If the first is still constant, then both were constant, so we don't need a multiply. */
9207 if (GET_CODE (a) == CONST_INT)
9208 return 1;
9210 /* If the second is not constant, then neither is constant, so we would need a multiply. */
9211 if (GET_CODE (b) != CONST_INT)
9212 return 0;
9214 /* One operand is constant, so we might not need a multiply insn. Generate
9215 the code for the multiply and see whether a call, a multiply insn, or a
9216 long sequence of insns is generated. */
9218 start_sequence ();
9219 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
9220 tmp = get_insns ();
9221 end_sequence ();
9223 win = 1;
9224 if (tmp == NULL_RTX)
9226 else if (INSN_P (tmp))
9228 n_insns = 0;
9229 while (tmp != NULL_RTX)
9231 rtx next = NEXT_INSN (tmp);
9233 if (++n_insns > 3
9234 || !NONJUMP_INSN_P (tmp)
9235 || (GET_CODE (PATTERN (tmp)) == SET
9236 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
9237 || (GET_CODE (PATTERN (tmp)) == PARALLEL
9238 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
9239 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
9241 win = 0;
9242 break;
9245 tmp = next;
9248 else if (GET_CODE (tmp) == SET
9249 && GET_CODE (SET_SRC (tmp)) == MULT)
9250 win = 0;
9251 else if (GET_CODE (tmp) == PARALLEL
9252 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
9253 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
9254 win = 0;
9256 return win;
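/* For example (an illustrative sketch): on most targets expand_mult
   synthesizes x * 5 as

       t = x << 2;
       t = t + x;

   two cheap non-MULT insns, so product_cheap_p returns 1.  A constant
   whose expansion needs a real multiply insn, a libcall, or more than
   three insns makes it return 0.  */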
9259 /* Check to see if loop can be terminated by a "decrement and branch until
9260 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
9261 Also try reversing an increment loop to a decrement loop
9262 to see if the optimization can be performed.
9263 Value is nonzero if optimization was performed. */
9265 /* This is useful even if the architecture doesn't have such an insn,
9266 because it might change a loop which increments from 0 to n to a loop
9267 which decrements from n to 0. A loop that decrements to zero is usually
9268 faster than one that increments from zero. */
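/* For instance (an illustrative sketch), a counting loop such as

       for (i = 0; i < n; i++)
         body;

   where I is used only to count iterations can be rewritten as

       for (i = n; --i >= 0; )
         body;

   so that the exit test compares against zero and, on machines like the
   m68k, the decrement and branch fold into a single dbra instruction.  */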
9270 /* ??? This could be rewritten to use some of the loop unrolling procedures,
9271 such as approx_final_value, biv_total_increment, loop_iterations, and
9272 final_[bg]iv_value. */
9274 static int
9275 check_dbra_loop (struct loop *loop, int insn_count)
9277 struct loop_info *loop_info = LOOP_INFO (loop);
9278 struct loop_regs *regs = LOOP_REGS (loop);
9279 struct loop_ivs *ivs = LOOP_IVS (loop);
9280 struct iv_class *bl;
9281 rtx reg;
9282 enum machine_mode mode;
9283 rtx jump_label;
9284 rtx final_value;
9285 rtx start_value;
9286 rtx new_add_val;
9287 rtx comparison;
9288 rtx before_comparison;
9289 rtx p;
9290 rtx jump;
9291 rtx first_compare;
9292 int compare_and_branch;
9293 rtx loop_start = loop->start;
9294 rtx loop_end = loop->end;
9296 /* If last insn is a conditional branch, and the insn before tests a
9297 register value, try to optimize it. Otherwise, we can't do anything. */
9299 jump = PREV_INSN (loop_end);
9300 comparison = get_condition_for_loop (loop, jump);
9301 if (comparison == 0)
9302 return 0;
9303 if (!onlyjump_p (jump))
9304 return 0;
9306 /* Try to compute whether the compare/branch at the loop end is one or
9307 two instructions. */
9308 get_condition (jump, &first_compare, false, true);
9309 if (first_compare == jump)
9310 compare_and_branch = 1;
9311 else if (first_compare == prev_nonnote_insn (jump))
9312 compare_and_branch = 2;
9313 else
9314 return 0;
9317 /* If more than one condition is present to control the loop, then
9318 do not proceed, as this function does not know how to rewrite
9319 loop tests with more than one condition.
9321 Look backwards from the first insn in the last comparison
9322 sequence and see if we've got another comparison sequence. */
9324 rtx jump1;
9325 if ((jump1 = prev_nonnote_insn (first_compare))
9326 && JUMP_P (jump1))
9327 return 0;
9330 /* Check all of the bivs to see if the compare uses one of them.
9331 Skip biv's set more than once because we can't guarantee that
9332 it will be zero on the last iteration. Also skip if the biv is
9333 used between its update and the test insn. */
9335 for (bl = ivs->list; bl; bl = bl->next)
9337 if (bl->biv_count == 1
9338 && ! bl->biv->maybe_multiple
9339 && bl->biv->dest_reg == XEXP (comparison, 0)
9340 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9341 first_compare))
9342 break;
9345 /* Try swapping the comparison to identify a suitable biv. */
9346 if (!bl)
9347 for (bl = ivs->list; bl; bl = bl->next)
9348 if (bl->biv_count == 1
9349 && ! bl->biv->maybe_multiple
9350 && bl->biv->dest_reg == XEXP (comparison, 1)
9351 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9352 first_compare))
9354 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
9355 VOIDmode,
9356 XEXP (comparison, 1),
9357 XEXP (comparison, 0));
9358 break;
9361 if (! bl)
9362 return 0;
9364 /* Look for the case where the basic induction variable is always
9365 nonnegative, and equals zero on the last iteration.
9366 In this case, add a reg_note REG_NONNEG, which allows the
9367 m68k DBRA instruction to be used. */
9369 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
9370 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
9371 && GET_CODE (bl->biv->add_val) == CONST_INT
9372 && INTVAL (bl->biv->add_val) < 0)
9374 /* Initial value must be greater than 0,
9375 and init_val % -dec_value == 0 to ensure that it equals zero on
9376 the last iteration. */
9378 if (GET_CODE (bl->initial_value) == CONST_INT
9379 && INTVAL (bl->initial_value) > 0
9380 && (INTVAL (bl->initial_value)
9381 % (-INTVAL (bl->biv->add_val))) == 0)
9383 /* Register always nonnegative, add REG_NOTE to branch. */
9384 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9385 REG_NOTES (jump)
9386 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9387 REG_NOTES (jump));
9388 bl->nonneg = 1;
9390 return 1;
9393 /* If the decrement is 1 and the value was tested as >= 0 before
9394 the loop, then we can safely optimize. */
9395 for (p = loop_start; p; p = PREV_INSN (p))
9397 if (LABEL_P (p))
9398 break;
9399 if (!JUMP_P (p))
9400 continue;
9402 before_comparison = get_condition_for_loop (loop, p);
9403 if (before_comparison
9404 && XEXP (before_comparison, 0) == bl->biv->dest_reg
9405 && (GET_CODE (before_comparison) == LT
9406 || GET_CODE (before_comparison) == LTU)
9407 && XEXP (before_comparison, 1) == const0_rtx
9408 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
9409 && INTVAL (bl->biv->add_val) == -1)
9411 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9412 REG_NOTES (jump)
9413 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9414 REG_NOTES (jump));
9415 bl->nonneg = 1;
9417 return 1;
9421 else if (GET_CODE (bl->biv->add_val) == CONST_INT
9422 && INTVAL (bl->biv->add_val) > 0)
9424 /* Try to change the inc to a dec, so we can apply the optimization above. */
9425 /* Can do this if:
9426 all registers modified are induction variables or invariant,
9427 all memory references have non-overlapping addresses
9428 (obviously true if there is only one write),
9429 allowing 2 insns for the compare/jump at the end of the loop. */
9430 /* Also, we must avoid any instructions which use both the reversed
9431 biv and another biv. Such instructions will fail if the loop is
9432 reversed. We meet this condition by requiring that either
9433 no_use_except_counting is true, or else that there is only
9434 one biv. */
9435 int num_nonfixed_reads = 0;
9436 /* 1 if the iteration var is used only to count iterations. */
9437 int no_use_except_counting = 0;
9438 /* 1 if the loop has no memory store, or it has a single memory store
9439 which is reversible. */
9440 int reversible_mem_store = 1;
9442 if (bl->giv_count == 0
9443 && !loop->exit_count
9444 && !loop_info->has_multiple_exit_targets)
9446 rtx bivreg = regno_reg_rtx[bl->regno];
9447 struct iv_class *blt;
9449 /* If there are no givs for this biv, and the only exit is the
9450 fall through at the end of the loop, then
9451 see if perhaps there are no uses except to count. */
9452 no_use_except_counting = 1;
9453 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9454 if (INSN_P (p))
9456 rtx set = single_set (p);
9458 if (set && REG_P (SET_DEST (set))
9459 && REGNO (SET_DEST (set)) == bl->regno)
9460 /* An insn that sets the biv is okay. */
9462 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
9463 /* An insn that doesn't mention the biv is okay. */
9465 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
9466 || p == prev_nonnote_insn (loop_end))
9468 /* If either of these insns uses the biv and sets a pseudo
9469 that has more than one usage, then the biv has uses
9470 other than counting since it's used to derive a value
9471 that is used more than one time. */
9472 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
9473 regs);
9474 if (regs->multiple_uses)
9476 no_use_except_counting = 0;
9477 break;
9480 else
9482 no_use_except_counting = 0;
9483 break;
9487 /* A biv has uses besides counting if it is used to set
9488 another biv. */
9489 for (blt = ivs->list; blt; blt = blt->next)
9490 if (blt->init_set
9491 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
9493 no_use_except_counting = 0;
9494 break;
9498 if (no_use_except_counting)
9499 /* No need to worry about MEMs. */
9501 else if (loop_info->num_mem_sets <= 1)
9503 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9504 if (INSN_P (p))
9505 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
9507 /* If the loop has a single store, and the destination address is
9508 invariant, then we can't reverse the loop, because this address
9509 might then have the wrong value at loop exit.
9510 This would work if the source were invariant also; however, in that
9511 case, the insn should have been moved out of the loop. */
9513 if (loop_info->num_mem_sets == 1)
9515 struct induction *v;
9517 /* If we could prove that each of the memory locations
9518 written to was different, then we could reverse the
9519 store -- but we don't presently have any way of
9520 knowing that. */
9521 reversible_mem_store = 0;
9523 /* If the store depends on a register that is set after the
9524 store, it depends on the initial value, and is thus not
9525 reversible. */
9526 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
9528 if (v->giv_type == DEST_REG
9529 && reg_mentioned_p (v->dest_reg,
9530 PATTERN (loop_info->first_loop_store_insn))
9531 && loop_insn_first_p (loop_info->first_loop_store_insn,
9532 v->insn))
9533 reversible_mem_store = 0;
9537 else
9538 return 0;
9540 /* This code only acts for innermost loops. Also it simplifies
9541 the memory address check by only reversing loops with
9542 zero or one memory access.
9543 Two memory accesses could involve parts of the same array,
9544 and that can't be reversed.
9545 If the biv is used only for counting, then we don't need to worry
9546 about all these things. */
9548 if ((num_nonfixed_reads <= 1
9549 && ! loop_info->has_nonconst_call
9550 && ! loop_info->has_prefetch
9551 && ! loop_info->has_volatile
9552 && reversible_mem_store
9553 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
9554 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
9555 && (bl == ivs->list && bl->next == 0))
9556 || (no_use_except_counting && ! loop_info->has_prefetch))
9558 rtx tem;
9560 /* Loop can be reversed. */
9561 if (loop_dump_stream)
9562 fprintf (loop_dump_stream, "Can reverse loop\n");
9564 /* Now check other conditions:
9566 The increment must be a constant, as must the initial value,
9567 and the comparison code must be LT.
9569 This test can probably be improved since +/- 1 in the constant
9570 can be obtained by changing LT to LE and vice versa; this is
9571 confusing. */
9573 if (comparison
9574 /* for constants, LE gets turned into LT */
9575 && (GET_CODE (comparison) == LT
9576 || (GET_CODE (comparison) == LE
9577 && no_use_except_counting)
9578 || GET_CODE (comparison) == LTU))
9580 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
9581 rtx initial_value, comparison_value;
9582 int nonneg = 0;
9583 enum rtx_code cmp_code;
9584 int comparison_const_width;
9585 unsigned HOST_WIDE_INT comparison_sign_mask;
9586 bool keep_first_compare;
9588 add_val = INTVAL (bl->biv->add_val);
9589 comparison_value = XEXP (comparison, 1);
9590 if (GET_MODE (comparison_value) == VOIDmode)
9591 comparison_const_width
9592 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
9593 else
9594 comparison_const_width
9595 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
9596 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
9597 comparison_const_width = HOST_BITS_PER_WIDE_INT;
9598 comparison_sign_mask
9599 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
9601 /* If the comparison value is not a loop invariant, then we
9602 can not reverse this loop.
9604 ??? If the insns which initialize the comparison value as
9605 a whole compute an invariant result, then we could move
9606 them out of the loop and proceed with loop reversal. */
9607 if (! loop_invariant_p (loop, comparison_value))
9608 return 0;
9610 if (GET_CODE (comparison_value) == CONST_INT)
9611 comparison_val = INTVAL (comparison_value);
9612 initial_value = bl->initial_value;
9614 /* Normalize the initial value if it is an integer and
9615 has no other use except as a counter. This will allow
9616 a few more loops to be reversed. */
9617 if (no_use_except_counting
9618 && GET_CODE (comparison_value) == CONST_INT
9619 && GET_CODE (initial_value) == CONST_INT)
9621 comparison_val = comparison_val - INTVAL (bl->initial_value);
9622 /* The code below requires comparison_val to be a multiple
9623 of add_val in order to do the loop reversal, so
9624 round up comparison_val to a multiple of add_val.
9625 Since comparison_value is constant, we know that the
9626 current comparison code is LT. */
9627 comparison_val = comparison_val + add_val - 1;
9628 comparison_val
9629 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
9630 /* We postpone overflow checks for COMPARISON_VAL here;
9631 even if there is an overflow, we might still be able to
9632 reverse the loop, if converting the loop exit test to
9633 NE is possible. */
9634 initial_value = const0_rtx;
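/* Worked example of the normalization above (illustrative): for a loop
   "for (i = 3; i < 10; i += 2)" we get comparison_val = 10 - 3 = 7,
   rounded up to the next multiple of add_val (2) to give 8, with
   initial_value normalized to 0.  The reversed biv then counts
   6, 4, 2, 0, preserving the original four iterations.  */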
9637 /* First check if we can do a vanilla loop reversal. */
9638 if (initial_value == const0_rtx
9639 && GET_CODE (comparison_value) == CONST_INT
9640 /* Now do postponed overflow checks on COMPARISON_VAL. */
9641 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
9642 & comparison_sign_mask))
9644 /* Register will always be nonnegative, with value
9645 0 on last iteration */
9646 add_adjust = add_val;
9647 nonneg = 1;
9648 cmp_code = GE;
9650 else
9651 return 0;
9653 if (GET_CODE (comparison) == LE)
9654 add_adjust -= add_val;
9656 /* If the initial value is not zero, or if the comparison
9657 value is not an exact multiple of the increment, then we
9658 can not reverse this loop. */
9659 if (initial_value == const0_rtx
9660 && GET_CODE (comparison_value) == CONST_INT)
9662 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
9663 return 0;
9665 else
9667 if (! no_use_except_counting || add_val != 1)
9668 return 0;
9671 final_value = comparison_value;
9673 /* Reset these in case we normalized the initial value
9674 and comparison value above. */
9675 if (GET_CODE (comparison_value) == CONST_INT
9676 && GET_CODE (initial_value) == CONST_INT)
9678 comparison_value = GEN_INT (comparison_val);
9679 final_value
9680 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
9682 bl->initial_value = initial_value;
9684 /* Save some info needed to produce the new insns. */
9685 reg = bl->biv->dest_reg;
9686 mode = GET_MODE (reg);
9687 jump_label = condjump_label (PREV_INSN (loop_end));
9688 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
9690 /* Set start_value; if this is not a CONST_INT, we need
9691 to generate a SUB.
9692 Initialize biv to start_value before loop start.
9693 The old initializing insn will be deleted as a
9694 dead store by flow.c. */
9695 if (initial_value == const0_rtx
9696 && GET_CODE (comparison_value) == CONST_INT)
9698 start_value
9699 = gen_int_mode (comparison_val - add_adjust, mode);
9700 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
9702 else if (GET_CODE (initial_value) == CONST_INT)
9704 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
9705 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
9707 if (add_insn == 0)
9708 return 0;
9710 start_value
9711 = gen_rtx_PLUS (mode, comparison_value, offset);
9712 loop_insn_hoist (loop, add_insn);
9713 if (GET_CODE (comparison) == LE)
9714 final_value = gen_rtx_PLUS (mode, comparison_value,
9715 GEN_INT (add_val));
9717 else if (! add_adjust)
9719 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
9720 initial_value);
9722 if (sub_insn == 0)
9723 return 0;
9724 start_value
9725 = gen_rtx_MINUS (mode, comparison_value, initial_value);
9726 loop_insn_hoist (loop, sub_insn);
9728 else
9729 /* We could handle the other cases too, but it'll be
9730 better to have a testcase first. */
9731 return 0;
9733 /* We may not have a single insn which can increment a reg, so
9734 create a sequence to hold all the insns from expand_inc. */
9735 start_sequence ();
9736 expand_inc (reg, new_add_val);
9737 tem = get_insns ();
9738 end_sequence ();
9740 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
9741 delete_insn (bl->biv->insn);
9743 /* Update biv info to reflect its new status. */
9744 bl->biv->insn = p;
9745 bl->initial_value = start_value;
9746 bl->biv->add_val = new_add_val;
9748 /* Update loop info. */
9749 loop_info->initial_value = reg;
9750 loop_info->initial_equiv_value = reg;
9751 loop_info->final_value = const0_rtx;
9752 loop_info->final_equiv_value = const0_rtx;
9753 loop_info->comparison_value = const0_rtx;
9754 loop_info->comparison_code = cmp_code;
9755 loop_info->increment = new_add_val;
9757 /* Inc LABEL_NUSES so that delete_insn will
9758 not delete the label. */
9759 LABEL_NUSES (XEXP (jump_label, 0))++;
9761 /* If we have a separate comparison insn that does more
9762 than just set cc0, the result of the comparison might
9763 be used outside the loop. */
9764 keep_first_compare = (compare_and_branch == 2
9765 #ifdef HAVE_CC0
9766 && sets_cc0_p (first_compare) <= 0
9767 #endif
9770 /* Emit an insn after the end of the loop to set the biv's
9771 proper exit value if it is used anywhere outside the loop. */
9772 if (keep_first_compare
9773 || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
9774 || ! bl->init_insn
9775 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
9776 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
9778 if (keep_first_compare)
9779 loop_insn_sink (loop, PATTERN (first_compare));
9781 /* Delete compare/branch at end of loop. */
9782 delete_related_insns (PREV_INSN (loop_end));
9783 if (compare_and_branch == 2)
9784 delete_related_insns (first_compare);
9786 /* Add new compare/branch insn at end of loop. */
9787 start_sequence ();
9788 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
9789 mode, 0,
9790 XEXP (jump_label, 0));
9791 tem = get_insns ();
9792 end_sequence ();
9793 emit_jump_insn_before (tem, loop_end);
9795 for (tem = PREV_INSN (loop_end);
9796 tem && !JUMP_P (tem);
9797 tem = PREV_INSN (tem))
9800 if (tem)
9801 JUMP_LABEL (tem) = XEXP (jump_label, 0);
9803 if (nonneg)
9805 if (tem)
9807 /* Increment of LABEL_NUSES done above. */
9808 /* Register is now always nonnegative,
9809 so add REG_NONNEG note to the branch. */
9810 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
9811 REG_NOTES (tem));
9813 bl->nonneg = 1;
9816 /* No insn may reference both the reversed biv and another biv or it
9817 will fail (see comment near the top of the loop reversal
9818 code).
9819 Earlier on, we have verified that the biv has no use except
9820 counting, or it is the only biv in this function.
9821 However, the code that computes no_use_except_counting does
9822 not verify reg notes. It's possible to have an insn that
9823 references another biv, and has a REG_EQUAL note with an
9824 expression based on the reversed biv. To avoid this case,
9825 remove all REG_EQUAL notes based on the reversed biv
9826 here. */
9827 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9828 if (INSN_P (p))
9830 rtx *pnote;
9831 rtx set = single_set (p);
9832 /* If this is a set of a GIV based on the reversed biv, any
9833 REG_EQUAL notes should still be correct. */
9834 if (! set
9835 || !REG_P (SET_DEST (set))
9836 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
9837 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
9838 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
9839 for (pnote = &REG_NOTES (p); *pnote;)
9841 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
9842 && reg_mentioned_p (regno_reg_rtx[bl->regno],
9843 XEXP (*pnote, 0)))
9844 *pnote = XEXP (*pnote, 1);
9845 else
9846 pnote = &XEXP (*pnote, 1);
9850 /* Mark that this biv has been reversed. Each giv which depends
9851 on this biv, and which is also live past the end of the loop
9852 will have to be fixed up. */
9854 bl->reversed = 1;
9856 if (loop_dump_stream)
9858 fprintf (loop_dump_stream, "Reversed loop");
9859 if (bl->nonneg)
9860 fprintf (loop_dump_stream, " and added reg_nonneg\n");
9861 else
9862 fprintf (loop_dump_stream, "\n");
9865 return 1;
9870 return 0;
9873 /* Verify whether the biv BL appears to be eliminable,
9874 based on the insns in the loop that refer to it.
9876 If ELIMINATE_P is nonzero, actually do the elimination.
9878 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
9879 determine whether invariant insns should be placed inside or at the
9880 start of the loop. */
9882 static int
9883 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
9884 int eliminate_p, int threshold, int insn_count)
9886 struct loop_ivs *ivs = LOOP_IVS (loop);
9887 rtx reg = bl->biv->dest_reg;
9888 rtx p;
9890 /* Scan all insns in the loop, stopping if we find one that uses the
9891 biv in a way that we cannot eliminate. */
9893 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9895 enum rtx_code code = GET_CODE (p);
9896 basic_block where_bb = 0;
9897 rtx where_insn = threshold >= insn_count ? 0 : p;
9898 rtx note;
9900 /* If this is a libcall that sets a giv, skip ahead to its end. */
9901 if (INSN_P (p))
9903 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
9905 if (note)
9907 rtx last = XEXP (note, 0);
9908 rtx set = single_set (last);
9910 if (set && REG_P (SET_DEST (set)))
9912 unsigned int regno = REGNO (SET_DEST (set));
9914 if (regno < ivs->n_regs
9915 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
9916 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
9917 p = last;
9922 /* Closely examine the insn if the biv is mentioned. */
9923 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
9924 && reg_mentioned_p (reg, PATTERN (p))
9925 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
9926 eliminate_p, where_bb, where_insn))
9928 if (loop_dump_stream)
9929 fprintf (loop_dump_stream,
9930 "Cannot eliminate biv %d: biv used in insn %d.\n",
9931 bl->regno, INSN_UID (p));
9932 break;
9935 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
9936 if (eliminate_p
9937 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
9938 && reg_mentioned_p (reg, XEXP (note, 0)))
9939 remove_note (p, note);
9942 if (p == loop->end)
9944 if (loop_dump_stream)
9945 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
9946 bl->regno, eliminate_p ? "was" : "can be");
9947 return 1;
9950 return 0;
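/* An illustrative example (not from the original sources) of a biv this
   function can eliminate:

       for (i = 0; i < n; i++)
         p[i] = 0;

   After strength reduction the store uses an address giv Q = P + 4*I,
   so the only remaining use of I is the exit test I < N.  If that test
   can be rewritten as Q < P + 4*N, I is dead and its set can go away.  */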
9953 /* INSN and REFERENCE are instructions in the same insn chain.
9954 Return nonzero if INSN is first. */
9956 static int
9957 loop_insn_first_p (rtx insn, rtx reference)
9959 rtx p, q;
9961 for (p = insn, q = reference;;)
9963 /* Start with test for not first so that INSN == REFERENCE yields not
9964 first. */
9965 if (q == insn || ! p)
9966 return 0;
9967 if (p == reference || ! q)
9968 return 1;
9970 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
9971 previous insn, hence the <= comparison below does not work if
9972 P is a note. */
9973 if (INSN_UID (p) < max_uid_for_loop
9974 && INSN_UID (q) < max_uid_for_loop
9975 && !NOTE_P (p))
9976 return INSN_LUID (p) <= INSN_LUID (q);
9978 if (INSN_UID (p) >= max_uid_for_loop
9979 || NOTE_P (p))
9980 p = NEXT_INSN (p);
9981 if (INSN_UID (q) >= max_uid_for_loop)
9982 q = NEXT_INSN (q);
9986 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
9987 the offset that we have to take into account due to auto-increment /
9988 giv derivation is zero. */
9989 static int
9990 biv_elimination_giv_has_0_offset (struct induction *biv,
9991 struct induction *giv, rtx insn)
9993 /* If the giv V had the auto-inc address optimization applied
9994 to it, and INSN occurs between the giv insn and the biv
9995 insn, then we'd have to adjust the value used here.
9996 This is rare, so we don't bother to make this possible. */
9997 if (giv->auto_inc_opt
9998 && ((loop_insn_first_p (giv->insn, insn)
9999 && loop_insn_first_p (insn, biv->insn))
10000 || (loop_insn_first_p (biv->insn, insn)
10001 && loop_insn_first_p (insn, giv->insn))))
10002 return 0;
10004 return 1;
10007 /* If BL's biv appears in X (part of the pattern of INSN), see if we can
10008 eliminate its use. If so, return 1. If not, return 0.
10010 If the biv does not appear in X, return 1.
10012 If ELIMINATE_P is nonzero, actually do the elimination.
10013 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
10014 Depending on how many items have been moved out of the loop, it
10015 will either be before INSN (when WHERE_INSN is nonzero) or at the
10016 start of the loop (when WHERE_INSN is zero). */
10018 static int
10019 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
10020 struct iv_class *bl, int eliminate_p,
10021 basic_block where_bb, rtx where_insn)
10023 enum rtx_code code = GET_CODE (x);
10024 rtx reg = bl->biv->dest_reg;
10025 enum machine_mode mode = GET_MODE (reg);
10026 struct induction *v;
10027 rtx arg, tem;
10028 #ifdef HAVE_cc0
10029 rtx new;
10030 #endif
10031 int arg_operand;
10032 const char *fmt;
10033 int i, j;
10035 switch (code)
10037 case REG:
10038 /* If we haven't already been able to do something with this BIV,
10039 we can't eliminate it. */
10040 if (x == reg)
10041 return 0;
10042 return 1;
10044 case SET:
10045 /* If this sets the BIV, it is not a problem. */
10046 if (SET_DEST (x) == reg)
10047 return 1;
10049 /* If this is an insn that defines a giv, it is also ok because
10050 it will go away when the giv is reduced. */
10051 for (v = bl->giv; v; v = v->next_iv)
10052 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
10053 return 1;
10055 #ifdef HAVE_cc0
10056 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
10058 /* Can replace with any giv that was reduced and
10059 that has (MULT_VAL != 0) and (ADD_VAL == 0).
10060 Require a constant for MULT_VAL, so we know it's nonzero.
10061 ??? We disable this optimization to avoid potential
10062 overflows. */
10064 for (v = bl->giv; v; v = v->next_iv)
10065 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
10066 && v->add_val == const0_rtx
10067 && ! v->ignore && ! v->maybe_dead && v->always_computable
10068 && v->mode == mode
10069 && 0)
10071 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10072 continue;
10074 if (! eliminate_p)
10075 return 1;
10077 /* If the giv has the opposite direction of change,
10078 then reverse the comparison. */
10079 if (INTVAL (v->mult_val) < 0)
10080 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
10081 const0_rtx, v->new_reg);
10082 else
10083 new = v->new_reg;
10085 /* We can probably test that giv's reduced reg. */
10086 if (validate_change (insn, &SET_SRC (x), new, 0))
10087 return 1;
10090 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
10091 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
10092 Require a constant for MULT_VAL, so we know it's nonzero.
10093 ??? Do this only if ADD_VAL is a pointer to avoid a potential
10094 overflow problem. */
10096 for (v = bl->giv; v; v = v->next_iv)
10097 if (GET_CODE (v->mult_val) == CONST_INT
10098 && v->mult_val != const0_rtx
10099 && ! v->ignore && ! v->maybe_dead && v->always_computable
10100 && v->mode == mode
10101 && (GET_CODE (v->add_val) == SYMBOL_REF
10102 || GET_CODE (v->add_val) == LABEL_REF
10103 || GET_CODE (v->add_val) == CONST
10104 || (REG_P (v->add_val)
10105 && REG_POINTER (v->add_val))))
10107 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10108 continue;
10110 if (! eliminate_p)
10111 return 1;
10113 /* If the giv has the opposite direction of change,
10114 then reverse the comparison. */
10115 if (INTVAL (v->mult_val) < 0)
10116 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
10117 v->new_reg);
10118 else
10119 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
10120 copy_rtx (v->add_val));
10122 /* Replace biv with the giv's reduced register. */
10123 update_reg_last_use (v->add_val, insn);
10124 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10125 return 1;
10127 /* Insn doesn't support that constant or invariant. Copy it
10128 into a register (it will be a loop invariant). */
10129 tem = gen_reg_rtx (GET_MODE (v->new_reg));
10131 loop_insn_emit_before (loop, 0, where_insn,
10132 gen_move_insn (tem,
10133 copy_rtx (v->add_val)));
10135 /* Substitute the new register for its invariant value in
10136 the compare expression. */
10137 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
10138 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10139 return 1;
10142 #endif
10143 break;
10145 case COMPARE:
10146 case EQ: case NE:
10147 case GT: case GE: case GTU: case GEU:
10148 case LT: case LE: case LTU: case LEU:
10149 /* See if either argument is the biv. */
10150 if (XEXP (x, 0) == reg)
10151 arg = XEXP (x, 1), arg_operand = 1;
10152 else if (XEXP (x, 1) == reg)
10153 arg = XEXP (x, 0), arg_operand = 0;
10154 else
10155 break;
10157 if (CONSTANT_P (arg))
10159 /* First try to replace with any giv that has constant positive
10160 mult_val and constant add_val. We might be able to support
10161 negative mult_val, but it seems complex to do it in general. */
10163 for (v = bl->giv; v; v = v->next_iv)
10164 if (GET_CODE (v->mult_val) == CONST_INT
10165 && INTVAL (v->mult_val) > 0
10166 && (GET_CODE (v->add_val) == SYMBOL_REF
10167 || GET_CODE (v->add_val) == LABEL_REF
10168 || GET_CODE (v->add_val) == CONST
10169 || (REG_P (v->add_val)
10170 && REG_POINTER (v->add_val)))
10171 && ! v->ignore && ! v->maybe_dead && v->always_computable
10172 && v->mode == mode)
10174 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10175 continue;
10177 /* Don't eliminate if the linear combination that makes up
10178 the giv overflows when it is applied to ARG. */
10179 if (GET_CODE (arg) == CONST_INT)
10181 rtx add_val;
10183 if (GET_CODE (v->add_val) == CONST_INT)
10184 add_val = v->add_val;
10185 else
10186 add_val = const0_rtx;
10188 if (const_mult_add_overflow_p (arg, v->mult_val,
10189 add_val, mode, 1))
10190 continue;
10193 if (! eliminate_p)
10194 return 1;
10196 /* Replace biv with the giv's reduced reg. */
10197 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
10199 /* If all constants are actually constant integers and
10200 the derived constant can be directly placed in the COMPARE,
10201 do so. */
10202 if (GET_CODE (arg) == CONST_INT
10203 && GET_CODE (v->add_val) == CONST_INT)
10205 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
10206 v->add_val, mode, 1);
10208 else
10210 /* Otherwise, load it into a register. */
10211 tem = gen_reg_rtx (mode);
10212 loop_iv_add_mult_emit_before (loop, arg,
10213 v->mult_val, v->add_val,
10214 tem, where_bb, where_insn);
10217 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10219 if (apply_change_group ())
10220 return 1;
10223 /* Look for giv with positive constant mult_val and nonconst add_val.
10224 Insert insns to calculate new compare value.
10225 ??? Turn this off due to possible overflow. */
10227 for (v = bl->giv; v; v = v->next_iv)
10228 if (GET_CODE (v->mult_val) == CONST_INT
10229 && INTVAL (v->mult_val) > 0
10230 && ! v->ignore && ! v->maybe_dead && v->always_computable
10231 && v->mode == mode
10232 && 0)
10234 rtx tem;
10236 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10237 continue;
10239 if (! eliminate_p)
10240 return 1;
10242 tem = gen_reg_rtx (mode);
10244 /* Replace biv with giv's reduced register. */
10245 validate_change (insn, &XEXP (x, 1 - arg_operand),
10246 v->new_reg, 1);
10248 /* Compute value to compare against. */
10249 loop_iv_add_mult_emit_before (loop, arg,
10250 v->mult_val, v->add_val,
10251 tem, where_bb, where_insn);
10252 /* Use it in this insn. */
10253 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10254 if (apply_change_group ())
10255 return 1;
10258 else if (REG_P (arg) || MEM_P (arg))
10260 if (loop_invariant_p (loop, arg) == 1)
10262 /* Look for giv with constant positive mult_val and nonconst
10263 add_val. Insert insns to compute new compare value.
10264 ??? Turn this off due to possible overflow. */
10266 for (v = bl->giv; v; v = v->next_iv)
10267 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
10268 && ! v->ignore && ! v->maybe_dead && v->always_computable
10269 && v->mode == mode
10270 && 0)
10272 rtx tem;
10274 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10275 continue;
10277 if (! eliminate_p)
10278 return 1;
10280 tem = gen_reg_rtx (mode);
10282 /* Replace biv with giv's reduced register. */
10283 validate_change (insn, &XEXP (x, 1 - arg_operand),
10284 v->new_reg, 1);
10286 /* Compute value to compare against. */
10287 loop_iv_add_mult_emit_before (loop, arg,
10288 v->mult_val, v->add_val,
10289 tem, where_bb, where_insn);
10290 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10291 if (apply_change_group ())
10292 return 1;
10296 /* This code has problems. Basically, at the point where we check
10297 whether BL can be eliminated, we can't know whether a particular giv
10298 of ARG will be reduced. If it isn't going to be reduced,
10299 we can't eliminate BL. We can try forcing it to be reduced,
10300 but that can generate poor code.
10302 The problem is that the benefit of reducing TV, below, should
10303 be increased if BL can actually be eliminated, but this means
10304 we might have to do a topological sort of the order in which
10305 we try to process bivs. It doesn't seem worthwhile to do
10306 this sort of thing now. */
10308 #if 0
10309 /* Otherwise the reg compared with had better be a biv. */
10310 if (!REG_P (arg)
10311 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
10312 return 0;
10314 /* Look for a pair of givs, one for each biv,
10315 with identical coefficients. */
10316 for (v = bl->giv; v; v = v->next_iv)
10318 struct induction *tv;
10320 if (v->ignore || v->maybe_dead || v->mode != mode)
10321 continue;
10323 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
10324 tv = tv->next_iv)
10325 if (! tv->ignore && ! tv->maybe_dead
10326 && rtx_equal_p (tv->mult_val, v->mult_val)
10327 && rtx_equal_p (tv->add_val, v->add_val)
10328 && tv->mode == mode)
10330 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10331 continue;
10333 if (! eliminate_p)
10334 return 1;
10336 /* Replace biv with its giv's reduced reg. */
10337 XEXP (x, 1 - arg_operand) = v->new_reg;
10338 /* Replace other operand with the other giv's
10339 reduced reg. */
10340 XEXP (x, arg_operand) = tv->new_reg;
10341 return 1;
10344 #endif
10347 /* If we get here, the biv can't be eliminated. */
10348 return 0;
10350 case MEM:
10351 /* If this address is a DEST_ADDR giv, it doesn't matter if the
10352 biv is used in it, since it will be replaced. */
10353 for (v = bl->giv; v; v = v->next_iv)
10354 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
10355 return 1;
10356 break;
10358 default:
10359 break;
10362 /* See if any subexpression fails elimination. */
10363 fmt = GET_RTX_FORMAT (code);
10364 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
10366 switch (fmt[i])
10368 case 'e':
10369 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
10370 eliminate_p, where_bb, where_insn))
10371 return 0;
10372 break;
10374 case 'E':
10375 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10376 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
10377 eliminate_p, where_bb, where_insn))
10378 return 0;
10379 break;
10383 return 1;
10386 /* Return nonzero if the last use of REG
10387 is in an insn following INSN in the same basic block. */
10389 static int
10390 last_use_this_basic_block (rtx reg, rtx insn)
10392 rtx n;
10393 for (n = insn;
10394 n && !LABEL_P (n) && !JUMP_P (n);
10395 n = NEXT_INSN (n))
10397 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
10398 return 1;
10400 return 0;
10403 /* Called via `note_stores' to record the initial value of a biv. Here we
10404 just record the location of the set and process it later. */
10406 static void
10407 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
10409 struct loop_ivs *ivs = (struct loop_ivs *) data;
10410 struct iv_class *bl;
10412 if (!REG_P (dest)
10413 || REGNO (dest) >= ivs->n_regs
10414 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
10415 return;
10417 bl = REG_IV_CLASS (ivs, REGNO (dest));
10419 /* If this is the first set found, record it. */
10420 if (bl->init_insn == 0)
10422 bl->init_insn = note_insn;
10423 bl->init_set = set;
10427 /* If any of the registers in X are "old" and currently have a last use earlier
10428 than INSN, update them to have a last use of INSN. Their actual last use
10429 will be the previous insn but it will not have a valid uid_luid so we can't
10430 use it. X must be a source expression only. */
10432 static void
10433 update_reg_last_use (rtx x, rtx insn)
10435 /* Check for the case where INSN does not have a valid luid. In this case,
10436 there is no need to modify the regno_last_uid, as this can only happen
10437 when code is inserted after the loop_end to set a pseudo's final value,
10438 and hence this insn will never be the last use of x.
10439 ???? This comment is not correct. See for example loop_givs_reduce.
10440 This may insert an insn before another new insn. */
10441 if (REG_P (x) && REGNO (x) < max_reg_before_loop
10442 && INSN_UID (insn) < max_uid_for_loop
10443 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
10445 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
10447 else
10449 int i, j;
10450 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
10451 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
10453 if (fmt[i] == 'e')
10454 update_reg_last_use (XEXP (x, i), insn);
10455 else if (fmt[i] == 'E')
10456 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10457 update_reg_last_use (XVECEXP (x, i, j), insn);
10462 /* Similar to rtlanal.c:get_condition, except that we also put an
10463 invariant last unless both operands are invariants. */
10465 static rtx
10466 get_condition_for_loop (const struct loop *loop, rtx x)
10468 rtx comparison = get_condition (x, (rtx*) 0, false, true);
10470 if (comparison == 0
10471 || ! loop_invariant_p (loop, XEXP (comparison, 0))
10472 || loop_invariant_p (loop, XEXP (comparison, 1)))
10473 return comparison;
10475 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
10476 XEXP (comparison, 1), XEXP (comparison, 0));
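/* A short sketch of the canonicalization above: for a loop over I with
   loop-invariant bound N, a condition extracted as (gt N I) is swapped
   to (lt I N), so callers can assume the loop-variant operand comes
   first and the invariant one second.  */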
10479 /* Scan the function and determine whether it has indirect (computed) jumps.
10481 This is taken mostly from flow.c; similar code exists elsewhere
10482 in the compiler. It may be useful to put this into rtlanal.c. */
10483 static int
10484 indirect_jump_in_function_p (rtx start)
10486 rtx insn;
10488 for (insn = start; insn; insn = NEXT_INSN (insn))
10489 if (computed_jump_p (insn))
10490 return 1;
10492 return 0;
10495 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
10496 documentation for LOOP_MEMS for the definition of `appropriate'.
10497 This function is called from prescan_loop via for_each_rtx. */
10499 static int
10500 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
10502 struct loop_info *loop_info = data;
10503 int i;
10504 rtx m = *mem;
10506 if (m == NULL_RTX)
10507 return 0;
10509 switch (GET_CODE (m))
10511 case MEM:
10512 break;
10514 case CLOBBER:
10515 /* We're not interested in MEMs that are only clobbered. */
10516 return -1;
10518 case CONST_DOUBLE:
10519 /* We're not interested in the MEM associated with a
10520 CONST_DOUBLE, so there's no need to traverse into this. */
10521 return -1;
10523 case EXPR_LIST:
10524 /* We're not interested in any MEMs that only appear in notes. */
10525 return -1;
10527 default:
10528 /* This is not a MEM. */
10529 return 0;
10532 /* See if we've already seen this MEM. */
10533 for (i = 0; i < loop_info->mems_idx; ++i)
10534 if (rtx_equal_p (m, loop_info->mems[i].mem))
10536 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
10537 loop_info->mems[i].mem = m;
10538 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
10539 /* The modes of the two memory accesses are different. If
10540 this happens, something tricky is going on, and we just
10541 don't optimize accesses to this MEM. */
10542 loop_info->mems[i].optimize = 0;
10544 return 0;
10547 /* Resize the array, if necessary. */
10548 if (loop_info->mems_idx == loop_info->mems_allocated)
10550 if (loop_info->mems_allocated != 0)
10551 loop_info->mems_allocated *= 2;
10552 else
10553 loop_info->mems_allocated = 32;
10555 loop_info->mems = xrealloc (loop_info->mems,
10556 loop_info->mems_allocated * sizeof (loop_mem_info));
10559 /* Actually insert the MEM. */
10560 loop_info->mems[loop_info->mems_idx].mem = m;
10561 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
10562 because we can't put it in a register. We still store it in the
10563 table, though, so that if we see the same address later, but in a
10564 non-BLK mode, we'll not think we can optimize it at that point. */
10565 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
10566 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
10567 ++loop_info->mems_idx;
10569 return 0;
10573 /* Allocate REGS->ARRAY or reallocate it if it is too small.
10575 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
10576 register that is modified by an insn between FROM and TO. If the
10577 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
10578 more, stop incrementing it, to avoid overflow.
10580 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
10581 register I is used, if it is only used once. Otherwise, it is set
10582 to 0 (for no uses) or const0_rtx for more than one use. This
10583 parameter may be zero, in which case this processing is not done.
10585 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
10586 optimize register I. */
10588 static void
10589 loop_regs_scan (const struct loop *loop, int extra_size)
10591 struct loop_regs *regs = LOOP_REGS (loop);
10592 int old_nregs;
10593 /* last_set[n] is nonzero iff reg n has been set in the current
10594 basic block. In that case, it is the insn that last set reg n. */
10595 rtx *last_set;
10596 rtx insn;
10597 int i;
10599 old_nregs = regs->num;
10600 regs->num = max_reg_num ();
10602 /* Grow the regs array if not allocated or too small. */
10603 if (regs->num >= regs->size)
10605 regs->size = regs->num + extra_size;
10607 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
10609 /* Zero the new elements. */
10610 memset (regs->array + old_nregs, 0,
10611 (regs->size - old_nregs) * sizeof (*regs->array));
10614 /* Clear previously scanned fields but do not clear n_times_set. */
10615 for (i = 0; i < old_nregs; i++)
10617 regs->array[i].set_in_loop = 0;
10618 regs->array[i].may_not_optimize = 0;
10619 regs->array[i].single_usage = NULL_RTX;
10622 last_set = xcalloc (regs->num, sizeof (rtx));
10624 /* Scan the loop, recording register usage. */
10625 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10626 insn = NEXT_INSN (insn))
10628 if (INSN_P (insn))
10630 /* Record registers that have exactly one use. */
10631 find_single_use_in_loop (regs, insn, PATTERN (insn));
10633 /* Include uses in REG_EQUAL notes. */
10634 if (REG_NOTES (insn))
10635 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
10637 if (GET_CODE (PATTERN (insn)) == SET
10638 || GET_CODE (PATTERN (insn)) == CLOBBER)
10639 count_one_set (regs, insn, PATTERN (insn), last_set);
10640 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
10642 int i;
10643 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
10644 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
10645 last_set);
10649 if (LABEL_P (insn) || JUMP_P (insn))
10650 memset (last_set, 0, regs->num * sizeof (rtx));
10652 /* Invalidate all registers used for function argument passing.
10653 We check rtx_varies_p for the same reason as below, to allow
10654 optimizing PIC calculations. */
10655 if (CALL_P (insn))
10657 rtx link;
10658 for (link = CALL_INSN_FUNCTION_USAGE (insn);
10659 link;
10660 link = XEXP (link, 1))
10662 rtx op, reg;
10664 if (GET_CODE (op = XEXP (link, 0)) == USE
10665 && REG_P (reg = XEXP (op, 0))
10666 && rtx_varies_p (reg, 1))
10667 regs->array[REGNO (reg)].may_not_optimize = 1;
10672 /* Invalidate all hard registers clobbered by calls. With one exception:
10673 a call-clobbered PIC register is still function-invariant for our
10674 purposes, since we can hoist any PIC calculations out of the loop.
10675 Thus the call to rtx_varies_p. */
10676 if (LOOP_INFO (loop)->has_call)
10677 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
10678 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
10679 && rtx_varies_p (regno_reg_rtx[i], 1))
10681 regs->array[i].may_not_optimize = 1;
10682 regs->array[i].set_in_loop = 1;
10685 #ifdef AVOID_CCMODE_COPIES
10686 /* Don't try to move insns which set CC registers if we should not
10687 create CCmode register copies. */
10688 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
10689 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
10690 regs->array[i].may_not_optimize = 1;
10691 #endif
10693 /* Set regs->array[I].n_times_set for the new registers. */
10694 for (i = old_nregs; i < regs->num; i++)
10695 regs->array[i].n_times_set = regs->array[i].set_in_loop;
10697 free (last_set);
10700 /* Returns the number of real INSNs in the LOOP. */
10702 static int
10703 count_insns_in_loop (const struct loop *loop)
10705 int count = 0;
10706 rtx insn;
10708 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10709 insn = NEXT_INSN (insn))
10710 if (INSN_P (insn))
10711 ++count;
10713 return count;
10716 /* Move MEMs into registers for the duration of the loop. */
10718 static void
10719 load_mems (const struct loop *loop)
10721 struct loop_info *loop_info = LOOP_INFO (loop);
10722 struct loop_regs *regs = LOOP_REGS (loop);
10723 int maybe_never = 0;
10724 int i;
10725 rtx p, prev_ebb_head;
10726 rtx label = NULL_RTX;
10727 rtx end_label;
10728 /* Nonzero if the next instruction may never be executed. */
10729 int next_maybe_never = 0;
10730 unsigned int last_max_reg = max_reg_num ();
10732 if (loop_info->mems_idx == 0)
10733 return;
10735 /* We cannot use next_label here because it skips over normal insns. */
10736 end_label = next_nonnote_insn (loop->end);
10737 if (end_label && !LABEL_P (end_label))
10738 end_label = NULL_RTX;
10740 /* Check to see if it's possible that some instructions in the loop are
10741 never executed. Also check if there is a goto out of the loop other
10742 than right after the end of the loop. */
10743 for (p = next_insn_in_loop (loop, loop->scan_start);
10744 p != NULL_RTX;
10745 p = next_insn_in_loop (loop, p))
10747 if (LABEL_P (p))
10748 maybe_never = 1;
10749 else if (JUMP_P (p)
10750 /* If we enter the loop in the middle, and scan
10751 around to the beginning, don't set maybe_never
10752 for that. This must be an unconditional jump,
10753 otherwise the code at the top of the loop might
10754 never be executed. Unconditional jumps are
10755 followed by a barrier and then the loop end. */
10756 && ! (JUMP_P (p)
10757 && JUMP_LABEL (p) == loop->top
10758 && NEXT_INSN (NEXT_INSN (p)) == loop->end
10759 && any_uncondjump_p (p)))
10761 /* If this is a jump outside of the loop but not right
10762 after the end of the loop, we would have to emit new fixup
10763 sequences for each such label. */
10764 if (/* If we can't tell where control might go when this
10765 JUMP_INSN is executed, we must be conservative. */
10766 !JUMP_LABEL (p)
10767 || (JUMP_LABEL (p) != end_label
10768 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
10769 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
10770 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
10771 return;
10773 if (!any_condjump_p (p))
10774 /* Something complicated. */
10775 maybe_never = 1;
10776 else
10777 /* If there are any more instructions in the loop, they
10778 might not be reached. */
10779 next_maybe_never = 1;
10781 else if (next_maybe_never)
10782 maybe_never = 1;
10785 /* Find start of the extended basic block that enters the loop. */
10786 for (p = loop->start;
10787 PREV_INSN (p) && !LABEL_P (p);
10788 p = PREV_INSN (p))
10790 prev_ebb_head = p;
10792 cselib_init (true);
10794 /* Build table of mems that get set to constant values before the
10795 loop. */
10796 for (; p != loop->start; p = NEXT_INSN (p))
10797 cselib_process_insn (p);
10799 /* Actually move the MEMs. */
10800 for (i = 0; i < loop_info->mems_idx; ++i)
10802 regset_head load_copies;
10803 regset_head store_copies;
10804 int written = 0;
10805 rtx reg;
10806 rtx mem = loop_info->mems[i].mem;
10807 rtx mem_list_entry;
10809 if (MEM_VOLATILE_P (mem)
10810 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
10811 /* There's no telling whether or not MEM is modified. */
10812 loop_info->mems[i].optimize = 0;
10814 /* Go through the MEMs written to in the loop to see if this
10815 one is aliased by one of them. */
10816 mem_list_entry = loop_info->store_mems;
10817 while (mem_list_entry)
10819 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
10820 written = 1;
10821 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
10822 mem, rtx_varies_p))
10824 /* MEM is indeed aliased by this store. */
10825 loop_info->mems[i].optimize = 0;
10826 break;
10828 mem_list_entry = XEXP (mem_list_entry, 1);
10831 if (flag_float_store && written
10832 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
10833 loop_info->mems[i].optimize = 0;
10835 /* If this MEM is written to, we must be sure that there
10836 are no reads from another MEM that aliases this one. */
10837 if (loop_info->mems[i].optimize && written)
10839 int j;
10841 for (j = 0; j < loop_info->mems_idx; ++j)
10843 if (j == i)
10844 continue;
10845 else if (true_dependence (mem,
10846 VOIDmode,
10847 loop_info->mems[j].mem,
10848 rtx_varies_p))
10850 /* It's not safe to hoist loop_info->mems[i] out of
10851 the loop because writes to it might not be
10852 seen by reads from loop_info->mems[j]. */
10853 loop_info->mems[i].optimize = 0;
10854 break;
10859 if (maybe_never && may_trap_p (mem))
10860 /* We can't access the MEM outside the loop; it might
10861 cause a trap that wouldn't have happened otherwise. */
10862 loop_info->mems[i].optimize = 0;
10864 if (!loop_info->mems[i].optimize)
10865 /* We thought we were going to lift this MEM out of the
10866 loop, but later discovered that we could not. */
10867 continue;
10869 INIT_REG_SET (&load_copies);
10870 INIT_REG_SET (&store_copies);
10872 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
10873 order to keep scan_loop from moving stores to this MEM
10874 out of the loop just because this REG is neither a
10875 user-variable nor used in the loop test. */
10876 reg = gen_reg_rtx (GET_MODE (mem));
10877 REG_USERVAR_P (reg) = 1;
10878 loop_info->mems[i].reg = reg;
10880 /* Now, replace all references to the MEM with the
10881 corresponding pseudos. */
10882 maybe_never = 0;
10883 for (p = next_insn_in_loop (loop, loop->scan_start);
10884 p != NULL_RTX;
10885 p = next_insn_in_loop (loop, p))
10887 if (INSN_P (p))
10889 rtx set;
10891 set = single_set (p);
10893 /* See if this copies the mem into a register that isn't
10894 modified afterwards. We'll try to do copy propagation
10895 a little further on. */
10896 if (set
10897 /* @@@ This test is _way_ too conservative. */
10898 && ! maybe_never
10899 && REG_P (SET_DEST (set))
10900 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
10901 && REGNO (SET_DEST (set)) < last_max_reg
10902 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
10903 && rtx_equal_p (SET_SRC (set), mem))
10904 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
10906 /* See if this copies the mem from a register that isn't
10907 modified afterwards. We'll try to remove the
10908 redundant copy later on by doing a little register
10909 renaming and copy propagation. This will help
10910 to untangle things for the BIV detection code. */
10911 if (set
10912 && ! maybe_never
10913 && REG_P (SET_SRC (set))
10914 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
10915 && REGNO (SET_SRC (set)) < last_max_reg
10916 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
10917 && rtx_equal_p (SET_DEST (set), mem))
10918 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
10920 /* If this is a call which uses / clobbers this memory
10921 location, we must not change the interface here. */
10922 if (CALL_P (p)
10923 && reg_mentioned_p (loop_info->mems[i].mem,
10924 CALL_INSN_FUNCTION_USAGE (p)))
10926 cancel_changes (0);
10927 loop_info->mems[i].optimize = 0;
10928 break;
10930 else
10931 /* Replace the memory reference with the shadow register. */
10932 replace_loop_mems (p, loop_info->mems[i].mem,
10933 loop_info->mems[i].reg, written);
10936 if (LABEL_P (p)
10937 || JUMP_P (p))
10938 maybe_never = 1;
10941 if (! loop_info->mems[i].optimize)
10942 ; /* We found we couldn't do the replacement, so do nothing. */
10943 else if (! apply_change_group ())
10944 /* We couldn't replace all occurrences of the MEM. */
10945 loop_info->mems[i].optimize = 0;
10946 else
10948 /* Load the memory immediately before LOOP->START, which is
10949 the NOTE_INSN_LOOP_BEG. */
10950 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
10951 rtx set;
10952 rtx best = mem;
10953 unsigned j;
10954 struct elt_loc_list *const_equiv = 0;
10955 reg_set_iterator rsi;
10957 if (e)
10959 struct elt_loc_list *equiv;
10960 struct elt_loc_list *best_equiv = 0;
10961 for (equiv = e->locs; equiv; equiv = equiv->next)
10963 if (CONSTANT_P (equiv->loc))
10964 const_equiv = equiv;
10965 else if (REG_P (equiv->loc)
10966 /* Extending hard register lifetimes causes a crash
10967 on SRC targets. Doing so on non-SRC targets is
10968 probably not a good idea either, since we most
10969 likely have a pseudo-register equivalence as
10970 well. */
10971 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
10972 best_equiv = equiv;
10974 /* Use the constant equivalence if that is cheap enough. */
10975 if (! best_equiv)
10976 best_equiv = const_equiv;
10977 else if (const_equiv
10978 && (rtx_cost (const_equiv->loc, SET)
10979 <= rtx_cost (best_equiv->loc, SET)))
10981 best_equiv = const_equiv;
10982 const_equiv = 0;
10985 /* If best_equiv is nonzero, we know that MEM is set to a
10986 constant or register before the loop. We will use this
10987 knowledge to initialize the shadow register with that
10988 constant or reg rather than by loading from MEM. */
10989 if (best_equiv)
10990 best = copy_rtx (best_equiv->loc);
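/* For example (sketch): if cselib has proved that MEM holds
   (const_int 0) on entry to the loop, BEST becomes (const_int 0) and
   the shadow register is initialized with a constant move rather
   than with a load from MEM.  */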
10993 set = gen_move_insn (reg, best);
10994 set = loop_insn_hoist (loop, set);
10995 if (REG_P (best))
10997 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
10998 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
11000 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
11001 break;
11005 if (const_equiv)
11006 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
11008 if (written)
11010 if (label == NULL_RTX)
11012 label = gen_label_rtx ();
11013 emit_label_after (label, loop->end);
11016 /* Store the memory immediately after END, which is
11017 the NOTE_INSN_LOOP_END. */
11018 set = gen_move_insn (copy_rtx (mem), reg);
11019 loop_insn_emit_after (loop, 0, label, set);
11022 if (loop_dump_stream)
11024 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
11025 REGNO (reg), (written ? "r/w" : "r/o"));
11026 print_rtl (loop_dump_stream, mem);
11027 fputc ('\n', loop_dump_stream);
11030 /* Attempt a bit of copy propagation. This helps untangle the
11031 data flow, and enables {basic,general}_induction_var to find
11032 more bivs/givs. */
11033 EXECUTE_IF_SET_IN_REG_SET
11034 (&load_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11036 try_copy_prop (loop, reg, j);
11038 CLEAR_REG_SET (&load_copies);
11040 EXECUTE_IF_SET_IN_REG_SET
11041 (&store_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11043 try_swap_copy_prop (loop, reg, j);
11045 CLEAR_REG_SET (&store_copies);
11049 /* Now, we need to replace all references to the previous exit
11050 label with the new one. */
11051 if (label != NULL_RTX && end_label != NULL_RTX)
11052 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
11053 if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
11054 redirect_jump (p, label, false);
11056 cselib_finish ();
11059 /* For communication between note_reg_stored and its caller. */
11060 struct note_reg_stored_arg
11062 int set_seen;
11063 rtx reg;
11066 /* Called via note_stores; record in ARG->set_seen whether X, which is
11067 written, is equal to ARG->reg. */
11068 static void
11069 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
11071 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
11072 if (t->reg == x)
11073 t->set_seen = 1;
11076 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
11077 There must be exactly one insn that sets this pseudo; it will be
11078 deleted if all replacements succeed and we can prove that the register
11079 is not used after the loop. */
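/* For example (an RTL sketch; register numbers are hypothetical):

       (set (reg 105) (reg 104))     <- init_insn, the sole set of 105
       ... uses of (reg 105) ...     <- each rewritten to (reg 104)

   and if reg 105 can be shown to be unused after the loop, init_insn
   itself is deleted below.  */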
11081 static void
11082 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
11084 /* This is the reg that we are copying from. */
11085 rtx reg_rtx = regno_reg_rtx[regno];
11086 rtx init_insn = 0;
11087 rtx insn;
11088 /* These help keep track of whether we replaced all uses of the reg. */
11089 int replaced_last = 0;
11090 int store_is_first = 0;
11092 for (insn = next_insn_in_loop (loop, loop->scan_start);
11093 insn != NULL_RTX;
11094 insn = next_insn_in_loop (loop, insn))
11096 rtx set;
11098 /* Only substitute within one extended basic block from the initializing
11099 insn. */
11100 if (LABEL_P (insn) && init_insn)
11101 break;
11103 if (! INSN_P (insn))
11104 continue;
11106 /* Is this the initializing insn? */
11107 set = single_set (insn);
11108 if (set
11109 && REG_P (SET_DEST (set))
11110 && REGNO (SET_DEST (set)) == regno)
11112 gcc_assert (!init_insn);
11114 init_insn = insn;
11115 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
11116 store_is_first = 1;
11119 /* Only substitute after seeing the initializing insn. */
11120 if (init_insn && insn != init_insn)
11122 struct note_reg_stored_arg arg;
11124 replace_loop_regs (insn, reg_rtx, replacement);
11125 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
11126 replaced_last = 1;
11128 /* Stop replacing when REPLACEMENT is modified. */
11129 arg.reg = replacement;
11130 arg.set_seen = 0;
11131 note_stores (PATTERN (insn), note_reg_stored, &arg);
11132 if (arg.set_seen)
11134 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
11136 /* We may have turned a previously valid REG_EQUAL note into an
11137 invalid one: REGNO was changed to REPLACEMENT, and unlike REGNO,
11138 REPLACEMENT is modified here, so the note's meaning has changed. */
11139 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
11140 remove_note (insn, note);
11141 break;
11145 gcc_assert (init_insn);
11146 if (apply_change_group ())
11148 if (loop_dump_stream)
11149 fprintf (loop_dump_stream, " Replaced reg %d", regno);
11150 if (store_is_first && replaced_last)
11152 rtx first;
11153 rtx retval_note;
11155 /* Assume we're just deleting INIT_INSN. */
11156 first = init_insn;
11157 /* Look for a REG_RETVAL note. If we're deleting the end of
11158 the libcall sequence, the whole sequence can go. */
11159 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
11160 /* If we found a REG_RETVAL note, find the first instruction
11161 in the sequence. */
11162 if (retval_note)
11163 first = XEXP (retval_note, 0);
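/* A libcall sequence is bracketed by notes roughly as follows
   (sketch):

       insn A: ...                REG_LIBCALL note pointing to C
       insn B: ...
       insn C: sets the result    REG_RETVAL note pointing back to A

   so if INIT_INSN is such a C, the whole run A..C is deleted.  */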
11165 /* Delete the instructions. */
11166 loop_delete_insns (first, init_insn);
11168 if (loop_dump_stream)
11169 fprintf (loop_dump_stream, ".\n");
11173 /* Replace all the instructions from FIRST up to and including LAST
11174 with NOTE_INSN_DELETED notes. */
11176 static void
11177 loop_delete_insns (rtx first, rtx last)
11179 while (1)
11181 if (loop_dump_stream)
11182 fprintf (loop_dump_stream, ", deleting insn (%d)",
11183 INSN_UID (first));
11184 delete_insn (first);
11186 /* If this was the LAST instruction we're supposed to delete,
11187 we're done. */
11188 if (first == last)
11189 break;
11191 first = NEXT_INSN (first);
11195 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
11196 loop LOOP if the order of the sets of these registers can be
11197 swapped. There must be exactly one insn within the loop that sets
11198 this pseudo, followed immediately by a move insn that copies
11199 REGNO to REPLACEMENT. */
11200 static void
11201 try_swap_copy_prop (const struct loop *loop, rtx replacement,
11202 unsigned int regno)
11204 rtx insn;
11205 rtx set = NULL_RTX;
11206 unsigned int new_regno;
11208 new_regno = REGNO (replacement);
11210 for (insn = next_insn_in_loop (loop, loop->scan_start);
11211 insn != NULL_RTX;
11212 insn = next_insn_in_loop (loop, insn))
11214 /* Search for the insn that copies REGNO to NEW_REGNO. */
11215 if (INSN_P (insn)
11216 && (set = single_set (insn))
11217 && REG_P (SET_DEST (set))
11218 && REGNO (SET_DEST (set)) == new_regno
11219 && REG_P (SET_SRC (set))
11220 && REGNO (SET_SRC (set)) == regno)
11221 break;
11224 if (insn != NULL_RTX)
11226 rtx prev_insn;
11227 rtx prev_set;
11229 /* Some DEF-USE info would come in handy here to make this
11230 function more general. For now, just check the previous insn,
11231 which is the most likely candidate for setting REGNO. */
11233 prev_insn = PREV_INSN (insn);
11235 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
11236 && (prev_set = single_set (prev_insn))
11237 && REG_P (SET_DEST (prev_set))
11238 && REGNO (SET_DEST (prev_set)) == regno)
11240 /* We have:
11241 (set (reg regno) (expr))
11242 (set (reg new_regno) (reg regno))
11244 so try converting this to:
11245 (set (reg new_regno) (expr))
11246 (set (reg regno) (reg new_regno))
11248 The former construct is often generated when a global
11249 variable used for an induction variable is shadowed by a
11250 register (NEW_REGNO). The latter construct improves the
11251 chances of GIV replacement and BIV elimination. */
11253 validate_change (prev_insn, &SET_DEST (prev_set),
11254 replacement, 1);
11255 validate_change (insn, &SET_DEST (set),
11256 SET_SRC (set), 1);
11257 validate_change (insn, &SET_SRC (set),
11258 replacement, 1);
11260 if (apply_change_group ())
11262 if (loop_dump_stream)
11263 fprintf (loop_dump_stream,
11264 " Swapped set of reg %d at %d with reg %d at %d.\n",
11265 regno, INSN_UID (insn),
11266 new_regno, INSN_UID (prev_insn));
11268 /* Update first use of REGNO. */
11269 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
11270 REGNO_FIRST_UID (regno) = INSN_UID (insn);
11272 /* Now perform copy propagation to hopefully
11273 remove all uses of REGNO within the loop. */
11274 try_copy_prop (loop, replacement, regno);
11280 /* Worker function for find_mem_in_note, called via for_each_rtx. */
11282 static int
11283 find_mem_in_note_1 (rtx *x, void *data)
11285 if (*x != NULL_RTX && MEM_P (*x))
11287 rtx *res = (rtx *) data;
11288 *res = *x;
11289 return 1;
11291 return 0;
11294 /* Returns the first MEM found in NOTE by depth-first search. */
11296 static rtx
11297 find_mem_in_note (rtx note)
11299 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
11300 return note;
11301 return NULL_RTX;
11304 /* Replace MEM with its associated pseudo register. This function is
11305 called from load_mems via for_each_rtx. DATA is actually a pointer
11306 to a structure describing the instruction currently being scanned
11307 and the MEM we are currently replacing. */
11309 static int
11310 replace_loop_mem (rtx *mem, void *data)
11312 loop_replace_args *args = (loop_replace_args *) data;
11313 rtx m = *mem;
11315 if (m == NULL_RTX)
11316 return 0;
11318 switch (GET_CODE (m))
11320 case MEM:
11321 break;
11323 case CONST_DOUBLE:
11324 /* We're not interested in the MEM associated with a
11325 CONST_DOUBLE, so there's no need to traverse into one. */
11326 return -1;
11328 default:
11329 /* This is not a MEM. */
11330 return 0;
11333 if (!rtx_equal_p (args->match, m))
11334 /* This is not the MEM we are currently replacing. */
11335 return 0;
11337 /* Actually replace the MEM. */
11338 validate_change (args->insn, mem, args->replacement, 1);
11340 return 0;
11343 static void
11344 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
11346 loop_replace_args args;
11348 args.insn = insn;
11349 args.match = mem;
11350 args.replacement = reg;
11352 for_each_rtx (&insn, replace_loop_mem, &args);
11354 /* If we hoist a mem write out of the loop, then REG_EQUAL
11355 notes referring to the mem are no longer valid. */
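/* E.g. (sketch): a note

       REG_EQUAL: (mem X)

   asserts something about the current contents of X; once stores to
   X are deferred until after the loop, that assertion no longer
   holds within the loop body.  */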
11356 if (written)
11358 rtx note, sub;
11359 rtx *link;
11361 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
11363 if (REG_NOTE_KIND (note) == REG_EQUAL
11364 && (sub = find_mem_in_note (note))
11365 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
11367 /* Remove the note. */
11368 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
11369 break;
11375 /* Replace one register with another. Called through for_each_rtx; PX points
11376 to the rtx being scanned. DATA is actually a pointer to
11377 a structure of arguments. */
11379 static int
11380 replace_loop_reg (rtx *px, void *data)
11382 rtx x = *px;
11383 loop_replace_args *args = (loop_replace_args *) data;
11385 if (x == NULL_RTX)
11386 return 0;
11388 if (x == args->match)
11389 validate_change (args->insn, px, args->replacement, 1);
11391 return 0;
11394 static void
11395 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
11397 loop_replace_args args;
11399 args.insn = insn;
11400 args.match = reg;
11401 args.replacement = replacement;
11403 for_each_rtx (&insn, replace_loop_reg, &args);
11406 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
11407 (ignored in the interim). */
11409 static rtx
11410 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
11411 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
11412 rtx pattern)
11414 return emit_insn_after (pattern, where_insn);
11418 /* If WHERE_INSN is nonzero, emit an insn for PATTERN before WHERE_INSN
11419 in basic block WHERE_BB (ignored in the interim) within the loop;
11420 otherwise, hoist PATTERN into the loop pre-header. */
11422 static rtx
11423 loop_insn_emit_before (const struct loop *loop,
11424 basic_block where_bb ATTRIBUTE_UNUSED,
11425 rtx where_insn, rtx pattern)
11427 if (! where_insn)
11428 return loop_insn_hoist (loop, pattern);
11429 return emit_insn_before (pattern, where_insn);
11433 /* Emit call insn for PATTERN before WHERE_INSN in basic block
11434 WHERE_BB (ignored in the interim) within the loop. */
11436 static rtx
11437 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
11438 basic_block where_bb ATTRIBUTE_UNUSED,
11439 rtx where_insn, rtx pattern)
11441 return emit_call_insn_before (pattern, where_insn);
11445 /* Hoist insn for PATTERN into the loop pre-header. */
11447 static rtx
11448 loop_insn_hoist (const struct loop *loop, rtx pattern)
11450 return loop_insn_emit_before (loop, 0, loop->start, pattern);
11454 /* Hoist call insn for PATTERN into the loop pre-header. */
11456 static rtx
11457 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
11459 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
11463 /* Sink insn for PATTERN after the loop end. */
11465 static rtx
11466 loop_insn_sink (const struct loop *loop, rtx pattern)
11468 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
11471 /* bl->final_value can be either a general_operand or a PLUS of a
11472 general_operand and a constant. Emit a sequence of insns to load it into REG. */
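/* For example (sketch): with FINAL_VALUE = (plus (reg 100)
   (const_int 4)), force_operand expands the PLUS into an add whose
   result is left in REG, roughly

       (set (reg REG) (plus (reg 100) (const_int 4)))

   whereas a plain general_operand needs only the single move below.  */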
11473 static rtx
11474 gen_load_of_final_value (rtx reg, rtx final_value)
11476 rtx seq;
11477 start_sequence ();
11478 final_value = force_operand (final_value, reg);
11479 if (final_value != reg)
11480 emit_move_insn (reg, final_value);
11481 seq = get_insns ();
11482 end_sequence ();
11483 return seq;
11486 /* If the loop has multiple exits, emit insn for PATTERN before the
11487 loop to ensure that it will always be executed no matter how the
11488 loop exits. Otherwise, emit the insn for PATTERN after the loop,
11489 since this is slightly more efficient. */
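/* For instance, an insn materializing a loop-invariant value needed
   after the loop: with only the normal fall-through exit it can be
   sunk past the loop end, but in the presence of other exits it is
   hoisted into the pre-header so that every exit path has executed
   it.  */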
11491 static rtx
11492 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
11494 if (loop->exit_count)
11495 return loop_insn_hoist (loop, pattern);
11496 else
11497 return loop_insn_sink (loop, pattern);
11500 static void
11501 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
11503 struct iv_class *bl;
11504 int iv_num = 0;
11506 if (! loop || ! file)
11507 return;
11509 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11510 iv_num++;
11512 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
11514 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11516 loop_iv_class_dump (bl, file, verbose);
11517 fputc ('\n', file);
11522 static void
11523 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
11524 int verbose ATTRIBUTE_UNUSED)
11526 struct induction *v;
11527 rtx incr;
11528 int i;
11530 if (! bl || ! file)
11531 return;
11533 fprintf (file, "IV class for reg %d, benefit %d\n",
11534 bl->regno, bl->total_benefit);
11536 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
11537 if (bl->initial_value)
11539 fprintf (file, ", init val: ");
11540 print_simple_rtl (file, bl->initial_value);
11542 if (bl->initial_test)
11544 fprintf (file, ", init test: ");
11545 print_simple_rtl (file, bl->initial_test);
11547 fputc ('\n', file);
11549 if (bl->final_value)
11551 fprintf (file, " Final val: ");
11552 print_simple_rtl (file, bl->final_value);
11553 fputc ('\n', file);
11556 if ((incr = biv_total_increment (bl)))
11558 fprintf (file, " Total increment: ");
11559 print_simple_rtl (file, incr);
11560 fputc ('\n', file);
11563 /* List the increments. */
11564 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
11566 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
11567 print_simple_rtl (file, v->add_val);
11568 fputc ('\n', file);
11571 /* List the givs. */
11572 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
11574 fprintf (file, " Giv%d: insn %d, benefit %d, ",
11575 i, INSN_UID (v->insn), v->benefit);
11576 if (v->giv_type == DEST_ADDR)
11577 print_simple_rtl (file, v->mem);
11578 else
11579 print_simple_rtl (file, single_set (v->insn));
11580 fputc ('\n', file);
11585 static void
11586 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
11588 if (! v || ! file)
11589 return;
11591 fprintf (file,
11592 "Biv %d: insn %d",
11593 REGNO (v->dest_reg), INSN_UID (v->insn));
11594 fprintf (file, " const ");
11595 print_simple_rtl (file, v->add_val);
11597 if (verbose && v->final_value)
11599 fputc ('\n', file);
11600 fprintf (file, " final ");
11601 print_simple_rtl (file, v->final_value);
11604 fputc ('\n', file);
11608 static void
11609 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
11611 if (! v || ! file)
11612 return;
11614 if (v->giv_type == DEST_REG)
11615 fprintf (file, "Giv %d: insn %d",
11616 REGNO (v->dest_reg), INSN_UID (v->insn));
11617 else
11618 fprintf (file, "Dest address: insn %d",
11619 INSN_UID (v->insn));
11621 fprintf (file, " src reg %d benefit %d",
11622 REGNO (v->src_reg), v->benefit);
11623 fprintf (file, " lifetime %d",
11624 v->lifetime);
11626 if (v->replaceable)
11627 fprintf (file, " replaceable");
11629 if (v->no_const_addval)
11630 fprintf (file, " ncav");
11632 if (v->ext_dependent)
11634 switch (GET_CODE (v->ext_dependent))
11636 case SIGN_EXTEND:
11637 fprintf (file, " ext se");
11638 break;
11639 case ZERO_EXTEND:
11640 fprintf (file, " ext ze");
11641 break;
11642 case TRUNCATE:
11643 fprintf (file, " ext tr");
11644 break;
11645 default:
11646 gcc_unreachable ();
11650 fputc ('\n', file);
11651 fprintf (file, " mult ");
11652 print_simple_rtl (file, v->mult_val);
11654 fputc ('\n', file);
11655 fprintf (file, " add ");
11656 print_simple_rtl (file, v->add_val);
11658 if (verbose && v->final_value)
11660 fputc ('\n', file);
11661 fprintf (file, " final ");
11662 print_simple_rtl (file, v->final_value);
11665 fputc ('\n', file);
11669 void
11670 debug_ivs (const struct loop *loop)
11672 loop_ivs_dump (loop, stderr, 1);
11676 void
11677 debug_iv_class (const struct iv_class *bl)
11679 loop_iv_class_dump (bl, stderr, 1);
11683 void
11684 debug_biv (const struct induction *v)
11686 loop_biv_dump (v, stderr, 1);
11690 void
11691 debug_giv (const struct induction *v)
11693 loop_giv_dump (v, stderr, 1);
11697 #define LOOP_BLOCK_NUM_1(INSN) \
11698 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)
11700 /* The notes do not have an assigned block, so look at the next insn. */
11701 #define LOOP_BLOCK_NUM(INSN) \
11702 ((INSN) ? (NOTE_P (INSN) \
11703 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
11704 : LOOP_BLOCK_NUM_1 (INSN)) \
11705 : -1)
11707 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
11709 static void
11710 loop_dump_aux (const struct loop *loop, FILE *file,
11711 int verbose ATTRIBUTE_UNUSED)
11713 rtx label;
11715 if (! loop || ! file || !BB_HEAD (loop->first))
11716 return;
11718 /* Print diagnostics to compare our concept of a loop with
11719 what the loop notes say. */
11720 if (! PREV_INSN (BB_HEAD (loop->first))
11721 || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
11722 || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
11723 != NOTE_INSN_LOOP_BEG)
11724 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
11725 INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
11726 if (! NEXT_INSN (BB_END (loop->last))
11727 || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
11728 || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
11729 != NOTE_INSN_LOOP_END)
11730 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
11731 INSN_UID (NEXT_INSN (BB_END (loop->last))));
11733 if (loop->start)
11735 fprintf (file,
11736 ";; start %d (%d), end %d (%d)\n",
11737 LOOP_BLOCK_NUM (loop->start),
11738 LOOP_INSN_UID (loop->start),
11739 LOOP_BLOCK_NUM (loop->end),
11740 LOOP_INSN_UID (loop->end));
11741 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
11742 LOOP_BLOCK_NUM (loop->top),
11743 LOOP_INSN_UID (loop->top),
11744 LOOP_BLOCK_NUM (loop->scan_start),
11745 LOOP_INSN_UID (loop->scan_start));
11746 fprintf (file, ";; exit_count %d", loop->exit_count);
11747 if (loop->exit_count)
11749 fputs (", labels:", file);
11750 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
11752 fprintf (file, " %d ",
11753 LOOP_INSN_UID (XEXP (label, 0)));
11756 fputs ("\n", file);
11760 /* Call this function from the debugger to dump LOOP. */
11762 void
11763 debug_loop (const struct loop *loop)
11765 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
11768 /* Call this function from the debugger to dump LOOPS. */
11770 void
11771 debug_loops (const struct loops *loops)
11773 flow_loops_dump (loops, stderr, loop_dump_aux, 1);