/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995,
   1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.

   Basic induction variables (BIVs) are pseudo registers that are set
   within a loop only by incrementing or decrementing their value.
   General induction variables (GIVs) are pseudo registers whose value
   is a linear function of a basic induction variable.  BIVs are
   recognized by `basic_induction_var'; GIVs by `general_induction_var'.

   Once induction variables are identified, strength reduction is
   applied to the general induction variables, and induction variable
   elimination is applied to the basic induction variables.

   This pass also finds cases where a register is set within the loop
   by zero-extending a narrower value, and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
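
/* For example, given a loop such as

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the counter `i' is a biv, since it is set only by incrementing it,
   while the address `a + i * sizeof (*a)' computed for `a[i]' is a giv,
   since it is a linear function of the biv `i'.  Strength reduction
   replaces the implied multiplication with a new pseudo register that
   is simply incremented by `sizeof (*a)' on each iteration.  */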
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"
#include "ggc.h"
/* Get the loop info pointer of a loop.  */
#define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux)

/* Get a pointer to the loop movables structure.  */
#define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables)

/* Get a pointer to the loop registers structure.  */
#define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs)

/* Get a pointer to the loop induction variables structure.  */
#define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs)

/* Get the luid of an insn.  Catch the error of trying to reference the LUID
   of an insn added during loop, since these don't have LUIDs.  */

#define INSN_LUID(INSN) \
  (gcc_assert (INSN_UID (INSN) < max_uid_for_loop), uid_luid[INSN_UID (INSN)])

#define REGNO_FIRST_LUID(REGNO) \
  (REGNO_FIRST_UID (REGNO) < max_uid_for_loop \
   ? uid_luid[REGNO_FIRST_UID (REGNO)] \
   : 0)
#define REGNO_LAST_LUID(REGNO) \
  (REGNO_LAST_UID (REGNO) < max_uid_for_loop \
   ? uid_luid[REGNO_LAST_UID (REGNO)] \
   : INT_MAX)
97 /* A "basic induction variable" or biv is a pseudo reg that is set
98 (within this loop) only by incrementing or decrementing it. */
99 /* A "general induction variable" or giv is a pseudo reg whose
100 value is a linear function of a biv. */
102 /* Bivs are recognized by `basic_induction_var';
103 Givs by `general_induction_var'. */
105 /* An enum for the two different types of givs, those that are used
106 as memory addresses and those that are calculated into registers. */
107 enum g_types
109 DEST_ADDR,
110 DEST_REG
/* A `struct induction' is created for every instruction that sets
   an induction variable (either a biv or a giv).  */

struct induction
{
  rtx insn;			/* The insn that sets a biv or giv */
  rtx new_reg;			/* New register, containing strength reduced
				   version of this giv.  */
  rtx src_reg;			/* Biv from which this giv is computed.
				   (If this is a biv, then this is the biv.) */
  enum g_types giv_type;	/* Indicate whether DEST_ADDR or DEST_REG */
  rtx dest_reg;			/* Destination register for insn: this is the
				   register which was the biv or giv.
				   For a biv, this equals src_reg.
				   For a DEST_ADDR type giv, this is 0.  */
  rtx *location;		/* Place in the insn where this giv occurs.
				   If GIV_TYPE is DEST_REG, this is 0.  */
				/* For a biv, this is the place where add_val
				   was found.  */
  enum machine_mode mode;	/* The mode of this biv or giv */
  rtx mem;			/* For DEST_ADDR, the memory object.  */
  rtx mult_val;			/* Multiplicative factor for src_reg.  */
  rtx add_val;			/* Additive constant for that product.  */
  int benefit;			/* Gain from eliminating this insn.  */
  rtx final_value;		/* If the giv is used outside the loop, and its
				   final value could be calculated, it is put
				   here, and the giv is made replaceable.  Set
				   the giv to this value before the loop.  */
  unsigned combined_with;	/* The number of givs this giv has been
				   combined with.  If nonzero, this giv
				   cannot combine with any other giv.  */
  unsigned replaceable : 1;	/* 1 if we can substitute the strength-reduced
				   variable for the original variable.
				   0 means they must be kept separate and the
				   new one must be copied into the old pseudo
				   reg each time the old one is set.  */
  unsigned not_replaceable : 1;	/* Used to prevent duplicating work.  This is
				   1 if we know that the giv definitely can
				   not be made replaceable, in which case we
				   don't bother checking the variable again
				   even if further info is available.
				   Both this and the above can be zero.  */
  unsigned ignore : 1;		/* 1 prohibits further processing of giv */
  unsigned always_computable : 1; /* 1 if this value is computable every
				     iteration.  */
  unsigned always_executed : 1; /* 1 if this set occurs each iteration.  */
  unsigned maybe_multiple : 1;	/* Only used for a biv and 1 if this biv
				   update may be done multiple times per
				   iteration.  */
  unsigned cant_derive : 1;	/* For giv's, 1 if this giv cannot derive
				   another giv.  This occurs in many cases
				   where a giv's lifetime spans an update to
				   a biv.  */
  unsigned maybe_dead : 1;	/* 1 if this giv might be dead.  In that case,
				   we won't use it to eliminate a biv, it
				   would probably lose.  */
  unsigned auto_inc_opt : 1;	/* 1 if this giv had its increment output next
				   to it to try to form an auto-inc address.  */
  unsigned shared : 1;
  unsigned no_const_addval : 1; /* 1 if add_val does not contain a const.  */
  int lifetime;			/* Length of life of this giv */
  rtx derive_adjustment;	/* If nonzero, is an adjustment to be
				   subtracted from add_val when this giv
				   derives another.  This occurs when the
				   giv spans a biv update by incrementation.  */
  rtx ext_dependent;		/* If nonzero, is a sign or zero extension
				   of a biv on which this giv is dependent.  */
  struct induction *next_iv;	/* For givs, links together all givs that are
				   based on the same biv.  For bivs, links
				   together all biv entries that refer to the
				   same biv register.  */
  struct induction *same;	/* For givs, if the giv has been combined with
				   another giv, this points to the base giv.
				   The base giv will have COMBINED_WITH nonzero.
				   For bivs, if the biv has the same LOCATION
				   as another biv, this points to the base
				   biv.  */
  struct induction *same_insn;	/* If there are multiple identical givs in
				   the same insn, then all but one have this
				   field set, and they all point to the giv
				   that doesn't have this field set.  */
  rtx last_use;			/* For a giv made from a biv increment, this is
				   a substitute for the lifetime information.  */
};
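
/* Conceptually, the value of a giv described by this structure is

	dest_reg = src_reg * mult_val + add_val

   i.e. a linear function of the biv SRC_REG, which is the form that
   `general_induction_var' recognizes.  */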
/* A `struct iv_class' is created for each biv.  */

struct iv_class
{
  unsigned int regno;		/* Pseudo reg which is the biv.  */
  int biv_count;		/* Number of insns setting this reg.  */
  struct induction *biv;	/* List of all insns that set this reg.  */
  int giv_count;		/* Number of DEST_REG givs computed from this
				   biv.  The resulting count is only used in
				   check_dbra_loop.  */
  struct induction *giv;	/* List of all insns that compute a giv
				   from this reg.  */
  int total_benefit;		/* Sum of BENEFITs of all those givs.  */
  rtx initial_value;		/* Value of reg at loop start.  */
  rtx initial_test;		/* Test performed on BIV before loop.  */
  rtx final_value;		/* Value of reg at loop end, if known.  */
  struct iv_class *next;	/* Links all class structures together.  */
  rtx init_insn;		/* insn which initializes biv, 0 if none.  */
  rtx init_set;			/* SET of INIT_INSN, if any.  */
  unsigned incremented : 1;	/* 1 if somewhere incremented/decremented */
  unsigned eliminable : 1;	/* 1 if plausible candidate for
				   elimination.  */
  unsigned nonneg : 1;		/* 1 if we added a REG_NONNEG note for
				   this.  */
  unsigned reversed : 1;	/* 1 if we reversed the loop that this
				   biv controls.  */
  unsigned all_reduced : 1;	/* 1 if all givs using this biv have
				   been reduced.  */
};
/* Definitions used by the basic induction variable discovery code.  */
enum iv_mode
{
  UNKNOWN_INDUCT,
  BASIC_INDUCT,
  NOT_BASIC_INDUCT,
  GENERAL_INDUCT
};
/* A `struct iv' is created for every register.  */

struct iv
{
  enum iv_mode type;
  union
  {
    struct iv_class *class;
    struct induction *info;
  } iv;
};
#define REG_IV_TYPE(ivs, n) ivs->regs[n].type
#define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info
#define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class
struct loop_ivs
{
  /* Indexed by register number, contains pointer to `struct
     iv' if register is an induction variable.  */
  struct iv *regs;

  /* Size of regs array.  */
  unsigned int n_regs;

  /* The head of a list which links together (via the next field)
     every iv class for the current loop.  */
  struct iv_class *list;
};
typedef struct loop_mem_info
{
  rtx mem;			/* The MEM itself.  */
  rtx reg;			/* Corresponding pseudo, if any.  */
  int optimize;			/* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;
struct loop_reg
{
  /* Number of times the reg is set during the loop being scanned.
     During code motion, a negative value indicates a reg that has
     been made a candidate; in particular -2 means that it is a
     candidate that we know is equal to a constant and -1 means that
     it is a candidate not known equal to a constant.  After code
     motion, regs moved have 0 (which is accurate now) while the
     failed candidates have the original number of times set.

     Therefore, at all times, == 0 indicates an invariant register;
     < 0 a conditionally invariant one.  */
  int set_in_loop;

  /* Original value of set_in_loop; same except that this value
     is not set negative for a reg whose sets have been made candidates
     and not set to 0 for a reg that is moved.  */
  int n_times_set;

  /* Contains the insn in which a register was used if it was used
     exactly once; contains const0_rtx if it was used more than once.  */
  rtx single_usage;

  /* Nonzero indicates that the register cannot be moved or strength
     reduced.  */
  char may_not_optimize;

  /* Nonzero means reg N has already been moved out of one loop.
     This reduces the desire to move it out of another.  */
  char moved_once;
};
struct loop_regs
{
  int num;			/* Number of regs used in table.  */
  int size;			/* Size of table.  */
  struct loop_reg *array;	/* Register usage info array.  */
  int multiple_uses;		/* Nonzero if a reg has multiple uses.  */
};
struct loop_movables
{
  /* Head of movable chain.  */
  struct movable *head;
  /* Last movable in chain.  */
  struct movable *last;
};
/* Information pertaining to a loop.  */

struct loop_info
{
  /* Nonzero if there is a subroutine call in the current loop.  */
  int has_call;
  /* Nonzero if there is a libcall in the current loop.  */
  int has_libcall;
  /* Nonzero if there is a non-constant call in the current loop.  */
  int has_nonconst_call;
  /* Nonzero if there is a prefetch instruction in the current loop.  */
  int has_prefetch;
  /* Nonzero if there is a volatile memory reference in the current
     loop.  */
  int has_volatile;
  /* Nonzero if there is a tablejump in the current loop.  */
  int has_tablejump;
  /* Nonzero if there are ways to leave the loop other than falling
     off the end.  */
  int has_multiple_exit_targets;
  /* Nonzero if there is an indirect jump in the current function.  */
  int has_indirect_jump;
  /* Register or constant initial loop value.  */
  rtx initial_value;
  /* Register or constant value used for comparison test.  */
  rtx comparison_value;
  /* Register or constant approximate final value.  */
  rtx final_value;
  /* Register or constant initial loop value with term common to
     final_value removed.  */
  rtx initial_equiv_value;
  /* Register or constant final loop value with term common to
     initial_value removed.  */
  rtx final_equiv_value;
  /* Register corresponding to iteration variable.  */
  rtx iteration_var;
  /* Constant loop increment.  */
  rtx increment;
  enum rtx_code comparison_code;
  /* Holds the number of loop iterations.  It is zero if the number
     could not be calculated.  Must be unsigned since the number of
     iterations can be as high as 2^wordsize - 1.  For loops with a
     wider iterator, this number will be zero if the number of loop
     iterations is too large for an unsigned integer to hold.  */
  unsigned HOST_WIDE_INT n_iterations;
  int used_count_register;
  /* The loop iterator induction variable.  */
  struct iv_class *iv;
  /* List of MEMs that are stored in this loop.  */
  rtx store_mems;
  /* Array of MEMs that are used (read or written) in this loop, but
     cannot be aliased by anything in this loop, except perhaps
     themselves.  In other words, if mems[i] is altered during
     the loop, it is altered by an expression that is rtx_equal_p to
     it.  */
  loop_mem_info *mems;
  /* The index of the next available slot in MEMS.  */
  int mems_idx;
  /* The number of elements allocated in MEMS.  */
  int mems_allocated;
  /* Nonzero if we don't know what MEMs were changed in the current
     loop.  This happens if the loop contains a call (in which case
     `has_call' will also be set) or if we store into more than
     NUM_STORES MEMs.  */
  int unknown_address_altered;
  /* The above doesn't count any readonly memory locations that are
     stored.  This does.  */
  int unknown_constant_address_altered;
  /* Count of memory write instructions discovered in the loop.  */
  int num_mem_sets;
  /* The insn where the first of these was found.  */
  rtx first_loop_store_insn;
  /* The chain of movable insns in loop.  */
  struct loop_movables movables;
  /* The registers used in the loop.  */
  struct loop_regs regs;
  /* The induction variable information in loop.  */
  struct loop_ivs ivs;
  /* Nonzero if call is in pre_header extended basic block.  */
  int pre_header_has_call;
};
/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores.  This value divided
   by 256 is the minimum percentage of memory references that are worth
   prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif
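
/* With the default value of 220 above, for instance, the density
   threshold works out to 220/256, i.e. memory references must make up
   roughly 86% of the accesses considered before prefetching is deemed
   worthwhile.  */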
/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif
#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
  ((REGNO) < FIRST_PSEUDO_REGISTER \
   ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

static int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

static struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

static int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
static unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;   /* Nonzero means it is a mode for
				   a low part that we should avoid changing when
				   clearing the rest of the reg.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1; /* Same as above, if this is necessary for the
				     first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1;	/* 1 means we copy to a new pseudo and replace
				   the original insn with a copy from that
				   pseudo, rather than deleting it.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};
static FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static rtx find_regs_nested (rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static rtx libcall_other_reg (rtx, rtx);
static void record_excess_regs (rtx, rtx, rtx *);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static HOST_WIDE_INT get_monotonic_increment (struct iv_class *);
static bool biased_biv_fits_mode_p (const struct loop *, struct iv_class *,
				    HOST_WIDE_INT, enum machine_mode,
				    unsigned HOST_WIDE_INT);
static bool biv_fits_mode_p (const struct loop *, struct iv_class *,
			     HOST_WIDE_INT, enum machine_mode, bool);
static bool extension_within_bounds_p (const struct loop *, struct iv_class *,
				       HOST_WIDE_INT, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
static int loop_invariant_p (const struct loop *, rtx);
static rtx loop_insn_hoist (const struct loop *, rtx);
static void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx,
					  rtx, basic_block, rtx);
static rtx loop_insn_emit_before (const struct loop *, basic_block,
				  rtx, rtx);
static int loop_insn_first_p (rtx, rtx);
static rtx get_condition_for_loop (const struct loop *, rtx);
static void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx);
static void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx);
static rtx extend_value_for_giv (struct induction *, rtx);
static rtx loop_insn_sink (const struct loop *, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;
/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END) \
  (INSN_UID (INSN) < max_uid_for_loop \
   && INSN_LUID (INSN) >= INSN_LUID (START) \
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (!NOTE_P (insn)
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  return i + 1;
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (NOTE_P (insn)
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  gcc_assert (get_max_uid () <= max_uid_for_loop);
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	{
	  scan_loop (loop, flags);
	  ggc_collect ();
	}
    }

  end_alias_analysis ();

  /* Clean up.  */
  for (i = 0; i < (int) loops->num; i++)
    free (loops_info[i].mems);

  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
/* Find any register references hidden inside X and add them to
   the dependency list DEPS.  This is used to look inside CLOBBER (MEM ...)
   expressions when checking whether a PARALLEL can be pulled out of a loop.  */

static rtx
find_regs_nested (rtx deps, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  if (code == REG)
    deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
  else
    {
      const char *fmt = GET_RTX_FORMAT (code);
      int i, j;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    deps = find_regs_nested (deps, XEXP (x, i));
	  else if (fmt[i] == 'E')
	    for (j = 0; j < XVECLEN (x, i); j++)
	      deps = find_regs_nested (deps, XVECEXP (x, i, j));
	}
    }
  return deps;
}
/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
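
  /* For illustration, such a bottom-entered loop looks roughly like
     this in the insn stream:

	  NOTE_INSN_LOOP_BEG
	  jump EXIT_TEST	<- the initial unconditional jump
       TOP:			<- becomes LOOP->TOP
	  ... loop body ...
       EXIT_TEST:		<- becomes LOOP->SCAN_START
	  conditional jump TOP
	  NOTE_INSN_LOOP_END  */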
1036 for (p = NEXT_INSN (loop_start);
1037 p != loop_end
1038 && !LABEL_P (p) && ! INSN_P (p)
1039 && (!NOTE_P (p)
1040 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
1041 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
1042 p = NEXT_INSN (p))
1045 loop->scan_start = p;
1047 /* If loop end is the end of the current function, then emit a
1048 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
1049 note insn. This is the position we use when sinking insns out of
1050 the loop. */
1051 if (NEXT_INSN (loop->end) != 0)
1052 loop->sink = NEXT_INSN (loop->end);
1053 else
1054 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
1056 /* Set up variables describing this loop. */
1057 prescan_loop (loop);
1058 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
1060 /* If loop has a jump before the first label,
1061 the true entry is the target of that jump.
1062 Start scan from there.
1063 But record in LOOP->TOP the place where the end-test jumps
1064 back to so we can scan that after the end of the loop. */
1065 if (JUMP_P (p)
1066 /* Loop entry must be unconditional jump (and not a RETURN) */
1067 && any_uncondjump_p (p)
1068 && JUMP_LABEL (p) != 0
1069 /* Check to see whether the jump actually
1070 jumps out of the loop (meaning it's no loop).
1071 This case can happen for things like
1072 do {..} while (0). If this label was generated previously
1073 by loop, we can't tell anything about it and have to reject
1074 the loop. */
1075 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
1077 loop->top = next_label (loop->scan_start);
1078 loop->scan_start = JUMP_LABEL (p);
1081 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
1082 as required by loop_reg_used_before_p. So skip such loops. (This
1083 test may never be true, but it's best to play it safe.)
1085 Also, skip loops where we do not start scanning at a label. This
1086 test also rejects loops starting with a JUMP_INSN that failed the
1087 test above. */
1089 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
1090 || !LABEL_P (loop->scan_start))
1092 if (loop_dump_stream)
1093 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
1094 INSN_UID (loop_start), INSN_UID (loop_end));
1095 return;
1098 /* Allocate extra space for REGs that might be created by load_mems.
1099 We allocate a little extra slop as well, in the hopes that we
1100 won't have to reallocate the regs array. */
1101 loop_regs_scan (loop, loop_info->mems_idx + 16);
1102 insn_count = count_insns_in_loop (loop);
1104 if (loop_dump_stream)
1105 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
1106 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
1108 /* Scan through the loop finding insns that are safe to move.
1109 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
1110 this reg will be considered invariant for subsequent insns.
1111 We consider whether subsequent insns use the reg
1112 in deciding whether it is worth actually moving.
1114 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
1115 and therefore it is possible that the insns we are scanning
1116 would never be executed. At such times, we must make sure
1117 that it is safe to execute the insn once instead of zero times.
1118 When MAYBE_NEVER is 0, all insns will be executed at least once
1119 so that is not a problem. */
1121 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
1122 p != NULL_RTX;
1123 p = next_insn_in_loop (loop, p))
1125 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
1126 in_libcall--;
1127 if (NONJUMP_INSN_P (p))
1129 /* Do not scan past an optimization barrier. */
1130 if (GET_CODE (PATTERN (p)) == ASM_INPUT)
1131 break;
1132 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
1133 if (temp)
1134 in_libcall++;
1135 if (! in_libcall
1136 && (set = single_set (p))
1137 && REG_P (SET_DEST (set))
1138 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
1139 && SET_DEST (set) != pic_offset_table_rtx
1140 #endif
1141 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
1143 int tem1 = 0;
1144 int tem2 = 0;
1145 int move_insn = 0;
1146 int insert_temp = 0;
1147 rtx src = SET_SRC (set);
1148 rtx dependencies = 0;
1150 /* Figure out what to use as a source of this insn. If a
1151 REG_EQUIV note is given or if a REG_EQUAL note with a
1152 constant operand is specified, use it as the source and
1153 mark that we should move this insn by calling
1154 emit_move_insn rather that duplicating the insn.
1156 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
1157 note is present. */
1158 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1159 if (temp)
1160 src = XEXP (temp, 0), move_insn = 1;
1161 else
1163 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1164 if (temp && CONSTANT_P (XEXP (temp, 0)))
1165 src = XEXP (temp, 0), move_insn = 1;
1166 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
1168 src = XEXP (temp, 0);
1169 /* A libcall block can use regs that don't appear in
1170 the equivalent expression. To move the libcall,
1171 we must move those regs too. */
1172 dependencies = libcall_other_reg (p, src);
1176 /* For parallels, add any possible uses to the dependencies, as
1177 we can't move the insn without resolving them first.
1178 MEMs inside CLOBBERs may also reference registers; these
1179 count as implicit uses. */
1180 if (GET_CODE (PATTERN (p)) == PARALLEL)
1182 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
1184 rtx x = XVECEXP (PATTERN (p), 0, i);
1185 if (GET_CODE (x) == USE)
1186 dependencies
1187 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
1188 dependencies);
1189 else if (GET_CODE (x) == CLOBBER
1190 && MEM_P (XEXP (x, 0)))
1191 dependencies = find_regs_nested (dependencies,
1192 XEXP (XEXP (x, 0), 0));
1196 if (/* The register is used in basic blocks other
1197 than the one where it is set (meaning that
1198 something after this point in the loop might
1199 depend on its value before the set). */
1200 ! reg_in_basic_block_p (p, SET_DEST (set))
1201 /* And the set is not guaranteed to be executed once
1202 the loop starts, or the value before the set is
1203 needed before the set occurs...
1205 ??? Note we have quadratic behavior here, mitigated
1206 by the fact that the previous test will often fail for
1207 large loops. Rather than re-scanning the entire loop
1208 each time for register usage, we should build tables
1209 of the register usage and use them here instead. */
1210 && (maybe_never
1211 || loop_reg_used_before_p (loop, set, p)))
1212 /* It is unsafe to move the set. However, it may be OK to
1213 move the source into a new pseudo, and substitute a
1214 reg-to-reg copy for the original insn.
1216 This code used to consider it OK to move a set of a variable
1217 which was not created by the user and not used in an exit
1218 test.
1219 That behavior is incorrect and was removed. */
1220 insert_temp = 1;
1222 /* Don't try to optimize a MODE_CC set with a constant
1223 source. It probably will be combined with a conditional
1224 jump. */
1225 if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
1226 && CONSTANT_P (src))
1228 /* Don't try to optimize a register that was made
1229 by loop-optimization for an inner loop.
1230 We don't know its life-span, so we can't compute
1231 the benefit. */
1232 else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
1234 /* Don't move the source and add a reg-to-reg copy:
1235 - with -Os (this certainly increases size),
1236 - if the mode doesn't support copy operations (obviously),
1237 - if the source is already a reg (the motion will gain nothing),
1238 - if the source is a legitimate constant (likewise). */
1239 else if (insert_temp
1240 && (optimize_size
1241 || ! can_copy_p (GET_MODE (SET_SRC (set)))
1242 || REG_P (SET_SRC (set))
1243 || (CONSTANT_P (SET_SRC (set))
1244 && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
1246 else if ((tem = loop_invariant_p (loop, src))
1247 && (dependencies == 0
1248 || (tem2
1249 = loop_invariant_p (loop, dependencies)) != 0)
1250 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
1251 || (tem1
1252 = consec_sets_invariant_p
1253 (loop, SET_DEST (set),
1254 regs->array[REGNO (SET_DEST (set))].set_in_loop,
1255 p)))
1256 /* If the insn can cause a trap (such as divide by zero),
1257 can't move it unless it's guaranteed to be executed
1258 once loop is entered. Even a function call might
1259 prevent the trap insn from being reached
1260 (since it might exit!) */
1261 && ! ((maybe_never || call_passed)
1262 && may_trap_p (src)))
1264 struct movable *m;
1265 int regno = REGNO (SET_DEST (set));
1267 /* A potential lossage is where we have a case where two insns
1268 can be combined as long as they are both in the loop, but
1269 we move one of them outside the loop. For large loops,
1270 this can lose. The most common case of this is the address
1271 of a function being called.
1273 Therefore, if this register is marked as being used
1274 exactly once if we are in a loop with calls
1275 (a "large loop"), see if we can replace the usage of
1276 this register with the source of this SET. If we can,
1277 delete this insn.
1279 Don't do this if P has a REG_RETVAL note or if we have
1280 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
1282 if (loop_info->has_call
1283 && regs->array[regno].single_usage != 0
1284 && regs->array[regno].single_usage != const0_rtx
1285 && REGNO_FIRST_UID (regno) == INSN_UID (p)
1286 && (REGNO_LAST_UID (regno)
1287 == INSN_UID (regs->array[regno].single_usage))
1288 && regs->array[regno].set_in_loop == 1
1289 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
1290 && ! side_effects_p (SET_SRC (set))
1291 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
1292 && (! SMALL_REGISTER_CLASSES
1293 || (! (REG_P (SET_SRC (set))
1294 && (REGNO (SET_SRC (set))
1295 < FIRST_PSEUDO_REGISTER))))
1296 && regno >= FIRST_PSEUDO_REGISTER
1297 /* This test is not redundant; SET_SRC (set) might be
1298 a call-clobbered register and the life of REGNO
1299 might span a call. */
1300 && ! modified_between_p (SET_SRC (set), p,
1301 regs->array[regno].single_usage)
1302 && no_labels_between_p (p,
1303 regs->array[regno].single_usage)
1304 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
1305 regs->array[regno].single_usage))
1307 /* Replace any usage in a REG_EQUAL note. Must copy
1308 the new source, so that we don't get rtx sharing
1309 between the SET_SOURCE and REG_NOTES of insn p. */
1310 REG_NOTES (regs->array[regno].single_usage)
1311 = (replace_rtx
1312 (REG_NOTES (regs->array[regno].single_usage),
1313 SET_DEST (set), copy_rtx (SET_SRC (set))));
1315 delete_insn (p);
1316 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1317 i++)
1318 regs->array[regno+i].set_in_loop = 0;
1319 continue;
1322 m = xmalloc (sizeof (struct movable));
1323 m->next = 0;
1324 m->insn = p;
1325 m->set_src = src;
1326 m->dependencies = dependencies;
1327 m->set_dest = SET_DEST (set);
1328 m->force = 0;
1329 m->consec
1330 = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
1331 m->done = 0;
1332 m->forces = 0;
1333 m->partial = 0;
1334 m->move_insn = move_insn;
1335 m->move_insn_first = 0;
1336 m->insert_temp = insert_temp;
1337 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1338 m->savemode = VOIDmode;
1339 m->regno = regno;
1340 /* Set M->cond if either loop_invariant_p
1341 or consec_sets_invariant_p returned 2
1342 (only conditionally invariant). */
1343 m->cond = ((tem | tem1 | tem2) > 1);
1344 m->global = LOOP_REG_GLOBAL_P (loop, regno);
1345 m->match = 0;
1346 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1347 m->savings = regs->array[regno].n_times_set;
1348 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
1349 m->savings += libcall_benefit (p);
1350 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1351 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
1352 /* Add M to the end of the chain MOVABLES. */
1353 loop_movables_add (movables, m);
1355 if (m->consec > 0)
1357 /* It is possible for the first instruction to have a
1358 REG_EQUAL note but a non-invariant SET_SRC, so we must
1359 remember the status of the first instruction in case
1360 the last instruction doesn't have a REG_EQUAL note. */
1361 m->move_insn_first = m->move_insn;
1363 /* Skip this insn, not checking REG_LIBCALL notes. */
1364 p = next_nonnote_insn (p);
1365 /* Skip the consecutive insns, if there are any. */
1366 p = skip_consec_insns (p, m->consec);
1367 /* Back up to the last insn of the consecutive group. */
1368 p = prev_nonnote_insn (p);
1370 /* We must now reset m->move_insn, m->is_equiv, and
1371 possibly m->set_src to correspond to the effects of
1372 all the insns. */
1373 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1374 if (temp)
1375 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1376 else
1378 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1379 if (temp && CONSTANT_P (XEXP (temp, 0)))
1380 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1381 else
1382 m->move_insn = 0;
1385 m->is_equiv
1386 = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1389 /* If this register is always set within a STRICT_LOW_PART
1390 or set to zero, then its high bytes are constant.
1391 So clear them outside the loop and within the loop
1392 just load the low bytes.
1393 We must check that the machine has an instruction to do so.
1394 Also, if the value loaded into the register
1395 depends on the same register, this cannot be done. */
1396 else if (SET_SRC (set) == const0_rtx
1397 && NONJUMP_INSN_P (NEXT_INSN (p))
1398 && (set1 = single_set (NEXT_INSN (p)))
1399 && GET_CODE (set1) == SET
1400 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
1401 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
1402 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
1403 == SET_DEST (set))
1404 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1406 int regno = REGNO (SET_DEST (set));
1407 if (regs->array[regno].set_in_loop == 2)
1409 struct movable *m;
1410 m = xmalloc (sizeof (struct movable));
1411 m->next = 0;
1412 m->insn = p;
1413 m->set_dest = SET_DEST (set);
1414 m->dependencies = 0;
1415 m->force = 0;
1416 m->consec = 0;
1417 m->done = 0;
1418 m->forces = 0;
1419 m->move_insn = 0;
1420 m->move_insn_first = 0;
1421 m->insert_temp = insert_temp;
1422 m->partial = 1;
1423 /* If the insn may not be executed on some cycles,
1424 we can't clear the whole reg; clear just high part.
1425 Not even if the reg is used only within this loop.
1426 Consider this:
1427 while (1)
1428 while (s != t) {
1429 if (foo ()) x = *s;
1430 use (x);
1432 Clearing x before the inner loop could clobber a value
1433 being saved from the last time around the outer loop.
1434 However, if the reg is not used outside this loop
1435 and all uses of the register are in the same
1436 basic block as the store, there is no problem.
1438 If this insn was made by loop, we don't know its
1439 INSN_LUID and hence must make a conservative
1440 assumption. */
1441 m->global = (INSN_UID (p) >= max_uid_for_loop
1442 || LOOP_REG_GLOBAL_P (loop, regno)
1443 || (labels_in_range_p
1444 (p, REGNO_FIRST_LUID (regno))));
1445 if (maybe_never && m->global)
1446 m->savemode = GET_MODE (SET_SRC (set1));
1447 else
1448 m->savemode = VOIDmode;
1449 m->regno = regno;
1450 m->cond = 0;
1451 m->match = 0;
1452 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1453 m->savings = 1;
1454 for (i = 0;
1455 i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1456 i++)
1457 regs->array[regno+i].set_in_loop = -1;
1458 /* Add M to the end of the chain MOVABLES. */
1459 loop_movables_add (movables, m);
1464 /* Past a call insn, we get to insns which might not be executed
1465 because the call might exit. This matters for insns that trap.
1466 Constant and pure call insns always return, so they don't count. */
1467 else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
1468 call_passed = 1;
1469 /* Past a label or a jump, we get to insns for which we
1470 can't count on whether or how many times they will be
1471 executed during each iteration. Therefore, we can
1472 only move out sets of trivial variables
1473 (those not used after the loop). */
1474 /* Similar code appears twice in strength_reduce. */
1475 else if ((LABEL_P (p) || JUMP_P (p))
1476 /* If we enter the loop in the middle, and scan around to the
1477 beginning, don't set maybe_never for that. This must be an
1478 unconditional jump, otherwise the code at the top of the
1479 loop might never be executed. Unconditional jumps are
1480 followed by a barrier then the loop_end. */
1481 && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
1482 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1483 && any_uncondjump_p (p)))
1484 maybe_never = 1;
1487 /* If one movable subsumes another, ignore that other. */
1489 ignore_some_movables (movables);
1491 /* For each movable insn, see if the reg that it loads
1492 leads when it dies right into another conditionally movable insn.
1493 If so, record that the second insn "forces" the first one,
1494 since the second can be moved only if the first is. */
1496 force_movables (movables);
1498 /* See if there are multiple movable insns that load the same value.
1499 If there are, make all but the first point at the first one
1500 through the `match' field, and add the priorities of them
1501 all together as the priority of the first. */
1503 combine_movables (movables, regs);
1505 /* Now consider each movable insn to decide whether it is worth moving.
1506 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1508 For machines with few registers this increases code size, so do not
1509 move movables when optimizing for code size on such machines.
1510 (The 18 below is the value for i386.) */
1512 if (!optimize_size
1513 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1515 move_movables (loop, movables, threshold, insn_count);
1517 /* Recalculate regs->array if move_movables has created new
1518 registers. */
1519 if (max_reg_num () > regs->num)
1521 loop_regs_scan (loop, 0);
1522 for (update_start = loop_start;
1523 PREV_INSN (update_start)
1524 && !LABEL_P (PREV_INSN (update_start));
1525 update_start = PREV_INSN (update_start))
1527 update_end = NEXT_INSN (loop_end);
1529 reg_scan_update (update_start, update_end, loop_max_reg);
1530 loop_max_reg = max_reg_num ();
1534 /* Now candidates that still are negative are those not moved.
1535 Change regs->array[I].set_in_loop to indicate that those are not actually
1536 invariant. */
1537 for (i = 0; i < regs->num; i++)
1538 if (regs->array[i].set_in_loop < 0)
1539 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1541 /* Now that we've moved some things out of the loop, we might be able to
1542 hoist even more memory references. */
1543 load_mems (loop);
1545 /* Recalculate regs->array if load_mems has created new registers. */
1546 if (max_reg_num () > regs->num)
1547 loop_regs_scan (loop, 0);
1549 for (update_start = loop_start;
1550 PREV_INSN (update_start)
1551 && !LABEL_P (PREV_INSN (update_start));
1552 update_start = PREV_INSN (update_start))
1554 update_end = NEXT_INSN (loop_end);
1556 reg_scan_update (update_start, update_end, loop_max_reg);
1557 loop_max_reg = max_reg_num ();
1559 if (flag_strength_reduce)
1561 if (update_end && LABEL_P (update_end))
1562 /* Ensure our label doesn't go away. */
1563 LABEL_NUSES (update_end)++;
1565 strength_reduce (loop, flags);
1567 reg_scan_update (update_start, update_end, loop_max_reg);
1568 loop_max_reg = max_reg_num ();
1570 if (update_end && LABEL_P (update_end)
1571 && --LABEL_NUSES (update_end) == 0)
1572 delete_related_insns (update_end);
1576 /* The movable information was kept until now because strength reduction needed it; free it. */
1577 loop_movables_free (movables);
1579 free (regs->array);
1580 regs->array = 0;
1581 regs->num = 0;
1584 /* Add elements to *OUTPUT to record all the pseudo-regs
1585 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1587 static void
1588 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1590 enum rtx_code code;
1591 const char *fmt;
1592 int i;
1594 code = GET_CODE (in_this);
1596 switch (code)
1598 case PC:
1599 case CC0:
1600 case CONST_INT:
1601 case CONST_DOUBLE:
1602 case CONST:
1603 case SYMBOL_REF:
1604 case LABEL_REF:
1605 return;
1607 case REG:
1608 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1609 && ! reg_mentioned_p (in_this, not_in_this))
1610 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1611 return;
1613 default:
1614 break;
1617 fmt = GET_RTX_FORMAT (code);
1618 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1620 int j;
1622 switch (fmt[i])
1624 case 'E':
1625 for (j = 0; j < XVECLEN (in_this, i); j++)
1626 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1627 break;
1629 case 'e':
1630 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1631 break;
1636 /* Check what regs are referred to in the libcall block ending with INSN,
1637 aside from those mentioned in the equivalent value.
1638 If there are none, return 0.
1639 If there are one or more, return an EXPR_LIST containing all of them. */
1641 static rtx
1642 libcall_other_reg (rtx insn, rtx equiv)
1644 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1645 rtx p = XEXP (note, 0);
1646 rtx output = 0;
1648 /* First, find all the regs used in the libcall block
1649 that are not mentioned as inputs to the result. */
1651 while (p != insn)
1653 if (INSN_P (p))
1654 record_excess_regs (PATTERN (p), equiv, &output);
1655 p = NEXT_INSN (p);
1658 return output;
1661 /* Return 1 if all uses of REG
1662 are between INSN and the end of the basic block. */
1664 static int
1665 reg_in_basic_block_p (rtx insn, rtx reg)
1667 int regno = REGNO (reg);
1668 rtx p;
1670 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1671 return 0;
1673 /* Search this basic block for the already recorded last use of the reg. */
1674 for (p = insn; p; p = NEXT_INSN (p))
1676 switch (GET_CODE (p))
1678 case NOTE:
1679 break;
1681 case INSN:
1682 case CALL_INSN:
1683 /* Ordinary insn: if this is the last use, we win. */
1684 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1685 return 1;
1686 break;
1688 case JUMP_INSN:
1689 /* Jump insn: if this is the last use, we win. */
1690 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1691 return 1;
1692 /* Otherwise, it's the end of the basic block, so we lose. */
1693 return 0;
1695 case CODE_LABEL:
1696 case BARRIER:
1697 /* It's the end of the basic block, so we lose. */
1698 return 0;
1700 default:
1701 break;
1705 /* The "last use" that was recorded can't be found after the first
1706 use. This can happen when the last use was deleted while
1707 processing an inner loop, this inner loop was then completely
1708 unrolled, and the outer loop is always exited after the inner loop,
1709 so that everything after the first use becomes a single basic block. */
1710 return 1;
1713 /* Compute the benefit of eliminating the insns in the block whose
1714 last insn is LAST. This may be a group of insns used to compute a
1715 value directly or may contain a library call. */
1717 static int
1718 libcall_benefit (rtx last)
1720 rtx insn;
1721 int benefit = 0;
1723 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1724 insn != last; insn = NEXT_INSN (insn))
1726 if (CALL_P (insn))
1727 benefit += 10; /* Assume at least this many insns in a library
1728 routine. */
1729 else if (NONJUMP_INSN_P (insn)
1730 && GET_CODE (PATTERN (insn)) != USE
1731 && GET_CODE (PATTERN (insn)) != CLOBBER)
1732 benefit++;
1735 return benefit;
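/* For example: a libcall block consisting of two register set-up
   insns, one CALL_INSN and a USE would score 1 + 1 + 10 + 0 = 12
   by the rules above.  (Illustrative composition only.)  */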
1738 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1740 static rtx
1741 skip_consec_insns (rtx insn, int count)
1743 for (; count > 0; count--)
1745 rtx temp;
1747 /* If first insn of libcall sequence, skip to end. */
1748 /* Do this at start of loop, since INSN is guaranteed to
1749 be an insn here. */
1750 if (!NOTE_P (insn)
1751 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1752 insn = XEXP (temp, 0);
1754 do
1755 insn = NEXT_INSN (insn);
1756 while (NOTE_P (insn));
1759 return insn;
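/* For instance, with COUNT == 2 starting at the first insn of a
   libcall sequence: the first iteration hops to the sequence's last
   insn and then steps just past it (the whole libcall counts as one
   insn), and the second iteration skips one further insn, never
   stopping on a NOTE.  (Hypothetical layout.)  */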
1762 /* Ignore any movable whose insn falls within a libcall
1763 which is part of another movable.
1764 We make use of the fact that the movable for the libcall value
1765 was made later and so appears later on the chain. */
1767 static void
1768 ignore_some_movables (struct loop_movables *movables)
1770 struct movable *m, *m1;
1772 for (m = movables->head; m; m = m->next)
1774 /* Is this a movable for the value of a libcall? */
1775 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1776 if (note)
1778 rtx insn;
1779 /* Check for earlier movables inside that range,
1780 and mark them invalid. We cannot use LUIDs here because
1781 insns created by loop.c for prior loops don't have LUIDs.
1782 Rather than reject all such insns from movables, we just
1783 explicitly check each insn in the libcall (since invariant
1784 libcalls aren't that common). */
1785 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1786 for (m1 = movables->head; m1 != m; m1 = m1->next)
1787 if (m1->insn == insn)
1788 m1->done = 1;
1793 /* For each movable insn, see if the reg that it loads
1794 has its last use in another conditionally movable insn.
1795 If so, record that the second insn "forces" the first one,
1796 since the second can be moved only if the first is. */
1798 static void
1799 force_movables (struct loop_movables *movables)
1801 struct movable *m, *m1;
1803 for (m1 = movables->head; m1; m1 = m1->next)
1804 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1805 if (!m1->partial && !m1->done)
1807 int regno = m1->regno;
1808 for (m = m1->next; m; m = m->next)
1809 /* ??? Could this be a bug? What if CSE caused the
1810 register of M1 to be used after this insn?
1811 Since CSE does not update regno_last_uid,
1812 this insn M->insn might not be where it dies.
1813 But very likely this doesn't matter; what matters is
1814 that M's reg is computed from M1's reg. */
1815 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1816 && !m->done)
1817 break;
1818 if (m != 0 && m->set_src == m1->set_dest
1819 /* If m->consec, m->set_src isn't valid. */
1820 && m->consec == 0)
1821 m = 0;
1823 /* Increase the priority of moving the first insn
1824 since it permits the second to be moved as well.
1825 Likewise for insns already forced by the first insn. */
1826 if (m != 0)
1828 struct movable *m2;
1830 m->forces = m1;
1831 for (m2 = m1; m2; m2 = m2->forces)
1833 m2->lifetime += m->lifetime;
1834 m2->savings += m->savings;
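/* A hypothetical example:

       r1 = invariant_value;          (movable M1)
       ...
       if (cond) r2 = r1 + 4;         (conditional movable M;
                                       last use of r1)

   M can be hoisted only if M1 is, so M1 -- and the chain of movables
   reached through its forces field -- is credited with M's lifetime
   and savings.  (Register numbers are illustrative.)  */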
1840 /* Find invariant expressions that are equal and can be combined into
1841 one register. */
1843 static void
1844 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1846 struct movable *m;
1847 char *matched_regs = xmalloc (regs->num);
1848 enum machine_mode mode;
1850 /* Regs that are set more than once are not allowed to match
1851 or be matched. I'm no longer sure why not. */
1852 /* Only pseudo registers are allowed to match or be matched,
1853 since move_movables does not validate the change. */
1854 /* Perhaps testing m->consec_sets would be more appropriate here? */
1856 for (m = movables->head; m; m = m->next)
1857 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1858 && m->regno >= FIRST_PSEUDO_REGISTER
1859 && !m->insert_temp
1860 && !m->partial)
1862 struct movable *m1;
1863 int regno = m->regno;
1865 memset (matched_regs, 0, regs->num);
1866 matched_regs[regno] = 1;
1868 /* We want later insns to match the first one. Don't make the first
1869 one match any later ones. So start this loop at m->next. */
1870 for (m1 = m->next; m1; m1 = m1->next)
1871 if (m != m1 && m1->match == 0
1872 && !m1->insert_temp
1873 && regs->array[m1->regno].n_times_set == 1
1874 && m1->regno >= FIRST_PSEUDO_REGISTER
1875 /* A reg used outside the loop mustn't be eliminated. */
1876 && !m1->global
1877 /* A reg used for zero-extending mustn't be eliminated. */
1878 && !m1->partial
1879 && (matched_regs[m1->regno]
1880 ||
1881 (
1882 /* Can combine regs with different modes loaded from the
1883 same constant only if the modes are the same or
1884 if both are integer modes with M wider or the same
1885 width as M1. The check for integer is redundant, but
1886 safe, since the only case of differing destination
1887 modes with equal sources is when both sources are
1888 VOIDmode, i.e., CONST_INT. */
1889 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1890 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1891 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1892 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1893 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1894 /* See if the source of M1 says it matches M. */
1895 && ((REG_P (m1->set_src)
1896 && matched_regs[REGNO (m1->set_src)])
1897 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1898 movables, regs))))
1899 && ((m->dependencies == m1->dependencies)
1900 || rtx_equal_p (m->dependencies, m1->dependencies)))
1902 m->lifetime += m1->lifetime;
1903 m->savings += m1->savings;
1904 m1->done = 1;
1905 m1->match = m;
1906 matched_regs[m1->regno] = 1;
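/* For example (hypothetical regs): if the loop contains both
   r5 = <constant> and r9 = <the same constant>, each set just once,
   the second movable gets match pointing at the first, its lifetime
   and savings are folded into the first, and only one load survives
   outside the loop.  */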
1910 /* Now combine the regs used for zero-extension.
1911 This can be done for those not marked `global'
1912 provided their lives don't overlap. */
1914 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1915 mode = GET_MODE_WIDER_MODE (mode))
1917 struct movable *m0 = 0;
1919 /* Combine all the registers for extension from mode MODE.
1920 Don't combine any that are used outside this loop. */
1921 for (m = movables->head; m; m = m->next)
1922 if (m->partial && ! m->global
1923 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1925 struct movable *m1;
1927 int first = REGNO_FIRST_LUID (m->regno);
1928 int last = REGNO_LAST_LUID (m->regno);
1930 if (m0 == 0)
1932 /* First one: don't check for overlap, just record it. */
1933 m0 = m;
1934 continue;
1937 /* Make sure they extend to the same mode.
1938 (Almost always true.) */
1939 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1940 continue;
1942 /* We already have one: check for overlap with those
1943 already combined together. */
1944 for (m1 = movables->head; m1 != m; m1 = m1->next)
1945 if (m1 == m0 || (m1->partial && m1->match == m0))
1946 if (! (REGNO_FIRST_LUID (m1->regno) > last
1947 || REGNO_LAST_LUID (m1->regno) < first))
1948 goto overlap;
1950 /* No overlap: we can combine this with the others. */
1951 m0->lifetime += m->lifetime;
1952 m0->savings += m->savings;
1953 m->done = 1;
1954 m->match = m0;
1956 overlap:
1961 /* Clean up. */
1962 free (matched_regs);
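/* The overlap test above works on luid ranges.  E.g., regs whose
   lives span luids [10,20] and [25,40] can share one cleared
   register, whereas [10,30] and [25,40] intersect and must stay
   separate.  (Numbers are illustrative.)  */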
1965 /* Returns the number of movable instructions in LOOP that were not
1966 moved outside the loop. */
1968 static int
1969 num_unmoved_movables (const struct loop *loop)
1971 int num = 0;
1972 struct movable *m;
1974 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1975 if (!m->done)
1976 ++num;
1978 return num;
1982 /* Return 1 if regs X and Y will become the same if moved. */
1984 static int
1985 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1987 unsigned int xn = REGNO (x);
1988 unsigned int yn = REGNO (y);
1989 struct movable *mx, *my;
1991 for (mx = movables->head; mx; mx = mx->next)
1992 if (mx->regno == xn)
1993 break;
1995 for (my = movables->head; my; my = my->next)
1996 if (my->regno == yn)
1997 break;
1999 return (mx && my
2000 && ((mx->match == my->match && mx->match != 0)
2001 || mx->match == my
2002 || mx == my->match));
2005 /* Return 1 if X and Y are identical-looking rtx's.
2006 This is the Lisp function EQUAL for rtx arguments.
2008 If two registers are matching movables or a movable register and an
2009 equivalent constant, consider them equal. */
2011 static int
2012 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
2013 struct loop_regs *regs)
2015 int i;
2016 int j;
2017 struct movable *m;
2018 enum rtx_code code;
2019 const char *fmt;
2021 if (x == y)
2022 return 1;
2023 if (x == 0 || y == 0)
2024 return 0;
2026 code = GET_CODE (x);
2028 /* If we have a register and a constant, they may sometimes be
2029 equal. */
2030 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
2031 && CONSTANT_P (y))
2033 for (m = movables->head; m; m = m->next)
2034 if (m->move_insn && m->regno == REGNO (x)
2035 && rtx_equal_p (m->set_src, y))
2036 return 1;
2038 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
2039 && CONSTANT_P (x))
2041 for (m = movables->head; m; m = m->next)
2042 if (m->move_insn && m->regno == REGNO (y)
2043 && rtx_equal_p (m->set_src, x))
2044 return 1;
2047 /* Otherwise, rtx's of different codes cannot be equal. */
2048 if (code != GET_CODE (y))
2049 return 0;
2051 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
2052 (REG:SI x) and (REG:HI x) are NOT equivalent. */
2054 if (GET_MODE (x) != GET_MODE (y))
2055 return 0;
2057 /* These three types of rtx's can be compared nonrecursively. */
2058 if (code == REG)
2059 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
2061 if (code == LABEL_REF)
2062 return XEXP (x, 0) == XEXP (y, 0);
2063 if (code == SYMBOL_REF)
2064 return XSTR (x, 0) == XSTR (y, 0);
2066 /* Compare the elements. If any pair of corresponding elements
2067 fails to match, return 0 for the whole thing. */
2069 fmt = GET_RTX_FORMAT (code);
2070 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2072 switch (fmt[i])
2074 case 'w':
2075 if (XWINT (x, i) != XWINT (y, i))
2076 return 0;
2077 break;
2079 case 'i':
2080 if (XINT (x, i) != XINT (y, i))
2081 return 0;
2082 break;
2084 case 'E':
2085 /* Two vectors must have the same length. */
2086 if (XVECLEN (x, i) != XVECLEN (y, i))
2087 return 0;
2089 /* And the corresponding elements must match. */
2090 for (j = 0; j < XVECLEN (x, i); j++)
2091 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2092 movables, regs) == 0)
2093 return 0;
2094 break;
2096 case 'e':
2097 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
2098 == 0)
2099 return 0;
2100 break;
2102 case 's':
2103 if (strcmp (XSTR (x, i), XSTR (y, i)))
2104 return 0;
2105 break;
2107 case 'u':
2108 /* These are just backpointers, so they don't matter. */
2109 break;
2111 case '0':
2112 break;
2114 /* It is believed that rtx's at this level will never
2115 contain anything but integers and other rtx's,
2116 except for within LABEL_REFs and SYMBOL_REFs. */
2117 default:
2118 gcc_unreachable ();
2121 return 1;
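/* For example, (plus:SI (reg 60) (const_int 4)) and
   (plus:SI (reg 72) (const_int 4)) compare equal here when regs 60
   and 72 are matching movables, although rtx_equal_p would reject
   them.  (Register numbers are illustrative.)  */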
2124 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
2125 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
2126 references is incremented once for each added note. */
2128 static void
2129 add_label_notes (rtx x, rtx insns)
2131 enum rtx_code code = GET_CODE (x);
2132 int i, j;
2133 const char *fmt;
2134 rtx insn;
2136 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2138 /* This code used to ignore labels that referred to dispatch tables to
2139 avoid flow generating (slightly) worse code.
2141 We no longer ignore such label references (see LABEL_REF handling in
2142 mark_jump_label for additional information). */
2143 for (insn = insns; insn; insn = NEXT_INSN (insn))
2144 if (reg_mentioned_p (XEXP (x, 0), insn))
2146 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
2147 REG_NOTES (insn));
2148 if (LABEL_P (XEXP (x, 0)))
2149 LABEL_NUSES (XEXP (x, 0))++;
2153 fmt = GET_RTX_FORMAT (code);
2154 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2156 if (fmt[i] == 'e')
2157 add_label_notes (XEXP (x, i), insns);
2158 else if (fmt[i] == 'E')
2159 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2160 add_label_notes (XVECEXP (x, i, j), insns);
2164 /* Scan MOVABLES, and move the insns that deserve to be moved.
2165 If two matching movables are combined, replace one reg with the
2166 other throughout. */
2168 static void
2169 move_movables (struct loop *loop, struct loop_movables *movables,
2170 int threshold, int insn_count)
2172 struct loop_regs *regs = LOOP_REGS (loop);
2173 int nregs = regs->num;
2174 rtx new_start = 0;
2175 struct movable *m;
2176 rtx p;
2177 rtx loop_start = loop->start;
2178 rtx loop_end = loop->end;
2179 /* Map of pseudo-register replacements to handle combining
2180 when we move several insns that load the same value
2181 into different pseudo-registers. */
2182 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
2183 char *already_moved = xcalloc (nregs, sizeof (char));
2185 for (m = movables->head; m; m = m->next)
2187 /* Describe this movable insn. */
2189 if (loop_dump_stream)
2191 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
2192 INSN_UID (m->insn), m->regno, m->lifetime);
2193 if (m->consec > 0)
2194 fprintf (loop_dump_stream, "consec %d, ", m->consec);
2195 if (m->cond)
2196 fprintf (loop_dump_stream, "cond ");
2197 if (m->force)
2198 fprintf (loop_dump_stream, "force ");
2199 if (m->global)
2200 fprintf (loop_dump_stream, "global ");
2201 if (m->done)
2202 fprintf (loop_dump_stream, "done ");
2203 if (m->move_insn)
2204 fprintf (loop_dump_stream, "move-insn ");
2205 if (m->match)
2206 fprintf (loop_dump_stream, "matches %d ",
2207 INSN_UID (m->match->insn));
2208 if (m->forces)
2209 fprintf (loop_dump_stream, "forces %d ",
2210 INSN_UID (m->forces->insn));
2213 /* Ignore the insn if it's already done (it matched something else).
2214 Otherwise, see if it is now safe to move. */
2216 if (!m->done
2217 && (! m->cond
2218 || (1 == loop_invariant_p (loop, m->set_src)
2219 && (m->dependencies == 0
2220 || 1 == loop_invariant_p (loop, m->dependencies))
2221 && (m->consec == 0
2222 || 1 == consec_sets_invariant_p (loop, m->set_dest,
2223 m->consec + 1,
2224 m->insn))))
2225 && (! m->forces || m->forces->done))
2227 int regno;
2228 rtx p;
2229 int savings = m->savings;
2231 /* We have an insn that is safe to move.
2232 Compute its desirability. */
2234 p = m->insn;
2235 regno = m->regno;
2237 if (loop_dump_stream)
2238 fprintf (loop_dump_stream, "savings %d ", savings);
2240 if (regs->array[regno].moved_once && loop_dump_stream)
2241 fprintf (loop_dump_stream, "halved since already moved ");
2243 /* An insn MUST be moved if we already moved something else
2244 which is safe only if this one is moved too: that is,
2245 if already_moved[REGNO] is nonzero. */
2247 /* An insn is desirable to move if the new lifetime of the
2248 register is no more than THRESHOLD times the old lifetime.
2249 If it's not desirable, it means the loop is so big
2250 that moving won't speed things up much,
2251 and it is liable to make register usage worse. */
2253 /* It is also desirable to move if it can be moved at no
2254 extra cost because something else was already moved. */
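/* A worked instance with made-up numbers: THRESHOLD == 6,
   savings == 1, lifetime == 20 and insn_count == 100 gives
   6 * 1 * 20 == 120 >= 100, so the insn is moved; had the register
   already been moved out of another loop, the bar would double to
   200 and it would not be.  */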
2256 if (already_moved[regno]
2257 || (threshold * savings * m->lifetime) >=
2258 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
2259 || (m->forces && m->forces->done
2260 && regs->array[m->forces->regno].n_times_set == 1))
2262 int count;
2263 struct movable *m1;
2264 rtx first = NULL_RTX;
2265 rtx newreg = NULL_RTX;
2267 if (m->insert_temp)
2268 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
2270 /* Now move the insns that set the reg. */
2272 if (m->partial && m->match)
2274 rtx newpat, i1;
2275 rtx r1, r2;
2276 /* Find the end of this chain of matching regs.
2277 Thus, we load each reg in the chain from that one reg.
2278 And that reg is loaded with 0 directly,
2279 since it has ->match == 0. */
2280 for (m1 = m; m1->match; m1 = m1->match);
2281 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
2282 SET_DEST (PATTERN (m1->insn)));
2283 i1 = loop_insn_hoist (loop, newpat);
2285 /* Mark the moved, invariant reg as being allowed to
2286 share a hard reg with the other matching invariant. */
2287 REG_NOTES (i1) = REG_NOTES (m->insn);
2288 r1 = SET_DEST (PATTERN (m->insn));
2289 r2 = SET_DEST (PATTERN (m1->insn));
2290 regs_may_share
2291 = gen_rtx_EXPR_LIST (VOIDmode, r1,
2292 gen_rtx_EXPR_LIST (VOIDmode, r2,
2293 regs_may_share));
2294 delete_insn (m->insn);
2296 if (new_start == 0)
2297 new_start = i1;
2299 if (loop_dump_stream)
2300 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2302 /* If we are to re-generate the item being moved with a
2303 new move insn, first delete what we have and then emit
2304 the move insn before the loop. */
2305 else if (m->move_insn)
2307 rtx i1, temp, seq;
2309 for (count = m->consec; count >= 0; count--)
2311 if (!NOTE_P (p))
2313 /* If this is the first insn of a library
2314 call sequence, something is very
2315 wrong. */
2316 gcc_assert (!find_reg_note
2317 (p, REG_LIBCALL, NULL_RTX));
2319 /* If this is the last insn of a libcall
2320 sequence, then delete every insn in the
2321 sequence except the last. The last insn
2322 is handled in the normal manner. */
2323 temp = find_reg_note (p, REG_RETVAL, NULL_RTX);
2325 if (temp)
2327 temp = XEXP (temp, 0);
2328 while (temp != p)
2329 temp = delete_insn (temp);
2333 temp = p;
2334 p = delete_insn (p);
2336 /* simplify_giv_expr expects that it can walk the insns
2337 at m->insn forwards and see this old sequence we are
2338 tossing here. delete_insn does preserve the next
2339 pointers, but when we skip over a NOTE we must fix
2340 it up. Otherwise that code walks into the non-deleted
2341 insn stream. */
2342 while (p && NOTE_P (p))
2343 p = NEXT_INSN (temp) = NEXT_INSN (p);
2345 if (m->insert_temp)
2347 /* Replace the original insn with a move from
2348 our newly created temp. */
2349 start_sequence ();
2350 emit_move_insn (m->set_dest, newreg);
2351 seq = get_insns ();
2352 end_sequence ();
2353 emit_insn_before (seq, p);
2357 start_sequence ();
2358 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2359 m->set_src);
2360 seq = get_insns ();
2361 end_sequence ();
2363 add_label_notes (m->set_src, seq);
2365 i1 = loop_insn_hoist (loop, seq);
2366 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2367 set_unique_reg_note (i1,
2368 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2369 m->set_src);
2371 if (loop_dump_stream)
2372 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2374 /* The more regs we move, the less we like moving them. */
2375 threshold -= 3;
2377 else
2379 for (count = m->consec; count >= 0; count--)
2381 rtx i1, temp;
2383 /* If first insn of libcall sequence, skip to end. */
2384 /* Do this at start of loop, since p is guaranteed to
2385 be an insn here. */
2386 if (!NOTE_P (p)
2387 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2388 p = XEXP (temp, 0);
2390 /* If last insn of libcall sequence, move all
2391 insns except the last before the loop. The last
2392 insn is handled in the normal manner. */
2393 if (!NOTE_P (p)
2394 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2396 rtx fn_address = 0;
2397 rtx fn_reg = 0;
2398 rtx fn_address_insn = 0;
2400 first = 0;
2401 for (temp = XEXP (temp, 0); temp != p;
2402 temp = NEXT_INSN (temp))
2404 rtx body;
2405 rtx n;
2406 rtx next;
2408 if (NOTE_P (temp))
2409 continue;
2411 body = PATTERN (temp);
2413 /* Find the next insn after TEMP,
2414 not counting USE or NOTE insns. */
2415 for (next = NEXT_INSN (temp); next != p;
2416 next = NEXT_INSN (next))
2417 if (! (NONJUMP_INSN_P (next)
2418 && GET_CODE (PATTERN (next)) == USE)
2419 && !NOTE_P (next))
2420 break;
2422 /* If that is the call, this may be the insn
2423 that loads the function address.
2425 Extract the function address from the insn
2426 that loads it into a register.
2427 If this insn was cse'd, we get incorrect code.
2429 So emit a new move insn that copies the
2430 function address into the register that the
2431 call insn will use. flow.c will delete any
2432 redundant stores that we have created. */
2433 if (CALL_P (next)
2434 && GET_CODE (body) == SET
2435 && REG_P (SET_DEST (body))
2436 && (n = find_reg_note (temp, REG_EQUAL,
2437 NULL_RTX)))
2439 fn_reg = SET_SRC (body);
2440 if (!REG_P (fn_reg))
2441 fn_reg = SET_DEST (body);
2442 fn_address = XEXP (n, 0);
2443 fn_address_insn = temp;
2445 /* We have the call insn.
2446 If it uses the register we suspect it might,
2447 load it with the correct address directly. */
2448 if (CALL_P (temp)
2449 && fn_address != 0
2450 && reg_referenced_p (fn_reg, body))
2451 loop_insn_emit_after (loop, 0, fn_address_insn,
2452 gen_move_insn
2453 (fn_reg, fn_address));
2455 if (CALL_P (temp))
2457 i1 = loop_call_insn_hoist (loop, body);
2458 /* Because the USAGE information potentially
2459 contains objects other than hard registers
2460 we need to copy it. */
2461 if (CALL_INSN_FUNCTION_USAGE (temp))
2462 CALL_INSN_FUNCTION_USAGE (i1)
2463 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2465 else
2466 i1 = loop_insn_hoist (loop, body);
2467 if (first == 0)
2468 first = i1;
2469 if (temp == fn_address_insn)
2470 fn_address_insn = i1;
2471 REG_NOTES (i1) = REG_NOTES (temp);
2472 REG_NOTES (temp) = NULL;
2473 delete_insn (temp);
2475 if (new_start == 0)
2476 new_start = first;
2478 if (m->savemode != VOIDmode)
2480 /* P sets REG to zero; but we should clear only
2481 the bits that are not covered by the mode
2482 m->savemode. */
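/* For example (illustrative only): for an SImode REG with
   m->savemode == QImode the mask below is (1 << 8) - 1 == 0xff, so
   the hoisted insn amounts to reg &= 0xff -- the high bits are
   zeroed once before the loop while the low byte stored inside the
   loop is left alone.  */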
2483 rtx reg = m->set_dest;
2484 rtx sequence;
2485 rtx tem;
2487 start_sequence ();
2488 tem = expand_simple_binop
2489 (GET_MODE (reg), AND, reg,
2490 GEN_INT ((((HOST_WIDE_INT) 1
2491 << GET_MODE_BITSIZE (m->savemode)))
2492 - 1),
2493 reg, 1, OPTAB_LIB_WIDEN);
2494 gcc_assert (tem);
2495 if (tem != reg)
2496 emit_move_insn (reg, tem);
2497 sequence = get_insns ();
2498 end_sequence ();
2499 i1 = loop_insn_hoist (loop, sequence);
2501 else if (CALL_P (p))
2503 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2504 /* Because the USAGE information potentially
2505 contains objects other than hard registers
2506 we need to copy it. */
2507 if (CALL_INSN_FUNCTION_USAGE (p))
2508 CALL_INSN_FUNCTION_USAGE (i1)
2509 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2511 else if (count == m->consec && m->move_insn_first)
2513 rtx seq;
2514 /* The SET_SRC might not be invariant, so we must
2515 use the REG_EQUAL note. */
2516 start_sequence ();
2517 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2518 m->set_src);
2519 seq = get_insns ();
2520 end_sequence ();
2522 add_label_notes (m->set_src, seq);
2524 i1 = loop_insn_hoist (loop, seq);
2525 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2526 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2527 : REG_EQUAL, m->set_src);
2529 else if (m->insert_temp)
2531 rtx *reg_map2 = xcalloc (REGNO (newreg),
2532 sizeof (rtx));
2533 reg_map2[m->regno] = newreg;
2535 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2536 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2537 free (reg_map2);
2539 else
2540 i1 = loop_insn_hoist (loop, PATTERN (p));
2542 if (REG_NOTES (i1) == 0)
2544 REG_NOTES (i1) = REG_NOTES (p);
2545 REG_NOTES (p) = NULL;
2547 /* If there is a REG_EQUAL note present whose value
2548 is not loop invariant, then delete it, since it
2549 may cause problems with later optimization passes.
2550 It is possible for cse to create such notes
2551 as a result of record_jump_cond. */
2553 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2554 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2555 remove_note (i1, temp);
2558 if (new_start == 0)
2559 new_start = i1;
2561 if (loop_dump_stream)
2562 fprintf (loop_dump_stream, " moved to %d",
2563 INSN_UID (i1));
2565 /* If library call, now fix the REG_NOTES that contain
2566 insn pointers, namely REG_LIBCALL on FIRST
2567 and REG_RETVAL on I1. */
2568 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2570 XEXP (temp, 0) = first;
2571 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2572 XEXP (temp, 0) = i1;
2575 temp = p;
2576 delete_insn (p);
2577 p = NEXT_INSN (p);
2579 /* simplify_giv_expr expects that it can walk the insns
2580 at m->insn forwards and see this old sequence we are
2581 tossing here. delete_insn does preserve the next
2582 pointers, but when we skip over a NOTE we must fix
2583 it up. Otherwise that code walks into the non-deleted
2584 insn stream. */
2585 while (p && NOTE_P (p))
2586 p = NEXT_INSN (temp) = NEXT_INSN (p);
2588 if (m->insert_temp)
2590 rtx seq;
2591 /* Replace the original insn with a move from
2592 our newly created temp. */
2593 start_sequence ();
2594 emit_move_insn (m->set_dest, newreg);
2595 seq = get_insns ();
2596 end_sequence ();
2597 emit_insn_before (seq, p);
2601 /* The more regs we move, the less we like moving them. */
2602 threshold -= 3;
2605 m->done = 1;
2607 if (!m->insert_temp)
2609 /* Any other movable that loads the same register
2610 MUST be moved. */
2611 already_moved[regno] = 1;
2613 /* This reg has been moved out of one loop. */
2614 regs->array[regno].moved_once = 1;
2616 /* The reg set here is now invariant. */
2617 if (! m->partial)
2619 int i;
2620 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2621 regs->array[regno+i].set_in_loop = 0;
2624 /* Change the length-of-life info for the register
2625 to say it lives at least the full length of this loop.
2626 This will help guide optimizations in outer loops. */
2628 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2629 /* This is the old insn before all the moved insns.
2630 We can't use the moved insn because it is out of range
2631 in uid_luid. Only the old insns have luids. */
2632 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2633 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2634 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2637 /* Combine with this moved insn any other matching movables. */
2639 if (! m->partial)
2640 for (m1 = movables->head; m1; m1 = m1->next)
2641 if (m1->match == m)
2643 rtx temp;
2645 /* Schedule the reg loaded by M1
2646 for replacement so that it shares the reg of M.
2647 If the modes differ (only possible in restricted
2648 circumstances), make a SUBREG.
2650 Note this assumes that the target dependent files
2651 treat REG and SUBREG equally, including within
2652 GO_IF_LEGITIMATE_ADDRESS and in all the
2653 predicates since we never verify that replacing the
2654 original register with a SUBREG results in a
2655 recognizable insn. */
2656 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2657 reg_map[m1->regno] = m->set_dest;
2658 else
2659 reg_map[m1->regno]
2660 = gen_lowpart_common (GET_MODE (m1->set_dest),
2661 m->set_dest);
2663 /* Get rid of the matching insn
2664 and prevent further processing of it. */
2665 m1->done = 1;
2667 /* If library call, delete all insns. */
2668 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2669 NULL_RTX)))
2670 delete_insn_chain (XEXP (temp, 0), m1->insn);
2671 else
2672 delete_insn (m1->insn);
2674 /* Any other movable that loads the same register
2675 MUST be moved. */
2676 already_moved[m1->regno] = 1;
2678 /* The reg merged here is now invariant,
2679 if the reg it matches is invariant. */
2680 if (! m->partial)
2682 int i;
2683 for (i = 0;
2684 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2685 i++)
2686 regs->array[m1->regno+i].set_in_loop = 0;
2690 else if (loop_dump_stream)
2691 fprintf (loop_dump_stream, "not desirable");
2693 else if (loop_dump_stream && !m->match)
2694 fprintf (loop_dump_stream, "not safe");
2696 if (loop_dump_stream)
2697 fprintf (loop_dump_stream, "\n");
2700 if (new_start == 0)
2701 new_start = loop_start;
2703 /* Go through all the instructions in the loop, making
2704 all the register substitutions scheduled in REG_MAP. */
2705 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2706 if (INSN_P (p))
2708 replace_regs (PATTERN (p), reg_map, nregs, 0);
2709 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2710 INSN_CODE (p) = -1;
2713 /* Clean up. */
2714 free (reg_map);
2715 free (already_moved);
2719 static void
2720 loop_movables_add (struct loop_movables *movables, struct movable *m)
2722 if (movables->head == 0)
2723 movables->head = m;
2724 else
2725 movables->last->next = m;
2726 movables->last = m;
2730 static void
2731 loop_movables_free (struct loop_movables *movables)
2733 struct movable *m;
2734 struct movable *m_next;
2736 for (m = movables->head; m; m = m_next)
2738 m_next = m->next;
2739 free (m);
2743 #if 0
2744 /* Scan X and replace the address of any MEM in it with ADDR.
2745 REG is the address that MEM should have before the replacement. */
2747 static void
2748 replace_call_address (rtx x, rtx reg, rtx addr)
2750 enum rtx_code code;
2751 int i;
2752 const char *fmt;
2754 if (x == 0)
2755 return;
2756 code = GET_CODE (x);
2757 switch (code)
2759 case PC:
2760 case CC0:
2761 case CONST_INT:
2762 case CONST_DOUBLE:
2763 case CONST:
2764 case SYMBOL_REF:
2765 case LABEL_REF:
2766 case REG:
2767 return;
2769 case SET:
2770 /* Short cut for very common case. */
2771 replace_call_address (XEXP (x, 1), reg, addr);
2772 return;
2774 case CALL:
2775 /* Short cut for very common case. */
2776 replace_call_address (XEXP (x, 0), reg, addr);
2777 return;
2779 case MEM:
2780 /* If this MEM uses a reg other than the one we expected,
2781 something is wrong. */
2782 gcc_assert (XEXP (x, 0) == reg);
2783 XEXP (x, 0) = addr;
2784 return;
2786 default:
2787 break;
2790 fmt = GET_RTX_FORMAT (code);
2791 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2793 if (fmt[i] == 'e')
2794 replace_call_address (XEXP (x, i), reg, addr);
2795 else if (fmt[i] == 'E')
2797 int j;
2798 for (j = 0; j < XVECLEN (x, i); j++)
2799 replace_call_address (XVECEXP (x, i, j), reg, addr);
2803 #endif
2805 /* Return the number of memory refs to addresses that vary
2806 in the rtx X. */
2808 static int
2809 count_nonfixed_reads (const struct loop *loop, rtx x)
2811 enum rtx_code code;
2812 int i;
2813 const char *fmt;
2814 int value;
2816 if (x == 0)
2817 return 0;
2819 code = GET_CODE (x);
2820 switch (code)
2822 case PC:
2823 case CC0:
2824 case CONST_INT:
2825 case CONST_DOUBLE:
2826 case CONST:
2827 case SYMBOL_REF:
2828 case LABEL_REF:
2829 case REG:
2830 return 0;
2832 case MEM:
2833 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2834 + count_nonfixed_reads (loop, XEXP (x, 0)));
2836 default:
2837 break;
2840 value = 0;
2841 fmt = GET_RTX_FORMAT (code);
2842 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2844 if (fmt[i] == 'e')
2845 value += count_nonfixed_reads (loop, XEXP (x, i));
2846 if (fmt[i] == 'E')
2848 int j;
2849 for (j = 0; j < XVECLEN (x, i); j++)
2850 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2853 return value;
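/* E.g., (mem (reg i)) where reg i varies in the loop counts as one
   nonfixed read, while a MEM whose address is loop-invariant counts
   as zero; MEMs nested in the address are visited recursively.
   (Hypothetical operands.)  */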
2856 /* Scan a loop setting the elements `loops_enclosed',
2857 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2858 `unknown_address_altered', `unknown_constant_address_altered', and
2859 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2860 list `store_mems' in LOOP. */
2862 static void
2863 prescan_loop (struct loop *loop)
2865 int level = 1;
2866 rtx insn;
2867 struct loop_info *loop_info = LOOP_INFO (loop);
2868 rtx start = loop->start;
2869 rtx end = loop->end;
2870 /* The label after END. Jumping here is just like falling off the
2871 end of the loop. We use next_nonnote_insn instead of next_label
2872 as a hedge against the (pathological) case where some actual insn
2873 might end up between the two. */
2874 rtx exit_target = next_nonnote_insn (end);
2876 loop_info->has_indirect_jump = indirect_jump_in_function;
2877 loop_info->pre_header_has_call = 0;
2878 loop_info->has_call = 0;
2879 loop_info->has_nonconst_call = 0;
2880 loop_info->has_prefetch = 0;
2881 loop_info->has_volatile = 0;
2882 loop_info->has_tablejump = 0;
2883 loop_info->has_multiple_exit_targets = 0;
2884 loop->level = 1;
2886 loop_info->unknown_address_altered = 0;
2887 loop_info->unknown_constant_address_altered = 0;
2888 loop_info->store_mems = NULL_RTX;
2889 loop_info->first_loop_store_insn = NULL_RTX;
2890 loop_info->mems_idx = 0;
2891 loop_info->num_mem_sets = 0;
2893 for (insn = start; insn && !LABEL_P (insn);
2894 insn = PREV_INSN (insn))
2896 if (CALL_P (insn))
2898 loop_info->pre_header_has_call = 1;
2899 break;
2903 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2904 insn = NEXT_INSN (insn))
2906 switch (GET_CODE (insn))
2908 case NOTE:
2909 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2911 ++level;
2912 /* Count number of loops contained in this one. */
2913 loop->level++;
2915 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2916 --level;
2917 break;
2919 case CALL_INSN:
2920 if (! CONST_OR_PURE_CALL_P (insn))
2922 loop_info->unknown_address_altered = 1;
2923 loop_info->has_nonconst_call = 1;
2925 else if (pure_call_p (insn))
2926 loop_info->has_nonconst_call = 1;
2927 loop_info->has_call = 1;
2928 if (can_throw_internal (insn))
2929 loop_info->has_multiple_exit_targets = 1;
2930 break;
2932 case JUMP_INSN:
2933 if (! loop_info->has_multiple_exit_targets)
2935 rtx set = pc_set (insn);
2937 if (set)
2939 rtx src = SET_SRC (set);
2940 rtx label1, label2;
2942 if (GET_CODE (src) == IF_THEN_ELSE)
2944 label1 = XEXP (src, 1);
2945 label2 = XEXP (src, 2);
2947 else
2949 label1 = src;
2950 label2 = NULL_RTX;
2955 if (label1 && label1 != pc_rtx)
2957 if (GET_CODE (label1) != LABEL_REF)
2959 /* Something tricky. */
2960 loop_info->has_multiple_exit_targets = 1;
2961 break;
2963 else if (XEXP (label1, 0) != exit_target
2964 && LABEL_OUTSIDE_LOOP_P (label1))
2966 /* A jump outside the current loop. */
2967 loop_info->has_multiple_exit_targets = 1;
2968 break;
2972 label1 = label2;
2973 label2 = NULL_RTX;
2975 while (label1);
2977 else
2979 /* A return, or something tricky. */
2980 loop_info->has_multiple_exit_targets = 1;
2983 /* Fall through. */
2985 case INSN:
2986 if (volatile_refs_p (PATTERN (insn)))
2987 loop_info->has_volatile = 1;
2989 if (JUMP_P (insn)
2990 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2991 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2992 loop_info->has_tablejump = 1;
2994 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2995 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2996 loop_info->first_loop_store_insn = insn;
2998 if (flag_non_call_exceptions && can_throw_internal (insn))
2999 loop_info->has_multiple_exit_targets = 1;
3000 break;
3002 default:
3003 break;
3007 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
3008 if (/* An exception thrown by a called function might land us
3009 anywhere. */
3010 ! loop_info->has_nonconst_call
3011 /* We don't want loads for MEMs moved to a location before the
3012 one at which their stack memory becomes allocated. (Note
3013 that this is not a problem for malloc, etc., since those
3014 require actual function calls.) */
3015 && ! current_function_calls_alloca
3016 /* There are ways to leave the loop other than falling off the
3017 end. */
3018 && ! loop_info->has_multiple_exit_targets)
3019 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
3020 insn = NEXT_INSN (insn))
3021 for_each_rtx (&insn, insert_loop_mem, loop_info);
3023 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
3024 that loop_invariant_p and load_mems can use true_dependence
3025 to determine what is really clobbered. */
3026 if (loop_info->unknown_address_altered)
3028 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3030 loop_info->store_mems
3031 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3033 if (loop_info->unknown_constant_address_altered)
3035 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3036 MEM_READONLY_P (mem) = 1;
3037 loop_info->store_mems
3038 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
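/* In effect, the BLKmode MEMs added above act as wildcards:
   true_dependence treats a (mem:BLK (const_int 0)) entry as
   conflicting with any address, so later invariance queries fail
   conservatively once an unknown address may have been altered.  */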
3042 /* Invalidate all loops containing LABEL. */
3044 static void
3045 invalidate_loops_containing_label (rtx label)
3047 struct loop *loop;
3048 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
3049 loop->invalid = 1;
3052 /* Scan the function looking for loops. Record the start and end of each loop.
3053 Also mark as invalid loops any loops that contain a setjmp or are branched
3054 to from outside the loop. */
3056 static void
3057 find_and_verify_loops (rtx f, struct loops *loops)
3059 rtx insn;
3060 rtx label;
3061 int num_loops;
3062 struct loop *current_loop;
3063 struct loop *next_loop;
3064 struct loop *loop;
3066 num_loops = loops->num;
3068 compute_luids (f, NULL_RTX, 0);
3070 /* If there are jumps to undefined labels,
3071 treat them as jumps out of any/all loops.
3072 This also avoids writing past end of tables when there are no loops. */
3073 uid_loop[0] = NULL;
3075 /* Find boundaries of loops, mark which loops are contained within
3076 loops, and invalidate loops that have setjmp. */
3078 num_loops = 0;
3079 current_loop = NULL;
3080 for (insn = f; insn; insn = NEXT_INSN (insn))
3082 if (NOTE_P (insn))
3083 switch (NOTE_LINE_NUMBER (insn))
3085 case NOTE_INSN_LOOP_BEG:
3086 next_loop = loops->array + num_loops;
3087 next_loop->num = num_loops;
3088 num_loops++;
3089 next_loop->start = insn;
3090 next_loop->outer = current_loop;
3091 current_loop = next_loop;
3092 break;
3094 case NOTE_INSN_LOOP_END:
3095 gcc_assert (current_loop);
3097 current_loop->end = insn;
3098 current_loop = current_loop->outer;
3099 break;
3101 default:
3102 break;
3105 if (CALL_P (insn)
3106 && find_reg_note (insn, REG_SETJMP, NULL))
3108 /* In this case, we must invalidate our current loop and any
3109 enclosing loop. */
3110 for (loop = current_loop; loop; loop = loop->outer)
3112 loop->invalid = 1;
3113 if (loop_dump_stream)
3114 fprintf (loop_dump_stream,
3115 "\nLoop at %d ignored due to setjmp.\n",
3116 INSN_UID (loop->start));
3120 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
3121 enclosing loop, but this doesn't matter. */
3122 uid_loop[INSN_UID (insn)] = current_loop;
3125 /* Any loop containing a label used in an initializer must be invalidated,
3126 because it can be jumped into from anywhere. */
3127 for (label = forced_labels; label; label = XEXP (label, 1))
3128 invalidate_loops_containing_label (XEXP (label, 0));
3130 /* Any loop containing a label used for an exception handler must be
3131 invalidated, because it can be jumped into from anywhere. */
3132 for_each_eh_label (invalidate_loops_containing_label);
3134 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
3135 loop that it is not contained within, that loop is marked invalid.
3136 If any INSN or CALL_INSN uses a label's address, then the loop containing
3137 that label is marked invalid, because it could be jumped into from
3138 anywhere.
3140 Also look for blocks of code ending in an unconditional branch that
3141 exits the loop. If such a block is surrounded by a conditional
3142 branch around the block, move the block elsewhere (see below) and
3143 invert the jump to point to the code block. This may eliminate a
3144 label in our loop and will simplify processing by both us and a
3145 possible second cse pass. */
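/* A schematic of the block-moving transformation (hypothetical
   labels and syntax):

       if (cond) jump L1        becomes      if (!cond) jump NEW
       block                             L1: ...
       jump EXIT
   L1: ...

   with "block; jump EXIT" re-emitted after a BARRIER outside the
   loop under the new label NEW.  This removes an exit jump (and
   often the label L1) from the loop body.  */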
3147 for (insn = f; insn; insn = NEXT_INSN (insn))
3148 if (INSN_P (insn))
3150 struct loop *this_loop = uid_loop[INSN_UID (insn)];
3152 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
3154 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
3155 if (note)
3156 invalidate_loops_containing_label (XEXP (note, 0));
3159 if (!JUMP_P (insn))
3160 continue;
3162 mark_loop_jump (PATTERN (insn), this_loop);
3164 /* See if this is an unconditional branch outside the loop. */
3165 if (this_loop
3166 && (GET_CODE (PATTERN (insn)) == RETURN
3167 || (any_uncondjump_p (insn)
3168 && onlyjump_p (insn)
3169 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
3170 != this_loop)))
3171 && get_max_uid () < max_uid_for_loop)
3173 rtx p;
3174 rtx our_next = next_real_insn (insn);
3175 rtx last_insn_to_move = NEXT_INSN (insn);
3176 struct loop *dest_loop;
3177 struct loop *outer_loop = NULL;
3179 /* Go backwards until we reach the start of the loop, a label,
3180 or a JUMP_INSN. */
3181 for (p = PREV_INSN (insn);
3182 !LABEL_P (p)
3183 && ! (NOTE_P (p)
3184 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3185 && !JUMP_P (p);
3186 p = PREV_INSN (p))
3189 /* Check for the case where we have a jump to an inner nested
3190 loop, and do not perform the optimization in that case. */
3192 if (JUMP_LABEL (insn))
3194 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
3195 if (dest_loop)
3197 for (outer_loop = dest_loop; outer_loop;
3198 outer_loop = outer_loop->outer)
3199 if (outer_loop == this_loop)
3200 break;
3204 /* Make sure that the target of P is within the current loop. */
3206 if (JUMP_P (p) && JUMP_LABEL (p)
3207 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
3208 outer_loop = this_loop;
3210 /* If we stopped on a JUMP_INSN to the next insn after INSN,
3211 we have a block of code to try to move.
3213 We look backward and then forward from the target of INSN
3214 to find a BARRIER at the same loop depth as the target.
3215 If we find such a BARRIER, we make a new label for the start
3216 of the block, invert the jump in P and point it to that label,
3217 and move the block of code to the spot we found. */
3219 if (! outer_loop
3220 && JUMP_P (p)
3221 && JUMP_LABEL (p) != 0
3222 /* Just ignore jumps to labels that were never emitted.
3223 These always indicate compilation errors. */
3224 && INSN_UID (JUMP_LABEL (p)) != 0
3225 && any_condjump_p (p) && onlyjump_p (p)
3226 && next_real_insn (JUMP_LABEL (p)) == our_next
3227 /* If it's not safe to move the sequence, then we
3228 mustn't try. */
3229 && insns_safe_to_move_p (p, NEXT_INSN (insn),
3230 &last_insn_to_move))
3232 rtx target
3233 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
3234 struct loop *target_loop = uid_loop[INSN_UID (target)];
3235 rtx loc, loc2;
3236 rtx tmp;
3238 /* Search for possible garbage past the conditional jumps
3239 and look for the last barrier. */
3240 for (tmp = last_insn_to_move;
3241 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
3242 if (BARRIER_P (tmp))
3243 last_insn_to_move = tmp;
3245 for (loc = target; loc; loc = PREV_INSN (loc))
3246 if (BARRIER_P (loc)
3247 /* Don't move things inside a tablejump. */
3248 && ((loc2 = next_nonnote_insn (loc)) == 0
3249 || !LABEL_P (loc2)
3250 || (loc2 = next_nonnote_insn (loc2)) == 0
3251 || !JUMP_P (loc2)
3252 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3253 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3254 && uid_loop[INSN_UID (loc)] == target_loop)
3255 break;
3257 if (loc == 0)
3258 for (loc = target; loc; loc = NEXT_INSN (loc))
3259 if (BARRIER_P (loc)
3260 /* Don't move things inside a tablejump. */
3261 && ((loc2 = next_nonnote_insn (loc)) == 0
3262 || !LABEL_P (loc2)
3263 || (loc2 = next_nonnote_insn (loc2)) == 0
3264 || !JUMP_P (loc2)
3265 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3266 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3267 && uid_loop[INSN_UID (loc)] == target_loop)
3268 break;
3270 if (loc)
3272 rtx cond_label = JUMP_LABEL (p);
3273 rtx new_label = get_label_after (p);
3275 /* Ensure our label doesn't go away. */
3276 LABEL_NUSES (cond_label)++;
3278 /* Verify that uid_loop is large enough and that
3279 we can invert P. */
3280 if (invert_jump (p, new_label, 1))
3282 rtx q, r;
3284 /* If no suitable BARRIER was found, create a suitable
3285 one before TARGET. Since TARGET is a fall through
3286 path, we'll need to insert a jump around our block
3287 and add a BARRIER before TARGET.
3289 This creates an extra unconditional jump outside
3290 the loop. However, the benefits of removing rarely
3291 executed instructions from inside the loop usually
3292 outweigh the cost of the extra unconditional jump
3293 outside the loop. */
3294 if (loc == 0)
3296 rtx temp;
3298 temp = gen_jump (JUMP_LABEL (insn));
3299 temp = emit_jump_insn_before (temp, target);
3300 JUMP_LABEL (temp) = JUMP_LABEL (insn);
3301 LABEL_NUSES (JUMP_LABEL (insn))++;
3302 loc = emit_barrier_before (target);
3305 /* Include the BARRIER after INSN and copy the
3306 block after LOC. */
3307 if (squeeze_notes (&new_label, &last_insn_to_move))
3308 abort ();
3309 reorder_insns (new_label, last_insn_to_move, loc);
3311 /* All those insns are now in TARGET_LOOP. */
3312 for (q = new_label;
3313 q != NEXT_INSN (last_insn_to_move);
3314 q = NEXT_INSN (q))
3315 uid_loop[INSN_UID (q)] = target_loop;
3317 /* The label jumped to by INSN is no longer a loop
3318 exit. Unless INSN does not have a label (e.g.,
3319 it is a RETURN insn), search loop->exit_labels
3320 to find its label_ref, and remove it. Also turn
3321 off LABEL_OUTSIDE_LOOP_P bit. */
3322 if (JUMP_LABEL (insn))
3324 for (q = 0, r = this_loop->exit_labels;
3325 r;
3326 q = r, r = LABEL_NEXTREF (r))
3327 if (XEXP (r, 0) == JUMP_LABEL (insn))
3329 LABEL_OUTSIDE_LOOP_P (r) = 0;
3330 if (q)
3331 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3332 else
3333 this_loop->exit_labels = LABEL_NEXTREF (r);
3334 break;
3337 for (loop = this_loop; loop && loop != target_loop;
3338 loop = loop->outer)
3339 loop->exit_count--;
3341 /* If we didn't find it, then something is
3342 wrong. */
3343 gcc_assert (r);
3346 /* P is now a jump outside the loop, so it must be put
3347 in loop->exit_labels, and marked as such.
3348 The easiest way to do this is to just call
3349 mark_loop_jump again for P. */
3350 mark_loop_jump (PATTERN (p), this_loop);
3352 /* If INSN now jumps to the insn after it,
3353 delete INSN. */
3354 if (JUMP_LABEL (insn) != 0
3355 && (next_real_insn (JUMP_LABEL (insn))
3356 == next_real_insn (insn)))
3357 delete_related_insns (insn);
3360 /* Continue the loop after where the conditional
3361 branch used to jump, since the only branch insn
3362 in the block (if it still remains) is an inter-loop
3363 branch and hence needs no processing. */
3364 insn = NEXT_INSN (cond_label);
3366 if (--LABEL_NUSES (cond_label) == 0)
3367 delete_related_insns (cond_label);
3369 /* This loop will be continued with NEXT_INSN (insn). */
3370 insn = PREV_INSN (insn);
3377 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
3378 loops it is contained in, mark the target loop invalid.
3380 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3382 static void
3383 mark_loop_jump (rtx x, struct loop *loop)
3385 struct loop *dest_loop;
3386 struct loop *outer_loop;
3387 int i;
3389 switch (GET_CODE (x))
3391 case PC:
3392 case USE:
3393 case CLOBBER:
3394 case REG:
3395 case MEM:
3396 case CONST_INT:
3397 case CONST_DOUBLE:
3398 case RETURN:
3399 return;
3401 case CONST:
3402 /* There could be a label reference in here. */
3403 mark_loop_jump (XEXP (x, 0), loop);
3404 return;
3406 case PLUS:
3407 case MINUS:
3408 case MULT:
3409 mark_loop_jump (XEXP (x, 0), loop);
3410 mark_loop_jump (XEXP (x, 1), loop);
3411 return;
3413 case LO_SUM:
3414 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3415 mark_loop_jump (XEXP (x, 1), loop);
3416 return;
3418 case SIGN_EXTEND:
3419 case ZERO_EXTEND:
3420 mark_loop_jump (XEXP (x, 0), loop);
3421 return;
3423 case LABEL_REF:
3424 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3426 /* Link together all labels that branch outside the loop. This
3427 is used by final_[bg]iv_value and the loop unrolling code. Also
3428 mark this LABEL_REF so we know that this branch should predict
3429 false. */
3431 /* A check to make sure the label is not in an inner nested loop,
3432 since this does not count as a loop exit. */
3433 if (dest_loop)
3435 for (outer_loop = dest_loop; outer_loop;
3436 outer_loop = outer_loop->outer)
3437 if (outer_loop == loop)
3438 break;
3440 else
3441 outer_loop = NULL;
3443 if (loop && ! outer_loop)
3445 LABEL_OUTSIDE_LOOP_P (x) = 1;
3446 LABEL_NEXTREF (x) = loop->exit_labels;
3447 loop->exit_labels = x;
3449 for (outer_loop = loop;
3450 outer_loop && outer_loop != dest_loop;
3451 outer_loop = outer_loop->outer)
3452 outer_loop->exit_count++;
3455 /* If this is inside a loop, but not in the current loop or one enclosed
3456 by it, it invalidates at least one loop. */
3458 if (! dest_loop)
3459 return;
3461 /* We must invalidate every nested loop containing the target of this
3462 label, except those that also contain the jump insn. */
3464 for (; dest_loop; dest_loop = dest_loop->outer)
3466 /* Stop when we reach a loop that also contains the jump insn. */
3467 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3468 if (dest_loop == outer_loop)
3469 return;
3471 /* If we get here, we know we need to invalidate a loop. */
3472 if (loop_dump_stream && ! dest_loop->invalid)
3473 fprintf (loop_dump_stream,
3474 "\nLoop at %d ignored due to multiple entry points.\n",
3475 INSN_UID (dest_loop->start));
3477 dest_loop->invalid = 1;
3479 return;
3481 case SET:
3482 /* If this is not setting pc, ignore. */
3483 if (SET_DEST (x) == pc_rtx)
3484 mark_loop_jump (SET_SRC (x), loop);
3485 return;
3487 case IF_THEN_ELSE:
3488 mark_loop_jump (XEXP (x, 1), loop);
3489 mark_loop_jump (XEXP (x, 2), loop);
3490 return;
3492 case PARALLEL:
3493 case ADDR_VEC:
3494 for (i = 0; i < XVECLEN (x, 0); i++)
3495 mark_loop_jump (XVECEXP (x, 0, i), loop);
3496 return;
3498 case ADDR_DIFF_VEC:
3499 for (i = 0; i < XVECLEN (x, 1); i++)
3500 mark_loop_jump (XVECEXP (x, 1, i), loop);
3501 return;
3503 default:
3504 /* Strictly speaking this is not a jump into the loop, only a possible
3505 jump out of the loop. However, we have no way to link the destination
3506 of this jump onto the list of exit labels. To be safe we mark this
3507 loop and any containing loops as invalid. */
3508 if (loop)
3510 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3512 if (loop_dump_stream && ! outer_loop->invalid)
3513 fprintf (loop_dump_stream,
3514 "\nLoop at %d ignored due to unknown exit jump.\n",
3515 INSN_UID (outer_loop->start));
3516 outer_loop->invalid = 1;
3519 return;
3523 /* Return nonzero if there is a label in the range from
3524 insn INSN to and including the insn whose luid is END.
3525 INSN must have an assigned luid (i.e., it must not have
3526 been previously created by loop.c). */
3528 static int
3529 labels_in_range_p (rtx insn, int end)
3531 while (insn && INSN_LUID (insn) <= end)
3533 if (LABEL_P (insn))
3534 return 1;
3535 insn = NEXT_INSN (insn);
3538 return 0;
3541 /* Record that a memory reference X is being set. */
3543 static void
3544 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3545 void *data ATTRIBUTE_UNUSED)
3547 struct loop_info *loop_info = data;
3549 if (x == 0 || !MEM_P (x))
3550 return;
3552 /* Count number of memory writes.
3553 This affects heuristics in strength_reduce. */
3554 loop_info->num_mem_sets++;
3556 /* BLKmode MEM means all memory is clobbered. */
3557 if (GET_MODE (x) == BLKmode)
3559 if (MEM_READONLY_P (x))
3560 loop_info->unknown_constant_address_altered = 1;
3561 else
3562 loop_info->unknown_address_altered = 1;
3564 return;
3567 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3568 loop_info->store_mems);
3571 /* X is a value modified by an INSN that references a biv inside a loop
3572 exit test (i.e., X is somehow related to the value of the biv). If X
3573 is a pseudo that is used more than once, then the biv is (effectively)
3574 used more than once. DATA is a pointer to a loop_regs structure. */
3576 static void
3577 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3579 struct loop_regs *regs = (struct loop_regs *) data;
3581 if (x == 0)
3582 return;
3584 while (GET_CODE (x) == STRICT_LOW_PART
3585 || GET_CODE (x) == SIGN_EXTRACT
3586 || GET_CODE (x) == ZERO_EXTRACT
3587 || GET_CODE (x) == SUBREG)
3588 x = XEXP (x, 0);
3590 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3591 return;
3593 /* If we do not have usage information, or if we know the register
3594 is used more than once, note that fact for check_dbra_loop. */
3595 if (REGNO (x) >= max_reg_before_loop
3596 || ! regs->array[REGNO (x)].single_usage
3597 || regs->array[REGNO (x)].single_usage == const0_rtx)
3598 regs->multiple_uses = 1;
3601 /* Return nonzero if the rtx X is invariant over the current loop.
3603 The value is 2 if we refer to something only conditionally invariant.
3605 A memory ref is invariant if it is not volatile and does not conflict
3606 with anything stored in `loop_info->store_mems'. */
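/* As an illustrative sketch (not from the original sources): in

       for (i = 0; i < n; i++)
         a[i] = *p + i;

   the pseudo holding P is invariant if it is never set inside the loop
   (set_in_loop == 0); the load *p is invariant only if no entry on
   loop_info->store_mems may alias it; and a register whose sets are
   themselves candidates for motion (set_in_loop < 0) yields the
   conditionally-invariant result 2, while I itself is not invariant
   at all. */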
3608 static int
3609 loop_invariant_p (const struct loop *loop, rtx x)
3611 struct loop_info *loop_info = LOOP_INFO (loop);
3612 struct loop_regs *regs = LOOP_REGS (loop);
3613 int i;
3614 enum rtx_code code;
3615 const char *fmt;
3616 int conditional = 0;
3617 rtx mem_list_entry;
3619 if (x == 0)
3620 return 1;
3621 code = GET_CODE (x);
3622 switch (code)
3624 case CONST_INT:
3625 case CONST_DOUBLE:
3626 case SYMBOL_REF:
3627 case CONST:
3628 return 1;
3630 case LABEL_REF:
3631 return 1;
3633 case PC:
3634 case CC0:
3635 case UNSPEC_VOLATILE:
3636 return 0;
3638 case REG:
3639 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3640 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3641 && ! current_function_has_nonlocal_goto)
3642 return 1;
3644 if (LOOP_INFO (loop)->has_call
3645 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3646 return 0;
/* Out-of-range regs can occur when we are called from unrolling.
   These registers created by the unroller are set in the loop,
   hence are never invariant.
   Other out-of-range regs can be generated by load_mems; those that
   are written to in the loop are not invariant, while those that are
   not written to are invariant. It would be easy for load_mems
   to set n_times_set correctly for these registers; however, there
   is no easy way to distinguish them from registers created by the
   unroller. */
3658 if (REGNO (x) >= (unsigned) regs->num)
3659 return 0;
3661 if (regs->array[REGNO (x)].set_in_loop < 0)
3662 return 2;
3664 return regs->array[REGNO (x)].set_in_loop == 0;
3666 case MEM:
3667 /* Volatile memory references must be rejected. Do this before
3668 checking for read-only items, so that volatile read-only items
3669 will be rejected also. */
3670 if (MEM_VOLATILE_P (x))
3671 return 0;
3673 /* See if there is any dependence between a store and this load. */
3674 mem_list_entry = loop_info->store_mems;
3675 while (mem_list_entry)
3677 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3678 x, rtx_varies_p))
3679 return 0;
3681 mem_list_entry = XEXP (mem_list_entry, 1);
3684 /* It's not invalidated by a store in memory
3685 but we must still verify the address is invariant. */
3686 break;
3688 case ASM_OPERANDS:
3689 /* Don't mess with insns declared volatile. */
3690 if (MEM_VOLATILE_P (x))
3691 return 0;
3692 break;
3694 default:
3695 break;
3698 fmt = GET_RTX_FORMAT (code);
3699 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3701 if (fmt[i] == 'e')
3703 int tem = loop_invariant_p (loop, XEXP (x, i));
3704 if (tem == 0)
3705 return 0;
3706 if (tem == 2)
3707 conditional = 1;
3709 else if (fmt[i] == 'E')
3711 int j;
3712 for (j = 0; j < XVECLEN (x, i); j++)
3714 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3715 if (tem == 0)
3716 return 0;
3717 if (tem == 2)
3718 conditional = 1;
3724 return 1 + conditional;
3727 /* Return nonzero if all the insns in the loop that set REG
3728 are INSN and the immediately following insns,
3729 and if each of those insns sets REG in an invariant way
3730 (not counting uses of REG in them).
3732 The value is 2 if some of these insns are only conditionally invariant.
3734 We assume that INSN itself is the first set of REG
3735 and that its source is invariant. */
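/* Illustrative sketch (not from the original sources): for a group

       r = inv1;          <- INSN, N_SETS == 2
       r = r + inv2;

   the second set may use R itself because set_in_loop for R is
   temporarily zeroed below, making R look invariant while the
   remaining sets are verified. */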
3737 static int
3738 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3739 rtx insn)
3741 struct loop_regs *regs = LOOP_REGS (loop);
3742 rtx p = insn;
3743 unsigned int regno = REGNO (reg);
3744 rtx temp;
3745 /* Number of sets we have to insist on finding after INSN. */
3746 int count = n_sets - 1;
3747 int old = regs->array[regno].set_in_loop;
3748 int value = 0;
3749 int this;
3751 /* If N_SETS hit the limit, we can't rely on its value. */
3752 if (n_sets == 127)
3753 return 0;
3755 regs->array[regno].set_in_loop = 0;
3757 while (count > 0)
3759 enum rtx_code code;
3760 rtx set;
3762 p = NEXT_INSN (p);
3763 code = GET_CODE (p);
3765 /* If library call, skip to end of it. */
3766 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3767 p = XEXP (temp, 0);
3769 this = 0;
3770 if (code == INSN
3771 && (set = single_set (p))
3772 && REG_P (SET_DEST (set))
3773 && REGNO (SET_DEST (set)) == regno)
3775 this = loop_invariant_p (loop, SET_SRC (set));
3776 if (this != 0)
3777 value |= this;
3778 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3780 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3781 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3782 notes are OK. */
3783 this = (CONSTANT_P (XEXP (temp, 0))
3784 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3785 && loop_invariant_p (loop, XEXP (temp, 0))));
3786 if (this != 0)
3787 value |= this;
3790 if (this != 0)
3791 count--;
3792 else if (code != NOTE)
3794 regs->array[regno].set_in_loop = old;
3795 return 0;
3799 regs->array[regno].set_in_loop = old;
/* If loop_invariant_p ever returned 2, make the result greater than 1
   so that callers treat these sets as only conditionally invariant. */
3801 return 1 + (value & 2);
3804 /* Look at all uses (not sets) of registers in X. For each, if it is
3805 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3806 a different insn, set USAGE[REGNO] to const0_rtx. */
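/* E.g., if pseudo 62 is used only in insn 17, USAGE[62] ends up as
   insn 17; a second use in insn 23 turns USAGE[62] into const0_rtx,
   which consumers such as note_set_pseudo_multiple_uses treat as
   "used more than once". (Register and insn numbers are
   illustrative.) */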
3808 static void
3809 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3811 enum rtx_code code = GET_CODE (x);
3812 const char *fmt = GET_RTX_FORMAT (code);
3813 int i, j;
3815 if (code == REG)
3816 regs->array[REGNO (x)].single_usage
3817 = (regs->array[REGNO (x)].single_usage != 0
3818 && regs->array[REGNO (x)].single_usage != insn)
3819 ? const0_rtx : insn;
3821 else if (code == SET)
3823 /* Don't count SET_DEST if it is a REG; otherwise count things
3824 in SET_DEST because if a register is partially modified, it won't
3825 show up as a potential movable so we don't care how USAGE is set
3826 for it. */
3827 if (!REG_P (SET_DEST (x)))
3828 find_single_use_in_loop (regs, insn, SET_DEST (x));
3829 find_single_use_in_loop (regs, insn, SET_SRC (x));
3831 else
3832 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3834 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3835 find_single_use_in_loop (regs, insn, XEXP (x, i));
3836 else if (fmt[i] == 'E')
3837 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3838 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3842 /* Count and record any set in X which is contained in INSN. Update
3843 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3844 in X. */
3846 static void
3847 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3849 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3850 /* Don't move a reg that has an explicit clobber.
3851 It's not worth the pain to try to do it correctly. */
3852 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3854 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3856 rtx dest = SET_DEST (x);
3857 while (GET_CODE (dest) == SUBREG
3858 || GET_CODE (dest) == ZERO_EXTRACT
3859 || GET_CODE (dest) == STRICT_LOW_PART)
3860 dest = XEXP (dest, 0);
3861 if (REG_P (dest))
3863 int i;
3864 int regno = REGNO (dest);
3865 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3867 /* If this is the first setting of this reg
3868 in current basic block, and it was set before,
3869 it must be set in two basic blocks, so it cannot
3870 be moved out of the loop. */
3871 if (regs->array[regno].set_in_loop > 0
3872 && last_set[regno] == 0)
3873 regs->array[regno+i].may_not_optimize = 1;
3874 /* If this is not first setting in current basic block,
3875 see if reg was used in between previous one and this.
3876 If so, neither one can be moved. */
3877 if (last_set[regno] != 0
3878 && reg_used_between_p (dest, last_set[regno], insn))
3879 regs->array[regno+i].may_not_optimize = 1;
3880 if (regs->array[regno+i].set_in_loop < 127)
3881 ++regs->array[regno+i].set_in_loop;
3882 last_set[regno+i] = insn;
3888 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3889 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3890 contained in insn INSN is used by any insn that precedes INSN in
3891 cyclic order starting from the loop entry point.
3893 We don't want to use INSN_LUID here because if we restrict INSN to those
3894 that have a valid INSN_LUID, it means we cannot move an invariant out
3895 from an inner loop past two loops. */
3897 static int
3898 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3900 rtx reg = SET_DEST (set);
3901 rtx p;
3903 /* Scan forward checking for register usage. If we hit INSN, we
3904 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3905 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3907 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3908 return 1;
3910 if (p == loop->end)
3911 p = loop->start;
3914 return 0;
3918 /* Information we collect about arrays that we might want to prefetch. */
3919 struct prefetch_info
3921 struct iv_class *class; /* Class this prefetch is based on. */
3922 struct induction *giv; /* GIV this prefetch is based on. */
3923 rtx base_address; /* Start prefetching from this address plus
3924 index. */
3925 HOST_WIDE_INT index;
3926 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3927 iteration. */
3928 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3929 prefetch area in one iteration. */
3930 unsigned int total_bytes; /* Total bytes loop will access in this block.
3931 This is set only for loops with known
3932 iteration counts and is 0xffffffff
3933 otherwise. */
3934 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3935 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3936 unsigned int write : 1; /* 1 for read/write prefetches. */
3939 /* Data used by check_store function. */
3940 struct check_store_data
3942 rtx mem_address;
3943 int mem_write;
3946 static void check_store (rtx, rtx, void *);
3947 static void emit_prefetch_instructions (struct loop *);
3948 static int rtx_equal_for_prefetch_p (rtx, rtx);
3950 /* Set mem_write when mem_address is found. Used as callback to
3951 note_stores. */
3952 static void
3953 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3955 struct check_store_data *d = (struct check_store_data *) data;
if (MEM_P (x) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3958 d->mem_write = 1;
/* Like rtx_equal_p, but attempts to swap commutative operands. This is
   important to get some addresses combined. Later, more sophisticated
   transformations can be added when necessary.

   ??? The same trick of swapping operands is done at several other
   places. It would be nice to develop some common way to handle this. */
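/* For instance, (plus (reg 60) (reg 61)) and (plus (reg 61) (reg 60))
   compare equal here even though they are not identical rtx's; the
   COMMUTATIVE_ARITH_P case below tries both operand orders.
   (Register numbers are illustrative.) */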
3968 static int
3969 rtx_equal_for_prefetch_p (rtx x, rtx y)
3971 int i;
3972 int j;
3973 enum rtx_code code = GET_CODE (x);
3974 const char *fmt;
3976 if (x == y)
3977 return 1;
3978 if (code != GET_CODE (y))
3979 return 0;
3981 if (COMMUTATIVE_ARITH_P (x))
3983 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3984 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3985 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3986 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3989 /* Compare the elements. If any pair of corresponding elements fails to
3990 match, return 0 for the whole thing. */
3992 fmt = GET_RTX_FORMAT (code);
3993 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3995 switch (fmt[i])
3997 case 'w':
3998 if (XWINT (x, i) != XWINT (y, i))
3999 return 0;
4000 break;
4002 case 'i':
4003 if (XINT (x, i) != XINT (y, i))
4004 return 0;
4005 break;
4007 case 'E':
4008 /* Two vectors must have the same length. */
4009 if (XVECLEN (x, i) != XVECLEN (y, i))
4010 return 0;
4012 /* And the corresponding elements must match. */
4013 for (j = 0; j < XVECLEN (x, i); j++)
4014 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
4015 XVECEXP (y, i, j)) == 0)
4016 return 0;
4017 break;
4019 case 'e':
4020 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
4021 return 0;
4022 break;
4024 case 's':
4025 if (strcmp (XSTR (x, i), XSTR (y, i)))
4026 return 0;
4027 break;
4029 case 'u':
4030 /* These are just backpointers, so they don't matter. */
4031 break;
4033 case '0':
4034 break;
4036 /* It is believed that rtx's at this level will never
4037 contain anything but integers and other rtx's,
4038 except for within LABEL_REFs and SYMBOL_REFs. */
4039 default:
4040 gcc_unreachable ();
4043 return 1;
4046 /* Remove constant addition value from the expression X (when present)
4047 and return it. */
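/* A sketch of the effect (operands are illustrative):

       *x == (plus (reg 60) (const_int 16))
          => *x becomes (reg 60), return value is 16;
       *x == (const (plus (symbol_ref "a") (const_int 8)))
          => *x becomes (symbol_ref "a"), return value is 8.

   PLUS trees are handled recursively, and a const0_rtx left behind by
   a constant operand is stripped from the result. */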
4049 static HOST_WIDE_INT
4050 remove_constant_addition (rtx *x)
4052 HOST_WIDE_INT addval = 0;
4053 rtx exp = *x;
4055 /* Avoid clobbering a shared CONST expression. */
4056 if (GET_CODE (exp) == CONST)
4058 if (GET_CODE (XEXP (exp, 0)) == PLUS
4059 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
4060 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
4062 *x = XEXP (XEXP (exp, 0), 0);
4063 return INTVAL (XEXP (XEXP (exp, 0), 1));
4065 return 0;
4068 if (GET_CODE (exp) == CONST_INT)
4070 addval = INTVAL (exp);
4071 *x = const0_rtx;
/* For a PLUS expression, recurse on both operands. */
4075 else if (GET_CODE (exp) == PLUS)
4077 addval += remove_constant_addition (&XEXP (exp, 0));
4078 addval += remove_constant_addition (&XEXP (exp, 1));
/* If either operand was constant, remove the resulting extra zero
   from the expression. */
4082 if (XEXP (exp, 0) == const0_rtx)
4083 *x = XEXP (exp, 1);
4084 else if (XEXP (exp, 1) == const0_rtx)
4085 *x = XEXP (exp, 0);
4088 return addval;
4091 /* Attempt to identify accesses to arrays that are most likely to cause cache
4092 misses, and emit prefetch instructions a few prefetch blocks forward.
4094 To detect the arrays we use the GIV information that was collected by the
4095 strength reduction pass.
4097 The prefetch instructions are generated after the GIV information is done
4098 and before the strength reduction process. The new GIVs are injected into
4099 the strength reduction tables, so the prefetch addresses are optimized as
4100 well.
4102 GIVs are split into base address, stride, and constant addition values.
GIVs with the same base address and stride, and with close addition
values, are combined into a single prefetch. Writes to GIVs are also
detected, so that prefetch-for-write instructions can be used for the
blocks we write to, on machines that support write prefetches.
4108 Several heuristics are used to determine when to prefetch. They are
4109 controlled by defined symbols that can be overridden for each target. */
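/* As a rough sketch (illustrative, not from the original sources): in

       for (i = 0; i < n; i++)
         sum += a[i];

   the DEST_ADDR giv for a[i] decomposes into base address &a[0],
   a stride of sizeof (a[0]) bytes per iteration, and a constant
   index of 0; the heuristics below then decide whether to prefetch
   inside the loop, before it, both, or not at all. */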
4111 static void
4112 emit_prefetch_instructions (struct loop *loop)
4114 int num_prefetches = 0;
4115 int num_real_prefetches = 0;
4116 int num_real_write_prefetches = 0;
4117 int num_prefetches_before = 0;
4118 int num_write_prefetches_before = 0;
4119 int ahead = 0;
4120 int i;
4121 struct iv_class *bl;
4122 struct induction *iv;
4123 struct prefetch_info info[MAX_PREFETCHES];
4124 struct loop_ivs *ivs = LOOP_IVS (loop);
4126 if (!HAVE_prefetch || PREFETCH_BLOCK == 0)
4127 return;
/* Consider only loops without calls. When a call is made, the loop
   is probably slow enough to read the memory without prefetching. */
4131 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
4133 if (loop_dump_stream)
4134 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
4136 return;
4139 /* Don't prefetch in loops known to have few iterations. */
4140 if (PREFETCH_NO_LOW_LOOPCNT
4141 && LOOP_INFO (loop)->n_iterations
4142 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
4144 if (loop_dump_stream)
4145 fprintf (loop_dump_stream,
4146 "Prefetch: ignoring loop: not enough iterations.\n");
4147 return;
4150 /* Search all induction variables and pick those interesting for the prefetch
4151 machinery. */
4152 for (bl = ivs->list; bl; bl = bl->next)
4154 struct induction *biv = bl->biv, *biv1;
4155 int basestride = 0;
4157 biv1 = biv;
4159 /* Expect all BIVs to be executed in each iteration. This makes our
4160 analysis more conservative. */
4161 while (biv1)
4163 /* Discard non-constant additions that we can't handle well yet, and
4164 BIVs that are executed multiple times; such BIVs ought to be
4165 handled in the nested loop. We accept not_every_iteration BIVs,
4166 since these only result in larger strides and make our
4167 heuristics more conservative. */
4168 if (GET_CODE (biv->add_val) != CONST_INT)
4170 if (loop_dump_stream)
4172 fprintf (loop_dump_stream,
4173 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
4174 REGNO (biv->src_reg), INSN_UID (biv->insn));
4175 print_rtl (loop_dump_stream, biv->add_val);
4176 fprintf (loop_dump_stream, "\n");
4178 break;
4181 if (biv->maybe_multiple)
4183 if (loop_dump_stream)
4185 fprintf (loop_dump_stream,
4186 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
4187 REGNO (biv->src_reg), INSN_UID (biv->insn));
4188 print_rtl (loop_dump_stream, biv->add_val);
4189 fprintf (loop_dump_stream, "\n");
4191 break;
4194 basestride += INTVAL (biv1->add_val);
4195 biv1 = biv1->next_iv;
4198 if (biv1 || !basestride)
4199 continue;
4201 for (iv = bl->giv; iv; iv = iv->next_iv)
4203 rtx address;
4204 rtx temp;
4205 HOST_WIDE_INT index = 0;
4206 int add = 1;
4207 HOST_WIDE_INT stride = 0;
4208 int stride_sign = 1;
4209 struct check_store_data d;
4210 const char *ignore_reason = NULL;
4211 int size = GET_MODE_SIZE (GET_MODE (iv));
4213 /* See whether an induction variable is interesting to us and if
4214 not, report the reason. */
4215 if (iv->giv_type != DEST_ADDR)
4216 ignore_reason = "giv is not a destination address";
4218 /* We are interested only in constant stride memory references
4219 in order to be able to compute density easily. */
4220 else if (GET_CODE (iv->mult_val) != CONST_INT)
4221 ignore_reason = "stride is not constant";
4223 else
4225 stride = INTVAL (iv->mult_val) * basestride;
4226 if (stride < 0)
4228 stride = -stride;
4229 stride_sign = -1;
4232 /* On some targets, reversed order prefetches are not
4233 worthwhile. */
4234 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
4235 ignore_reason = "reversed order stride";
4237 /* Prefetch of accesses with an extreme stride might not be
4238 worthwhile, either. */
4239 else if (PREFETCH_NO_EXTREME_STRIDE
4240 && stride > PREFETCH_EXTREME_STRIDE)
4241 ignore_reason = "extreme stride";
4243 /* Ignore GIVs with varying add values; we can't predict the
4244 value for the next iteration. */
4245 else if (!loop_invariant_p (loop, iv->add_val))
4246 ignore_reason = "giv has varying add value";
4248 /* Ignore GIVs in the nested loops; they ought to have been
4249 handled already. */
4250 else if (iv->maybe_multiple)
4251 ignore_reason = "giv is in nested loop";
4254 if (ignore_reason != NULL)
4256 if (loop_dump_stream)
4257 fprintf (loop_dump_stream,
4258 "Prefetch: ignoring giv at %d: %s.\n",
4259 INSN_UID (iv->insn), ignore_reason);
4260 continue;
4263 /* Determine the pointer to the basic array we are examining. It is
4264 the sum of the BIV's initial value and the GIV's add_val. */
4265 address = copy_rtx (iv->add_val);
4266 temp = copy_rtx (bl->initial_value);
4268 address = simplify_gen_binary (PLUS, Pmode, temp, address);
4269 index = remove_constant_addition (&address);
4271 d.mem_write = 0;
4272 d.mem_address = *iv->location;
/* When the GIV is not always executed, we might be better off not
   dirtying the cache pages. */
4276 if (PREFETCH_CONDITIONAL || iv->always_executed)
4277 note_stores (PATTERN (iv->insn), check_store, &d);
4278 else
4280 if (loop_dump_stream)
4281 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
4282 INSN_UID (iv->insn), "in conditional code.");
4283 continue;
4286 /* Attempt to find another prefetch to the same array and see if we
4287 can merge this one. */
4288 for (i = 0; i < num_prefetches; i++)
4289 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
4290 && stride == info[i].stride)
/* If both access the same array (the same location, just with a
   small difference in constant indexes), merge the prefetches.
   Just do the later one; the earlier access will then be
   prefetched from the previous iteration. The artificial
   threshold should not be too small, but also not bigger than
   the small portion of memory usually traversed by a single
   loop. */
4299 if (index >= info[i].index
4300 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4302 info[i].write |= d.mem_write;
4303 info[i].bytes_accessed += size;
4304 info[i].index = index;
4305 info[i].giv = iv;
4306 info[i].class = bl;
info[i].base_address = address;
4308 add = 0;
4309 break;
4312 if (index < info[i].index
4313 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4315 info[i].write |= d.mem_write;
4316 info[i].bytes_accessed += size;
4317 add = 0;
4318 break;
4322 /* Merging failed. */
4323 if (add)
4325 info[num_prefetches].giv = iv;
4326 info[num_prefetches].class = bl;
4327 info[num_prefetches].index = index;
4328 info[num_prefetches].stride = stride;
4329 info[num_prefetches].base_address = address;
4330 info[num_prefetches].write = d.mem_write;
4331 info[num_prefetches].bytes_accessed = size;
4332 num_prefetches++;
4333 if (num_prefetches >= MAX_PREFETCHES)
4335 if (loop_dump_stream)
4336 fprintf (loop_dump_stream,
4337 "Maximal number of prefetches exceeded.\n");
4338 return;
4344 for (i = 0; i < num_prefetches; i++)
4346 int density;
4348 /* Attempt to calculate the total number of bytes fetched by all
4349 iterations of the loop. Avoid overflow. */
4350 if (LOOP_INFO (loop)->n_iterations
4351 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4352 >= LOOP_INFO (loop)->n_iterations))
4353 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4354 else
4355 info[i].total_bytes = 0xffffffff;
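/* Density is the percentage of each stride that is actually
   accessed; e.g. touching 4 bytes out of a 16-byte stride gives a
   density of 25% (illustrative numbers). */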
4357 density = info[i].bytes_accessed * 100 / info[i].stride;
4359 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4360 if (PREFETCH_ONLY_DENSE_MEM)
4361 if (density * 256 > PREFETCH_DENSE_MEM * 100
4362 && (info[i].total_bytes / PREFETCH_BLOCK
4363 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4365 info[i].prefetch_before_loop = 1;
4366 info[i].prefetch_in_loop
4367 = (info[i].total_bytes / PREFETCH_BLOCK
4368 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4370 else
4372 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4373 if (loop_dump_stream)
4374 fprintf (loop_dump_stream,
4375 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4376 INSN_UID (info[i].giv->insn), density);
4378 else
4379 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4381 /* Find how many prefetch instructions we'll use within the loop. */
4382 if (info[i].prefetch_in_loop != 0)
4384 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4385 / PREFETCH_BLOCK);
4386 num_real_prefetches += info[i].prefetch_in_loop;
4387 if (info[i].write)
4388 num_real_write_prefetches += info[i].prefetch_in_loop;
4392 /* Determine how many iterations ahead to prefetch within the loop, based
4393 on how many prefetches we currently expect to do within the loop. */
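/* E.g., with SIMULTANEOUS_PREFETCHES == 3, four prefetch insns in
   the loop body give AHEAD == 3 / 4 == 0, and in-loop prefetching
   is abandoned; a single prefetch insn would give AHEAD == 3.
   (Illustrative numbers; the real values are target-dependent.) */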
4394 if (num_real_prefetches != 0)
4396 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4398 if (loop_dump_stream)
4399 fprintf (loop_dump_stream,
4400 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4401 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4402 num_real_prefetches = 0, num_real_write_prefetches = 0;
4405 /* We'll also use AHEAD to determine how many prefetch instructions to
4406 emit before a loop, so don't leave it zero. */
4407 if (ahead == 0)
4408 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4410 for (i = 0; i < num_prefetches; i++)
4412 /* Update if we've decided not to prefetch anything within the loop. */
4413 if (num_real_prefetches == 0)
4414 info[i].prefetch_in_loop = 0;
4416 /* Find how many prefetch instructions we'll use before the loop. */
4417 if (info[i].prefetch_before_loop != 0)
4419 int n = info[i].total_bytes / PREFETCH_BLOCK;
4420 if (n > ahead)
4421 n = ahead;
4422 info[i].prefetch_before_loop = n;
4423 num_prefetches_before += n;
4424 if (info[i].write)
4425 num_write_prefetches_before += n;
4428 if (loop_dump_stream)
4430 if (info[i].prefetch_in_loop == 0
4431 && info[i].prefetch_before_loop == 0)
4432 continue;
4433 fprintf (loop_dump_stream, "Prefetch insn: %d",
4434 INSN_UID (info[i].giv->insn));
4435 fprintf (loop_dump_stream,
4436 "; in loop: %d; before: %d; %s\n",
4437 info[i].prefetch_in_loop,
4438 info[i].prefetch_before_loop,
4439 info[i].write ? "read/write" : "read only");
4440 fprintf (loop_dump_stream,
4441 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4442 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4443 info[i].bytes_accessed, info[i].total_bytes);
4444 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4445 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4446 info[i].index, info[i].stride);
4447 print_rtl (loop_dump_stream, info[i].base_address);
4448 fprintf (loop_dump_stream, "\n");
4452 if (num_real_prefetches + num_prefetches_before > 0)
4454 /* Record that this loop uses prefetch instructions. */
4455 LOOP_INFO (loop)->has_prefetch = 1;
4457 if (loop_dump_stream)
4459 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4460 num_real_prefetches, num_real_write_prefetches);
4461 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4462 num_prefetches_before, num_write_prefetches_before);
4466 for (i = 0; i < num_prefetches; i++)
4468 int y;
4470 for (y = 0; y < info[i].prefetch_in_loop; y++)
4472 rtx loc = copy_rtx (*info[i].giv->location);
4473 rtx insn;
4474 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4475 rtx before_insn = info[i].giv->insn;
4476 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4477 rtx seq;
4479 /* We can save some effort by offsetting the address on
4480 architectures with offsettable memory references. */
4481 if (offsettable_address_p (0, VOIDmode, loc))
4482 loc = plus_constant (loc, bytes_ahead);
4483 else
4485 rtx reg = gen_reg_rtx (Pmode);
4486 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4487 GEN_INT (bytes_ahead), reg,
4488 0, before_insn);
4489 loc = reg;
4492 start_sequence ();
4493 /* Make sure the address operand is valid for prefetch. */
4494 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4495 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4496 loc = force_reg (Pmode, loc);
4497 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4498 GEN_INT (3)));
4499 seq = get_insns ();
4500 end_sequence ();
4501 emit_insn_before (seq, before_insn);
4503 /* Check all insns emitted and record the new GIV
4504 information. */
4505 insn = NEXT_INSN (prev_insn);
4506 while (insn != before_insn)
4508 insn = check_insn_for_givs (loop, insn,
4509 info[i].giv->always_executed,
4510 info[i].giv->maybe_multiple);
4511 insn = NEXT_INSN (insn);
4515 if (PREFETCH_BEFORE_LOOP)
4517 /* Emit insns before the loop to fetch the first cache lines or,
4518 if we're not prefetching within the loop, everything we expect
4519 to need. */
4520 for (y = 0; y < info[i].prefetch_before_loop; y++)
4522 rtx reg = gen_reg_rtx (Pmode);
4523 rtx loop_start = loop->start;
4524 rtx init_val = info[i].class->initial_value;
4525 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4526 info[i].giv->add_val,
4527 GEN_INT (y * PREFETCH_BLOCK));
/* Functions called by loop_iv_add_mult_emit_before expect a
   non-constant INIT_VAL to have the same mode as REG, which
   in this case we know to be Pmode. */
4532 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4534 rtx seq;
4536 start_sequence ();
4537 init_val = convert_to_mode (Pmode, init_val, 0);
4538 seq = get_insns ();
4539 end_sequence ();
4540 loop_insn_emit_before (loop, 0, loop_start, seq);
4542 loop_iv_add_mult_emit_before (loop, init_val,
4543 info[i].giv->mult_val,
4544 add_val, reg, 0, loop_start);
4545 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4546 GEN_INT (3)),
4547 loop_start);
4552 return;
4555 /* Communication with routines called via `note_stores'. */
4557 static rtx note_insn;
4559 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4561 static rtx addr_placeholder;
4563 /* ??? Unfinished optimizations, and possible future optimizations,
4564 for the strength reduction code. */
4566 /* ??? The interaction of biv elimination, and recognition of 'constant'
4567 bivs, may cause problems. */
4569 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4570 performance problems.
4572 Perhaps don't eliminate things that can be combined with an addressing
4573 mode. Find all givs that have the same biv, mult_val, and add_val;
4574 then for each giv, check to see if its only use dies in a following
4575 memory address. If so, generate a new memory address and check to see
4576 if it is valid. If it is valid, then store the modified memory address,
4577 otherwise, mark the giv as not done so that it will get its own iv. */
4579 /* ??? Could try to optimize branches when it is known that a biv is always
4580 positive. */
/* ??? When replacing a biv in a compare insn, we should replace it
   with the closest giv so that an optimized branch can still be
   recognized by the combiner, e.g. the VAX acb insn. */
4586 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4587 was rerun in loop_optimize whenever a register was added or moved.
4588 Also, some of the optimizations could be a little less conservative. */
4590 /* Searches the insns between INSN and LOOP->END. Returns 1 if there
4591 is a backward branch in that range that branches to somewhere between
4592 LOOP->START and INSN. Returns 0 otherwise. */
/* ??? This is a quadratic algorithm. It could be rewritten to be
   linear. In practice this is not a problem, because this function
   is seldom called and uses a negligible amount of CPU time on
   average. */
4598 static int
4599 back_branch_in_range_p (const struct loop *loop, rtx insn)
4601 rtx p, q, target_insn;
4602 rtx loop_start = loop->start;
4603 rtx loop_end = loop->end;
4604 rtx orig_loop_end = loop->end;
4606 /* Stop before we get to the backward branch at the end of the loop. */
4607 loop_end = prev_nonnote_insn (loop_end);
4608 if (BARRIER_P (loop_end))
4609 loop_end = PREV_INSN (loop_end);
/* In case INSN has been deleted, search forward for the first
   non-deleted insn following it. */
4613 while (INSN_DELETED_P (insn))
4614 insn = NEXT_INSN (insn);
4616 /* Check for the case where insn is the last insn in the loop. Deal
4617 with the case where INSN was a deleted loop test insn, in which case
4618 it will now be the NOTE_LOOP_END. */
4619 if (insn == loop_end || insn == orig_loop_end)
4620 return 0;
4622 for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
4624 if (JUMP_P (p))
4626 target_insn = JUMP_LABEL (p);
4628 /* Search from loop_start to insn, to see if one of them is
4629 the target_insn. We can't use INSN_LUID comparisons here,
4630 since insn may not have an LUID entry. */
4631 for (q = loop_start; q != insn; q = NEXT_INSN (q))
4632 if (q == target_insn)
4633 return 1;
4637 return 0;
/* Scan the loop body and call FNCALL for each insn. In addition to
   the LOOP and INSN parameters, pass MAYBE_MULTIPLE and
   NOT_EVERY_ITERATION to the callback.

   NOT_EVERY_ITERATION is 1 if the current insn is not known to be
   executed at least once for every loop iteration except for the
   last one.

   MAYBE_MULTIPLE is 1 if the current insn may be executed more than
   once for every loop iteration. */
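/* A sketch of the flags (illustrative): in a body such as

       if (cond) goto skip;
       ...insns...
     skip:

   the insns that can be jumped over are passed to FNCALL with
   NOT_EVERY_ITERATION == 1, while insns following a CODE_LABEL that
   some later jump can branch back behind are passed with
   MAYBE_MULTIPLE == 1. */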
4650 typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int);
4651 static void
4652 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4654 int not_every_iteration = 0;
4655 int maybe_multiple = 0;
4656 int past_loop_latch = 0;
4657 bool exit_test_is_entry = false;
4658 rtx p;
/* If loop->scan_start points to the loop exit test, the loop body
   cannot be counted on to run on every iteration, and we have to be
   wary of subversive use of gotos inside expression statements. */
4664 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4666 exit_test_is_entry = true;
4667 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4670 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4671 for (p = next_insn_in_loop (loop, loop->scan_start);
4672 p != NULL_RTX;
4673 p = next_insn_in_loop (loop, p))
4675 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4677 /* Past CODE_LABEL, we get to insns that may be executed multiple
4678 times. The only way we can be sure that they can't is if every
4679 jump insn between here and the end of the loop either
4680 returns, exits the loop, is a jump to a location that is still
4681 behind the label, or is a jump to the loop start. */
4683 if (LABEL_P (p))
4685 rtx insn = p;
4687 maybe_multiple = 0;
4689 while (1)
4691 insn = NEXT_INSN (insn);
4692 if (insn == loop->scan_start)
4693 break;
4694 if (insn == loop->end)
4696 if (loop->top != 0)
4697 insn = loop->top;
4698 else
4699 break;
4700 if (insn == loop->scan_start)
4701 break;
4704 if (JUMP_P (insn)
4705 && GET_CODE (PATTERN (insn)) != RETURN
4706 && (!any_condjump_p (insn)
4707 || (JUMP_LABEL (insn) != 0
4708 && JUMP_LABEL (insn) != loop->scan_start
4709 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4711 maybe_multiple = 1;
4712 break;
4717 /* Past a jump, we get to insns for which we can't count
4718 on whether they will be executed during each iteration. */
4719 /* This code appears twice in strength_reduce. There is also similar
4720 code in scan_loop. */
4721 if (JUMP_P (p)
4722 /* If we enter the loop in the middle, and scan around to the
4723 beginning, don't set not_every_iteration for that.
4724 This can be any kind of jump, since we want to know if insns
4725 will be executed if the loop is executed. */
4726 && (exit_test_is_entry
4727 || !(JUMP_LABEL (p) == loop->top
4728 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4729 && any_uncondjump_p (p))
4730 || (NEXT_INSN (p) == loop->end
4731 && any_condjump_p (p))))))
4733 rtx label = 0;
/* If this is a jump outside the loop, then it also doesn't
   matter. Check to see if the target of this branch is on the
   loop->exit_labels list. */
4739 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4740 if (XEXP (label, 0) == JUMP_LABEL (p))
4741 break;
4743 if (!label)
4744 not_every_iteration = 1;
/* Note if we pass a loop latch. If we do, then we cannot clear
   NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
   a loop, since a jump before the last CODE_LABEL may have started
   a new loop iteration.

   Note that LOOP_TOP is only set for rotated loops and we need
   this check for all loops, so compare against the CODE_LABEL
   which immediately follows LOOP_START. */
4755 if (JUMP_P (p)
4756 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4757 past_loop_latch = 1;
4759 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4760 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4761 or not an insn is known to be executed each iteration of the
4762 loop, whether or not any iterations are known to occur.
4764 Therefore, if we have just passed a label and have no more labels
4765 between here and the test insn of the loop, and we have not passed
4766 a jump to the top of the loop, then we know these insns will be
4767 executed each iteration. */
4769 if (not_every_iteration
4770 && !past_loop_latch
4771 && LABEL_P (p)
4772 && no_labels_between_p (p, loop->end))
4773 not_every_iteration = 0;
4777 static void
4778 loop_bivs_find (struct loop *loop)
4780 struct loop_regs *regs = LOOP_REGS (loop);
4781 struct loop_ivs *ivs = LOOP_IVS (loop);
4782 /* Temporary list pointers for traversing ivs->list. */
4783 struct iv_class *bl, **backbl;
4785 ivs->list = 0;
4787 for_each_insn_in_loop (loop, check_insn_for_bivs);
4789 /* Scan ivs->list to remove all regs that proved not to be bivs.
4790 Make a sanity check against regs->n_times_set. */
4791 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4793 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4794 /* Above happens if register modified by subreg, etc. */
4795 /* Make sure it is not recognized as a basic induction var: */
4796 || regs->array[bl->regno].n_times_set != bl->biv_count
/* If never incremented, it is an invariant that we decided not
   to move, so leave it alone. */
4799 || ! bl->incremented)
4801 if (loop_dump_stream)
4802 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4803 bl->regno,
4804 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4805 ? "not induction variable"
4806 : (! bl->incremented ? "never incremented"
4807 : "count error")));
4809 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4810 *backbl = bl->next;
4812 else
4814 backbl = &bl->next;
4816 if (loop_dump_stream)
4817 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
/* Determine how BIVs are initialized by looking through the
   pre-header extended basic block. */
4825 static void
4826 loop_bivs_init_find (struct loop *loop)
4828 struct loop_ivs *ivs = LOOP_IVS (loop);
4829 /* Temporary list pointers for traversing ivs->list. */
4830 struct iv_class *bl;
4831 int call_seen;
4832 rtx p;
4834 /* Find initial value for each biv by searching backwards from loop_start,
4835 halting at first label. Also record any test condition. */
4837 call_seen = 0;
4838 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4840 rtx test;
4842 note_insn = p;
4844 if (CALL_P (p))
4845 call_seen = 1;
4847 if (INSN_P (p))
4848 note_stores (PATTERN (p), record_initial, ivs);
/* Record any test of a biv that branches around the loop if there is
   no store between it and the start of the loop. We only care about
   tests with constants and registers, and only certain of those. */
4853 if (JUMP_P (p)
4854 && JUMP_LABEL (p) != 0
4855 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4856 && (test = get_condition_for_loop (loop, p)) != 0
4857 && REG_P (XEXP (test, 0))
4858 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4859 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4860 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4861 && bl->init_insn == 0)
4863 /* If an NE test, we have an initial value! */
4864 if (GET_CODE (test) == NE)
4866 bl->init_insn = p;
4867 bl->init_set = gen_rtx_SET (VOIDmode,
4868 XEXP (test, 0), XEXP (test, 1));
4870 else
4871 bl->initial_test = test;
/* Look at each biv and see if we can say anything better about its
   initial value from any initializing insns set up above. (This is
   done in two passes to avoid missing SETs in a PARALLEL.) */
4880 static void
4881 loop_bivs_check (struct loop *loop)
4883 struct loop_ivs *ivs = LOOP_IVS (loop);
4884 /* Temporary list pointers for traversing ivs->list. */
4885 struct iv_class *bl;
4886 struct iv_class **backbl;
4888 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4890 rtx src;
4891 rtx note;
4893 if (! bl->init_insn)
4894 continue;
/* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
   is a constant, use that value. */
4898 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4899 && CONSTANT_P (XEXP (note, 0)))
4900 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4901 && CONSTANT_P (XEXP (note, 0))))
4902 src = XEXP (note, 0);
4903 else
4904 src = SET_SRC (bl->init_set);
4906 if (loop_dump_stream)
4907 fprintf (loop_dump_stream,
4908 "Biv %d: initialized at insn %d: initial value ",
4909 bl->regno, INSN_UID (bl->init_insn));
4911 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4912 || GET_MODE (src) == VOIDmode)
4913 && valid_initial_value_p (src, bl->init_insn,
4914 LOOP_INFO (loop)->pre_header_has_call,
4915 loop->start))
4917 bl->initial_value = src;
4919 if (loop_dump_stream)
4921 print_simple_rtl (loop_dump_stream, src);
4922 fputc ('\n', loop_dump_stream);
4925 /* If we can't make it a giv,
4926 let biv keep initial value of "itself". */
4927 else if (loop_dump_stream)
4928 fprintf (loop_dump_stream, "is complex\n");
4933 /* Search the loop for general induction variables. */
4935 static void
4936 loop_givs_find (struct loop* loop)
4938 for_each_insn_in_loop (loop, check_insn_for_givs);
4942 /* For each giv for which we still don't know whether or not it is
4943 replaceable, check to see if it is replaceable because its final value
4944 can be calculated. */
4946 static void
4947 loop_givs_check (struct loop *loop)
4949 struct loop_ivs *ivs = LOOP_IVS (loop);
4950 struct iv_class *bl;
4952 for (bl = ivs->list; bl; bl = bl->next)
4954 struct induction *v;
4956 for (v = bl->giv; v; v = v->next_iv)
4957 if (! v->replaceable && ! v->not_replaceable)
4958 check_final_value (loop, v);
/* Try to generate the simplest rtx for the expression
   (PLUS (MULT mult1 mult2) add1). This is used to calculate the
   initial value of givs. */
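/* E.g. (hypothetical call)

       fold_rtx_mult_add (reg, GEN_INT (4), GEN_INT (16), SImode)

   yields (plus (mult (reg) (const_int 4)) (const_int 16)), while fully
   constant operands fold down to a single CONST_INT. */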
4966 static rtx
4967 fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode)
4969 rtx temp, mult_res;
4970 rtx result;
4972 /* The modes must all be the same. This should always be true. For now,
4973 check to make sure. */
4974 gcc_assert (GET_MODE (mult1) == mode || GET_MODE (mult1) == VOIDmode);
4975 gcc_assert (GET_MODE (mult2) == mode || GET_MODE (mult2) == VOIDmode);
4976 gcc_assert (GET_MODE (add1) == mode || GET_MODE (add1) == VOIDmode);
/* Ensure that if at least one of MULT1/MULT2 is constant, then MULT2
   will be the constant. */
4980 if (GET_CODE (mult1) == CONST_INT)
4982 temp = mult2;
4983 mult2 = mult1;
4984 mult1 = temp;
4987 mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
4988 if (! mult_res)
4989 mult_res = gen_rtx_MULT (mode, mult1, mult2);
4991 /* Again, put the constant second. */
4992 if (GET_CODE (add1) == CONST_INT)
4994 temp = add1;
4995 add1 = mult_res;
4996 mult_res = temp;
4999 result = simplify_binary_operation (PLUS, mode, add1, mult_res);
5000 if (! result)
5001 result = gen_rtx_PLUS (mode, add1, mult_res);
5003 return result;
/* Search the list of induction structs for the biv BL, trying to
   calculate the total increment value for one iteration of the loop
   as a constant.

   Return the increment value as an rtx, simplified as much as
   possible, if it can be calculated. Otherwise, return 0. */
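/* E.g. for a biv incremented by both i += 2 and i += 3 on every
   iteration, the result is (const_int 5); if any increment is only
   conditionally executed, may execute more than once, or has a
   non-unit mult_val, 0 is returned instead. (Illustrative
   increments.) */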
5012 static rtx
5013 biv_total_increment (const struct iv_class *bl)
5015 struct induction *v;
5016 rtx result;
/* To compute the increment, we must check every instruction that sets
   the biv. Each such instruction must be executed only once each time
   through the loop. To verify this, we check that the insn is always
   executed, and that there are no backward branches after the insn
   that branch to before it. Also, the insn must have a mult_val of
   one (to make sure it really is an increment). */
5025 result = const0_rtx;
5026 for (v = bl->biv; v; v = v->next_iv)
5028 if (v->always_computable && v->mult_val == const1_rtx
5029 && ! v->maybe_multiple
5030 && SCALAR_INT_MODE_P (v->mode))
5032 /* If we have already counted it, skip it. */
5033 if (v->same)
5034 continue;
5036 result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
5038 else
5039 return 0;
5042 return result;
/* Try to prove that the register is dead after the loop exits. Trace
   every loop exit looking for an insn that will always be executed and
   that sets the register to some value before the first use of the
   register is found. If successful, return 1; otherwise return 0. */
5050 /* ?? Could be made more intelligent in the handling of jumps, so that
5051 it can search past if statements and other similar structures. */
5053 static int
5054 reg_dead_after_loop (const struct loop *loop, rtx reg)
5056 rtx insn, label;
5057 int jump_count = 0;
5058 int label_count = 0;
5060 /* In addition to checking all exits of this loop, we must also check
5061 all exits of inner nested loops that would exit this loop. We don't
5062 have any way to identify those, so we just give up if there are any
5063 such inner loop exits. */
5065 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
5066 label_count++;
5068 if (label_count != loop->exit_count)
5069 return 0;
/* HACK: Must also search the loop fall-through exit: create a
   LABEL_REF here which points to loop->end, and append the
   loop->exit_labels list to it. */
5074 label = gen_rtx_LABEL_REF (VOIDmode, loop->end);
5075 LABEL_NEXTREF (label) = loop->exit_labels;
5077 for (; label; label = LABEL_NEXTREF (label))
/* Succeed if we find an insn that sets the biv, or if we reach the
   end of the function. Fail if we find an insn that uses the biv,
   or if we come to a conditional jump. */
5083 insn = NEXT_INSN (XEXP (label, 0));
5084 while (insn)
5086 if (INSN_P (insn))
5088 rtx set, note;
5090 if (reg_referenced_p (reg, PATTERN (insn)))
5091 return 0;
5093 note = find_reg_equal_equiv_note (insn);
5094 if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0)))
5095 return 0;
5097 set = single_set (insn);
5098 if (set && rtx_equal_p (SET_DEST (set), reg))
5099 break;
5101 if (JUMP_P (insn))
5103 if (GET_CODE (PATTERN (insn)) == RETURN)
5104 break;
5105 else if (!any_uncondjump_p (insn)
5106 /* Prevent infinite loop following infinite loops. */
5107 || jump_count++ > 20)
5108 return 0;
5109 else
5110 insn = JUMP_LABEL (insn);
5114 insn = NEXT_INSN (insn);
5118 /* Success, the register is dead on all loop exits. */
5119 return 1;
5122 /* Try to calculate the final value of the biv, the value it will have at
5123 the end of the loop. If we can do it, return that value. */
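/* E.g. a biv with initial value 0 and a total increment of 4 per
   iteration, in a loop known to iterate 10 times whose only exit is
   the fall-through, gets a final value of 40, computed into a fresh
   pseudo after the loop. (Illustrative numbers.) */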
5125 static rtx
5126 final_biv_value (const struct loop *loop, struct iv_class *bl)
5128 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
5129 rtx increment, tem;
5131 /* ??? This only works for MODE_INT biv's. Reject all others for now. */
5133 if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
5134 return 0;
5136 /* The final value for reversed bivs must be calculated differently than
5137 for ordinary bivs. In this case, there is already an insn after the
5138 loop which sets this biv's final value (if necessary), and there are
5139 no other loop exits, so we can return any value. */
5140 if (bl->reversed)
5142 if (loop_dump_stream)
5143 fprintf (loop_dump_stream,
5144 "Final biv value for %d, reversed biv.\n", bl->regno);
5146 return const0_rtx;
5149 /* Try to calculate the final value as initial value + (number of iterations
5150 * increment). For this to work, increment must be invariant, the only
5151 exit from the loop must be the fall through at the bottom (otherwise
5152 it may not have its final value when the loop exits), and the initial
5153 value of the biv must be invariant. */
5155 if (n_iterations != 0
5156 && ! loop->exit_count
5157 && loop_invariant_p (loop, bl->initial_value))
5159 increment = biv_total_increment (bl);
5161 if (increment && loop_invariant_p (loop, increment))
/* We can calculate the loop exit value; emit insns after the
   loop end to compute this value into a temporary register in
   case it is needed later. */
5167 tem = gen_reg_rtx (bl->biv->mode);
5168 record_base_value (REGNO (tem), bl->biv->add_val, 0);
5169 loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations),
5170 bl->initial_value, tem);
5172 if (loop_dump_stream)
5173 fprintf (loop_dump_stream,
5174 "Final biv value for %d, calculated.\n", bl->regno);
5176 return tem;
5180 /* Check to see if the biv is dead at all loop exits. */
5181 if (reg_dead_after_loop (loop, bl->biv->src_reg))
5183 if (loop_dump_stream)
5184 fprintf (loop_dump_stream,
5185 "Final biv value for %d, biv dead after loop exit.\n",
5186 bl->regno);
5188 return const0_rtx;
5191 return 0;
5194 /* Return nonzero if it is possible to eliminate the biv BL provided
5195 all givs are reduced. This is possible if either the reg is not
5196 used outside the loop, or we can compute what its final value will
5197 be. */
5199 static int
5200 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
5201 int threshold, int insn_count)
5203 /* For architectures with a decrement_and_branch_until_zero insn,
5204 don't do this if we put a REG_NONNEG note on the endtest for this
5205 biv. */
5207 #ifdef HAVE_decrement_and_branch_until_zero
5208 if (bl->nonneg)
5210 if (loop_dump_stream)
5211 fprintf (loop_dump_stream,
5212 "Cannot eliminate nonneg biv %d.\n", bl->regno);
5213 return 0;
5215 #endif
/* Check whether the biv is used outside the loop or has a final value.
   Compare against bl->init_insn rather than loop->start. We aren't
   concerned with any uses of the biv between init_insn and
   loop->start, since these won't be affected by the value of the biv
   elsewhere in the function, so long as init_insn doesn't use the
   biv itself. */
5224 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
5225 && bl->init_insn
5226 && INSN_UID (bl->init_insn) < max_uid_for_loop
5227 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
5228 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
5229 || (bl->final_value = final_biv_value (loop, bl)))
5230 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
5232 if (loop_dump_stream)
5234 fprintf (loop_dump_stream,
5235 "Cannot eliminate biv %d.\n",
5236 bl->regno);
5237 fprintf (loop_dump_stream,
5238 "First use: insn %d, last use: insn %d.\n",
5239 REGNO_FIRST_UID (bl->regno),
5240 REGNO_LAST_UID (bl->regno));
5242 return 0;
5246 /* Reduce each giv of BL that we have decided to reduce. */
5248 static void
5249 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
5251 struct induction *v;
5253 for (v = bl->giv; v; v = v->next_iv)
5255 struct induction *tv;
5256 if (! v->ignore && v->same == 0)
5258 int auto_inc_opt = 0;
5260 /* If the code for derived givs immediately below has already
5261 allocated a new_reg, we must keep it. */
5262 if (! v->new_reg)
5263 v->new_reg = gen_reg_rtx (v->mode);
5265 #ifdef AUTO_INC_DEC
5266 /* If the target has auto-increment addressing modes, and
5267 this is an address giv, then try to put the increment
5268 immediately after its use, so that flow can create an
5269 auto-increment addressing mode. */
5270 /* Don't do this for loops entered at the bottom, to avoid
5271 this invalid transformation:
5272 jmp L; -> jmp L;
5273 TOP: TOP:
5274 use giv use giv
5275 L: inc giv
5276 inc biv L:
5277 test biv test giv
5278 cbr TOP cbr TOP
5280 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
5281 && bl->biv->always_executed && ! bl->biv->maybe_multiple
5282 /* We don't handle reversed biv's because bl->biv->insn
5283 does not have a valid INSN_LUID. */
5284 && ! bl->reversed
5285 && v->always_executed && ! v->maybe_multiple
5286 && INSN_UID (v->insn) < max_uid_for_loop
5287 && !loop->top)
5289 /* If other giv's have been combined with this one, then
5290 this will work only if all uses of the other giv's occur
5291 before this giv's insn. This is difficult to check.
5293 We simplify this by looking for the common case where
5294 there is one DEST_REG giv, and this giv's insn is the
5295 last use of the dest_reg of that DEST_REG giv. If the
5296 increment occurs after the address giv, then we can
5297 perform the optimization. (Otherwise, the increment
5298 would have to go before other_giv, and we would not be
5299 able to combine it with the address giv to get an
5300 auto-inc address.) */
5301 if (v->combined_with)
5303 struct induction *other_giv = 0;
5305 for (tv = bl->giv; tv; tv = tv->next_iv)
5306 if (tv->same == v)
5308 if (other_giv)
5309 break;
5310 else
5311 other_giv = tv;
5313 if (! tv && other_giv
5314 && REGNO (other_giv->dest_reg) < max_reg_before_loop
5315 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
5316 == INSN_UID (v->insn))
5317 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
5318 auto_inc_opt = 1;
5320 /* Check for case where increment is before the address
5321 giv. Do this test in "loop order". */
5322 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
5323 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5324 || (INSN_LUID (bl->biv->insn)
5325 > INSN_LUID (loop->scan_start))))
5326 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5327 && (INSN_LUID (loop->scan_start)
5328 < INSN_LUID (bl->biv->insn))))
5329 auto_inc_opt = -1;
5330 else
5331 auto_inc_opt = 1;
5333 #ifdef HAVE_cc0
5335 rtx prev;
5337 /* We can't put an insn immediately after one setting
5338 cc0, or immediately before one using cc0. */
5339 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
5340 || (auto_inc_opt == -1
5341 && (prev = prev_nonnote_insn (v->insn)) != 0
5342 && INSN_P (prev)
5343 && sets_cc0_p (PATTERN (prev))))
5344 auto_inc_opt = 0;
5346 #endif
5348 if (auto_inc_opt)
5349 v->auto_inc_opt = 1;
5351 #endif
5353 /* For each place where the biv is incremented, add an insn
5354 to increment the new, reduced reg for the giv. */
5355 for (tv = bl->biv; tv; tv = tv->next_iv)
5357 rtx insert_before;
5359 /* Skip if location is the same as a previous one. */
5360 if (tv->same)
5361 continue;
5362 if (! auto_inc_opt)
5363 insert_before = NEXT_INSN (tv->insn);
5364 else if (auto_inc_opt == 1)
5365 insert_before = NEXT_INSN (v->insn);
5366 else
5367 insert_before = v->insn;
5369 if (tv->mult_val == const1_rtx)
5370 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5371 v->new_reg, v->new_reg,
5372 0, insert_before);
5373 else /* tv->mult_val == const0_rtx */
5374 /* A multiply is acceptable here
5375 since this is presumed to be seldom executed. */
5376 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5377 v->add_val, v->new_reg,
5378 0, insert_before);
5381 /* Add code at loop start to initialize giv's reduced reg. */
5383 loop_iv_add_mult_hoist (loop,
5384 extend_value_for_giv (v, bl->initial_value),
5385 v->mult_val, v->add_val, v->new_reg);
5391 /* Check for givs whose first use is their definition and whose
5392 last use is the definition of another giv. If so, it is likely
5393 dead and should not be used to derive another giv nor to
5394 eliminate a biv. */
5396 static void
5397 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
5399 struct induction *v;
5401 for (v = bl->giv; v; v = v->next_iv)
5403 if (v->ignore
5404 || (v->same && v->same->ignore))
5405 continue;
5407 if (v->giv_type == DEST_REG
5408 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
5410 struct induction *v1;
5412 for (v1 = bl->giv; v1; v1 = v1->next_iv)
5413 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
5414 v->maybe_dead = 1;
5420 static void
5421 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
5423 struct induction *v;
5425 for (v = bl->giv; v; v = v->next_iv)
5427 if (v->same && v->same->ignore)
5428 v->ignore = 1;
5430 if (v->ignore)
5431 continue;
5433 /* Update expression if this was combined, in case other giv was
5434 replaced. */
5435 if (v->same)
5436 v->new_reg = replace_rtx (v->new_reg,
5437 v->same->dest_reg, v->same->new_reg);
5439 /* See if this register is known to be a pointer to something. If
5440 so, see if we can find the alignment. First see if there is a
5441 destination register that is a pointer. If so, this shares the
5442 alignment too. Next see if we can deduce anything from the
5443 computational information. If not, and this is a DEST_ADDR
5444 giv, at least we know that it's a pointer, though we don't know
5445 the alignment. */
5446 if (REG_P (v->new_reg)
5447 && v->giv_type == DEST_REG
5448 && REG_POINTER (v->dest_reg))
5449 mark_reg_pointer (v->new_reg,
5450 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
5451 else if (REG_P (v->new_reg)
5452 && REG_POINTER (v->src_reg))
5454 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
5456 if (align == 0
5457 || GET_CODE (v->add_val) != CONST_INT
5458 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
5459 align = 0;
5461 mark_reg_pointer (v->new_reg, align);
5463 else if (REG_P (v->new_reg)
5464 && REG_P (v->add_val)
5465 && REG_POINTER (v->add_val))
5467 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
5469 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
5470 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
5471 align = 0;
5473 mark_reg_pointer (v->new_reg, align);
5475 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
5476 mark_reg_pointer (v->new_reg, 0);
5478 if (v->giv_type == DEST_ADDR)
5480 /* Store reduced reg as the address in the memref where we found
5481 this giv. */
5482 if (validate_change_maybe_volatile (v->insn, v->location,
5483 v->new_reg))
5484 /* Yay, it worked! */;
5485 /* Not replaceable; emit an insn to set the original
5486 giv reg from the reduced giv. */
5487 else if (REG_P (*v->location))
5488 loop_insn_emit_before (loop, 0, v->insn,
5489 gen_move_insn (*v->location,
5490 v->new_reg));
5491 else
5493 /* If it wasn't a reg, create a pseudo and use that. */
5494 rtx reg, seq;
5495 start_sequence ();
5496 reg = force_reg (v->mode, *v->location);
5497 seq = get_insns ();
5498 end_sequence ();
5499 loop_insn_emit_before (loop, 0, v->insn, seq);
5500 if (!validate_change_maybe_volatile (v->insn, v->location, reg))
5501 gcc_unreachable ();
5504 else if (v->replaceable)
5506 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5508 else
5510 rtx original_insn = v->insn;
5511 rtx note;
5513 /* Not replaceable; emit an insn to set the original giv reg from
5514 the reduced giv, same as above. */
5515 v->insn = loop_insn_emit_after (loop, 0, original_insn,
5516 gen_move_insn (v->dest_reg,
5517 v->new_reg));
5519 /* The original insn may have a REG_EQUAL note. This note is
5520 now incorrect and may result in invalid substitutions later.
5521 The original insn is dead, but may be part of a libcall
5522 sequence, which doesn't seem worth the bother of handling. */
5523 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
5524 if (note)
5525 remove_note (original_insn, note);
5528 /* When a loop is reversed, givs which depend on the reversed
5529 biv, and which are live outside the loop, must be set to their
5530 correct final value. This insn is only needed if the giv is
5531 not replaceable. The correct final value is the same as the
5532 value that the giv starts the reversed loop with. */
5533 if (bl->reversed && ! v->replaceable)
5534 loop_iv_add_mult_sink (loop,
5535 extend_value_for_giv (v, bl->initial_value),
5536 v->mult_val, v->add_val, v->dest_reg);
5537 else if (v->final_value)
5538 loop_insn_sink_or_swim (loop,
5539 gen_load_of_final_value (v->dest_reg,
5540 v->final_value));
5542 if (loop_dump_stream)
5544 fprintf (loop_dump_stream, "giv at %d reduced to ",
5545 INSN_UID (v->insn));
5546 print_simple_rtl (loop_dump_stream, v->new_reg);
5547 fprintf (loop_dump_stream, "\n");
5553 static int
5554 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
5555 struct iv_class *bl, struct induction *v,
5556 rtx test_reg)
5558 int add_cost;
5559 int benefit;
5561 benefit = v->benefit;
5562 PUT_MODE (test_reg, v->mode);
5563 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
5564 test_reg, test_reg);
5566 /* Reduce benefit if not replaceable, since we will insert a
5567 move-insn to replace the insn that calculates this giv. Don't do
5568 this unless the giv is a user variable, since it will often be
5569 marked non-replaceable because of the duplication of the exit
5570 code outside the loop. In such a case, the copies we insert are
5571 dead and will be deleted. So they don't have a cost. Similar
5572 situations exist. */
5573 /* ??? The new final_[bg]iv_value code does a much better job of
5574 finding replaceable giv's, and hence this code may no longer be
5575 necessary. */
5576 if (! v->replaceable && ! bl->eliminable
5577 && REG_USERVAR_P (v->dest_reg))
5578 benefit -= copy_cost;
5580 /* Decrease the benefit to count the add-insns that we will insert
5581 to increment the reduced reg for the giv. ??? This can
5582 overestimate the run-time cost of the additional insns, e.g. if
5583 there are multiple basic blocks that increment the biv, but only
5584 one of these blocks is executed during each iteration. There is
5585 no good way to detect cases like this with the current structure
5586 of the loop optimizer. This code is more accurate for
5587 determining code size than run-time benefits. */
5588 benefit -= add_cost * bl->biv_count;
5590 /* Decide whether to strength-reduce this giv or to leave the code
5591 unchanged (recompute it from the biv each time it is used). This
5592 decision can be made independently for each giv. */
5594 #ifdef AUTO_INC_DEC
5595 /* Attempt to guess whether autoincrement will handle some of the
5596 new add insns; if so, increase BENEFIT (undo the subtraction of
5597 add_cost that was done above). */
5598 if (v->giv_type == DEST_ADDR
5599 /* Increasing the benefit is risky, since this is only a guess.
5600 Avoid increasing register pressure in cases where there would
5601 be no other benefit from reducing this giv. */
5602 && benefit > 0
5603 && GET_CODE (v->mult_val) == CONST_INT)
5605 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5607 if (HAVE_POST_INCREMENT
5608 && INTVAL (v->mult_val) == size)
5609 benefit += add_cost * bl->biv_count;
5610 else if (HAVE_PRE_INCREMENT
5611 && INTVAL (v->mult_val) == size)
5612 benefit += add_cost * bl->biv_count;
5613 else if (HAVE_POST_DECREMENT
5614 && -INTVAL (v->mult_val) == size)
5615 benefit += add_cost * bl->biv_count;
5616 else if (HAVE_PRE_DECREMENT
5617 && -INTVAL (v->mult_val) == size)
5618 benefit += add_cost * bl->biv_count;
5620 #endif
5622 return benefit;
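/* Worked example with illustrative numbers: if v->benefit is 12, the
   giv is a non-replaceable user variable in a non-eliminable class
   (so copy_cost, say 4, is subtracted), add_cost is 2 and
   bl->biv_count is 2, the value returned is 12 - 4 - 2*2 = 4. For a
   DEST_ADDR giv whose mult_val matches the access size on a target
   with post-increment, the 2*2 would be added back, giving 8. */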
5626 /* Free IV structures for LOOP. */
5628 static void
5629 loop_ivs_free (struct loop *loop)
5631 struct loop_ivs *ivs = LOOP_IVS (loop);
5632 struct iv_class *iv = ivs->list;
5634 free (ivs->regs);
5636 while (iv)
5638 struct iv_class *next = iv->next;
5639 struct induction *induction;
5640 struct induction *next_induction;
5642 for (induction = iv->biv; induction; induction = next_induction)
5644 next_induction = induction->next_iv;
5645 free (induction);
5647 for (induction = iv->giv; induction; induction = next_induction)
5649 next_induction = induction->next_iv;
5650 free (induction);
5653 free (iv);
5654 iv = next;
5658 /* Look back before LOOP->START for the insn that sets REG and return
5659 the equivalent constant if there is a REG_EQUAL note; otherwise
5660 return just the SET_SRC of REG. */
5662 static rtx
5663 loop_find_equiv_value (const struct loop *loop, rtx reg)
5665 rtx loop_start = loop->start;
5666 rtx insn, set;
5667 rtx ret;
5669 ret = reg;
5670 for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn))
5672 if (LABEL_P (insn))
5673 break;
5675 else if (INSN_P (insn) && reg_set_p (reg, insn))
5677 /* We found the last insn before the loop that sets the register.
5678 If it sets the entire register, and has a REG_EQUAL note,
5679 then use the value of the REG_EQUAL note. */
5680 if ((set = single_set (insn))
5681 && (SET_DEST (set) == reg))
5683 rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5685 /* Only use the REG_EQUAL note if it is a constant.
5686 Other things, divide in particular, will cause
5687 problems later if we use them. */
5688 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
5689 && CONSTANT_P (XEXP (note, 0)))
5690 ret = XEXP (note, 0);
5691 else
5692 ret = SET_SRC (set);
5694 /* We cannot do this if it changes between the
5695 assignment and loop start though. */
5696 if (modified_between_p (ret, insn, loop_start))
5697 ret = reg;
5699 break;
5702 return ret;
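/* Illustrative use, with hypothetical registers: if the last insn
   before the loop that sets (reg 70) is a single_set from (reg 71)
   carrying a REG_EQUAL note of (const_int 100), and that value is not
   modified again before loop->start, this returns (const_int 100).
   Without a usable constant note it would return the SET_SRC instead,
   here (reg 71). */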
5705 /* Find and return register term common to both expressions OP0 and
5706 OP1 or NULL_RTX if no such term exists. Each expression must be a
5707 REG or a PLUS of a REG. */
5709 static rtx
5710 find_common_reg_term (rtx op0, rtx op1)
5712 if ((REG_P (op0) || GET_CODE (op0) == PLUS)
5713 && (REG_P (op1) || GET_CODE (op1) == PLUS))
5715 rtx op00;
5716 rtx op01;
5717 rtx op10;
5718 rtx op11;
5720 if (GET_CODE (op0) == PLUS)
5721 op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
5722 else
5723 op01 = const0_rtx, op00 = op0;
5725 if (GET_CODE (op1) == PLUS)
5726 op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
5727 else
5728 op11 = const0_rtx, op10 = op1;
5730 /* Find and return common register term if present. */
5731 if (REG_P (op00) && (op00 == op10 || op00 == op11))
5732 return op00;
5733 else if (REG_P (op01) && (op01 == op10 || op01 == op11))
5734 return op01;
5737 /* No common register term found. */
5738 return NULL_RTX;
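/* For example, with hypothetical rtl: given OP0 = (plus (reg 70)
   (const_int 4)) and OP1 = (reg 70), the common term is (reg 70);
   given OP0 = (reg 70) and OP1 = (reg 71) there is none, so NULL_RTX
   is returned. */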
5741 /* Determine the loop iterator and calculate the number of loop
5742 iterations. Returns the exact number of loop iterations if it can
5743 be calculated, otherwise returns zero. */
5745 static unsigned HOST_WIDE_INT
5746 loop_iterations (struct loop *loop)
5748 struct loop_info *loop_info = LOOP_INFO (loop);
5749 struct loop_ivs *ivs = LOOP_IVS (loop);
5750 rtx comparison, comparison_value;
5751 rtx iteration_var, initial_value, increment, final_value;
5752 enum rtx_code comparison_code;
5753 HOST_WIDE_INT inc;
5754 unsigned HOST_WIDE_INT abs_inc;
5755 unsigned HOST_WIDE_INT abs_diff;
5756 int off_by_one;
5757 int increment_dir;
5758 int unsigned_p, compare_dir, final_larger;
5759 rtx last_loop_insn;
5760 struct iv_class *bl;
5762 loop_info->n_iterations = 0;
5763 loop_info->initial_value = 0;
5764 loop_info->initial_equiv_value = 0;
5765 loop_info->comparison_value = 0;
5766 loop_info->final_value = 0;
5767 loop_info->final_equiv_value = 0;
5768 loop_info->increment = 0;
5769 loop_info->iteration_var = 0;
5770 loop_info->iv = 0;
5772 /* We used to use prev_nonnote_insn here, but that fails because it might
5773 accidentally get the branch for a contained loop if the branch for this
5774 loop was deleted. We can only trust branches immediately before the
5775 loop_end. */
5776 last_loop_insn = PREV_INSN (loop->end);
5778 /* ??? We should probably try harder to find the jump insn
5779 at the end of the loop. The following code assumes that
5780 the last loop insn is a jump to the top of the loop. */
5781 if (!JUMP_P (last_loop_insn))
5783 if (loop_dump_stream)
5784 fprintf (loop_dump_stream,
5785 "Loop iterations: No final conditional branch found.\n");
5786 return 0;
5789 /* If there is more than a single jump to the top of the loop,
5790 we cannot (easily) determine the iteration count. */
5791 if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1)
5793 if (loop_dump_stream)
5794 fprintf (loop_dump_stream,
5795 "Loop iterations: Loop has multiple back edges.\n");
5796 return 0;
5799 /* Find the iteration variable. If the last insn is a conditional
5800 branch, and the insn before tests a register value, make that the
5801 iteration variable. */
5803 comparison = get_condition_for_loop (loop, last_loop_insn);
5804 if (comparison == 0)
5806 if (loop_dump_stream)
5807 fprintf (loop_dump_stream,
5808 "Loop iterations: No final comparison found.\n");
5809 return 0;
5812 /* ??? Get_condition may switch position of induction variable and
5813 invariant register when it canonicalizes the comparison. */
5815 comparison_code = GET_CODE (comparison);
5816 iteration_var = XEXP (comparison, 0);
5817 comparison_value = XEXP (comparison, 1);
5819 if (!REG_P (iteration_var))
5821 if (loop_dump_stream)
5822 fprintf (loop_dump_stream,
5823 "Loop iterations: Comparison not against register.\n");
5824 return 0;
5827 /* The only new registers that are created before loop iterations
5828 are givs made from biv increments or registers created by
5829 load_mems. In the latter case, it is possible that try_copy_prop
5830 will propagate a new pseudo into the old iteration register but
5831 this will be marked by having the REG_USERVAR_P bit set. */
5833 gcc_assert ((unsigned) REGNO (iteration_var) < ivs->n_regs
5834 || REG_USERVAR_P (iteration_var));
5836 /* Determine the initial value of the iteration variable, and the amount
5837 that it is incremented each loop. Use the tables constructed by
5838 the strength reduction pass to calculate these values. */
5840 /* Clear the result values, in case no answer can be found. */
5841 initial_value = 0;
5842 increment = 0;
5844 /* The iteration variable can be either a giv or a biv. Check to see
5845 which it is, and compute the variable's initial value, and increment
5846 value if possible. */
5848 /* If this is a new register, can't handle it since we don't have any
5849 reg_iv_type entry for it. */
5850 if ((unsigned) REGNO (iteration_var) >= ivs->n_regs)
5852 if (loop_dump_stream)
5853 fprintf (loop_dump_stream,
5854 "Loop iterations: No reg_iv_type entry for iteration var.\n");
5855 return 0;
5858 /* Reject iteration variables larger than the host wide int size, since they
5859 could result in a number of iterations greater than the range of our
5860 `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
5861 else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
5862 > HOST_BITS_PER_WIDE_INT))
5864 if (loop_dump_stream)
5865 fprintf (loop_dump_stream,
5866 "Loop iterations: Iteration var rejected because mode too large.\n");
5867 return 0;
5869 else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
5871 if (loop_dump_stream)
5872 fprintf (loop_dump_stream,
5873 "Loop iterations: Iteration var not an integer.\n");
5874 return 0;
5877 /* Try swapping the comparison to identify a suitable iv. */
5878 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT
5879 && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT
5880 && REG_P (comparison_value)
5881 && REGNO (comparison_value) < ivs->n_regs)
5883 rtx temp = comparison_value;
5884 comparison_code = swap_condition (comparison_code);
5885 comparison_value = iteration_var;
5886 iteration_var = temp;
5889 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT)
5891 gcc_assert (REGNO (iteration_var) < ivs->n_regs);
5893 /* Grab initial value, only useful if it is a constant. */
5894 bl = REG_IV_CLASS (ivs, REGNO (iteration_var));
5895 initial_value = bl->initial_value;
5896 if (!bl->biv->always_executed || bl->biv->maybe_multiple)
5898 if (loop_dump_stream)
5899 fprintf (loop_dump_stream,
5900 "Loop iterations: Basic induction var not set once in each iteration.\n");
5901 return 0;
5904 increment = biv_total_increment (bl);
5906 else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT)
5908 HOST_WIDE_INT offset = 0;
5909 struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var));
5910 rtx biv_initial_value;
5912 gcc_assert (REGNO (v->src_reg) < ivs->n_regs);
5914 if (!v->always_executed || v->maybe_multiple)
5916 if (loop_dump_stream)
5917 fprintf (loop_dump_stream,
5918 "Loop iterations: General induction var not set once in each iteration.\n");
5919 return 0;
5922 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5924 /* Increment value is mult_val times the increment value of the biv. */
5926 increment = biv_total_increment (bl);
5927 if (increment)
5929 struct induction *biv_inc;
5931 increment = fold_rtx_mult_add (v->mult_val,
5932 extend_value_for_giv (v, increment),
5933 const0_rtx, v->mode);
5934 /* The caller assumes that one full increment has occurred at the
5935 first loop test. But that's not true when the biv is incremented
5936 after the giv is set (which is the usual case), e.g.:
5937 i = 6; do {;} while (i++ < 9) .
5938 Therefore, we bias the initial value by subtracting the amount of
5939 the increment that occurs between the giv set and the giv test. */
5940 for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv)
5942 if (loop_insn_first_p (v->insn, biv_inc->insn))
5944 if (REG_P (biv_inc->add_val))
5946 if (loop_dump_stream)
5947 fprintf (loop_dump_stream,
5948 "Loop iterations: Basic induction var add_val is REG %d.\n",
5949 REGNO (biv_inc->add_val));
5950 return 0;
5953 /* If we have already counted it, skip it. */
5954 if (biv_inc->same)
5955 continue;
5957 offset -= INTVAL (biv_inc->add_val);
5961 if (loop_dump_stream)
5962 fprintf (loop_dump_stream,
5963 "Loop iterations: Giv iterator, initial value bias %ld.\n",
5964 (long) offset);
5966 /* Initial value is mult_val times the biv's initial value plus
5967 add_val. Only useful if it is a constant. */
5968 biv_initial_value = extend_value_for_giv (v, bl->initial_value);
5969 initial_value
5970 = fold_rtx_mult_add (v->mult_val,
5971 plus_constant (biv_initial_value, offset),
5972 v->add_val, v->mode);
5974 else
5976 if (loop_dump_stream)
5977 fprintf (loop_dump_stream,
5978 "Loop iterations: Not basic or general induction var.\n");
5979 return 0;
5982 if (initial_value == 0)
5983 return 0;
5985 unsigned_p = 0;
5986 off_by_one = 0;
5987 switch (comparison_code)
5989 case LEU:
5990 unsigned_p = 1;
5991 case LE:
5992 compare_dir = 1;
5993 off_by_one = 1;
5994 break;
5995 case GEU:
5996 unsigned_p = 1;
5997 case GE:
5998 compare_dir = -1;
5999 off_by_one = -1;
6000 break;
6001 case EQ:
6002 /* Cannot determine loop iterations with this case. */
6003 compare_dir = 0;
6004 break;
6005 case LTU:
6006 unsigned_p = 1;
6007 case LT:
6008 compare_dir = 1;
6009 break;
6010 case GTU:
6011 unsigned_p = 1;
6012 case GT:
6013 compare_dir = -1;
6014 break;
6015 case NE:
6016 compare_dir = 0;
6017 break;
6018 default:
6019 gcc_unreachable ();
6022 /* If the comparison value is an invariant register, then try to find
6023 its value from the insns before the start of the loop. */
6025 final_value = comparison_value;
6026 if (REG_P (comparison_value)
6027 && loop_invariant_p (loop, comparison_value))
6029 final_value = loop_find_equiv_value (loop, comparison_value);
6031 /* If we don't get an invariant final value, we are better
6032 off with the original register. */
6033 if (! loop_invariant_p (loop, final_value))
6034 final_value = comparison_value;
6037 /* Calculate the approximate final value of the induction variable
6038 (on the last successful iteration). The exact final value
6039 depends on the branch operator, and increment sign. It will be
6040 wrong if the iteration variable is not incremented by one each
6041 time through the loop and (comparison_value + off_by_one -
6042 initial_value) % increment != 0.
6043 ??? Note that the final_value may overflow and thus final_larger
6044 will be bogus. A potentially infinite loop will be classified
6045 as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */
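/* Concrete illustration: for a loop ending in "while (i <= 9)" the
   comparison is LE, so off_by_one is 1 and the approximate final
   value becomes 9 + 1 = 10; for "while (i < 10)" (LT, off_by_one 0)
   it is 10 directly. Either way the difference from an initial value
   of 0 is 10. */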
6046 if (off_by_one)
6047 final_value = plus_constant (final_value, off_by_one);
6049 /* Save the calculated values describing this loop's bounds, in case
6050 precondition_loop_p will need them later. These values can not be
6051 recalculated inside precondition_loop_p because strength reduction
6052 optimizations may obscure the loop's structure.
6054 These values are only required by precondition_loop_p and insert_bct
6055 whenever the number of iterations cannot be computed at compile time.
6056 Only the difference between final_value and initial_value is
6057 important. Note that final_value is only approximate. */
6058 loop_info->initial_value = initial_value;
6059 loop_info->comparison_value = comparison_value;
6060 loop_info->final_value = plus_constant (comparison_value, off_by_one);
6061 loop_info->increment = increment;
6062 loop_info->iteration_var = iteration_var;
6063 loop_info->comparison_code = comparison_code;
6064 loop_info->iv = bl;
6066 /* Try to determine the iteration count for loops such
6067 as (for i = init; i < init + const; i++). When running the
6068 loop optimization twice, the first pass often converts simple
6069 loops into this form. */
6071 if (REG_P (initial_value))
6073 rtx reg1;
6074 rtx reg2;
6075 rtx const2;
6077 reg1 = initial_value;
6078 if (GET_CODE (final_value) == PLUS)
6079 reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
6080 else
6081 reg2 = final_value, const2 = const0_rtx;
6083 /* Check for initial_value = reg1, final_value = reg2 + const2,
6084 where reg1 != reg2. */
6085 if (REG_P (reg2) && reg2 != reg1)
6087 rtx temp;
6089 /* Find what reg1 is equivalent to. Hopefully it will
6090 either be reg2 or reg2 plus a constant. */
6091 temp = loop_find_equiv_value (loop, reg1);
6093 if (find_common_reg_term (temp, reg2))
6094 initial_value = temp;
6095 else if (loop_invariant_p (loop, reg2))
6097 /* Find what reg2 is equivalent to. Hopefully it will
6098 either be reg1 or reg1 plus a constant. Let's ignore
6099 the latter case for now since it is not so common. */
6100 temp = loop_find_equiv_value (loop, reg2);
6102 if (temp == loop_info->iteration_var)
6103 temp = initial_value;
6104 if (temp == reg1)
6105 final_value = (const2 == const0_rtx)
6106 ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
6111 loop_info->initial_equiv_value = initial_value;
6112 loop_info->final_equiv_value = final_value;
6114 /* For EQ comparison loops, we don't have a valid final value.
6115 Check this now so that we won't leave an invalid value if we
6116 return early for any other reason. */
6117 if (comparison_code == EQ)
6118 loop_info->final_equiv_value = loop_info->final_value = 0;
6120 if (increment == 0)
6122 if (loop_dump_stream)
6123 fprintf (loop_dump_stream,
6124 "Loop iterations: Increment value can't be calculated.\n");
6125 return 0;
6128 if (GET_CODE (increment) != CONST_INT)
6130 /* If we have a REG, check to see if REG holds a constant value. */
6131 /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't
6132 clear if it is worthwhile to try to handle such RTL. */
6133 if (REG_P (increment) || GET_CODE (increment) == SUBREG)
6134 increment = loop_find_equiv_value (loop, increment);
6136 if (GET_CODE (increment) != CONST_INT)
6138 if (loop_dump_stream)
6140 fprintf (loop_dump_stream,
6141 "Loop iterations: Increment value not constant ");
6142 print_simple_rtl (loop_dump_stream, increment);
6143 fprintf (loop_dump_stream, ".\n");
6145 return 0;
6147 loop_info->increment = increment;
6150 if (GET_CODE (initial_value) != CONST_INT)
6152 if (loop_dump_stream)
6154 fprintf (loop_dump_stream,
6155 "Loop iterations: Initial value not constant ");
6156 print_simple_rtl (loop_dump_stream, initial_value);
6157 fprintf (loop_dump_stream, ".\n");
6159 return 0;
6161 else if (GET_CODE (final_value) != CONST_INT)
6163 if (loop_dump_stream)
6165 fprintf (loop_dump_stream,
6166 "Loop iterations: Final value not constant ");
6167 print_simple_rtl (loop_dump_stream, final_value);
6168 fprintf (loop_dump_stream, ".\n");
6170 return 0;
6172 else if (comparison_code == EQ)
6174 rtx inc_once;
6176 if (loop_dump_stream)
6177 fprintf (loop_dump_stream, "Loop iterations: EQ comparison loop.\n");
6179 inc_once = gen_int_mode (INTVAL (initial_value) + INTVAL (increment),
6180 GET_MODE (iteration_var));
6182 if (inc_once == final_value)
6184 /* The iterator value once through the loop is equal to the
6185 comparison value. Either we have an infinite loop, or
6186 we'll loop twice. */
6187 if (increment == const0_rtx)
6188 return 0;
6189 loop_info->n_iterations = 2;
6191 else
6192 loop_info->n_iterations = 1;
6194 if (GET_CODE (loop_info->initial_value) == CONST_INT)
6195 loop_info->final_value
6196 = gen_int_mode ((INTVAL (loop_info->initial_value)
6197 + loop_info->n_iterations * INTVAL (increment)),
6198 GET_MODE (iteration_var));
6199 else
6200 loop_info->final_value
6201 = plus_constant (loop_info->initial_value,
6202 loop_info->n_iterations * INTVAL (increment));
6203 loop_info->final_equiv_value
6204 = gen_int_mode ((INTVAL (initial_value)
6205 + loop_info->n_iterations * INTVAL (increment)),
6206 GET_MODE (iteration_var));
6207 return loop_info->n_iterations;
6210 /* Final_larger is 1 if the final value is larger than the initial value, 0 if equal, otherwise -1. */
6211 if (unsigned_p)
6212 final_larger
6213 = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6214 > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
6215 - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6216 < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
6217 else
6218 final_larger = (INTVAL (final_value) > INTVAL (initial_value))
6219 - (INTVAL (final_value) < INTVAL (initial_value));
6221 if (INTVAL (increment) > 0)
6222 increment_dir = 1;
6223 else if (INTVAL (increment) == 0)
6224 increment_dir = 0;
6225 else
6226 increment_dir = -1;
6228 /* There are 27 different cases: compare_dir = -1, 0, 1;
6229 final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
6230 There are 4 normal cases, 4 reverse cases (where the iteration variable
6231 will overflow before the loop exits), 4 infinite loop cases, and 15
6232 immediate exit (0 or 1 iteration depending on loop type) cases.
6233 Only try to optimize the normal cases. */
6235 /* (compare_dir/final_larger/increment_dir)
6236 Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
6237 Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
6238 Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
6239 Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
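/* Mapping a few concrete loops onto these triples (illustrative):
   "for (i = 0; i < 10; i++)" gives (1/1/1), a normal case;
   "for (i = 10; i != 0; i--)" gives (0/-1/-1), also normal;
   "for (i = 0; i < 10; i--)" gives (1/1/-1), a reverse case that is
   rejected below. */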
6241 /* ?? If the meaning of reverse loops (where the iteration variable
6242 will overflow before the loop exits) is undefined, then we could
6243 eliminate all of these special checks, and just always assume
6244 the loops are normal/immediate/infinite. Note that this means
6245 the sign of increment_dir does not have to be known. Also,
6246 since it does not really hurt if immediate exit loops or infinite loops
6247 are optimized, those cases could be ignored too, and hence all
6248 loops can be optimized.
6250 According to ANSI Spec, the reverse loop case result is undefined,
6251 because the action on overflow is undefined.
6253 See also the special test for NE loops below. */
6255 if (final_larger == increment_dir && final_larger != 0
6256 && (final_larger == compare_dir || compare_dir == 0))
6257 /* Normal case. */
6259 else
6261 if (loop_dump_stream)
6262 fprintf (loop_dump_stream, "Loop iterations: Not normal loop.\n");
6263 return 0;
6266 /* Calculate the number of iterations, final_value is only an approximation,
6267 so correct for that. Note that abs_diff and n_iterations are
6268 unsigned, because they can be as large as 2^n - 1. */
6270 inc = INTVAL (increment);
6271 gcc_assert (inc);
6272 if (inc > 0)
6274 abs_diff = INTVAL (final_value) - INTVAL (initial_value);
6275 abs_inc = inc;
6277 else
6279 abs_diff = INTVAL (initial_value) - INTVAL (final_value);
6280 abs_inc = -inc;
6283 /* Given that iteration_var is going to iterate over its own mode,
6284 not HOST_WIDE_INT, disregard higher bits that might have come
6285 into the picture due to sign extension of initial and final
6286 values. */
6287 abs_diff &= ((unsigned HOST_WIDE_INT) 1
6288 << (GET_MODE_BITSIZE (GET_MODE (iteration_var)) - 1)
6289 << 1) - 1;
6291 /* For NE tests, make sure that the iteration variable won't miss
6292 the final value. If abs_diff mod abs_incr is not zero, then the
6293 iteration variable will overflow before the loop exits, and we
6294 can not calculate the number of iterations. */
6295 if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
6296 return 0;
6298 /* Note that the number of iterations could be calculated using
6299 (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
6300 handle potential overflow of the summation. */
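/* Worked example: for a loop like "for (i = 0; i < 10; i += 3)" we
   have abs_diff = 10 and abs_inc = 3, so n_iterations =
   10/3 + (10%3 != 0) = 3 + 1 = 4, matching the iterations at
   i = 0, 3, 6 and 9. */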
6301 loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
6302 return loop_info->n_iterations;
6305 /* Perform strength reduction and induction variable elimination.
6307 Pseudo registers created during this function will be beyond the
6308 last valid index in several tables including
6309 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
6310 problem here, because the added registers cannot be givs outside of
6311 their loop, and hence will never be reconsidered. But scan_loop
6312 must check regnos to make sure they are in bounds. */
6314 static void
6315 strength_reduce (struct loop *loop, int flags)
6317 struct loop_info *loop_info = LOOP_INFO (loop);
6318 struct loop_regs *regs = LOOP_REGS (loop);
6319 struct loop_ivs *ivs = LOOP_IVS (loop);
6320 rtx p;
6321 /* Temporary list pointer for traversing ivs->list. */
6322 struct iv_class *bl;
6323 /* Ratio of extra register life span we can justify
6324 for saving an instruction. More if loop doesn't call subroutines
6325 since in that case saving an insn makes more difference
6326 and more registers are available. */
6327 /* ??? could set this to last value of threshold in move_movables */
6328 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
6329 /* Map of pseudo-register replacements. */
6330 rtx *reg_map = NULL;
6331 int reg_map_size;
6332 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
6333 int insn_count = count_insns_in_loop (loop);
6335 addr_placeholder = gen_reg_rtx (Pmode);
6337 ivs->n_regs = max_reg_before_loop;
6338 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
6340 /* Find all BIVs in loop. */
6341 loop_bivs_find (loop);
6343 /* Exit if there are no bivs. */
6344 if (! ivs->list)
6346 loop_ivs_free (loop);
6347 return;
6350 /* Determine how BIVS are initialized by looking through the pre-header
6351 extended basic block. */
6352 loop_bivs_init_find (loop);
6354 /* Look at each biv and see if we can say anything better about its
6355 initial value from any initializing insns set up above. */
6356 loop_bivs_check (loop);
6358 /* Search the loop for general induction variables. */
6359 loop_givs_find (loop);
6361 /* Try to calculate and save the number of loop iterations. This is
6362 set to zero if the actual number can not be calculated. This must
6363 be called after all giv's have been identified, since otherwise it may
6364 fail if the iteration variable is a giv. */
6365 loop_iterations (loop);
6367 #ifdef HAVE_prefetch
6368 if (flags & LOOP_PREFETCH)
6369 emit_prefetch_instructions (loop);
6370 #endif
6372 /* Now for each giv for which we still don't know whether or not it is
6373 replaceable, check to see if it is replaceable because its final value
6374 can be calculated. This must be done after loop_iterations is called,
6375 so that final_giv_value will work correctly. */
6376 loop_givs_check (loop);
6378 /* Try to prove that the loop counter variable (if any) is always
6379 nonnegative; if so, record that fact with a REG_NONNEG note
6380 so that "decrement and branch until zero" insn can be used. */
6381 check_dbra_loop (loop, insn_count);
6383 /* Create reg_map to hold substitutions for replaceable giv regs.
6384 Some givs might have been made from biv increments, so look at
6385 ivs->reg_iv_type for a suitable size. */
6386 reg_map_size = ivs->n_regs;
6387 reg_map = xcalloc (reg_map_size, sizeof (rtx));
6389 /* Examine each iv class for feasibility of strength reduction/induction
6390 variable elimination. */
6392 for (bl = ivs->list; bl; bl = bl->next)
6394 struct induction *v;
6395 int benefit;
6397 /* Test whether it will be possible to eliminate this biv
6398 provided all givs are reduced. */
6399 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
6401 /* This will be true at the end, if all givs which depend on this
6402 biv have been strength reduced.
6403 We can't (currently) eliminate the biv unless this is so. */
6404 bl->all_reduced = 1;
6406 /* Check each extension dependent giv in this class to see if its
6407 root biv is safe from wrapping in the interior mode. */
6408 check_ext_dependent_givs (loop, bl);
6410 /* Combine all giv's for this iv_class. */
6411 combine_givs (regs, bl);
6413 for (v = bl->giv; v; v = v->next_iv)
6415 struct induction *tv;
6417 if (v->ignore || v->same)
6418 continue;
6420 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
6422 /* If an insn is not to be strength reduced, then set its ignore
6423 flag, and clear bl->all_reduced. */
6425 /* A giv that depends on a reversed biv must be reduced if it is
6426 used after the loop exit, otherwise, it would have the wrong
6427 value after the loop exit. To make it simple, just reduce all
6428 such giv's whether or not we know they are used after the loop
6429 exit. */
6431 if (v->lifetime * threshold * benefit < insn_count
6432 && ! bl->reversed)
6434 if (loop_dump_stream)
6435 fprintf (loop_dump_stream,
6436 "giv of insn %d not worth while, %d vs %d.\n",
6437 INSN_UID (v->insn),
6438 v->lifetime * threshold * benefit, insn_count);
6439 v->ignore = 1;
6440 bl->all_reduced = 0;
6442 else
6444 /* Check that we can increment the reduced giv without a
6445 multiply insn. If not, reject it. */
6447 for (tv = bl->biv; tv; tv = tv->next_iv)
6448 if (tv->mult_val == const1_rtx
6449 && ! product_cheap_p (tv->add_val, v->mult_val))
6451 if (loop_dump_stream)
6452 fprintf (loop_dump_stream,
6453 "giv of insn %d: would need a multiply.\n",
6454 INSN_UID (v->insn));
6455 v->ignore = 1;
6456 bl->all_reduced = 0;
6457 break;
6462 /* Check for givs whose first use is their definition and whose
6463 last use is the definition of another giv. If so, the giv is likely
6464 dead and should not be used to derive another giv nor to
6465 eliminate a biv. */
6466 loop_givs_dead_check (loop, bl);
6468 /* Reduce each giv that we decided to reduce. */
6469 loop_givs_reduce (loop, bl);
6471 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
6472 as not reduced.
6474 For each giv register that can be reduced now: if replaceable,
6475 substitute reduced reg wherever the old giv occurs;
6476 else add new move insn "giv_reg = reduced_reg". */
6477 loop_givs_rescan (loop, bl, reg_map);
6479 /* All the givs based on the biv bl have been reduced if they
6480 merit it. */
6482 /* For each giv not marked as maybe dead that has been combined with a
6483 second giv, clear any "maybe dead" mark on that second giv.
6484 v->new_reg will either be or refer to the register of the giv it
6485 combined with.
6487 Doing this clearing avoids problems in biv elimination where
6488 a giv's new_reg is a complex value that can't be put in the
6489 insn but the giv combined with (with a reg as new_reg) is
6490 marked maybe_dead. Since the register will be used in either
6491 case, we'd prefer it be used from the simpler giv. */
6493 for (v = bl->giv; v; v = v->next_iv)
6494 if (! v->maybe_dead && v->same)
6495 v->same->maybe_dead = 0;
6497 /* Try to eliminate the biv, if it is a candidate.
6498 This won't work if ! bl->all_reduced,
6499 since the givs we planned to use might not have been reduced.
6501 We have to be careful that we didn't initially think we could
6502 eliminate this biv because of a giv that we now think may be
6503 dead and shouldn't be used as a biv replacement.
6505 Also, there is the possibility that we may have a giv that looks
6506 like it can be used to eliminate a biv, but the resulting insn
6507 isn't valid. This can happen, for example, on the 88k, where a
6508 JUMP_INSN can compare a register only with zero. Attempts to
6509 replace it with a compare with a constant will fail.
6511 Note that in cases where this call fails, we may have replaced some
6512 of the occurrences of the biv with a giv, but no harm was done in
6513 doing so in the rare cases where it can occur. */
6515 if (bl->all_reduced == 1 && bl->eliminable
6516 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
6518 /* ?? If we created a new test to bypass the loop entirely,
6519 or otherwise drop straight in, based on this test, then
6520 we might want to rewrite it also. This way some later
6521 pass has more hope of removing the initialization of this
6522 biv entirely. */
6524 /* If final_value != 0, then the biv may be used after loop end
6525 and we must emit an insn to set it just in case.
6527 Reversed bivs already have an insn after the loop setting their
6528 value, so we don't need another one. We can't calculate the
6529 proper final value for such a biv here anyways. */
6530 if (bl->final_value && ! bl->reversed)
6531 loop_insn_sink_or_swim (loop,
6532 gen_load_of_final_value (bl->biv->dest_reg,
6533 bl->final_value));
6535 if (loop_dump_stream)
6536 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
6537 bl->regno);
6539 /* See above note wrt final_value. But since we couldn't eliminate
6540 the biv, we must set the value after the loop instead of before. */
6541 else if (bl->final_value && ! bl->reversed)
6542 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
6543 bl->final_value));
6546 /* Go through all the instructions in the loop, making all the
6547 register substitutions scheduled in REG_MAP. */
6549 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
6550 if (INSN_P (p))
6552 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
6553 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
6554 INSN_CODE (p) = -1;
6557 if (loop_dump_stream)
6558 fprintf (loop_dump_stream, "\n");
6560 loop_ivs_free (loop);
6561 if (reg_map)
6562 free (reg_map);
6565 /* Record all basic induction variables calculated in the insn. */
6566 static rtx
6567 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
6568 int maybe_multiple)
6570 struct loop_ivs *ivs = LOOP_IVS (loop);
6571 rtx set;
6572 rtx dest_reg;
6573 rtx inc_val;
6574 rtx mult_val;
6575 rtx *location;
6577 if (NONJUMP_INSN_P (p)
6578 && (set = single_set (p))
6579 && REG_P (SET_DEST (set)))
6581 dest_reg = SET_DEST (set);
6582 if (REGNO (dest_reg) < max_reg_before_loop
6583 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
6584 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
6586 if (basic_induction_var (loop, SET_SRC (set),
6587 GET_MODE (SET_SRC (set)),
6588 dest_reg, p, &inc_val, &mult_val,
6589 &location))
6591 /* It is a possible basic induction variable.
6592 Create and initialize an induction structure for it. */
6594 struct induction *v = xmalloc (sizeof (struct induction));
6596 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
6597 not_every_iteration, maybe_multiple);
6598 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
6600 else if (REGNO (dest_reg) < ivs->n_regs)
6601 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
6604 return p;
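/* Illustrative sketch with hypothetical rtl: an insn such as
   (set (reg:SI 64) (plus:SI (reg:SI 64) (const_int 4)))
   passes the tests above when reg 64 is a pseudo that existed before
   the loop and has not already been disqualified; record_biv is then
   called with inc_val = (const_int 4) and mult_val = const1_rtx. */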
6607 /* Record all givs calculated in the insn.
6608 A register is a giv if: it is only set once, it is a function of a
6609 biv and a constant (or invariant), and it is not a biv. */
6610 static rtx
6611 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
6612 int maybe_multiple)
6614 struct loop_regs *regs = LOOP_REGS (loop);
6616 rtx set;
6617 /* Look for a general induction variable in a register. */
6618 if (NONJUMP_INSN_P (p)
6619 && (set = single_set (p))
6620 && REG_P (SET_DEST (set))
6621 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
6623 rtx src_reg;
6624 rtx dest_reg;
6625 rtx add_val;
6626 rtx mult_val;
6627 rtx ext_val;
6628 int benefit;
6629 rtx regnote = 0;
6630 rtx last_consec_insn;
6632 dest_reg = SET_DEST (set);
6633 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
6634 return p;
6636 if (/* SET_SRC is a giv. */
6637 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
6638 &mult_val, &ext_val, 0, &benefit, VOIDmode)
6639 /* Equivalent expression is a giv. */
6640 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
6641 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
6642 &add_val, &mult_val, &ext_val, 0,
6643 &benefit, VOIDmode)))
6644 /* Don't try to handle any regs made by loop optimization.
6645 We have nothing on them in regno_first_uid, etc. */
6646 && REGNO (dest_reg) < max_reg_before_loop
6647 /* Don't recognize a BASIC_INDUCT_VAR here. */
6648 && dest_reg != src_reg
6649 /* This must be the only place where the register is set. */
6650 && (regs->array[REGNO (dest_reg)].n_times_set == 1
6651 /* or all sets must be consecutive and make a giv. */
6652 || (benefit = consec_sets_giv (loop, benefit, p,
6653 src_reg, dest_reg,
6654 &add_val, &mult_val, &ext_val,
6655 &last_consec_insn))))
6657 struct induction *v = xmalloc (sizeof (struct induction));
6659 /* If this is a library call, increase benefit. */
6660 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6661 benefit += libcall_benefit (p);
6663 /* Skip the consecutive insns, if there are any. */
6664 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
6665 p = last_consec_insn;
6667 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
6668 ext_val, benefit, DEST_REG, not_every_iteration,
6669 maybe_multiple, (rtx*) 0);
6674 /* Look for givs which are memory addresses. */
6675 if (NONJUMP_INSN_P (p))
6676 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
6677 maybe_multiple);
6679 /* Update the status of whether giv can derive other givs. This can
6680 change when we pass a label or an insn that updates a biv. */
6681 if (INSN_P (p) || LABEL_P (p))
6682 update_giv_derive (loop, p);
6683 return p;
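/* Illustrative sketch: with biv I, an insn computing J = I * 3 + 2
   (J a pseudo set only here and not itself a biv) is recognized by
   general_induction_var with src_reg = I, mult_val = (const_int 3)
   and add_val = (const_int 2), and is recorded as a DEST_REG giv.
   A memory access whose address is such a linear function of I is
   instead found by find_mem_givs below and recorded as DEST_ADDR. */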
6686 /* Return 1 if X is a valid source for an initial value (or as value being
6687 compared against in an initial test).
6689 X must be either a register or constant and must not be clobbered between
6690 the current insn and the start of the loop.
6692 INSN is the insn containing X. */
6694 static int
6695 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
6697 if (CONSTANT_P (x))
6698 return 1;
6700 /* Only consider pseudos we know about, initialized in insns whose
6701 luids we know. */
6702 if (!REG_P (x)
6703 || REGNO (x) >= max_reg_before_loop)
6704 return 0;
6706 /* Don't use a call-clobbered register across a call which clobbers it. On
6707 some machines, don't use any hard registers at all. */
6708 if (REGNO (x) < FIRST_PSEUDO_REGISTER
6709 && (SMALL_REGISTER_CLASSES
6710 || (call_used_regs[REGNO (x)] && call_seen)))
6711 return 0;
6713 /* Don't use registers that have been clobbered before the start of the
6714 loop. */
6715 if (reg_set_between_p (x, insn, loop_start))
6716 return 0;
6718 return 1;
6721 /* Scan X for memory refs and check each memory address
6722 as a possible giv. INSN is the insn whose pattern X comes from.
6723 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
6724 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
6725 more than once in each loop iteration. */
6727 static void
6728 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
6729 int not_every_iteration, int maybe_multiple)
6731 int i, j;
6732 enum rtx_code code;
6733 const char *fmt;
6735 if (x == 0)
6736 return;
6738 code = GET_CODE (x);
6739 switch (code)
6741 case REG:
6742 case CONST_INT:
6743 case CONST:
6744 case CONST_DOUBLE:
6745 case SYMBOL_REF:
6746 case LABEL_REF:
6747 case PC:
6748 case CC0:
6749 case ADDR_VEC:
6750 case ADDR_DIFF_VEC:
6751 case USE:
6752 case CLOBBER:
6753 return;
6755 case MEM:
6757 rtx src_reg;
6758 rtx add_val;
6759 rtx mult_val;
6760 rtx ext_val;
6761 int benefit;
6763 /* This code used to disable creating GIVs with mult_val == 1 and
6764 add_val == 0. However, this leads to lost optimizations when
6765 it comes time to combine a set of related DEST_ADDR GIVs, since
6766 this one would not be seen. */
6768 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
6769 &mult_val, &ext_val, 1, &benefit,
6770 GET_MODE (x)))
6772 /* Found one; record it. */
6773 struct induction *v = xmalloc (sizeof (struct induction));
6775 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
6776 add_val, ext_val, benefit, DEST_ADDR,
6777 not_every_iteration, maybe_multiple, &XEXP (x, 0));
6779 v->mem = x;
6782 return;
6784 default:
6785 break;
6788 /* Recursively scan the subexpressions for other mem refs. */
6790 fmt = GET_RTX_FORMAT (code);
6791 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6792 if (fmt[i] == 'e')
6793 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
6794 maybe_multiple);
6795 else if (fmt[i] == 'E')
6796 for (j = 0; j < XVECLEN (x, i); j++)
6797 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
6798 maybe_multiple);
6801 /* Fill in the data about one biv update.
6802 V is the `struct induction' in which we record the biv. (It is
6803 allocated by the caller, with xmalloc.)
6804 INSN is the insn that sets it.
6805 DEST_REG is the biv's reg.
6807 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
6808 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
6809 being set to INC_VAL.
6811 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
6812 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
6813 can be executed more than once per iteration. If MAYBE_MULTIPLE
6814 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
6815 executed exactly once per iteration. */
6817 static void
6818 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
6819 rtx inc_val, rtx mult_val, rtx *location,
6820 int not_every_iteration, int maybe_multiple)
6822 struct loop_ivs *ivs = LOOP_IVS (loop);
6823 struct iv_class *bl;
6825 v->insn = insn;
6826 v->src_reg = dest_reg;
6827 v->dest_reg = dest_reg;
6828 v->mult_val = mult_val;
6829 v->add_val = inc_val;
6830 v->ext_dependent = NULL_RTX;
6831 v->location = location;
6832 v->mode = GET_MODE (dest_reg);
6833 v->always_computable = ! not_every_iteration;
6834 v->always_executed = ! not_every_iteration;
6835 v->maybe_multiple = maybe_multiple;
6836 v->same = 0;
6838 /* Add this to the reg's iv_class, creating a class
6839 if this is the first incrementation of the reg. */
6841 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
6842 if (bl == 0)
6844 /* Create and initialize new iv_class. */
6846 bl = xmalloc (sizeof (struct iv_class));
6848 bl->regno = REGNO (dest_reg);
6849 bl->biv = 0;
6850 bl->giv = 0;
6851 bl->biv_count = 0;
6852 bl->giv_count = 0;
6854 /* Set initial value to the reg itself. */
6855 bl->initial_value = dest_reg;
6856 bl->final_value = 0;
6857 /* We haven't seen the initializing insn yet. */
6858 bl->init_insn = 0;
6859 bl->init_set = 0;
6860 bl->initial_test = 0;
6861 bl->incremented = 0;
6862 bl->eliminable = 0;
6863 bl->nonneg = 0;
6864 bl->reversed = 0;
6865 bl->total_benefit = 0;
6867 /* Add this class to ivs->list. */
6868 bl->next = ivs->list;
6869 ivs->list = bl;
6871 /* Put it in the array of biv register classes. */
6872 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
6874 else
6876 /* Check if location is the same as a previous one. */
6877 struct induction *induction;
6878 for (induction = bl->biv; induction; induction = induction->next_iv)
6879 if (location == induction->location)
6881 v->same = induction;
6882 break;
6886 /* Update IV_CLASS entry for this biv. */
6887 v->next_iv = bl->biv;
6888 bl->biv = v;
6889 bl->biv_count++;
6890 if (mult_val == const1_rtx)
6891 bl->incremented = 1;
6893 if (loop_dump_stream)
6894 loop_biv_dump (v, loop_dump_stream, 0);
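/* Two hypothetical updates, following the conventions described
   above: "i = i + 4" is recorded with mult_val = const1_rtx and
   inc_val = (const_int 4) (an increment), while "i = 7" is recorded
   with mult_val = const0_rtx and inc_val = (const_int 7) (the biv is
   set outright). Only the former marks bl->incremented. */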
6897 /* Fill in the data about one giv.
6898 V is the `struct induction' in which we record the giv. (It is
6899 allocated by the caller, with xmalloc.)
6900 INSN is the insn that sets it.
6901 BENEFIT estimates the savings from deleting this insn.
6902 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
6903 into a register or is used as a memory address.
6905 SRC_REG is the biv reg which the giv is computed from.
6906 DEST_REG is the giv's reg (if the giv is stored in a reg).
6907 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
6908 LOCATION points to the place where this giv's value appears in INSN. */
6910 static void
6911 record_giv (const struct loop *loop, struct induction *v, rtx insn,
6912 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
6913 rtx ext_val, int benefit, enum g_types type,
6914 int not_every_iteration, int maybe_multiple, rtx *location)
6916 struct loop_ivs *ivs = LOOP_IVS (loop);
6917 struct induction *b;
6918 struct iv_class *bl;
6919 rtx set = single_set (insn);
6920 rtx temp;
6922 /* Attempt to prove constantness of the values. Don't let simplify_rtx
6923 undo the MULT canonicalization that we performed earlier. */
6924 temp = simplify_rtx (add_val);
6925 if (temp
6926 && ! (GET_CODE (add_val) == MULT
6927 && GET_CODE (temp) == ASHIFT))
6928 add_val = temp;
6930 v->insn = insn;
6931 v->src_reg = src_reg;
6932 v->giv_type = type;
6933 v->dest_reg = dest_reg;
6934 v->mult_val = mult_val;
6935 v->add_val = add_val;
6936 v->ext_dependent = ext_val;
6937 v->benefit = benefit;
6938 v->location = location;
6939 v->cant_derive = 0;
6940 v->combined_with = 0;
6941 v->maybe_multiple = maybe_multiple;
6942 v->maybe_dead = 0;
6943 v->derive_adjustment = 0;
6944 v->same = 0;
6945 v->ignore = 0;
6946 v->new_reg = 0;
6947 v->final_value = 0;
6948 v->same_insn = 0;
6949 v->auto_inc_opt = 0;
6950 v->shared = 0;
6952 /* The v->always_computable field is used in update_giv_derive, to
6953 determine whether a giv can be used to derive another giv. For a
6954 DEST_REG giv, INSN computes a new value for the giv, so its value
6955 isn't computable if INSN isn't executed every iteration.
6956 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
6957 it does not compute a new value. Hence the value is always computable
6958 regardless of whether INSN is executed each iteration. */
6960 if (type == DEST_ADDR)
6961 v->always_computable = 1;
6962 else
6963 v->always_computable = ! not_every_iteration;
6965 v->always_executed = ! not_every_iteration;
6967 if (type == DEST_ADDR)
6969 v->mode = GET_MODE (*location);
6970 v->lifetime = 1;
6972 else /* type == DEST_REG */
6974 v->mode = GET_MODE (SET_DEST (set));
6976 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
6978 /* If the lifetime is zero, it means that this register is
6979 really a dead store. So mark this as a giv that can be
6980 ignored. This will not prevent the biv from being eliminated. */
6981 if (v->lifetime == 0)
6982 v->ignore = 1;
6984 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6985 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6988 /* Add the giv to the class of givs computed from one biv. */
6990 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
6991 gcc_assert (bl);
6992 v->next_iv = bl->giv;
6993 bl->giv = v;
6995 /* Don't count DEST_ADDR. This is supposed to count the number of
6996 insns that calculate givs. */
6997 if (type == DEST_REG)
6998 bl->giv_count++;
6999 bl->total_benefit += benefit;
7001 if (type == DEST_ADDR)
7003 v->replaceable = 1;
7004 v->not_replaceable = 0;
7006 else
7008 /* The giv can be replaced outright by the reduced register only if all
7009 of the following conditions are true:
7010 - the insn that sets the giv is always executed on any iteration
7011 on which the giv is used at all
7012 (there are two ways to deduce this:
7013 either the insn is executed on every iteration,
7014 or all uses follow that insn in the same basic block),
7015 - the giv is not used outside the loop
7016 - no assignments to the biv occur during the giv's lifetime. */
7018 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
7019 /* Previous line always fails if INSN was moved by loop opt. */
7020 && REGNO_LAST_LUID (REGNO (dest_reg))
7021 < INSN_LUID (loop->end)
7022 && (! not_every_iteration
7023 || last_use_this_basic_block (dest_reg, insn)))
7025 /* Now check that there are no assignments to the biv within the
7026 giv's lifetime. This requires two separate checks. */
7028 /* Check each biv update, and fail if any are between the first
7029 and last use of the giv.
7031 If this loop contains an inner loop that was unrolled, then
7032 the insn modifying the biv may have been emitted by the loop
7033 unrolling code, and hence does not have a valid luid. Just
7034 mark the giv as not replaceable in this case. Such a biv is not
7035 very useful anyway, because it is used in two different loops.
7036 It is very unlikely that we would be able to optimize the giv
7037 using this biv anyways. */
7039 v->replaceable = 1;
7040 v->not_replaceable = 0;
7041 for (b = bl->biv; b; b = b->next_iv)
7043 if (INSN_UID (b->insn) >= max_uid_for_loop
7044 || ((INSN_LUID (b->insn)
7045 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
7046 && (INSN_LUID (b->insn)
7047 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
7049 v->replaceable = 0;
7050 v->not_replaceable = 1;
7051 break;
7055 /* If there are any backwards branches that go from after the
7056 biv update to before it, then this giv is not replaceable. */
7057 if (v->replaceable)
7058 for (b = bl->biv; b; b = b->next_iv)
7059 if (back_branch_in_range_p (loop, b->insn))
7061 v->replaceable = 0;
7062 v->not_replaceable = 1;
7063 break;
7066 else
7068 /* May still be replaceable, we don't have enough info here to
7069 decide. */
7070 v->replaceable = 0;
7071 v->not_replaceable = 0;
7075 /* Record whether the add_val contains a const_int, for later use by
7076 combine_givs. */
7078 rtx tem = add_val;
7080 v->no_const_addval = 1;
7081 if (tem == const0_rtx)
7083 else if (CONSTANT_P (add_val))
7084 v->no_const_addval = 0;
7085 if (GET_CODE (tem) == PLUS)
7087 while (1)
7089 if (GET_CODE (XEXP (tem, 0)) == PLUS)
7090 tem = XEXP (tem, 0);
7091 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
7092 tem = XEXP (tem, 1);
7093 else
7094 break;
7096 if (CONSTANT_P (XEXP (tem, 1)))
7097 v->no_const_addval = 0;
7101 if (loop_dump_stream)
7102 loop_giv_dump (v, loop_dump_stream, 0);
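/* Replaceability in practice (an illustrative case): if J = I * 3 + 2
   is the first insn to mention J, every use of J is inside the loop,
   and no update of the biv I falls between J's first and last use,
   then J is marked replaceable and its uses can later be rewritten to
   use the reduced register directly. If I is incremented between two
   uses of J, a move "J = reduced reg" must be kept instead. */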
7105 /* Try to calculate the final value of the giv, the value it will have at
7106 the end of the loop. If we can do it, return that value. */
7108 static rtx
7109 final_giv_value (const struct loop *loop, struct induction *v)
7111 struct loop_ivs *ivs = LOOP_IVS (loop);
7112 struct iv_class *bl;
7113 rtx insn;
7114 rtx increment, tem;
7115 rtx seq;
7116 rtx loop_end = loop->end;
7117 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
7119 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
7121 /* The final value for givs which depend on reversed bivs must be calculated
7122 differently than for ordinary givs. In this case, there is already an
7123 insn after the loop which sets this giv's final value (if necessary),
7124 and there are no other loop exits, so we can return any value. */
7125 if (bl->reversed)
7127 if (loop_dump_stream)
7128 fprintf (loop_dump_stream,
7129 "Final giv value for %d, depends on reversed biv\n",
7130 REGNO (v->dest_reg));
7131 return const0_rtx;
7134 /* Try to calculate the final value as a function of the biv it depends
7135 upon. The only exit from the loop must be the fall through at the bottom
7136 and the insn that sets the giv must be executed on every iteration
7137 (otherwise the giv may not have its final value when the loop exits). */
7139 /* ??? Can calculate the final giv value by subtracting off the
7140 extra biv increments times the giv's mult_val. The loop must have
7141 only one exit for this to work, but the number of iterations does
7142 not need to be known. */
7144 if (n_iterations != 0
7145 && ! loop->exit_count
7146 && v->always_executed)
7148 /* ?? It is tempting to use the biv's value here since these insns will
7149 be put after the loop, and hence the biv will have its final value
7150 then. However, this fails if the biv is subsequently eliminated.
7151 Perhaps determine whether biv's are eliminable before trying to
7152 determine whether giv's are replaceable so that we can use the
7153 biv value here if it is not eliminable. */
7155 /* We are emitting code after the end of the loop, so we must make
7156 sure that bl->initial_value is still valid then. It will still
7157 be valid if it is invariant. */
7159 increment = biv_total_increment (bl);
7161 if (increment && loop_invariant_p (loop, increment)
7162 && loop_invariant_p (loop, bl->initial_value))
7164 /* Can calculate the loop exit value of its biv as
7165 (n_iterations * increment) + initial_value */
7167 /* The loop exit value of the giv is then
7168 (final_biv_value - extra increments) * mult_val + add_val.
7169 The extra increments are any increments to the biv which
7170 occur in the loop after the giv's value is calculated.
7171 We must search from the insn that sets the giv to the end
7172 of the loop to calculate this value. */
7174 /* Put the final biv value in tem. */
7175 tem = gen_reg_rtx (v->mode);
7176 record_base_value (REGNO (tem), bl->biv->add_val, 0);
7177 loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment),
7178 GEN_INT (n_iterations),
7179 extend_value_for_giv (v, bl->initial_value),
7180 tem);
7182 /* Subtract off extra increments as we find them. */
7183 for (insn = NEXT_INSN (v->insn); insn != loop_end;
7184 insn = NEXT_INSN (insn))
7186 struct induction *biv;
7188 for (biv = bl->biv; biv; biv = biv->next_iv)
7189 if (biv->insn == insn)
7191 start_sequence ();
7192 tem = expand_simple_binop (GET_MODE (tem), MINUS, tem,
7193 biv->add_val, NULL_RTX, 0,
7194 OPTAB_LIB_WIDEN);
7195 seq = get_insns ();
7196 end_sequence ();
7197 loop_insn_sink (loop, seq);
7201 /* Now calculate the giv's final value. */
7202 loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem);
7204 if (loop_dump_stream)
7205 fprintf (loop_dump_stream,
7206 "Final giv value for %d, calc from biv's value.\n",
7207 REGNO (v->dest_reg));
7209 return tem;
7213 /* Replaceable giv's should never reach here. */
7214 gcc_assert (!v->replaceable);
7216 /* Check to see if the biv is dead at all loop exits. */
7217 if (reg_dead_after_loop (loop, v->dest_reg))
7219 if (loop_dump_stream)
7220 fprintf (loop_dump_stream,
7221 "Final giv value for %d, giv dead after loop exit.\n",
7222 REGNO (v->dest_reg));
7224 return const0_rtx;
7227 return 0;
7230 /* All this does is determine whether a giv can be made replaceable because
7231 its final value can be calculated. This code can not be part of record_giv
7232 above, because final_giv_value requires that the number of loop iterations
7233 be known, and that can not be accurately calculated until after all givs
7234 have been identified. */
7236 static void
7237 check_final_value (const struct loop *loop, struct induction *v)
7239 rtx final_value = 0;
7241 /* DEST_ADDR givs will never reach here, because they are always marked
7242 replaceable above in record_giv. */
7244 /* The giv can be replaced outright by the reduced register only if all
7245 of the following conditions are true:
7246 - the insn that sets the giv is always executed on any iteration
7247 on which the giv is used at all
7248 (there are two ways to deduce this:
7249 either the insn is executed on every iteration,
7250 or all uses follow that insn in the same basic block),
7251 - its final value can be calculated (this condition is different
7252 from the one above in record_giv)
7253 - it's not used before it's set
7254 - no assignments to the biv occur during the giv's lifetime. */
7256 #if 0
7257 /* This is only called now when replaceable is known to be false. */
7258 /* Clear replaceable, so that it won't confuse final_giv_value. */
7259 v->replaceable = 0;
7260 #endif
7262 if ((final_value = final_giv_value (loop, v))
7263 && (v->always_executed
7264 || last_use_this_basic_block (v->dest_reg, v->insn)))
7266 int biv_increment_seen = 0, before_giv_insn = 0;
7267 rtx p = v->insn;
7268 rtx last_giv_use;
7270 v->replaceable = 1;
7271 v->not_replaceable = 0;
7273 /* When trying to determine whether or not a biv increment occurs
7274 during the lifetime of the giv, we can ignore uses of the variable
7275 outside the loop because final_value is true. Hence we can not
7276 use regno_last_uid and regno_first_uid as above in record_giv. */
7278 /* Search the loop to determine whether any assignments to the
7279 biv occur during the giv's lifetime. Start with the insn
7280 that sets the giv, and search around the loop until we come
7281 back to that insn again.
7283 Also fail if there is a jump within the giv's lifetime that jumps
7284 to somewhere outside the lifetime but still within the loop. This
7285 catches spaghetti code where the execution order is not linear, and
7286 hence the above test fails. Here we assume that the giv lifetime
7287 does not extend from one iteration of the loop to the next, so as
7288 to make the test easier. Since the lifetime isn't known yet,
7289 this requires two loops. See also record_giv above. */
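/* An illustrative shape of the problem (not from the original
   comments): a jump such as

       (jump_insn ... (label_ref L))

   appearing between V->insn and the last use of the giv, where L lies
   inside the loop but outside that span, means the linear scan below
   may have assumed an execution order the branch invalidates, so the
   giv is conservatively marked not replaceable.  */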
7291 last_giv_use = v->insn;
7293 while (1)
7295 p = NEXT_INSN (p);
7296 if (p == loop->end)
7298 before_giv_insn = 1;
7299 p = NEXT_INSN (loop->start);
7301 if (p == v->insn)
7302 break;
7304 if (INSN_P (p))
7306 /* It is possible for the BIV increment to use the GIV if we
7307 have a cycle. Thus we must be sure to check each insn for
7308 both BIV and GIV uses, and we must check for BIV uses
7309 first. */
7311 if (! biv_increment_seen
7312 && reg_set_p (v->src_reg, PATTERN (p)))
7313 biv_increment_seen = 1;
7315 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
7317 if (biv_increment_seen || before_giv_insn)
7319 v->replaceable = 0;
7320 v->not_replaceable = 1;
7321 break;
7323 last_giv_use = p;
7328 /* Now that the lifetime of the giv is known, check for branches
7329 from within the lifetime to outside the lifetime if it is still
7330 replaceable. */
7332 if (v->replaceable)
7334 p = v->insn;
7335 while (1)
7337 p = NEXT_INSN (p);
7338 if (p == loop->end)
7339 p = NEXT_INSN (loop->start);
7340 if (p == last_giv_use)
7341 break;
7343 if (JUMP_P (p) && JUMP_LABEL (p)
7344 && LABEL_NAME (JUMP_LABEL (p))
7345 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
7346 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
7347 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
7348 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
7350 v->replaceable = 0;
7351 v->not_replaceable = 1;
7353 if (loop_dump_stream)
7354 fprintf (loop_dump_stream,
7355 "Found branch outside giv lifetime.\n");
7357 break;
7362 /* If it is replaceable, then save the final value. */
7363 if (v->replaceable)
7364 v->final_value = final_value;
7367 if (loop_dump_stream && v->replaceable)
7368 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
7369 INSN_UID (v->insn), REGNO (v->dest_reg));
7372 /* Update the status of whether a giv can derive other givs.
7374 We need to do something special if there is or may be an update to the biv
7375 between the time the giv is defined and the time it is used to derive
7376 another giv.
7378 In addition, a giv that is only conditionally set is not allowed to
7379 derive another giv once a label has been passed.
7381 The cases we look at are when a label or an update to a biv is passed. */
7383 static void
7384 update_giv_derive (const struct loop *loop, rtx p)
7386 struct loop_ivs *ivs = LOOP_IVS (loop);
7387 struct iv_class *bl;
7388 struct induction *biv, *giv;
7389 rtx tem;
7390 int dummy;
7392 /* Search all IV classes, then all bivs, and finally all givs.
7394 There are three cases we are concerned with. First we have the situation
7395 of a giv that is only updated conditionally. In that case, it may not
7396 derive any givs after a label is passed.
7398 The second case is when a biv update occurs, or may occur, after the
7399 definition of a giv. For certain biv updates (see below) that are
7400 known to occur between the giv definition and use, we can adjust the
7401 giv definition. For others, or when the biv update is conditional,
7402 we must prevent the giv from deriving any other givs. There are two
7403 sub-cases within this case.
7405 If this is a label, we are concerned with any biv update that is done
7406 conditionally, since it may be done after the giv is defined followed by
7407 a branch here (actually, we need to pass both a jump and a label, but
7408 this extra tracking doesn't seem worth it).
7410 If this is a jump, we are concerned about any biv update that may be
7411 executed multiple times. We are actually only concerned about
7412 backward jumps, but it is probably not worth performing the test
7413 on the jump again here.
7415 If this is a biv update, we must adjust the giv status to show that a
7416 subsequent biv update was performed. If this adjustment cannot be done,
7417 the giv cannot derive further givs. */
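/* Example (illustrative; not in the original comments): with a biv
   increment B = B + 2 (add_val 2, mult_val 1) and a giv G = B * 3
   defined before that increment, a giv derived from G afterwards must
   compensate by add_val * mult_val == 2 * 3 == 6; the code below
   accumulates that product in derive_adjustment, and marks the giv
   cant_derive when no such product can be formed.  */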
7419 for (bl = ivs->list; bl; bl = bl->next)
7420 for (biv = bl->biv; biv; biv = biv->next_iv)
7421 if (LABEL_P (p) || JUMP_P (p)
7422 || biv->insn == p)
7424 /* Skip if location is the same as a previous one. */
7425 if (biv->same)
7426 continue;
7428 for (giv = bl->giv; giv; giv = giv->next_iv)
7430 /* If cant_derive is already true, there is no point in
7431 checking all of these conditions again. */
7432 if (giv->cant_derive)
7433 continue;
7435 /* If this giv is conditionally set and we have passed a label,
7436 it cannot derive anything. */
7437 if (LABEL_P (p) && ! giv->always_computable)
7438 giv->cant_derive = 1;
7440 /* Skip givs that have mult_val == 0, since
7441 they are really invariants. Also skip those that are
7442 replaceable, since we know their lifetime doesn't contain
7443 any biv update. */
7444 else if (giv->mult_val == const0_rtx || giv->replaceable)
7445 continue;
7447 /* The only way we can allow this giv to derive another
7448 is if this is a biv increment and we can form the product
7449 of biv->add_val and giv->mult_val. In this case, we will
7450 be able to compute a compensation. */
7451 else if (biv->insn == p)
7453 rtx ext_val_dummy;
7455 tem = 0;
7456 if (biv->mult_val == const1_rtx)
7457 tem = simplify_giv_expr (loop,
7458 gen_rtx_MULT (giv->mode,
7459 biv->add_val,
7460 giv->mult_val),
7461 &ext_val_dummy, &dummy);
7463 if (tem && giv->derive_adjustment)
7464 tem = simplify_giv_expr
7465 (loop,
7466 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
7467 &ext_val_dummy, &dummy);
7469 if (tem)
7470 giv->derive_adjustment = tem;
7471 else
7472 giv->cant_derive = 1;
7474 else if ((LABEL_P (p) && ! biv->always_computable)
7475 || (JUMP_P (p) && biv->maybe_multiple))
7476 giv->cant_derive = 1;
7481 /* Check whether an insn is an increment legitimate for a basic induction var.
7482 X is the source of insn P, or a part of it.
7483 MODE is the mode in which X should be interpreted.
7485 DEST_REG is the putative biv, also the destination of the insn.
7486 We accept patterns of these forms:
7487 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
7488 REG = INVARIANT + REG
7490 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
7491 store the additive term into *INC_VAL, and store the place where
7492 we found the additive term into *LOCATION.
7494 If X is an assignment of an invariant into DEST_REG, we set
7495 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
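For example (an illustrative case, not part of the original comment):
given the insn (set (reg 100) (plus (reg 100) (const_int 4))) we
return 1 with *INC_VAL == (const_int 4) and *MULT_VAL == const1_rtx,
while (set (reg 100) (const_int 0)) in the innermost loop yields
*INC_VAL == (const_int 0) and *MULT_VAL == const0_rtx.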
7497 We also want to detect a BIV when it corresponds to a variable
7498 whose mode was promoted. In that case, an increment
7499 of the variable may be a PLUS that adds a SUBREG of that variable to
7500 an invariant and then sign- or zero-extends the result of the PLUS
7501 into the variable.
7503 Most GIVs in such cases will be in the promoted mode, since that is
7504 probably the natural computation mode (and almost certainly the mode
7505 used for addresses) on the machine. So we view the pseudo-reg containing
7506 the variable as the BIV, as if it were simply incremented.
7508 Note that treating the entire pseudo as a BIV will result in making
7509 simple increments to any GIVs based on it. However, if the variable
7510 overflows in its declared mode but not its promoted mode, the result will
7511 be incorrect. This is acceptable if the variable is signed, since
7512 overflows in such cases are undefined, but not if it is unsigned, since
7513 those overflows are defined. So we only check for SIGN_EXTEND and
7514 not ZERO_EXTEND.
7516 If we cannot find a biv, we return 0. */
7518 static int
7519 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
7520 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
7521 rtx **location)
7523 enum rtx_code code;
7524 rtx *argp, arg;
7525 rtx insn, set = 0, last, inc;
7527 code = GET_CODE (x);
7528 *location = NULL;
7529 switch (code)
7531 case PLUS:
7532 if (rtx_equal_p (XEXP (x, 0), dest_reg)
7533 || (GET_CODE (XEXP (x, 0)) == SUBREG
7534 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
7535 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
7537 argp = &XEXP (x, 1);
7539 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
7540 || (GET_CODE (XEXP (x, 1)) == SUBREG
7541 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
7542 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
7544 argp = &XEXP (x, 0);
7546 else
7547 return 0;
7549 arg = *argp;
7550 if (loop_invariant_p (loop, arg) != 1)
7551 return 0;
7553 /* convert_modes can emit new instructions, e.g. when arg is a loop
7554 invariant MEM and dest_reg has a different mode.
7555 These instructions would be emitted after the end of the function
7556 and then *inc_val would be an uninitialized pseudo.
7557 Detect this and bail in this case.
7558 Other alternatives to solve this can be introducing a convert_modes
7559 variant which is allowed to fail but not allowed to emit new
7560 instructions, emit these instructions before loop start and let
7561 it be garbage collected if *inc_val is never used or saving the
7562 *inc_val initialization sequence generated here and when *inc_val
7563 is going to be actually used, emit it at some suitable place. */
7564 last = get_last_insn ();
7565 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
7566 if (get_last_insn () != last)
7568 delete_insns_since (last);
7569 return 0;
7572 *inc_val = inc;
7573 *mult_val = const1_rtx;
7574 *location = argp;
7575 return 1;
7577 case SUBREG:
7578 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
7579 handle addition of promoted variables.
7580 ??? The comment at the start of this function is wrong: promoted
7581 variable increments don't look like it says they do. */
7582 return basic_induction_var (loop, SUBREG_REG (x),
7583 GET_MODE (SUBREG_REG (x)),
7584 dest_reg, p, inc_val, mult_val, location);
7586 case REG:
7587 /* If this register is assigned in a previous insn, look at its
7588 source, but don't go outside the loop or past a label. */
7590 /* If this sets a register to itself, we would repeat any previous
7591 biv increment if we applied this strategy blindly. */
7592 if (rtx_equal_p (dest_reg, x))
7593 return 0;
7595 insn = p;
7596 while (1)
7598 rtx dest;
7599 do
7601 insn = PREV_INSN (insn);
7603 while (insn && NOTE_P (insn)
7604 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7606 if (!insn)
7607 break;
7608 set = single_set (insn);
7609 if (set == 0)
7610 break;
7611 dest = SET_DEST (set);
7612 if (dest == x
7613 || (GET_CODE (dest) == SUBREG
7614 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
7615 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
7616 && SUBREG_REG (dest) == x))
7617 return basic_induction_var (loop, SET_SRC (set),
7618 (GET_MODE (SET_SRC (set)) == VOIDmode
7619 ? GET_MODE (x)
7620 : GET_MODE (SET_SRC (set))),
7621 dest_reg, insn,
7622 inc_val, mult_val, location);
7624 while (GET_CODE (dest) == SUBREG
7625 || GET_CODE (dest) == ZERO_EXTRACT
7626 || GET_CODE (dest) == STRICT_LOW_PART)
7627 dest = XEXP (dest, 0);
7628 if (dest == x)
7629 break;
7631 /* Fall through. */
7633 /* Can accept constant setting of biv only when inside innermost loop.
7634 Otherwise, a biv of an inner loop may be incorrectly recognized
7635 as a biv of the outer loop,
7636 causing code to be moved INTO the inner loop. */
7637 case MEM:
7638 if (loop_invariant_p (loop, x) != 1)
7639 return 0;
7640 case CONST_INT:
7641 case SYMBOL_REF:
7642 case CONST:
7643 /* convert_modes aborts if we try to convert to or from CCmode, so just
7644 exclude that case. It is very unlikely that a condition code value
7645 would be a useful iterator anyway. convert_modes aborts if we try to
7646 convert a float mode to non-float or vice versa too. */
7647 if (loop->level == 1
7648 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
7649 && GET_MODE_CLASS (mode) != MODE_CC)
7651 /* Possible bug here? Perhaps we don't know the mode of X. */
7652 last = get_last_insn ();
7653 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
7654 if (get_last_insn () != last)
7656 delete_insns_since (last);
7657 return 0;
7660 *inc_val = inc;
7661 *mult_val = const0_rtx;
7662 return 1;
7664 else
7665 return 0;
7667 case SIGN_EXTEND:
7668 /* Ignore this BIV if signed arithmetic overflow is defined. */
7669 if (flag_wrapv)
7670 return 0;
7671 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
7672 dest_reg, p, inc_val, mult_val, location);
7674 case ASHIFTRT:
7675 /* Similar, since this can be a sign extension. */
7676 for (insn = PREV_INSN (p);
7677 (insn && NOTE_P (insn)
7678 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7679 insn = PREV_INSN (insn))
7680 ;
7682 if (insn)
7683 set = single_set (insn);
7685 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
7686 && set && SET_DEST (set) == XEXP (x, 0)
7687 && GET_CODE (XEXP (x, 1)) == CONST_INT
7688 && INTVAL (XEXP (x, 1)) >= 0
7689 && GET_CODE (SET_SRC (set)) == ASHIFT
7690 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
7691 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
7692 GET_MODE (XEXP (x, 0)),
7693 dest_reg, insn, inc_val, mult_val,
7694 location);
7695 return 0;
7697 default:
7698 return 0;
7702 /* A general induction variable (giv) is any quantity that is a linear
7703 function of a basic induction variable,
7704 i.e. giv = biv * mult_val + add_val.
7705 The coefficients can be any loop invariant quantity.
7706 A giv need not be computed directly from the biv;
7707 it can be computed by way of other givs. */
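/* For instance (an illustrative example, not in the original
   comments): in

       for (i = 0; i < n; i++)
         a[i] = 0;

   the counter I is a biv, and on a machine with 4-byte array elements
   the address of a[i], A + I * 4, is a giv with mult_val == 4 and
   add_val == A.  */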
7709 /* Determine whether X computes a giv.
7710 If it does, return a nonzero value
7711 which is the benefit from eliminating the computation of X;
7712 set *SRC_REG to the register of the biv that it is computed from;
7713 set *ADD_VAL and *MULT_VAL to the coefficients,
7714 such that the value of X is biv * mult + add; */
7716 static int
7717 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
7718 rtx *add_val, rtx *mult_val, rtx *ext_val,
7719 int is_addr, int *pbenefit,
7720 enum machine_mode addr_mode)
7722 struct loop_ivs *ivs = LOOP_IVS (loop);
7723 rtx orig_x = x;
7725 /* If this is an invariant, forget it, it isn't a giv. */
7726 if (loop_invariant_p (loop, x) == 1)
7727 return 0;
7729 *pbenefit = 0;
7730 *ext_val = NULL_RTX;
7731 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
7732 if (x == 0)
7733 return 0;
7735 switch (GET_CODE (x))
7737 case USE:
7738 case CONST_INT:
7739 /* Since this is now an invariant and wasn't before, it must be a giv
7740 with MULT_VAL == 0. It doesn't matter which BIV we associate this
7741 with. */
7742 *src_reg = ivs->list->biv->dest_reg;
7743 *mult_val = const0_rtx;
7744 *add_val = x;
7745 break;
7747 case REG:
7748 /* This is equivalent to a BIV. */
7749 *src_reg = x;
7750 *mult_val = const1_rtx;
7751 *add_val = const0_rtx;
7752 break;
7754 case PLUS:
7755 /* Either (plus (biv) (invar)) or
7756 (plus (mult (biv) (invar_1)) (invar_2)). */
7757 if (GET_CODE (XEXP (x, 0)) == MULT)
7759 *src_reg = XEXP (XEXP (x, 0), 0);
7760 *mult_val = XEXP (XEXP (x, 0), 1);
7762 else
7764 *src_reg = XEXP (x, 0);
7765 *mult_val = const1_rtx;
7767 *add_val = XEXP (x, 1);
7768 break;
7770 case MULT:
7771 /* ADD_VAL is zero. */
7772 *src_reg = XEXP (x, 0);
7773 *mult_val = XEXP (x, 1);
7774 *add_val = const0_rtx;
7775 break;
7777 default:
7778 gcc_unreachable ();
7781 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
7782 one unless they are CONST_INTs). */
7783 if (GET_CODE (*add_val) == USE)
7784 *add_val = XEXP (*add_val, 0);
7785 if (GET_CODE (*mult_val) == USE)
7786 *mult_val = XEXP (*mult_val, 0);
7788 if (is_addr)
7789 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
7790 else
7791 *pbenefit += rtx_cost (orig_x, SET);
7793 /* Always return true if this is a giv so it will be detected as such,
7794 even if the benefit is zero or negative. This allows elimination
7795 of bivs that might otherwise not be eliminated. */
7796 return 1;
7799 /* Given an expression, X, try to form it as a linear function of a biv.
7800 We will canonicalize it to be of the form
7801 (plus (mult (BIV) (invar_1))
7802 (invar_2))
7803 with possible degeneracies.
7805 The invariant expressions must each be of a form that can be used as a
7806 machine operand. We surround them with a USE rtx (a hack, but localized
7807 and certainly unambiguous!) if not a CONST_INT for simplicity in this
7808 routine; it is the caller's responsibility to strip them.
7810 If no such canonicalization is possible (i.e., two biv's are used or an
7811 expression that is neither invariant nor a biv or giv), this routine
7812 returns 0.
7814 For a nonzero return, the result will have a code of CONST_INT, USE,
7815 REG (for a BIV), PLUS, or MULT. No other codes will occur.
7817 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
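/* Two illustrative transformations (examples added; they are not in
   the original comments), for a biv B:

       (ashift B (const_int 2))
         => (mult B (const_int 4))

   by the ASHIFT case below, and

       (minus (mult B (const_int 4)) (const_int 5))
         => (plus (mult B (const_int 4)) (const_int -5))

   by the MINUS rule "a - b" => "a + b * (-1)" followed by constant
   folding in the MULT case.  */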
7819 static rtx sge_plus (enum machine_mode, rtx, rtx);
7820 static rtx sge_plus_constant (rtx, rtx);
7822 static rtx
7823 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
7825 struct loop_ivs *ivs = LOOP_IVS (loop);
7826 struct loop_regs *regs = LOOP_REGS (loop);
7827 enum machine_mode mode = GET_MODE (x);
7828 rtx arg0, arg1;
7829 rtx tem;
7831 /* If this is not an integer mode, or if we cannot do arithmetic in this
7832 mode, this can't be a giv. */
7833 if (mode != VOIDmode
7834 && (GET_MODE_CLASS (mode) != MODE_INT
7835 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
7836 return NULL_RTX;
7838 switch (GET_CODE (x))
7840 case PLUS:
7841 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7842 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7843 if (arg0 == 0 || arg1 == 0)
7844 return NULL_RTX;
7846 /* Put constant last, CONST_INT last if both constant. */
7847 if ((GET_CODE (arg0) == USE
7848 || GET_CODE (arg0) == CONST_INT)
7849 && ! ((GET_CODE (arg0) == USE
7850 && GET_CODE (arg1) == USE)
7851 || GET_CODE (arg1) == CONST_INT))
7852 tem = arg0, arg0 = arg1, arg1 = tem;
7854 /* Handle addition of zero, then addition of an invariant. */
7855 if (arg1 == const0_rtx)
7856 return arg0;
7857 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
7858 switch (GET_CODE (arg0))
7860 case CONST_INT:
7861 case USE:
7862 /* Adding two invariants must result in an invariant, so enclose
7863 the addition operation inside a USE and return it. */
7864 if (GET_CODE (arg0) == USE)
7865 arg0 = XEXP (arg0, 0);
7866 if (GET_CODE (arg1) == USE)
7867 arg1 = XEXP (arg1, 0);
7869 if (GET_CODE (arg0) == CONST_INT)
7870 tem = arg0, arg0 = arg1, arg1 = tem;
7871 if (GET_CODE (arg1) == CONST_INT)
7872 tem = sge_plus_constant (arg0, arg1);
7873 else
7874 tem = sge_plus (mode, arg0, arg1);
7876 if (GET_CODE (tem) != CONST_INT)
7877 tem = gen_rtx_USE (mode, tem);
7878 return tem;
7880 case REG:
7881 case MULT:
7882 /* biv + invar or mult + invar. Return sum. */
7883 return gen_rtx_PLUS (mode, arg0, arg1);
7885 case PLUS:
7886 /* (a + invar_1) + invar_2. Associate. */
7887 return
7888 simplify_giv_expr (loop,
7889 gen_rtx_PLUS (mode,
7890 XEXP (arg0, 0),
7891 gen_rtx_PLUS (mode,
7892 XEXP (arg0, 1),
7893 arg1)),
7894 ext_val, benefit);
7896 default:
7897 gcc_unreachable ();
7900 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
7901 MULT to reduce cases. */
7902 if (REG_P (arg0))
7903 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
7904 if (REG_P (arg1))
7905 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
7907 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
7908 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
7909 Recurse to associate the second PLUS. */
7910 if (GET_CODE (arg1) == MULT)
7911 tem = arg0, arg0 = arg1, arg1 = tem;
7913 if (GET_CODE (arg1) == PLUS)
7914 return
7915 simplify_giv_expr (loop,
7916 gen_rtx_PLUS (mode,
7917 gen_rtx_PLUS (mode, arg0,
7918 XEXP (arg1, 0)),
7919 XEXP (arg1, 1)),
7920 ext_val, benefit);
7922 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
7923 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
7924 return NULL_RTX;
7926 if (!rtx_equal_p (arg0, arg1))
7927 return NULL_RTX;
7929 return simplify_giv_expr (loop,
7930 gen_rtx_MULT (mode,
7931 XEXP (arg0, 0),
7932 gen_rtx_PLUS (mode,
7933 XEXP (arg0, 1),
7934 XEXP (arg1, 1))),
7935 ext_val, benefit);
7937 case MINUS:
7938 /* Handle "a - b" as "a + b * (-1)". */
7939 return simplify_giv_expr (loop,
7940 gen_rtx_PLUS (mode,
7941 XEXP (x, 0),
7942 gen_rtx_MULT (mode,
7943 XEXP (x, 1),
7944 constm1_rtx)),
7945 ext_val, benefit);
7947 case MULT:
7948 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7949 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7950 if (arg0 == 0 || arg1 == 0)
7951 return NULL_RTX;
7953 /* Put constant last, CONST_INT last if both constant. */
7954 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
7955 && GET_CODE (arg1) != CONST_INT)
7956 tem = arg0, arg0 = arg1, arg1 = tem;
7958 /* If second argument is not now constant, not giv. */
7959 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
7960 return NULL_RTX;
7962 /* Handle multiply by 0 or 1. */
7963 if (arg1 == const0_rtx)
7964 return const0_rtx;
7966 else if (arg1 == const1_rtx)
7967 return arg0;
7969 switch (GET_CODE (arg0))
7971 case REG:
7972 /* biv * invar. Done. */
7973 return gen_rtx_MULT (mode, arg0, arg1);
7975 case CONST_INT:
7976 /* Product of two constants. */
7977 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
7979 case USE:
7980 /* invar * invar is a giv, but attempt to simplify it somehow. */
7981 if (GET_CODE (arg1) != CONST_INT)
7982 return NULL_RTX;
7984 arg0 = XEXP (arg0, 0);
7985 if (GET_CODE (arg0) == MULT)
7987 /* (invar_0 * invar_1) * invar_2. Associate. */
7988 return simplify_giv_expr (loop,
7989 gen_rtx_MULT (mode,
7990 XEXP (arg0, 0),
7991 gen_rtx_MULT (mode,
7992 XEXP (arg0, 1),
7994 arg1)),
7995 ext_val, benefit);
7997 /* Propagate the MULT expressions to the innermost nodes. */
7998 else if (GET_CODE (arg0) == PLUS)
8000 /* (invar_0 + invar_1) * invar_2. Distribute. */
8001 return simplify_giv_expr (loop,
8002 gen_rtx_PLUS (mode,
8003 gen_rtx_MULT (mode,
8004 XEXP (arg0, 0),
8006 arg1),
8007 gen_rtx_MULT (mode,
8008 XEXP (arg0, 1),
8010 arg1)),
8011 ext_val, benefit);
8013 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
8015 case MULT:
8016 /* (a * invar_1) * invar_2. Associate. */
8017 return simplify_giv_expr (loop,
8018 gen_rtx_MULT (mode,
8019 XEXP (arg0, 0),
8020 gen_rtx_MULT (mode,
8021 XEXP (arg0, 1),
8022 arg1)),
8023 ext_val, benefit);
8025 case PLUS:
8026 /* (a + invar_1) * invar_2. Distribute. */
8027 return simplify_giv_expr (loop,
8028 gen_rtx_PLUS (mode,
8029 gen_rtx_MULT (mode,
8030 XEXP (arg0, 0),
8031 arg1),
8032 gen_rtx_MULT (mode,
8033 XEXP (arg0, 1),
8034 arg1)),
8035 ext_val, benefit);
8037 default:
8038 gcc_unreachable ();
8041 case ASHIFT:
8042 /* Shift by constant is multiply by power of two. */
8043 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8044 return 0;
8046 return
8047 simplify_giv_expr (loop,
8048 gen_rtx_MULT (mode,
8049 XEXP (x, 0),
8050 GEN_INT ((HOST_WIDE_INT) 1
8051 << INTVAL (XEXP (x, 1)))),
8052 ext_val, benefit);
8054 case NEG:
8055 /* "-a" is "a * (-1)" */
8056 return simplify_giv_expr (loop,
8057 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
8058 ext_val, benefit);
8060 case NOT:
8061 /* "~a" is "-a - 1". Silly, but easy. */
8062 return simplify_giv_expr (loop,
8063 gen_rtx_MINUS (mode,
8064 gen_rtx_NEG (mode, XEXP (x, 0)),
8065 const1_rtx),
8066 ext_val, benefit);
8068 case USE:
8069 /* Already in proper form for invariant. */
8070 return x;
8072 case SIGN_EXTEND:
8073 case ZERO_EXTEND:
8074 case TRUNCATE:
8075 /* Conditionally recognize extensions of simple IVs. After we've
8076 computed loop traversal counts and verified the range of the
8077 source IV, we'll reevaluate this as a GIV. */
8078 if (*ext_val == NULL_RTX)
8080 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
8081 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
8083 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
8084 return arg0;
8087 goto do_default;
8089 case REG:
8090 /* If this is a new register, we can't deal with it. */
8091 if (REGNO (x) >= max_reg_before_loop)
8092 return 0;
8094 /* Check for biv or giv. */
8095 switch (REG_IV_TYPE (ivs, REGNO (x)))
8097 case BASIC_INDUCT:
8098 return x;
8099 case GENERAL_INDUCT:
8101 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
8103 /* Form expression from giv and add benefit. Ensure this giv
8104 can derive another and subtract any needed adjustment if so. */
8106 /* Increasing the benefit here is risky. The only case in which it
8107 is arguably correct is if this is the only use of V. In other
8108 cases, this will artificially inflate the benefit of the current
8109 giv, and lead to suboptimal code. Thus, it is disabled, since
8110 potentially not reducing an only marginally beneficial giv is
8111 less harmful than reducing many givs that are not really
8112 beneficial. */
8114 rtx single_use = regs->array[REGNO (x)].single_usage;
8115 if (single_use && single_use != const0_rtx)
8116 *benefit += v->benefit;
8119 if (v->cant_derive)
8120 return 0;
8122 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
8123 v->src_reg, v->mult_val),
8124 v->add_val);
8126 if (v->derive_adjustment)
8127 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
8128 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
8129 if (*ext_val)
8130 {
8131 if (!v->ext_dependent)
8132 return arg0;
8133 }
8134 else
8135 {
8136 *ext_val = v->ext_dependent;
8137 return arg0;
8138 }
8139 return 0;
8142 default:
8143 do_default:
8144 /* If it isn't an induction variable, and it is invariant, we
8145 may be able to simplify things further by looking through
8146 the bits we just moved outside the loop. */
8147 if (loop_invariant_p (loop, x) == 1)
8149 struct movable *m;
8150 struct loop_movables *movables = LOOP_MOVABLES (loop);
8152 for (m = movables->head; m; m = m->next)
8153 if (rtx_equal_p (x, m->set_dest))
8155 /* Ok, we found a match. Substitute and simplify. */
8157 /* If we match another movable, we must use that, as
8158 this one is going away. */
8159 if (m->match)
8160 return simplify_giv_expr (loop, m->match->set_dest,
8161 ext_val, benefit);
8163 /* If consec is nonzero, this is a member of a group of
8164 instructions that were moved together. We handle this
8165 case only to the point of seeking to the last insn and
8166 looking for a REG_EQUAL. Fail if we don't find one. */
8167 if (m->consec != 0)
8169 int i = m->consec;
8170 tem = m->insn;
8171 do
8173 tem = NEXT_INSN (tem);
8175 while (--i > 0);
8177 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
8178 if (tem)
8179 tem = XEXP (tem, 0);
8181 else
8183 tem = single_set (m->insn);
8184 if (tem)
8185 tem = SET_SRC (tem);
8188 if (tem)
8190 /* What we are most interested in is pointer
8191 arithmetic on invariants -- only take
8192 patterns we may be able to do something with. */
8193 if (GET_CODE (tem) == PLUS
8194 || GET_CODE (tem) == MULT
8195 || GET_CODE (tem) == ASHIFT
8196 || GET_CODE (tem) == CONST_INT
8197 || GET_CODE (tem) == SYMBOL_REF)
8199 tem = simplify_giv_expr (loop, tem, ext_val,
8200 benefit);
8201 if (tem)
8202 return tem;
8204 else if (GET_CODE (tem) == CONST
8205 && GET_CODE (XEXP (tem, 0)) == PLUS
8206 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
8207 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
8209 tem = simplify_giv_expr (loop, XEXP (tem, 0),
8210 ext_val, benefit);
8211 if (tem)
8212 return tem;
8215 break;
8218 break;
8221 /* Fall through to general case. */
8222 default:
8223 /* If invariant, return as USE (unless CONST_INT).
8224 Otherwise, not giv. */
8225 if (GET_CODE (x) == USE)
8226 x = XEXP (x, 0);
8228 if (loop_invariant_p (loop, x) == 1)
8230 if (GET_CODE (x) == CONST_INT)
8231 return x;
8232 if (GET_CODE (x) == CONST
8233 && GET_CODE (XEXP (x, 0)) == PLUS
8234 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8235 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
8236 x = XEXP (x, 0);
8237 return gen_rtx_USE (mode, x);
8239 else
8240 return 0;
8244 /* This routine folds invariants such that there is only ever one
8245 CONST_INT in the summation. It is only used by simplify_giv_expr. */
8247 static rtx
8248 sge_plus_constant (rtx x, rtx c)
8250 if (GET_CODE (x) == CONST_INT)
8251 return GEN_INT (INTVAL (x) + INTVAL (c));
8252 else if (GET_CODE (x) != PLUS)
8253 return gen_rtx_PLUS (GET_MODE (x), x, c);
8254 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8256 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8257 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
8259 else if (GET_CODE (XEXP (x, 0)) == PLUS
8260 || GET_CODE (XEXP (x, 1)) != PLUS)
8262 return gen_rtx_PLUS (GET_MODE (x),
8263 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
8265 else
8267 return gen_rtx_PLUS (GET_MODE (x),
8268 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
8272 static rtx
8273 sge_plus (enum machine_mode mode, rtx x, rtx y)
8275 while (GET_CODE (y) == PLUS)
8277 rtx a = XEXP (y, 0);
8278 if (GET_CODE (a) == CONST_INT)
8279 x = sge_plus_constant (x, a);
8280 else
8281 x = gen_rtx_PLUS (mode, x, a);
8282 y = XEXP (y, 1);
8284 if (GET_CODE (y) == CONST_INT)
8285 x = sge_plus_constant (x, y);
8286 else
8287 x = gen_rtx_PLUS (mode, x, y);
8288 return x;
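/* Example of the folding above (illustrative; not in the original
   comments): sge_plus_constant applied to (plus (reg A) (const_int 3))
   and (const_int 4) yields (plus (reg A) (const_int 7)), keeping the
   single CONST_INT at one node of the summation.  */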
8291 /* Help detect a giv that is calculated by several consecutive insns;
8292 for example,
8293 giv = biv * M
8294 giv = giv + A
8295 The caller has already identified the first insn P as having a giv as dest;
8296 we check that all other insns that set the same register follow
8297 immediately after P, that they alter nothing else,
8298 and that the result of the last is still a giv.
8300 The value is 0 if the reg set in P is not really a giv.
8301 Otherwise, the value is the amount gained by eliminating
8302 all the consecutive insns that compute the value.
8304 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
8305 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
8307 The coefficients of the ultimate giv value are stored in
8308 *MULT_VAL and *ADD_VAL. */
8310 static int
8311 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
8312 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
8313 rtx *ext_val, rtx *last_consec_insn)
8315 struct loop_ivs *ivs = LOOP_IVS (loop);
8316 struct loop_regs *regs = LOOP_REGS (loop);
8317 int count;
8318 enum rtx_code code;
8319 int benefit;
8320 rtx temp;
8321 rtx set;
8323 /* Indicate that this is a giv so that we can update the value produced in
8324 each insn of the multi-insn sequence.
8326 This induction structure will be used only by the call to
8327 general_induction_var below, so we can allocate it on our stack.
8328 If this is a giv, our caller will replace the induct var entry with
8329 a new induction structure. */
8330 struct induction *v;
8332 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
8333 return 0;
8335 v = alloca (sizeof (struct induction));
8336 v->src_reg = src_reg;
8337 v->mult_val = *mult_val;
8338 v->add_val = *add_val;
8339 v->benefit = first_benefit;
8340 v->cant_derive = 0;
8341 v->derive_adjustment = 0;
8342 v->ext_dependent = NULL_RTX;
8344 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
8345 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
8347 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
8349 while (count > 0)
8351 p = NEXT_INSN (p);
8352 code = GET_CODE (p);
8354 /* If libcall, skip to end of call sequence. */
8355 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
8356 p = XEXP (temp, 0);
8358 if (code == INSN
8359 && (set = single_set (p))
8360 && REG_P (SET_DEST (set))
8361 && SET_DEST (set) == dest_reg
8362 && (general_induction_var (loop, SET_SRC (set), &src_reg,
8363 add_val, mult_val, ext_val, 0,
8364 &benefit, VOIDmode)
8365 /* Giv created by equivalent expression. */
8366 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
8367 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
8368 add_val, mult_val, ext_val, 0,
8369 &benefit, VOIDmode)))
8370 && src_reg == v->src_reg)
8372 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
8373 benefit += libcall_benefit (p);
8375 count--;
8376 v->mult_val = *mult_val;
8377 v->add_val = *add_val;
8378 v->benefit += benefit;
8380 else if (code != NOTE)
8382 /* Allow insns that set something other than this giv to a
8383 constant. Such insns are needed on machines which cannot
8384 include long constants and should not disqualify a giv. */
8385 if (code == INSN
8386 && (set = single_set (p))
8387 && SET_DEST (set) != dest_reg
8388 && CONSTANT_P (SET_SRC (set)))
8389 continue;
8391 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8392 return 0;
8396 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8397 *last_consec_insn = p;
8398 return v->benefit;
8401 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8402 represented by G1. If no such expression can be found, or it is clear that
8403 it cannot possibly be a valid address, 0 is returned.
8405 To perform the computation, we note that
8406 G1 = x * v + a and
8407 G2 = y * v + b
8408 where `v' is the biv.
8410 So G2 = (y/x) * G1 + (b - a*y/x).
8412 Note that MULT = y/x.
8414 Update: A and B are now allowed to be additive expressions such that
8415 B contains all variables in A. That is, computing B-A will not require
8416 subtracting variables. */
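/* Numeric example (illustrative; not in the original comments): for
   G1 = 4*v + 1 and G2 = 8*v + 10, MULT = 8/4 = 2 and the additive
   term is 10 - 1*2 = 8, so G2 = 2*G1 + 8; indeed
   2*(4*v + 1) + 8 == 8*v + 10.  */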
8418 static rtx
8419 express_from_1 (rtx a, rtx b, rtx mult)
8421 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
8423 if (mult == const0_rtx)
8424 return b;
8426 /* If MULT is not 1, we cannot handle A with non-constants, since we
8427 would then be required to subtract multiples of the registers in A.
8428 This is theoretically possible, and may even apply to some Fortran
8429 constructs, but it is a lot of work and we do not attempt it here. */
8431 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
8432 return NULL_RTX;
8434 /* In general these structures are sorted top to bottom (down the PLUS
8435 chain), but not left to right across the PLUS. If B is a higher
8436 order giv than A, we can strip one level and recurse. If A is higher
8437 order, we'll eventually bail out, but won't know that until the end.
8438 If they are the same, we'll strip one level around this loop. */
8440 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
8442 rtx ra, rb, oa, ob, tmp;
8444 ra = XEXP (a, 0), oa = XEXP (a, 1);
8445 if (GET_CODE (ra) == PLUS)
8446 tmp = ra, ra = oa, oa = tmp;
8448 rb = XEXP (b, 0), ob = XEXP (b, 1);
8449 if (GET_CODE (rb) == PLUS)
8450 tmp = rb, rb = ob, ob = tmp;
8452 if (rtx_equal_p (ra, rb))
8453 /* We matched: remove one reg completely. */
8454 a = oa, b = ob;
8455 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
8456 /* An alternate match. */
8457 a = oa, b = rb;
8458 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
8459 /* An alternate match. */
8460 a = ra, b = ob;
8461 else
8463 /* Indicates an extra register in B. Strip one level from B and
8464 recurse, hoping B was the higher order expression. */
8465 ob = express_from_1 (a, ob, mult);
8466 if (ob == NULL_RTX)
8467 return NULL_RTX;
8468 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
8472 /* Here we are at the last level of A, go through the cases hoping to
8473 get rid of everything but a constant. */
8475 if (GET_CODE (a) == PLUS)
8477 rtx ra, oa;
8479 ra = XEXP (a, 0), oa = XEXP (a, 1);
8480 if (rtx_equal_p (oa, b))
8481 oa = ra;
8482 else if (!rtx_equal_p (ra, b))
8483 return NULL_RTX;
8485 if (GET_CODE (oa) != CONST_INT)
8486 return NULL_RTX;
8488 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
8490 else if (GET_CODE (a) == CONST_INT)
8492 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
8494 else if (CONSTANT_P (a))
8496 enum machine_mode mode_a = GET_MODE (a);
8497 enum machine_mode mode_b = GET_MODE (b);
8498 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
8499 return simplify_gen_binary (MINUS, mode, b, a);
8501 else if (GET_CODE (b) == PLUS)
8503 if (rtx_equal_p (a, XEXP (b, 0)))
8504 return XEXP (b, 1);
8505 else if (rtx_equal_p (a, XEXP (b, 1)))
8506 return XEXP (b, 0);
8507 else
8508 return NULL_RTX;
8510 else if (rtx_equal_p (a, b))
8511 return const0_rtx;
8513 return NULL_RTX;
8516 static rtx
8517 express_from (struct induction *g1, struct induction *g2)
8519 rtx mult, add;
8521 /* The value that G1 will be multiplied by must be a constant integer. Also,
8522 the only chance we have of getting a valid address is if y/x (see above
8523 for notation) is also an integer. */
8524 if (GET_CODE (g1->mult_val) == CONST_INT
8525 && GET_CODE (g2->mult_val) == CONST_INT)
8527 if (g1->mult_val == const0_rtx
8528 || (g1->mult_val == constm1_rtx
8529 && INTVAL (g2->mult_val)
8530 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
8531 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
8532 return NULL_RTX;
8533 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
8535 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
8536 mult = const1_rtx;
8537 else
8539 /* ??? Find out if one is a multiple of the other? */
8540 return NULL_RTX;
8543 add = express_from_1 (g1->add_val, g2->add_val, mult);
8544 if (add == NULL_RTX)
8546 /* Failed. If we've got a multiplication factor between G1 and G2,
8547 scale G1's addend and try again. */
8548 if (INTVAL (mult) > 1)
8550 rtx g1_add_val = g1->add_val;
8551 if (GET_CODE (g1_add_val) == MULT
8552 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
8554 HOST_WIDE_INT m;
8555 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
8556 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
8557 XEXP (g1_add_val, 0), GEN_INT (m));
8559 else
8561 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
8562 mult);
8565 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
8568 if (add == NULL_RTX)
8569 return NULL_RTX;
8571 /* Form simplified final result. */
8572 if (mult == const0_rtx)
8573 return add;
8574 else if (mult == const1_rtx)
8575 mult = g1->dest_reg;
8576 else
8577 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
8579 if (add == const0_rtx)
8580 return mult;
8581 else
8583 if (GET_CODE (add) == PLUS
8584 && CONSTANT_P (XEXP (add, 1)))
8586 rtx tem = XEXP (add, 1);
8587 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
8588 add = tem;
8591 return gen_rtx_PLUS (g2->mode, mult, add);
8595 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8596 represented by G1. This indicates that G2 should be combined with G1 and
8597 that G2 can use (either directly or via an address expression) a register
8598 used to represent G1. */
8600 static rtx
8601 combine_givs_p (struct induction *g1, struct induction *g2)
8603 rtx comb, ret;
8605 /* With the introduction of ext dependent givs, we must be careful about modes.
8606 G2 must not use a wider mode than G1. */
8607 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
8608 return NULL_RTX;
8610 ret = comb = express_from (g1, g2);
8611 if (comb == NULL_RTX)
8612 return NULL_RTX;
8613 if (g1->mode != g2->mode)
8614 ret = gen_lowpart (g2->mode, comb);
8616 /* If these givs are identical, they can be combined. We use the results
8617 of express_from because the addends are not in a canonical form, so
8618 rtx_equal_p is a weaker test. */
8619 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
8620 combination to be the other way round. */
8621 if (comb == g1->dest_reg
8622 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
8624 return ret;
8627 /* If G2 can be expressed as a function of G1 and that function is valid
8628 as an address and no more expensive than using a register for G2,
8629 the expression of G2 in terms of G1 can be used. */
8630 if (ret != NULL_RTX
8631 && g2->giv_type == DEST_ADDR
8632 && memory_address_p (GET_MODE (g2->mem), ret))
8633 return ret;
8635 return NULL_RTX;
8638 /* See if BL is monotonic and has a constant per-iteration increment.
8639 Return the increment if so, otherwise return 0. */
8641 static HOST_WIDE_INT
8642 get_monotonic_increment (struct iv_class *bl)
8644 struct induction *v;
8645 rtx incr;
8647 /* Get the total increment and check that it is constant. */
8648 incr = biv_total_increment (bl);
8649 if (incr == 0 || GET_CODE (incr) != CONST_INT)
8650 return 0;
8652 for (v = bl->biv; v != 0; v = v->next_iv)
8654 if (GET_CODE (v->add_val) != CONST_INT)
8655 return 0;
8657 if (INTVAL (v->add_val) < 0 && INTVAL (incr) >= 0)
8658 return 0;
8660 if (INTVAL (v->add_val) > 0 && INTVAL (incr) <= 0)
8661 return 0;
8663 return INTVAL (incr);
8667 /* Subroutine of biv_fits_mode_p. Return true if biv BL, when biased by
8668 BIAS, will never exceed the unsigned range of MODE. LOOP is the loop
8669 to which the biv belongs and INCR is its per-iteration increment. */
8671 static bool
8672 biased_biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8673 HOST_WIDE_INT incr, enum machine_mode mode,
8674 unsigned HOST_WIDE_INT bias)
8676 unsigned HOST_WIDE_INT initial, maximum, span, delta;
8678 /* We need to be able to manipulate MODE-size constants. */
8679 if (HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode))
8680 return false;
8682 /* The number of loop iterations must be constant. */
8683 if (LOOP_INFO (loop)->n_iterations == 0)
8684 return false;
8686 /* So must the biv's initial value. */
8687 if (bl->initial_value == 0 || GET_CODE (bl->initial_value) != CONST_INT)
8688 return false;
8690 initial = bias + INTVAL (bl->initial_value);
8691 maximum = GET_MODE_MASK (mode);
8693 /* Make sure that the initial value is within range. */
8694 if (initial > maximum)
8695 return false;
8697 /* Set up DELTA and SPAN such that the number of iterations * DELTA
8698 (calculated to arbitrary precision) must be <= SPAN. */
8699 if (incr < 0)
8701 delta = -incr;
8702 span = initial;
8704 else
8706 delta = incr;
8707 /* Handle the special case in which MAXIMUM is the largest
8708 unsigned HOST_WIDE_INT and INITIAL is 0. */
8709 if (maximum + 1 == initial)
8710 span = LOOP_INFO (loop)->n_iterations * delta;
8711 else
8712 span = maximum + 1 - initial;
8714 return (span / LOOP_INFO (loop)->n_iterations >= delta);
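/* Worked example (illustrative; not in the original comments): in
   QImode, MAXIMUM == 255.  With a biased initial value of 10, INCR == 3
   and 50 iterations, SPAN == 255 + 1 - 10 == 246 and DELTA == 3; since
   246 / 50 == 4 >= 3, the biased biv cannot leave the mode's range.  */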
8718 /* Return true if biv BL will never exceed the bounds of MODE. LOOP is
8719 the loop to which BL belongs and INCR is its per-iteration increment.
8720 UNSIGNEDP is true if the biv should be treated as unsigned. */
8722 static bool
8723 biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8724 HOST_WIDE_INT incr, enum machine_mode mode, bool unsignedp)
8726 struct loop_info *loop_info;
8727 unsigned HOST_WIDE_INT bias;
8729 /* A biv's value will always be limited to its natural mode.
8730 Larger modes will observe the same wrap-around. */
8731 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (bl->biv->src_reg)))
8732 mode = GET_MODE (bl->biv->src_reg);
8734 loop_info = LOOP_INFO (loop);
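/* Explanatory note (added; not in the original sources): for a signed
   biv the range is biased so that the single unsigned check in
   biased_biv_fits_mode_p suffices.  In QImode, for example,
   (GET_MODE_MASK (mode) >> 1) + 1 == 128 maps the signed range
   [-128, 127] onto [0, 255].  */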
8736 bias = (unsignedp ? 0 : (GET_MODE_MASK (mode) >> 1) + 1);
8737 if (biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8738 return true;
8740 if (mode == GET_MODE (bl->biv->src_reg)
8741 && bl->biv->src_reg == loop_info->iteration_var
8742 && loop_info->comparison_value
8743 && loop_invariant_p (loop, loop_info->comparison_value))
8745 /* If the increment is +1, and the exit test is a <, the BIV
8746 cannot overflow. (For <=, we have the problematic case that
8747 the comparison value might be the maximum value of the range.) */
8748 if (incr == 1)
8750 if (loop_info->comparison_code == LT)
8751 return true;
8752 if (loop_info->comparison_code == LTU && unsignedp)
8753 return true;
8756 /* Likewise for increment -1 and exit test >. */
8757 if (incr == -1)
8759 if (loop_info->comparison_code == GT)
8760 return true;
8761 if (loop_info->comparison_code == GTU && unsignedp)
8762 return true;
8765 return false;
8769 /* Given that X is an extension or truncation of BL, return true
8770 if it is unaffected by overflow. LOOP is the loop to which
8771 BL belongs and INCR is its per-iteration increment. */
8773 static bool
8774 extension_within_bounds_p (const struct loop *loop, struct iv_class *bl,
8775 HOST_WIDE_INT incr, rtx x)
8777 enum machine_mode mode;
8778 bool signedp, unsignedp;
8780 switch (GET_CODE (x))
8782 case SIGN_EXTEND:
8783 case ZERO_EXTEND:
8784 mode = GET_MODE (XEXP (x, 0));
8785 signedp = (GET_CODE (x) == SIGN_EXTEND);
8786 unsignedp = (GET_CODE (x) == ZERO_EXTEND);
8787 break;
8789 case TRUNCATE:
8790 /* We don't know whether this value is being used as signed
8791 or unsigned, so check the conditions for both. */
8792 mode = GET_MODE (x);
8793 signedp = unsignedp = true;
8794 break;
8796 default:
8797 gcc_unreachable ();
8800 return ((!signedp || biv_fits_mode_p (loop, bl, incr, mode, false))
8801 && (!unsignedp || biv_fits_mode_p (loop, bl, incr, mode, true)));
8805 /* Check each extension dependent giv in this class to see if its
8806 root biv is safe from wrapping in the interior mode, which would
8807 make the giv illegal. */
8809 static void
8810 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
8812 struct induction *v;
8813 HOST_WIDE_INT incr;
8815 incr = get_monotonic_increment (bl);
8817 /* Invalidate givs that fail the tests. */
8818 for (v = bl->giv; v; v = v->next_iv)
8819 if (v->ext_dependent)
8821 if (incr != 0
8822 && extension_within_bounds_p (loop, bl, incr, v->ext_dependent))
8824 if (loop_dump_stream)
8825 fprintf (loop_dump_stream,
8826 "Verified ext dependent giv at %d of reg %d\n",
8827 INSN_UID (v->insn), bl->regno);
8829 else
8831 if (loop_dump_stream)
8832 fprintf (loop_dump_stream,
8833 "Failed ext dependent giv at %d\n",
8834 INSN_UID (v->insn));
8836 v->ignore = 1;
8837 bl->all_reduced = 0;
8842 /* Generate a version of VALUE in a mode appropriate for initializing V. */
8844 static rtx
8845 extend_value_for_giv (struct induction *v, rtx value)
8847 rtx ext_dep = v->ext_dependent;
8849 if (! ext_dep)
8850 return value;
8852 /* Recall that check_ext_dependent_givs verified that the known bounds
8853 of a biv did not overflow or wrap with respect to the extension for
8854 the giv. Therefore, constants need no additional adjustment. */
8855 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
8856 return value;
8858 /* Otherwise, we must adjust the value to compensate for the
8859 differing modes of the biv and the giv. */
8860 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
8863 struct combine_givs_stats
8865 int giv_number;
8866 int total_benefit;
8869 static int
8870 cmp_combine_givs_stats (const void *xp, const void *yp)
8872 const struct combine_givs_stats * const x =
8873 (const struct combine_givs_stats *) xp;
8874 const struct combine_givs_stats * const y =
8875 (const struct combine_givs_stats *) yp;
8876 int d;
8877 d = y->total_benefit - x->total_benefit;
8878 /* Stabilize the sort. */
8879 if (!d)
8880 d = x->giv_number - y->giv_number;
8881 return d;
8884 /* Check all pairs of givs for iv_class BL and see if any can be combined with
8885 any other. If so, point SAME to the giv combined with and set NEW_REG to
8886 be an expression (in terms of the other giv's DEST_REG) equivalent to the
8887 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
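/* For example (illustrative; not in the original comments): givs
   G1 = B*4 and G2 = B*4 + 4 can be combined, since express_from
   rewrites G2 as G1 + 4; G2->same is then set to G1 and G2->new_reg
   holds (plus (reg of G1) (const_int 4)).  */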
8889 static void
8890 combine_givs (struct loop_regs *regs, struct iv_class *bl)
8892 /* Additional benefit to add for being combined multiple times. */
8893 const int extra_benefit = 3;
8895 struct induction *g1, *g2, **giv_array;
8896 int i, j, k, giv_count;
8897 struct combine_givs_stats *stats;
8898 rtx *can_combine;
8900 /* Count givs, because bl->giv_count is incorrect here. */
8901 giv_count = 0;
8902 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8903 if (!g1->ignore)
8904 giv_count++;
8906 giv_array = alloca (giv_count * sizeof (struct induction *));
8907 i = 0;
8908 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8909 if (!g1->ignore)
8910 giv_array[i++] = g1;
8912 stats = xcalloc (giv_count, sizeof (*stats));
8913 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
8915 for (i = 0; i < giv_count; i++)
8917 int this_benefit;
8918 rtx single_use;
8920 g1 = giv_array[i];
8921 stats[i].giv_number = i;
8923 /* If a DEST_REG GIV is used only once, do not allow it to combine
8924 with anything, for in doing so we will gain nothing that cannot
8925 be had by simply letting the GIV with which we would have combined
8926 be reduced on its own. The lossage shows up in particular with
8927 DEST_ADDR targets on hosts with reg+reg addressing, though it can
8928 be seen elsewhere as well. */
8929 if (g1->giv_type == DEST_REG
8930 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
8931 && single_use != const0_rtx)
8932 continue;
8934 this_benefit = g1->benefit;
8935 /* Add an additional weight for zero addends. */
8936 if (g1->no_const_addval)
8937 this_benefit += 1;
8939 for (j = 0; j < giv_count; j++)
8941 rtx this_combine;
8943 g2 = giv_array[j];
8944 if (g1 != g2
8945 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
8947 can_combine[i * giv_count + j] = this_combine;
8948 this_benefit += g2->benefit + extra_benefit;
8951 stats[i].total_benefit = this_benefit;
8954 /* Iterate, combining until we can't. */
8955 restart:
8956 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
8958 if (loop_dump_stream)
8960 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
8961 for (k = 0; k < giv_count; k++)
8963 g1 = giv_array[stats[k].giv_number];
8964 if (!g1->combined_with && !g1->same)
8965 fprintf (loop_dump_stream, " {%d, %d}",
8966 INSN_UID (giv_array[stats[k].giv_number]->insn),
8967 stats[k].total_benefit);
8969 putc ('\n', loop_dump_stream);
8972 for (k = 0; k < giv_count; k++)
8974 int g1_add_benefit = 0;
8976 i = stats[k].giv_number;
8977 g1 = giv_array[i];
8979 /* If it has already been combined, skip. */
8980 if (g1->combined_with || g1->same)
8981 continue;
8983 for (j = 0; j < giv_count; j++)
8985 g2 = giv_array[j];
8986 if (g1 != g2 && can_combine[i * giv_count + j]
8987 /* If it has already been combined, skip. */
8988 && ! g2->same && ! g2->combined_with)
8990 int l;
8992 g2->new_reg = can_combine[i * giv_count + j];
8993 g2->same = g1;
8994 /* For the destination, we may now substitute a memory expression
8995 instead of a register. This changes the costs considerably, so
8996 add the compensation. */
8997 if (g2->giv_type == DEST_ADDR)
8998 g2->benefit = (g2->benefit + reg_address_cost
8999 - address_cost (g2->new_reg,
9000 GET_MODE (g2->mem)));
9001 g1->combined_with++;
9002 g1->lifetime += g2->lifetime;
9004 g1_add_benefit += g2->benefit;
9006 /* ??? The new final_[bg]iv_value code does a much better job
9007 of finding replaceable giv's, and hence this code may no
9008 longer be necessary. */
9009 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
9010 g1_add_benefit -= copy_cost;
9012 /* To help optimize the next set of combinations, remove
9013 this giv from the benefits of other potential mates. */
9014 for (l = 0; l < giv_count; ++l)
9016 int m = stats[l].giv_number;
9017 if (can_combine[m * giv_count + j])
9018 stats[l].total_benefit -= g2->benefit + extra_benefit;
9021 if (loop_dump_stream)
9022 fprintf (loop_dump_stream,
9023 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
9024 INSN_UID (g2->insn), INSN_UID (g1->insn),
9025 g1->benefit, g1_add_benefit, g1->lifetime);
9029 /* To help optimize the next set of combinations, remove
9030 this giv from the benefits of other potential mates. */
9031 if (g1->combined_with)
9033 for (j = 0; j < giv_count; ++j)
9035 int m = stats[j].giv_number;
9036 if (can_combine[m * giv_count + i])
9037 stats[j].total_benefit -= g1->benefit + extra_benefit;
9040 g1->benefit += g1_add_benefit;
9042 /* We've finished with this giv, and everything it touched.
9043 Restart the combination so that proper weights for the
9044 rest of the givs are properly taken into account. */
9045 /* ??? Ideally we would compact the arrays at this point, so
9046 as to not cover old ground. But sanely compacting
9047 can_combine is tricky. */
9048 goto restart;
9052 /* Clean up. */
9053 free (stats);
9054 free (can_combine);
9057 /* Generate sequence for REG = B * M + A. B is the initial value of
9058 the basic induction variable, M a multiplicative constant, A an
9059 additive constant and REG the destination register. */
9061 static rtx
9062 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
9064 rtx seq;
9065 rtx result;
9067 start_sequence ();
9068 /* Use unsigned arithmetic. */
9069 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9070 if (reg != result)
9071 emit_move_insn (reg, result);
9072 seq = get_insns ();
9073 end_sequence ();
9075 return seq;
9079 /* Update registers created in insn sequence SEQ. */
9081 static void
9082 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
9084 rtx insn;
9086 /* Update register info for alias analysis. */
9088 insn = seq;
9089 while (insn != NULL_RTX)
9091 rtx set = single_set (insn);
9093 if (set && REG_P (SET_DEST (set)))
9094 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
9096 insn = NEXT_INSN (insn);
9101 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
9102 is the initial value of the basic induction variable, M a
9103 multiplicative constant, A an additive constant and REG the
9104 destination register. */
9106 static void
9107 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
9108 rtx reg, basic_block before_bb, rtx before_insn)
9110 rtx seq;
9112 if (! before_insn)
9114 loop_iv_add_mult_hoist (loop, b, m, a, reg);
9115 return;
9118 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9119 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9121 /* Increase the lifetime of any invariants moved further in code. */
9122 update_reg_last_use (a, before_insn);
9123 update_reg_last_use (b, before_insn);
9124 update_reg_last_use (m, before_insn);
9126 /* It is possible that the expansion created lots of new registers.
9127 Iterate over the sequence we just created and record them all. We
9128 must do this before inserting the sequence. */
9129 loop_regs_update (loop, seq);
9131 loop_insn_emit_before (loop, before_bb, before_insn, seq);
9135 /* Emit insns after the loop end to set REG = B * M + A. B is the
9136 initial value of the basic induction variable, M a multiplicative
9137 constant, A an additive constant and REG the destination
9138 register. */
9140 static void
9141 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9143 rtx seq;
9145 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9146 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9148 /* Increase the lifetime of any invariants moved further in code.
9149 ???? Is this really necessary? */
9150 update_reg_last_use (a, loop->sink);
9151 update_reg_last_use (b, loop->sink);
9152 update_reg_last_use (m, loop->sink);
9154 /* It is possible that the expansion created lots of new registers.
9155 Iterate over the sequence we just created and record them all. We
9156 must do this before inserting the sequence. */
9157 loop_regs_update (loop, seq);
9159 loop_insn_sink (loop, seq);
9163 /* Emit insns in the loop pre-header to set REG = B * M + A. B is the
9164 initial value of the basic induction variable, M a multiplicative
9165 constant, A an additive constant and REG the destination register. */
9167 static void
9168 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9170 rtx seq;
9172 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9173 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9175 /* It is possible that the expansion created lots of new registers.
9176 Iterate over the sequence we just created and record them all. We
9177 must do this before inserting the sequence. */
9178 loop_regs_update (loop, seq);
9180 loop_insn_hoist (loop, seq);
9185 /* Similar to gen_add_mult, but compute cost rather than generating
9186 sequence. */
9188 static int
9189 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
9191 int cost = 0;
9192 rtx last, result;
9194 start_sequence ();
9195 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9196 if (reg != result)
9197 emit_move_insn (reg, result);
9198 last = get_last_insn ();
9199 while (last)
9201 rtx t = single_set (last);
9202 if (t)
9203 cost += rtx_cost (SET_SRC (t), SET);
9204 last = PREV_INSN (last);
9206 end_sequence ();
9207 return cost;
9210 /* Test whether A * B can be computed without
9211 an actual multiply insn. Value is 1 if so.
9213 ??? This function stinks because it generates a ton of wasted RTL
9214 ??? and as a result fragments GC memory to no end. There are other
9215 ??? places in the compiler which are invoked a lot and do the same
9216 ??? thing, generate wasted RTL just to see if something is possible. */
9218 static int
9219 product_cheap_p (rtx a, rtx b)
9221 rtx tmp;
9222 int win, n_insns;
9224 /* If only one is constant, make it B. */
9225 if (GET_CODE (a) == CONST_INT)
9226 tmp = a, a = b, b = tmp;
9228 /* If first constant, both constant, so don't need multiply. */
9229 if (GET_CODE (a) == CONST_INT)
9230 return 1;
9232 /* If second not constant, neither is constant, so would need multiply. */
9233 if (GET_CODE (b) != CONST_INT)
9234 return 0;
9236 /* One operand is constant, so might not need multiply insn. Generate the
9237 code for the multiply and see if a call, a multiply, or a long
9238 sequence of insns is generated. */
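/* In outline, the heuristic applied below: the product is considered
   cheap only if expand_mult emits at most three insns, all of them
   ordinary (non-jump) insns, and none of them an actual multiply,
   i.e. no SET whose source is a MULT, whether standing alone or as
   the first element of a PARALLEL. */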
9240 start_sequence ();
9241 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
9242 tmp = get_insns ();
9243 end_sequence ();
9245 win = 1;
9246 if (tmp == NULL_RTX)
9248 else if (INSN_P (tmp))
9250 n_insns = 0;
9251 while (tmp != NULL_RTX)
9253 rtx next = NEXT_INSN (tmp);
9255 if (++n_insns > 3
9256 || !NONJUMP_INSN_P (tmp)
9257 || (GET_CODE (PATTERN (tmp)) == SET
9258 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
9259 || (GET_CODE (PATTERN (tmp)) == PARALLEL
9260 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
9261 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
9263 win = 0;
9264 break;
9267 tmp = next;
9270 else if (GET_CODE (tmp) == SET
9271 && GET_CODE (SET_SRC (tmp)) == MULT)
9272 win = 0;
9273 else if (GET_CODE (tmp) == PARALLEL
9274 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
9275 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
9276 win = 0;
9278 return win;
9281 /* Check to see if loop can be terminated by a "decrement and branch until
9282 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
9283 Also try reversing an increment loop to a decrement loop
9284 to see if the optimization can be performed.
9285 Value is nonzero if optimization was performed. */
9287 /* This is useful even if the architecture doesn't have such an insn,
9288 because it might change a loop which increments from 0 to n into a loop
9289 which decrements from n to 0. A loop that decrements to zero is usually
9290 faster than one that increments from zero. */
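/* Illustrative example (not from the original sources): a counted
   loop like "for (i = 0; i < n; i++)" in which I is used only as a
   counter can be transformed as if it were "for (i = n; --i >= 0;)",
   so the exit test becomes a comparison against zero that a
   decrement-and-branch insn can perform for free. */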
9292 /* ??? This could be rewritten to use some of the loop unrolling procedures,
9293 such as approx_final_value, biv_total_increment, loop_iterations, and
9294 final_[bg]iv_value. */
9296 static int
9297 check_dbra_loop (struct loop *loop, int insn_count)
9299 struct loop_info *loop_info = LOOP_INFO (loop);
9300 struct loop_regs *regs = LOOP_REGS (loop);
9301 struct loop_ivs *ivs = LOOP_IVS (loop);
9302 struct iv_class *bl;
9303 rtx reg;
9304 enum machine_mode mode;
9305 rtx jump_label;
9306 rtx final_value;
9307 rtx start_value;
9308 rtx new_add_val;
9309 rtx comparison;
9310 rtx before_comparison;
9311 rtx p;
9312 rtx jump;
9313 rtx first_compare;
9314 int compare_and_branch;
9315 rtx loop_start = loop->start;
9316 rtx loop_end = loop->end;
9318 /* If last insn is a conditional branch, and the insn before tests a
9319 register value, try to optimize it. Otherwise, we can't do anything. */
9321 jump = PREV_INSN (loop_end);
9322 comparison = get_condition_for_loop (loop, jump);
9323 if (comparison == 0)
9324 return 0;
9325 if (!onlyjump_p (jump))
9326 return 0;
9328 /* Try to compute whether the compare/branch at the loop end is one or
9329 two instructions. */
9330 get_condition (jump, &first_compare, false, true);
9331 if (first_compare == jump)
9332 compare_and_branch = 1;
9333 else if (first_compare == prev_nonnote_insn (jump))
9334 compare_and_branch = 2;
9335 else
9336 return 0;
9339 /* If more than one condition is present to control the loop, then
9340 do not proceed, as this function does not know how to rewrite
9341 loop tests with more than one condition.
9343 Look backwards from the first insn in the last comparison
9344 sequence and see if we've got another comparison sequence. */
9346 rtx jump1;
9347 if ((jump1 = prev_nonnote_insn (first_compare))
9348 && JUMP_P (jump1))
9349 return 0;
9352 /* Check all of the bivs to see if the compare uses one of them.
9353 Skip biv's set more than once because we can't guarantee that
9354 it will be zero on the last iteration. Also skip if the biv is
9355 used between its update and the test insn. */
9357 for (bl = ivs->list; bl; bl = bl->next)
9359 if (bl->biv_count == 1
9360 && ! bl->biv->maybe_multiple
9361 && bl->biv->dest_reg == XEXP (comparison, 0)
9362 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9363 first_compare))
9364 break;
9367 /* Try swapping the comparison to identify a suitable biv. */
9368 if (!bl)
9369 for (bl = ivs->list; bl; bl = bl->next)
9370 if (bl->biv_count == 1
9371 && ! bl->biv->maybe_multiple
9372 && bl->biv->dest_reg == XEXP (comparison, 1)
9373 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9374 first_compare))
9376 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
9377 VOIDmode,
9378 XEXP (comparison, 1),
9379 XEXP (comparison, 0));
9380 break;
9383 if (! bl)
9384 return 0;
9386 /* Look for the case where the basic induction variable is always
9387 nonnegative, and equals zero on the last iteration.
9388 In this case, add a reg_note REG_NONNEG, which allows the
9389 m68k DBRA instruction to be used. */
9391 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
9392 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
9393 && GET_CODE (bl->biv->add_val) == CONST_INT
9394 && INTVAL (bl->biv->add_val) < 0)
9396 /* The initial value must be greater than 0, and
9397 init_val % -dec_value must be 0, to ensure that the biv is
9398 exactly zero on the last iteration. */
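/* For example (illustrative numbers): with an initial value of 12
   and an add_val of -3 the biv takes the values 12, 9, 6, 3, 0, so
   it is exactly zero when the loop exits; an initial value of 11
   would step past zero (11, 8, 5, 2, -1) and never equal it. */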
9400 if (GET_CODE (bl->initial_value) == CONST_INT
9401 && INTVAL (bl->initial_value) > 0
9402 && (INTVAL (bl->initial_value)
9403 % (-INTVAL (bl->biv->add_val))) == 0)
9405 /* Register always nonnegative, add REG_NOTE to branch. */
9406 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9407 REG_NOTES (jump)
9408 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9409 REG_NOTES (jump));
9410 bl->nonneg = 1;
9412 return 1;
9415 /* If the decrement is 1 and the value was tested as >= 0 before
9416 the loop, then we can safely optimize. */
9417 for (p = loop_start; p; p = PREV_INSN (p))
9419 if (LABEL_P (p))
9420 break;
9421 if (!JUMP_P (p))
9422 continue;
9424 before_comparison = get_condition_for_loop (loop, p);
9425 if (before_comparison
9426 && XEXP (before_comparison, 0) == bl->biv->dest_reg
9427 && (GET_CODE (before_comparison) == LT
9428 || GET_CODE (before_comparison) == LTU)
9429 && XEXP (before_comparison, 1) == const0_rtx
9430 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
9431 && INTVAL (bl->biv->add_val) == -1)
9433 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9434 REG_NOTES (jump)
9435 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9436 REG_NOTES (jump));
9437 bl->nonneg = 1;
9439 return 1;
9443 else if (GET_CODE (bl->biv->add_val) == CONST_INT
9444 && INTVAL (bl->biv->add_val) > 0)
9446 /* Try to change inc to dec, so can apply above optimization. */
9447 /* Can do this if:
9448 all registers modified are induction variables or invariant,
9449 all memory references have non-overlapping addresses
9450 (obviously true if there is only one write), and we
9451 allow 2 insns for the compare/jump at the end of the loop. */
9452 /* Also, we must avoid any instructions which use both the reversed
9453 biv and another biv. Such instructions will fail if the loop is
9454 reversed. We meet this condition by requiring that either
9455 no_use_except_counting is true, or else that there is only
9456 one biv. */
9457 int num_nonfixed_reads = 0;
9458 /* 1 if the iteration var is used only to count iterations. */
9459 int no_use_except_counting = 0;
9460 /* 1 if the loop has no memory store, or it has a single memory store
9461 which is reversible. */
9462 int reversible_mem_store = 1;
9464 if (bl->giv_count == 0
9465 && !loop->exit_count
9466 && !loop_info->has_multiple_exit_targets)
9468 rtx bivreg = regno_reg_rtx[bl->regno];
9469 struct iv_class *blt;
9471 /* If there are no givs for this biv, and the only exit is the
9472 fall through at the end of the loop, then
9473 see if perhaps there are no uses except to count. */
9474 no_use_except_counting = 1;
9475 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9476 if (INSN_P (p))
9478 rtx set = single_set (p);
9480 if (set && REG_P (SET_DEST (set))
9481 && REGNO (SET_DEST (set)) == bl->regno)
9482 /* An insn that sets the biv is okay. */
9484 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
9485 /* An insn that doesn't mention the biv is okay. */
9487 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
9488 || p == prev_nonnote_insn (loop_end))
9490 /* If either of these insns uses the biv and sets a pseudo
9491 that has more than one usage, then the biv has uses
9492 other than counting since it's used to derive a value
9493 that is used more than one time. */
9494 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
9495 regs);
9496 if (regs->multiple_uses)
9498 no_use_except_counting = 0;
9499 break;
9502 else
9504 no_use_except_counting = 0;
9505 break;
9509 /* A biv has uses besides counting if it is used to set
9510 another biv. */
9511 for (blt = ivs->list; blt; blt = blt->next)
9512 if (blt->init_set
9513 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
9515 no_use_except_counting = 0;
9516 break;
9520 if (no_use_except_counting)
9521 /* No need to worry about MEMs. */
9523 else if (loop_info->num_mem_sets <= 1)
9525 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9526 if (INSN_P (p))
9527 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
9529 /* If the loop has a single store, and the destination address is
9530 invariant, then we can't reverse the loop, because this address
9531 might then have the wrong value at loop exit.
9532 This would work if the source was invariant also, however, in that
9533 case, the insn should have been moved out of the loop. */
9535 if (loop_info->num_mem_sets == 1)
9537 struct induction *v;
9539 /* If we could prove that each of the memory locations
9540 written to was different, then we could reverse the
9541 store -- but we don't presently have any way of
9542 knowing that. */
9543 reversible_mem_store = 0;
9545 /* If the store depends on a register that is set after the
9546 store, it depends on the initial value, and is thus not
9547 reversible. */
9548 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
9550 if (v->giv_type == DEST_REG
9551 && reg_mentioned_p (v->dest_reg,
9552 PATTERN (loop_info->first_loop_store_insn))
9553 && loop_insn_first_p (loop_info->first_loop_store_insn,
9554 v->insn))
9555 reversible_mem_store = 0;
9559 else
9560 return 0;
9562 /* This code only acts for innermost loops. Also it simplifies
9563 the memory address check by only reversing loops with
9564 zero or one memory access.
9565 Two memory accesses could involve parts of the same array,
9566 and that can't be reversed.
9567 If the biv is used only for counting, then we don't need to worry
9568 about all these things. */
9570 if ((num_nonfixed_reads <= 1
9571 && ! loop_info->has_nonconst_call
9572 && ! loop_info->has_prefetch
9573 && ! loop_info->has_volatile
9574 && reversible_mem_store
9575 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
9576 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
9577 && (bl == ivs->list && bl->next == 0))
9578 || (no_use_except_counting && ! loop_info->has_prefetch))
9580 rtx tem;
9582 /* Loop can be reversed. */
9583 if (loop_dump_stream)
9584 fprintf (loop_dump_stream, "Can reverse loop\n");
9586 /* Now check other conditions:
9588 The increment must be a constant, as must the initial value,
9589 and the comparison code must be LT.
9591 This test can probably be improved since +/- 1 in the constant
9592 can be obtained by changing LT to LE and vice versa; this is
9593 confusing. */
9595 if (comparison
9596 /* for constants, LE gets turned into LT */
9597 && (GET_CODE (comparison) == LT
9598 || (GET_CODE (comparison) == LE
9599 && no_use_except_counting)
9600 || GET_CODE (comparison) == LTU))
9602 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
9603 rtx initial_value, comparison_value;
9604 int nonneg = 0;
9605 enum rtx_code cmp_code;
9606 int comparison_const_width;
9607 unsigned HOST_WIDE_INT comparison_sign_mask;
9608 bool keep_first_compare;
9610 add_val = INTVAL (bl->biv->add_val);
9611 comparison_value = XEXP (comparison, 1);
9612 if (GET_MODE (comparison_value) == VOIDmode)
9613 comparison_const_width
9614 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
9615 else
9616 comparison_const_width
9617 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
9618 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
9619 comparison_const_width = HOST_BITS_PER_WIDE_INT;
9620 comparison_sign_mask
9621 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
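/* COMPARISON_SIGN_MASK now isolates the sign bit of the comparison
   mode; it is used further below to detect whether normalizing
   COMPARISON_VAL flipped that bit, i.e. overflowed. */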
9623 /* If the comparison value is not a loop invariant, then we
9624 can not reverse this loop.
9626 ??? If the insns which initialize the comparison value as
9627 a whole compute an invariant result, then we could move
9628 them out of the loop and proceed with loop reversal. */
9629 if (! loop_invariant_p (loop, comparison_value))
9630 return 0;
9632 if (GET_CODE (comparison_value) == CONST_INT)
9633 comparison_val = INTVAL (comparison_value);
9634 initial_value = bl->initial_value;
9636 /* Normalize the initial value if it is an integer and
9637 has no other use except as a counter. This will allow
9638 a few more loops to be reversed. */
9639 if (no_use_except_counting
9640 && GET_CODE (comparison_value) == CONST_INT
9641 && GET_CODE (initial_value) == CONST_INT)
9643 comparison_val = comparison_val - INTVAL (bl->initial_value);
9644 /* The code below requires comparison_val to be a multiple
9645 of add_val in order to do the loop reversal, so
9646 round up comparison_val to a multiple of add_val.
9647 Since comparison_value is constant, we know that the
9648 current comparison code is LT. */
9649 comparison_val = comparison_val + add_val - 1;
9650 comparison_val
9651 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
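/* For example (illustrative numbers, assuming the normalized form
   with initial value 0 and an LT exit test): with add_val 4 and a
   comparison value of 10, the values actually tested are 0, 4 and 8,
   so rounding the limit up to 12 leaves the iteration count
   unchanged. */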
9652 /* We postpone overflow checks for COMPARISON_VAL here;
9653 even if there is an overflow, we might still be able to
9654 reverse the loop, if converting the loop exit test to
9655 NE is possible. */
9656 initial_value = const0_rtx;
9659 /* First check if we can do a vanilla loop reversal. */
9660 if (initial_value == const0_rtx
9661 && GET_CODE (comparison_value) == CONST_INT
9662 /* Now do postponed overflow checks on COMPARISON_VAL. */
9663 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
9664 & comparison_sign_mask))
9666 /* Register will always be nonnegative, with value
9667 0 on last iteration */
9668 add_adjust = add_val;
9669 nonneg = 1;
9670 cmp_code = GE;
9672 else
9673 return 0;
9675 if (GET_CODE (comparison) == LE)
9676 add_adjust -= add_val;
9678 /* If the initial value is not zero, or if the comparison
9679 value is not an exact multiple of the increment, then we
9680 can not reverse this loop. */
9681 if (initial_value == const0_rtx
9682 && GET_CODE (comparison_value) == CONST_INT)
9684 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
9685 return 0;
9687 else
9689 if (! no_use_except_counting || add_val != 1)
9690 return 0;
9693 final_value = comparison_value;
9695 /* Reset these in case we normalized the initial value
9696 and comparison value above. */
9697 if (GET_CODE (comparison_value) == CONST_INT
9698 && GET_CODE (initial_value) == CONST_INT)
9700 comparison_value = GEN_INT (comparison_val);
9701 final_value
9702 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
9704 bl->initial_value = initial_value;
9706 /* Save some info needed to produce the new insns. */
9707 reg = bl->biv->dest_reg;
9708 mode = GET_MODE (reg);
9709 jump_label = condjump_label (PREV_INSN (loop_end));
9710 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
9712 /* Set start_value; if this is not a CONST_INT, we need
9713 to generate a SUB.
9714 Initialize biv to start_value before loop start.
9715 The old initializing insn will be deleted as a
9716 dead store by flow.c. */
9717 if (initial_value == const0_rtx
9718 && GET_CODE (comparison_value) == CONST_INT)
9720 start_value
9721 = gen_int_mode (comparison_val - add_adjust, mode);
9722 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
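/* Continuing the illustrative numbers above: a limit of 12 with
   add_val 4 gives a start value of 8; the reversed biv counts down
   8, 4, 0 and exits once it drops below zero, preserving the
   original three iterations. */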
9724 else if (GET_CODE (initial_value) == CONST_INT)
9726 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
9727 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
9729 if (add_insn == 0)
9730 return 0;
9732 start_value
9733 = gen_rtx_PLUS (mode, comparison_value, offset);
9734 loop_insn_hoist (loop, add_insn);
9735 if (GET_CODE (comparison) == LE)
9736 final_value = gen_rtx_PLUS (mode, comparison_value,
9737 GEN_INT (add_val));
9739 else if (! add_adjust)
9741 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
9742 initial_value);
9744 if (sub_insn == 0)
9745 return 0;
9746 start_value
9747 = gen_rtx_MINUS (mode, comparison_value, initial_value);
9748 loop_insn_hoist (loop, sub_insn);
9750 else
9751 /* We could handle the other cases too, but it'll be
9752 better to have a testcase first. */
9753 return 0;
9755 /* We may not have a single insn which can increment a reg, so
9756 create a sequence to hold all the insns from expand_inc. */
9757 start_sequence ();
9758 expand_inc (reg, new_add_val);
9759 tem = get_insns ();
9760 end_sequence ();
9762 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
9763 delete_insn (bl->biv->insn);
9765 /* Update biv info to reflect its new status. */
9766 bl->biv->insn = p;
9767 bl->initial_value = start_value;
9768 bl->biv->add_val = new_add_val;
9770 /* Update loop info. */
9771 loop_info->initial_value = reg;
9772 loop_info->initial_equiv_value = reg;
9773 loop_info->final_value = const0_rtx;
9774 loop_info->final_equiv_value = const0_rtx;
9775 loop_info->comparison_value = const0_rtx;
9776 loop_info->comparison_code = cmp_code;
9777 loop_info->increment = new_add_val;
9779 /* Inc LABEL_NUSES so that delete_insn will
9780 not delete the label. */
9781 LABEL_NUSES (XEXP (jump_label, 0))++;
9783 /* If we have a separate comparison insn that does more
9784 than just set cc0, the result of the comparison might
9785 be used outside the loop. */
9786 keep_first_compare = (compare_and_branch == 2
9787 #ifdef HAVE_CC0
9788 && sets_cc0_p (first_compare) <= 0
9789 #endif
9792 /* Emit an insn after the end of the loop to set the biv's
9793 proper exit value if it is used anywhere outside the loop. */
9794 if (keep_first_compare
9795 || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
9796 || ! bl->init_insn
9797 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
9798 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
9800 if (keep_first_compare)
9801 loop_insn_sink (loop, PATTERN (first_compare));
9803 /* Delete compare/branch at end of loop. */
9804 delete_related_insns (PREV_INSN (loop_end));
9805 if (compare_and_branch == 2)
9806 delete_related_insns (first_compare);
9808 /* Add new compare/branch insn at end of loop. */
9809 start_sequence ();
9810 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
9811 mode, 0,
9812 XEXP (jump_label, 0));
9813 tem = get_insns ();
9814 end_sequence ();
9815 emit_jump_insn_before (tem, loop_end);
9817 for (tem = PREV_INSN (loop_end);
9818 tem && !JUMP_P (tem);
9819 tem = PREV_INSN (tem))
9822 if (tem)
9823 JUMP_LABEL (tem) = XEXP (jump_label, 0);
9825 if (nonneg)
9827 if (tem)
9829 /* Increment of LABEL_NUSES done above. */
9830 /* Register is now always nonnegative,
9831 so add REG_NONNEG note to the branch. */
9832 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
9833 REG_NOTES (tem));
9835 bl->nonneg = 1;
9838 /* No insn may reference both the reversed and another biv or it
9839 will fail (see comment near the top of the loop reversal
9840 code).
9841 Earlier on, we have verified that the biv has no use except
9842 counting, or it is the only biv in this function.
9843 However, the code that computes no_use_except_counting does
9844 not verify reg notes. It's possible to have an insn that
9845 references another biv, and has a REG_EQUAL note with an
9846 expression based on the reversed biv. To avoid this case,
9847 remove all REG_EQUAL notes based on the reversed biv
9848 here. */
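/* (The REG_NOTES list is walked below through a pointer-to-pointer,
   so matching REG_EQUAL notes can be unlinked in place without
   keeping a separate "previous" pointer.) */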
9849 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9850 if (INSN_P (p))
9852 rtx *pnote;
9853 rtx set = single_set (p);
9854 /* If this is a set of a GIV based on the reversed biv, any
9855 REG_EQUAL notes should still be correct. */
9856 if (! set
9857 || !REG_P (SET_DEST (set))
9858 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
9859 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
9860 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
9861 for (pnote = &REG_NOTES (p); *pnote;)
9863 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
9864 && reg_mentioned_p (regno_reg_rtx[bl->regno],
9865 XEXP (*pnote, 0)))
9866 *pnote = XEXP (*pnote, 1);
9867 else
9868 pnote = &XEXP (*pnote, 1);
9872 /* Mark that this biv has been reversed. Each giv which depends
9873 on this biv, and which is also live past the end of the loop
9874 will have to be fixed up. */
9876 bl->reversed = 1;
9878 if (loop_dump_stream)
9880 fprintf (loop_dump_stream, "Reversed loop");
9881 if (bl->nonneg)
9882 fprintf (loop_dump_stream, " and added reg_nonneg\n");
9883 else
9884 fprintf (loop_dump_stream, "\n");
9887 return 1;
9892 return 0;
9895 /* Verify whether the biv BL appears to be eliminable,
9896 based on the insns in the loop that refer to it.
9898 If ELIMINATE_P is nonzero, actually do the elimination.
9900 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
9901 determine whether invariant insns should be placed inside or at the
9902 start of the loop. */
9904 static int
9905 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
9906 int eliminate_p, int threshold, int insn_count)
9908 struct loop_ivs *ivs = LOOP_IVS (loop);
9909 rtx reg = bl->biv->dest_reg;
9910 rtx p;
9912 /* Scan all insns in the loop, stopping if we find one that uses the
9913 biv in a way that we cannot eliminate. */
9915 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9917 enum rtx_code code = GET_CODE (p);
9918 basic_block where_bb = 0;
9919 rtx where_insn = threshold >= insn_count ? 0 : p;
9920 rtx note;
9922 /* If this is a libcall that sets a giv, skip ahead to its end. */
9923 if (INSN_P (p))
9925 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
9927 if (note)
9929 rtx last = XEXP (note, 0);
9930 rtx set = single_set (last);
9932 if (set && REG_P (SET_DEST (set)))
9934 unsigned int regno = REGNO (SET_DEST (set));
9936 if (regno < ivs->n_regs
9937 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
9938 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
9939 p = last;
9944 /* Closely examine the insn if the biv is mentioned. */
9945 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
9946 && reg_mentioned_p (reg, PATTERN (p))
9947 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
9948 eliminate_p, where_bb, where_insn))
9950 if (loop_dump_stream)
9951 fprintf (loop_dump_stream,
9952 "Cannot eliminate biv %d: biv used in insn %d.\n",
9953 bl->regno, INSN_UID (p));
9954 break;
9957 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
9958 if (eliminate_p
9959 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
9960 && reg_mentioned_p (reg, XEXP (note, 0)))
9961 remove_note (p, note);
9964 if (p == loop->end)
9966 if (loop_dump_stream)
9967 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
9968 bl->regno, eliminate_p ? "was" : "can be");
9969 return 1;
9972 return 0;
9975 /* INSN and REFERENCE are instructions in the same insn chain.
9976 Return nonzero if INSN is first. */
9978 static int
9979 loop_insn_first_p (rtx insn, rtx reference)
9981 rtx p, q;
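/* Walk forward from both insns simultaneously: whichever reaches the
   other (or the end of the chain) first comes first. The luid
   comparison below short-circuits the walk once both insns are known
   to have valid luids. */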
9983 for (p = insn, q = reference;;)
9985 /* Start with test for not first so that INSN == REFERENCE yields not
9986 first. */
9987 if (q == insn || ! p)
9988 return 0;
9989 if (p == reference || ! q)
9990 return 1;
9992 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
9993 previous insn, hence the <= comparison below does not work if
9994 P is a note. */
9995 if (INSN_UID (p) < max_uid_for_loop
9996 && INSN_UID (q) < max_uid_for_loop
9997 && !NOTE_P (p))
9998 return INSN_LUID (p) <= INSN_LUID (q);
10000 if (INSN_UID (p) >= max_uid_for_loop
10001 || NOTE_P (p))
10002 p = NEXT_INSN (p);
10003 if (INSN_UID (q) >= max_uid_for_loop)
10004 q = NEXT_INSN (q);
10008 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
10009 the offset that we have to take into account due to auto-increment /
10010 biv derivation is zero. */
10011 static int
10012 biv_elimination_giv_has_0_offset (struct induction *biv,
10013 struct induction *giv, rtx insn)
10015 /* If the giv V had the auto-inc address optimization applied
10016 to it, and INSN occurs between the giv insn and the biv
10017 insn, then we'd have to adjust the value used here.
10018 This is rare, so we don't bother to make this possible. */
10019 if (giv->auto_inc_opt
10020 && ((loop_insn_first_p (giv->insn, insn)
10021 && loop_insn_first_p (insn, biv->insn))
10022 || (loop_insn_first_p (biv->insn, insn)
10023 && loop_insn_first_p (insn, giv->insn))))
10024 return 0;
10026 return 1;
10029 /* If BL appears in X (part of the pattern of INSN), see if we can
10030 eliminate its use. If so, return 1. If not, return 0.
10032 If BIV does not appear in X, return 1.
10034 If ELIMINATE_P is nonzero, actually do the elimination.
10035 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
10036 Depending on how many items have been moved out of the loop, it
10037 will either be before INSN (when WHERE_INSN is nonzero) or at the
10038 start of the loop (when WHERE_INSN is zero). */
10040 static int
10041 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
10042 struct iv_class *bl, int eliminate_p,
10043 basic_block where_bb, rtx where_insn)
10045 enum rtx_code code = GET_CODE (x);
10046 rtx reg = bl->biv->dest_reg;
10047 enum machine_mode mode = GET_MODE (reg);
10048 struct induction *v;
10049 rtx arg, tem;
10050 #ifdef HAVE_cc0
10051 rtx new;
10052 #endif
10053 int arg_operand;
10054 const char *fmt;
10055 int i, j;
10057 switch (code)
10059 case REG:
10060 /* If we haven't already been able to do something with this BIV,
10061 we can't eliminate it. */
10062 if (x == reg)
10063 return 0;
10064 return 1;
10066 case SET:
10067 /* If this sets the BIV, it is not a problem. */
10068 if (SET_DEST (x) == reg)
10069 return 1;
10071 /* If this is an insn that defines a giv, it is also ok because
10072 it will go away when the giv is reduced. */
10073 for (v = bl->giv; v; v = v->next_iv)
10074 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
10075 return 1;
10077 #ifdef HAVE_cc0
10078 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
10080 /* Can replace with any giv that was reduced and
10081 that has (MULT_VAL != 0) and (ADD_VAL == 0).
10082 Require a constant for MULT_VAL, so we know it's nonzero.
10083 ??? We disable this optimization to avoid potential
10084 overflows. */
10086 for (v = bl->giv; v; v = v->next_iv)
10087 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
10088 && v->add_val == const0_rtx
10089 && ! v->ignore && ! v->maybe_dead && v->always_computable
10090 && v->mode == mode
10091 && 0)
10093 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10094 continue;
10096 if (! eliminate_p)
10097 return 1;
10099 /* If the giv has the opposite direction of change,
10100 then reverse the comparison. */
10101 if (INTVAL (v->mult_val) < 0)
10102 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
10103 const0_rtx, v->new_reg);
10104 else
10105 new = v->new_reg;
10107 /* We can probably test that giv's reduced reg. */
10108 if (validate_change (insn, &SET_SRC (x), new, 0))
10109 return 1;
10112 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
10113 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
10114 Require a constant for MULT_VAL, so we know it's nonzero.
10115 ??? Do this only if ADD_VAL is a pointer to avoid a potential
10116 overflow problem. */
10118 for (v = bl->giv; v; v = v->next_iv)
10119 if (GET_CODE (v->mult_val) == CONST_INT
10120 && v->mult_val != const0_rtx
10121 && ! v->ignore && ! v->maybe_dead && v->always_computable
10122 && v->mode == mode
10123 && (GET_CODE (v->add_val) == SYMBOL_REF
10124 || GET_CODE (v->add_val) == LABEL_REF
10125 || GET_CODE (v->add_val) == CONST
10126 || (REG_P (v->add_val)
10127 && REG_POINTER (v->add_val))))
10129 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10130 continue;
10132 if (! eliminate_p)
10133 return 1;
10135 /* If the giv has the opposite direction of change,
10136 then reverse the comparison. */
10137 if (INTVAL (v->mult_val) < 0)
10138 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
10139 v->new_reg);
10140 else
10141 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
10142 copy_rtx (v->add_val));
10144 /* Replace biv with the giv's reduced register. */
10145 update_reg_last_use (v->add_val, insn);
10146 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10147 return 1;
10149 /* Insn doesn't support that constant or invariant. Copy it
10150 into a register (it will be a loop invariant). */
10151 tem = gen_reg_rtx (GET_MODE (v->new_reg));
10153 loop_insn_emit_before (loop, 0, where_insn,
10154 gen_move_insn (tem,
10155 copy_rtx (v->add_val)));
10157 /* Substitute the new register for its invariant value in
10158 the compare expression. */
10159 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
10160 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10161 return 1;
10164 #endif
10165 break;
10167 case COMPARE:
10168 case EQ: case NE:
10169 case GT: case GE: case GTU: case GEU:
10170 case LT: case LE: case LTU: case LEU:
10171 /* See if either argument is the biv. */
10172 if (XEXP (x, 0) == reg)
10173 arg = XEXP (x, 1), arg_operand = 1;
10174 else if (XEXP (x, 1) == reg)
10175 arg = XEXP (x, 0), arg_operand = 0;
10176 else
10177 break;
10179 if (CONSTANT_P (arg))
10181 /* First try to replace with any giv that has constant positive
10182 mult_val and constant add_val. We might be able to support
10183 negative mult_val, but it seems complex to do it in general. */
10185 for (v = bl->giv; v; v = v->next_iv)
10186 if (GET_CODE (v->mult_val) == CONST_INT
10187 && INTVAL (v->mult_val) > 0
10188 && (GET_CODE (v->add_val) == SYMBOL_REF
10189 || GET_CODE (v->add_val) == LABEL_REF
10190 || GET_CODE (v->add_val) == CONST
10191 || (REG_P (v->add_val)
10192 && REG_POINTER (v->add_val)))
10193 && ! v->ignore && ! v->maybe_dead && v->always_computable
10194 && v->mode == mode)
10196 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10197 continue;
10199 /* Don't eliminate if the linear combination that makes up
10200 the giv overflows when it is applied to ARG. */
10201 if (GET_CODE (arg) == CONST_INT)
10203 rtx add_val;
10205 if (GET_CODE (v->add_val) == CONST_INT)
10206 add_val = v->add_val;
10207 else
10208 add_val = const0_rtx;
10210 if (const_mult_add_overflow_p (arg, v->mult_val,
10211 add_val, mode, 1))
10212 continue;
10215 if (! eliminate_p)
10216 return 1;
10218 /* Replace biv with the giv's reduced reg. */
10219 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
10221 /* If all constants are actually constant integers and
10222 the derived constant can be directly placed in the COMPARE,
10223 do so. */
10224 if (GET_CODE (arg) == CONST_INT
10225 && GET_CODE (v->add_val) == CONST_INT)
10227 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
10228 v->add_val, mode, 1);
10230 else
10232 /* Otherwise, load it into a register. */
10233 tem = gen_reg_rtx (mode);
10234 loop_iv_add_mult_emit_before (loop, arg,
10235 v->mult_val, v->add_val,
10236 tem, where_bb, where_insn);
10239 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10241 if (apply_change_group ())
10242 return 1;
10245 /* Look for giv with positive constant mult_val and nonconst add_val.
10246 Insert insns to calculate new compare value.
10247 ??? Turn this off due to possible overflow. */
10249 for (v = bl->giv; v; v = v->next_iv)
10250 if (GET_CODE (v->mult_val) == CONST_INT
10251 && INTVAL (v->mult_val) > 0
10252 && ! v->ignore && ! v->maybe_dead && v->always_computable
10253 && v->mode == mode
10254 && 0)
10256 rtx tem;
10258 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10259 continue;
10261 if (! eliminate_p)
10262 return 1;
10264 tem = gen_reg_rtx (mode);
10266 /* Replace biv with giv's reduced register. */
10267 validate_change (insn, &XEXP (x, 1 - arg_operand),
10268 v->new_reg, 1);
10270 /* Compute value to compare against. */
10271 loop_iv_add_mult_emit_before (loop, arg,
10272 v->mult_val, v->add_val,
10273 tem, where_bb, where_insn);
10274 /* Use it in this insn. */
10275 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10276 if (apply_change_group ())
10277 return 1;
10280 else if (REG_P (arg) || MEM_P (arg))
10282 if (loop_invariant_p (loop, arg) == 1)
10284 /* Look for giv with constant positive mult_val and nonconst
10285 add_val. Insert insns to compute new compare value.
10286 ??? Turn this off due to possible overflow. */
10288 for (v = bl->giv; v; v = v->next_iv)
10289 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
10290 && ! v->ignore && ! v->maybe_dead && v->always_computable
10291 && v->mode == mode
10292 && 0)
10294 rtx tem;
10296 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10297 continue;
10299 if (! eliminate_p)
10300 return 1;
10302 tem = gen_reg_rtx (mode);
10304 /* Replace biv with giv's reduced register. */
10305 validate_change (insn, &XEXP (x, 1 - arg_operand),
10306 v->new_reg, 1);
10308 /* Compute value to compare against. */
10309 loop_iv_add_mult_emit_before (loop, arg,
10310 v->mult_val, v->add_val,
10311 tem, where_bb, where_insn);
10312 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10313 if (apply_change_group ())
10314 return 1;
10318 /* This code has problems. Basically, when deciding whether we
10319 will eliminate BL, you can't know whether a particular giv of
10320 ARG will be reduced. If it isn't going to be reduced,
10321 we can't eliminate BL. We can try forcing it to be reduced,
10322 but that can generate poor code.
10324 The problem is that the benefit of reducing TV, below, should
10325 be increased if BL can actually be eliminated, but this means
10326 we might have to do a topological sort of the order in which
10327 we try to process bivs. It doesn't seem worthwhile to do
10328 this sort of thing now. */
10330 #if 0
10331 /* Otherwise the reg compared with had better be a biv. */
10332 if (!REG_P (arg)
10333 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
10334 return 0;
10336 /* Look for a pair of givs, one for each biv,
10337 with identical coefficients. */
10338 for (v = bl->giv; v; v = v->next_iv)
10340 struct induction *tv;
10342 if (v->ignore || v->maybe_dead || v->mode != mode)
10343 continue;
10345 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
10346 tv = tv->next_iv)
10347 if (! tv->ignore && ! tv->maybe_dead
10348 && rtx_equal_p (tv->mult_val, v->mult_val)
10349 && rtx_equal_p (tv->add_val, v->add_val)
10350 && tv->mode == mode)
10352 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10353 continue;
10355 if (! eliminate_p)
10356 return 1;
10358 /* Replace biv with its giv's reduced reg. */
10359 XEXP (x, 1 - arg_operand) = v->new_reg;
10360 /* Replace other operand with the other giv's
10361 reduced reg. */
10362 XEXP (x, arg_operand) = tv->new_reg;
10363 return 1;
10366 #endif
10369 /* If we get here, the biv can't be eliminated. */
10370 return 0;
10372 case MEM:
10373 /* If this address is a DEST_ADDR giv, it doesn't matter if the
10374 biv is used in it, since it will be replaced. */
10375 for (v = bl->giv; v; v = v->next_iv)
10376 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
10377 return 1;
10378 break;
10380 default:
10381 break;
10384 /* See if any subexpression fails elimination. */
10385 fmt = GET_RTX_FORMAT (code);
10386 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
10388 switch (fmt[i])
10390 case 'e':
10391 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
10392 eliminate_p, where_bb, where_insn))
10393 return 0;
10394 break;
10396 case 'E':
10397 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10398 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
10399 eliminate_p, where_bb, where_insn))
10400 return 0;
10401 break;
10405 return 1;
10408 /* Return nonzero if the last use of REG
10409 is in an insn following INSN in the same basic block. */
10411 static int
10412 last_use_this_basic_block (rtx reg, rtx insn)
10414 rtx n;
10415 for (n = insn;
10416 n && !LABEL_P (n) && !JUMP_P (n);
10417 n = NEXT_INSN (n))
10419 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
10420 return 1;
10422 return 0;
10425 /* Called via `note_stores' to record the initial value of a biv. Here we
10426 just record the location of the set and process it later. */
10428 static void
10429 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
10431 struct loop_ivs *ivs = (struct loop_ivs *) data;
10432 struct iv_class *bl;
10434 if (!REG_P (dest)
10435 || REGNO (dest) >= ivs->n_regs
10436 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
10437 return;
10439 bl = REG_IV_CLASS (ivs, REGNO (dest));
10441 /* If this is the first set found, record it. */
10442 if (bl->init_insn == 0)
10444 bl->init_insn = note_insn;
10445 bl->init_set = set;
10449 /* If any of the registers in X are "old" and currently have a last use earlier
10450 than INSN, update them to have a last use of INSN. Their actual last use
10451 will be the previous insn but it will not have a valid uid_luid so we can't
10452 use it. X must be a source expression only. */
10454 static void
10455 update_reg_last_use (rtx x, rtx insn)
10457 /* Check for the case where INSN does not have a valid luid. In this case,
10458 there is no need to modify the regno_last_uid, as this can only happen
10459 when code is inserted after the loop_end to set a pseudo's final value,
10460 and hence this insn will never be the last use of x.
10461 ???? This comment is not correct. See for example loop_givs_reduce.
10462 This may insert an insn before another new insn. */
10463 if (REG_P (x) && REGNO (x) < max_reg_before_loop
10464 && INSN_UID (insn) < max_uid_for_loop
10465 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
10467 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
10469 else
10471 int i, j;
10472 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
10473 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
10475 if (fmt[i] == 'e')
10476 update_reg_last_use (XEXP (x, i), insn);
10477 else if (fmt[i] == 'E')
10478 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10479 update_reg_last_use (XVECEXP (x, i, j), insn);
10484 /* Similar to rtlanal.c:get_condition, except that we also put an
10485 invariant last unless both operands are invariants. */
10487 static rtx
10488 get_condition_for_loop (const struct loop *loop, rtx x)
10490 rtx comparison = get_condition (x, (rtx*) 0, false, true);
10492 if (comparison == 0
10493 || ! loop_invariant_p (loop, XEXP (comparison, 0))
10494 || loop_invariant_p (loop, XEXP (comparison, 1)))
10495 return comparison;
10497 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
10498 XEXP (comparison, 1), XEXP (comparison, 0));
10501 /* Scan the function and determine whether it has indirect (computed) jumps.
10503 This is taken mostly from flow.c; similar code exists elsewhere
10504 in the compiler. It may be useful to put this into rtlanal.c. */
10505 static int
10506 indirect_jump_in_function_p (rtx start)
10508 rtx insn;
10510 for (insn = start; insn; insn = NEXT_INSN (insn))
10511 if (computed_jump_p (insn))
10512 return 1;
10514 return 0;
10517 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
10518 documentation for LOOP_MEMS for the definition of `appropriate'.
10519 This function is called from prescan_loop via for_each_rtx. */
10521 static int
10522 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
10524 struct loop_info *loop_info = data;
10525 int i;
10526 rtx m = *mem;
10528 if (m == NULL_RTX)
10529 return 0;
10531 switch (GET_CODE (m))
10533 case MEM:
10534 break;
10536 case CLOBBER:
10537 /* We're not interested in MEMs that are only clobbered. */
10538 return -1;
10540 case CONST_DOUBLE:
10541 /* We're not interested in the MEM associated with a
10542 CONST_DOUBLE, so there's no need to traverse into this. */
10543 return -1;
10545 case EXPR_LIST:
10546 /* We're not interested in any MEMs that only appear in notes. */
10547 return -1;
10549 default:
10550 /* This is not a MEM. */
10551 return 0;
10554 /* See if we've already seen this MEM. */
10555 for (i = 0; i < loop_info->mems_idx; ++i)
10556 if (rtx_equal_p (m, loop_info->mems[i].mem))
10558 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
10559 loop_info->mems[i].mem = m;
10560 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
10561 /* The modes of the two memory accesses are different. If
10562 this happens, something tricky is going on, and we just
10563 don't optimize accesses to this MEM. */
10564 loop_info->mems[i].optimize = 0;
10566 return 0;
10569 /* Resize the array, if necessary. */
10570 if (loop_info->mems_idx == loop_info->mems_allocated)
10572 if (loop_info->mems_allocated != 0)
10573 loop_info->mems_allocated *= 2;
10574 else
10575 loop_info->mems_allocated = 32;
10577 loop_info->mems = xrealloc (loop_info->mems,
10578 loop_info->mems_allocated * sizeof (loop_mem_info));
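/* The array starts at 32 entries and doubles on each subsequent
   growth, keeping the amortized cost of recording a MEM constant. */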
10581 /* Actually insert the MEM. */
10582 loop_info->mems[loop_info->mems_idx].mem = m;
10583 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
10584 because we can't put it in a register. We still store it in the
10585 table, though, so that if we see the same address later, but in a
10586 non-BLK mode, we'll not think we can optimize it at that point. */
10587 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
10588 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
10589 ++loop_info->mems_idx;
10591 return 0;
10595 /* Allocate REGS->ARRAY or reallocate it if it is too small.
10597 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
10598 register that is modified by an insn between FROM and TO. If the
10599 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
10600 more, stop incrementing it, to avoid overflow.
10602 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
10603 register I is used, if it is only used once. Otherwise, it is set
10604 to 0 (for no uses) or const0_rtx for more than one use. This
10605 parameter may be zero, in which case this processing is not done.
10607 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
10608 optimize register I. */
10610 static void
10611 loop_regs_scan (const struct loop *loop, int extra_size)
10613 struct loop_regs *regs = LOOP_REGS (loop);
10614 int old_nregs;
10615 /* last_set[n] is nonzero iff reg n has been set in the current
10616 basic block. In that case, it is the insn that last set reg n. */
10617 rtx *last_set;
10618 rtx insn;
10619 int i;
10621 old_nregs = regs->num;
10622 regs->num = max_reg_num ();
10624 /* Grow the regs array if not allocated or too small. */
10625 if (regs->num >= regs->size)
10627 regs->size = regs->num + extra_size;
10629 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
10631 /* Zero the new elements. */
10632 memset (regs->array + old_nregs, 0,
10633 (regs->size - old_nregs) * sizeof (*regs->array));
10636 /* Clear previously scanned fields but do not clear n_times_set. */
10637 for (i = 0; i < old_nregs; i++)
10639 regs->array[i].set_in_loop = 0;
10640 regs->array[i].may_not_optimize = 0;
10641 regs->array[i].single_usage = NULL_RTX;
10644 last_set = xcalloc (regs->num, sizeof (rtx));
10646 /* Scan the loop, recording register usage. */
10647 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10648 insn = NEXT_INSN (insn))
10650 if (INSN_P (insn))
10652 /* Record registers that have exactly one use. */
10653 find_single_use_in_loop (regs, insn, PATTERN (insn));
10655 /* Include uses in REG_EQUAL notes. */
10656 if (REG_NOTES (insn))
10657 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
10659 if (GET_CODE (PATTERN (insn)) == SET
10660 || GET_CODE (PATTERN (insn)) == CLOBBER)
10661 count_one_set (regs, insn, PATTERN (insn), last_set);
10662 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
10664 int i;
10665 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
10666 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
10667 last_set);
10671 if (LABEL_P (insn) || JUMP_P (insn))
10672 memset (last_set, 0, regs->num * sizeof (rtx));
10674 /* Invalidate all registers used for function argument passing.
10675 We check rtx_varies_p for the same reason as below, to allow
10676 optimizing PIC calculations. */
10677 if (CALL_P (insn))
10679 rtx link;
10680 for (link = CALL_INSN_FUNCTION_USAGE (insn);
10681 link;
10682 link = XEXP (link, 1))
10684 rtx op, reg;
10686 if (GET_CODE (op = XEXP (link, 0)) == USE
10687 && REG_P (reg = XEXP (op, 0))
10688 && rtx_varies_p (reg, 1))
10689 regs->array[REGNO (reg)].may_not_optimize = 1;
10694 /* Invalidate all hard registers clobbered by calls. With one exception:
10695 a call-clobbered PIC register is still function-invariant for our
10696 purposes, since we can hoist any PIC calculations out of the loop.
10697 Thus the call to rtx_varies_p. */
10698 if (LOOP_INFO (loop)->has_call)
10699 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
10700 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
10701 && rtx_varies_p (regno_reg_rtx[i], 1))
10703 regs->array[i].may_not_optimize = 1;
10704 regs->array[i].set_in_loop = 1;
10707 #ifdef AVOID_CCMODE_COPIES
10708 /* Don't try to move insns which set CC registers if we should not
10709 create CCmode register copies. */
10710 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
10711 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
10712 regs->array[i].may_not_optimize = 1;
10713 #endif
10715 /* Set regs->array[I].n_times_set for the new registers. */
10716 for (i = old_nregs; i < regs->num; i++)
10717 regs->array[i].n_times_set = regs->array[i].set_in_loop;
10719 free (last_set);
10722 /* Returns the number of real INSNs in the LOOP. */
10724 static int
10725 count_insns_in_loop (const struct loop *loop)
10727 int count = 0;
10728 rtx insn;
10730 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10731 insn = NEXT_INSN (insn))
10732 if (INSN_P (insn))
10733 ++count;
10735 return count;
10738 /* Move MEMs into registers for the duration of the loop. */
10740 static void
10741 load_mems (const struct loop *loop)
10743 struct loop_info *loop_info = LOOP_INFO (loop);
10744 struct loop_regs *regs = LOOP_REGS (loop);
10745 int maybe_never = 0;
10746 int i;
10747 rtx p, prev_ebb_head;
10748 rtx label = NULL_RTX;
10749 rtx end_label;
10750 /* Nonzero if the next instruction may never be executed. */
10751 int next_maybe_never = 0;
10752 unsigned int last_max_reg = max_reg_num ();
10754 if (loop_info->mems_idx == 0)
10755 return;
10757 /* We cannot use next_label here because it skips over normal insns. */
10758 end_label = next_nonnote_insn (loop->end);
10759 if (end_label && !LABEL_P (end_label))
10760 end_label = NULL_RTX;
10762 /* Check to see if it's possible that some instructions in the loop are
10763 never executed. Also check if there is a goto out of the loop other
10764 than right after the end of the loop. */
10765 for (p = next_insn_in_loop (loop, loop->scan_start);
10766 p != NULL_RTX;
10767 p = next_insn_in_loop (loop, p))
10769 if (LABEL_P (p))
10770 maybe_never = 1;
10771 else if (JUMP_P (p)
10772 /* If we enter the loop in the middle, and scan
10773 around to the beginning, don't set maybe_never
10774 for that. This must be an unconditional jump,
10775 otherwise the code at the top of the loop might
10776 never be executed. Unconditional jumps are
10777 followed by a barrier, then the loop end. */
10778 && ! (JUMP_P (p)
10779 && JUMP_LABEL (p) == loop->top
10780 && NEXT_INSN (NEXT_INSN (p)) == loop->end
10781 && any_uncondjump_p (p)))
10783 /* If this is a jump outside of the loop but not right
10784 after the end of the loop, we would have to emit new fixup
10785 sequences for each such label. */
10786 if (/* If we can't tell where control might go when this
10787 JUMP_INSN is executed, we must be conservative. */
10788 !JUMP_LABEL (p)
10789 || (JUMP_LABEL (p) != end_label
10790 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
10791 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
10792 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
10793 return;
10795 if (!any_condjump_p (p))
10796 /* Something complicated. */
10797 maybe_never = 1;
10798 else
10799 /* If there are any more instructions in the loop, they
10800 might not be reached. */
10801 next_maybe_never = 1;
10803 else if (next_maybe_never)
10804 maybe_never = 1;
10807 /* Find start of the extended basic block that enters the loop. */
10808 for (p = loop->start;
10809 PREV_INSN (p) && !LABEL_P (p);
10810 p = PREV_INSN (p))
10812 prev_ebb_head = p;
10814 cselib_init (true);
10816 /* Build table of mems that get set to constant values before the
10817 loop. */
10818 for (; p != loop->start; p = NEXT_INSN (p))
10819 cselib_process_insn (p);
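/* cselib now knows, for each MEM set in the extended basic block
   just before the loop, any constant or register value it was
   given; the search for BEST_EQUIV further below consults this. */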
10821 /* Actually move the MEMs. */
10822 for (i = 0; i < loop_info->mems_idx; ++i)
10824 regset_head load_copies;
10825 regset_head store_copies;
10826 int written = 0;
10827 rtx reg;
10828 rtx mem = loop_info->mems[i].mem;
10829 rtx mem_list_entry;
10831 if (MEM_VOLATILE_P (mem)
10832 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
10833 /* There's no telling whether or not MEM is modified. */
10834 loop_info->mems[i].optimize = 0;
10836 /* Go through the MEMs written to in the loop to see if this
10837 one is aliased by one of them. */
10838 mem_list_entry = loop_info->store_mems;
10839 while (mem_list_entry)
10841 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
10842 written = 1;
10843 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
10844 mem, rtx_varies_p))
10846 /* MEM is indeed aliased by this store. */
10847 loop_info->mems[i].optimize = 0;
10848 break;
10850 mem_list_entry = XEXP (mem_list_entry, 1);
10853 if (flag_float_store && written
10854 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
10855 loop_info->mems[i].optimize = 0;
10857 /* If this MEM is written to, we must be sure that there
10858 are no reads from another MEM that aliases this one. */
10859 if (loop_info->mems[i].optimize && written)
10861 int j;
10863 for (j = 0; j < loop_info->mems_idx; ++j)
10865 if (j == i)
10866 continue;
10867 else if (true_dependence (mem,
10868 VOIDmode,
10869 loop_info->mems[j].mem,
10870 rtx_varies_p))
10872 /* It's not safe to hoist loop_info->mems[i] out of
10873 the loop because writes to it might not be
10874 seen by reads from loop_info->mems[j]. */
10875 loop_info->mems[i].optimize = 0;
10876 break;
10881 if (maybe_never && may_trap_p (mem))
10882 /* We can't access the MEM outside the loop; it might
10883 cause a trap that wouldn't have happened otherwise. */
10884 loop_info->mems[i].optimize = 0;
10886 if (!loop_info->mems[i].optimize)
10887 /* We thought we were going to lift this MEM out of the
10888 loop, but later discovered that we could not. */
10889 continue;
10891 INIT_REG_SET (&load_copies);
10892 INIT_REG_SET (&store_copies);
10894 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
10895 order to keep scan_loop from moving stores to this MEM
10896 out of the loop just because this REG is neither a
10897 user-variable nor used in the loop test. */
10898 reg = gen_reg_rtx (GET_MODE (mem));
10899 REG_USERVAR_P (reg) = 1;
10900 loop_info->mems[i].reg = reg;
10902 /* Now, replace all references to the MEM with the
10903 corresponding pseudos. */
10904 maybe_never = 0;
10905 for (p = next_insn_in_loop (loop, loop->scan_start);
10906 p != NULL_RTX;
10907 p = next_insn_in_loop (loop, p))
10909 if (INSN_P (p))
10911 rtx set;
10913 set = single_set (p);
10915 /* See if this copies the mem into a register that isn't
10916 modified afterwards. We'll try to do copy propagation
10917 a little further on. */
10918 if (set
10919 /* @@@ This test is _way_ too conservative. */
10920 && ! maybe_never
10921 && REG_P (SET_DEST (set))
10922 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
10923 && REGNO (SET_DEST (set)) < last_max_reg
10924 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
10925 && rtx_equal_p (SET_SRC (set), mem))
10926 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
10928 /* See if this copies the mem from a register that isn't
10929 modified afterwards. We'll try to remove the
10930 redundant copy later on by doing a little register
10931 renaming and copy propagation. This will help
10932 to untangle things for the BIV detection code. */
              if (set
                  && ! maybe_never
                  && REG_P (SET_SRC (set))
                  && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
                  && REGNO (SET_SRC (set)) < last_max_reg
                  && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
                  && rtx_equal_p (SET_DEST (set), mem))
                SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));

              /* If this is a call which uses / clobbers this memory
                 location, we must not change the interface here.  */
              if (CALL_P (p)
                  && reg_mentioned_p (loop_info->mems[i].mem,
                                      CALL_INSN_FUNCTION_USAGE (p)))
                {
                  cancel_changes (0);
                  loop_info->mems[i].optimize = 0;
                  break;
                }
              else
                /* Replace the memory reference with the shadow register.  */
                replace_loop_mems (p, loop_info->mems[i].mem,
                                   loop_info->mems[i].reg, written);
            }

          if (LABEL_P (p)
              || JUMP_P (p))
            maybe_never = 1;
        }

      if (! loop_info->mems[i].optimize)
        ; /* We found we couldn't do the replacement, so do nothing.  */
      else if (! apply_change_group ())
        /* We couldn't replace all occurrences of the MEM.  */
        loop_info->mems[i].optimize = 0;
      else
        {
          /* Load the memory immediately before LOOP->START, which is
             the NOTE_LOOP_BEG.  */
          cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
          rtx set;
          rtx best = mem;
          unsigned j;
          struct elt_loc_list *const_equiv = 0;
          reg_set_iterator rsi;

          if (e)
            {
              struct elt_loc_list *equiv;
              struct elt_loc_list *best_equiv = 0;
              for (equiv = e->locs; equiv; equiv = equiv->next)
                {
                  if (CONSTANT_P (equiv->loc))
                    const_equiv = equiv;
                  else if (REG_P (equiv->loc)
                           /* Extending hard register lifetimes causes
                              crashes on SRC targets.  Doing so on non-SRC
                              targets is probably not a good idea either,
                              since we most likely have a pseudo-register
                              equivalence as well.  */
                           && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
                    best_equiv = equiv;
                }
              /* Use the constant equivalence if that is cheap enough.  */
              if (! best_equiv)
                best_equiv = const_equiv;
              else if (const_equiv
                       && (rtx_cost (const_equiv->loc, SET)
                           <= rtx_cost (best_equiv->loc, SET)))
                {
                  best_equiv = const_equiv;
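                  /* Clear CONST_EQUIV: the constant is used as the
                     initializer itself below, so no separate REG_EQUAL
                     note will be needed for it.  */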
                  const_equiv = 0;
                }

              /* If best_equiv is nonzero, we know that MEM is set to a
                 constant or register before the loop.  We will use this
                 knowledge to initialize the shadow register with that
                 constant or reg rather than by loading from MEM.  */
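              /* For instance, if cselib proved that MEM holds
                 (const_int 0) on entry to the loop, BEST becomes
                 (const_int 0) and the hoisted initialization below is
                 a plain constant move instead of a load.  */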
              if (best_equiv)
                best = copy_rtx (best_equiv->loc);
            }

          set = gen_move_insn (reg, best);
          set = loop_insn_hoist (loop, set);
          if (REG_P (best))
            {
              for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
                if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
                  {
                    REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
                    break;
                  }
            }

          if (const_equiv)
            set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));

          if (written)
            {
              if (label == NULL_RTX)
                {
                  label = gen_label_rtx ();
                  emit_label_after (label, loop->end);
                }

              /* Store the memory immediately after END, which is
                 the NOTE_LOOP_END.  */
              set = gen_move_insn (copy_rtx (mem), reg);
              loop_insn_emit_after (loop, 0, label, set);
            }

          if (loop_dump_stream)
            {
              fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
                       REGNO (reg), (written ? "r/w" : "r/o"));
              print_rtl (loop_dump_stream, mem);
              fputc ('\n', loop_dump_stream);
            }

          /* Attempt a bit of copy propagation.  This helps untangle the
             data flow, and enables {basic,general}_induction_var to find
             more bivs/givs.  */
          EXECUTE_IF_SET_IN_REG_SET
            (&load_copies, FIRST_PSEUDO_REGISTER, j, rsi)
            {
              try_copy_prop (loop, reg, j);
            }
          CLEAR_REG_SET (&load_copies);

          EXECUTE_IF_SET_IN_REG_SET
            (&store_copies, FIRST_PSEUDO_REGISTER, j, rsi)
            {
              try_swap_copy_prop (loop, reg, j);
            }
          CLEAR_REG_SET (&store_copies);
        }
    }

  /* Now, we need to replace all references to the previous exit
     label with the new one.  */
  if (label != NULL_RTX && end_label != NULL_RTX)
    for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
      if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
        redirect_jump (p, label, false);

  cselib_finish ();
}

/* For communication between note_reg_stored and its caller.  */
struct note_reg_stored_arg
{
  int set_seen;
  rtx reg;
};

/* Called via note_stores; record in ARG->set_seen whether X, which is
   written to, is equal to the register in ARG.  */
static void
note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
{
  struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
  if (t->reg == x)
    t->set_seen = 1;
}

/* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
   There must be exactly one insn that sets this pseudo; it will be
   deleted if all replacements succeed and we can prove that the register
   is not used after the loop.  */
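
/* A sketch of the intended effect, with made-up pseudo numbers: if
   pseudo 100 was loaded from the shadow register 200 of a hoisted MEM,

       (set (reg 100) (reg 200))   <- the initializing insn
       ... uses of (reg 100) ...   <- each rewritten to use (reg 200)

   then the initializing insn itself is deleted once the first set and
   the last use of pseudo 100 have both been accounted for.  */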

static void
try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
{
  /* This is the reg that we are copying from.  */
  rtx reg_rtx = regno_reg_rtx[regno];
  rtx init_insn = 0;
  rtx insn;
  /* These help keep track of whether we replaced all uses of the reg.  */
  int replaced_last = 0;
  int store_is_first = 0;

  for (insn = next_insn_in_loop (loop, loop->scan_start);
       insn != NULL_RTX;
       insn = next_insn_in_loop (loop, insn))
    {
      rtx set;

      /* Only substitute within one extended basic block from the
         initializing insn.  */
      if (LABEL_P (insn) && init_insn)
        break;

      if (! INSN_P (insn))
        continue;

      /* Is this the initializing insn?  */
      set = single_set (insn);
      if (set
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) == regno)
        {
          gcc_assert (!init_insn);

          init_insn = insn;
          if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
            store_is_first = 1;
        }

      /* Only substitute after seeing the initializing insn.  */
      if (init_insn && insn != init_insn)
        {
          struct note_reg_stored_arg arg;

          replace_loop_regs (insn, reg_rtx, replacement);
          if (REGNO_LAST_UID (regno) == INSN_UID (insn))
            replaced_last = 1;

          /* Stop replacing when REPLACEMENT is modified.  */
          arg.reg = replacement;
          arg.set_seen = 0;
          note_stores (PATTERN (insn), note_reg_stored, &arg);
          if (arg.set_seen)
            {
              rtx note = find_reg_note (insn, REG_EQUAL, NULL);

              /* We may have turned a previously valid REG_EQUAL note
                 into an invalid one: we changed REGNO to REPLACEMENT
                 in this insn, and unlike REGNO, REPLACEMENT is
                 modified here, so the note would now mean something
                 different.  */
              if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
                remove_note (insn, note);
              break;
            }
        }
    }
  gcc_assert (init_insn);
  if (apply_change_group ())
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, " Replaced reg %d", regno);
      if (store_is_first && replaced_last)
        {
          rtx first;
          rtx retval_note;

          /* Assume we're just deleting INIT_INSN.  */
          first = init_insn;
          /* Look for REG_RETVAL note.  If we're deleting the end of
             the libcall sequence, the whole sequence can go.  */
          retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
          /* If we found a REG_RETVAL note, find the first instruction
             in the sequence.  */
          if (retval_note)
            first = XEXP (retval_note, 0);

          /* Delete the instructions.  */
          loop_delete_insns (first, init_insn);
        }
      if (loop_dump_stream)
        fprintf (loop_dump_stream, ".\n");
    }
}

/* Replace all the instructions from FIRST up to and including LAST
   with NOTE_INSN_DELETED notes.  */

static void
loop_delete_insns (rtx first, rtx last)
{
  while (1)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, ", deleting init_insn (%d)",
                 INSN_UID (first));
      delete_insn (first);

      /* If this was the last instruction we're supposed to delete,
         we're done.  */
      if (first == last)
        break;

      first = NEXT_INSN (first);
    }
}

/* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
   loop LOOP if the order of the sets of these registers can be
   swapped.  There must be exactly one insn within the loop that sets
   this pseudo followed immediately by a move insn that sets
   REPLACEMENT with REGNO.  */
static void
try_swap_copy_prop (const struct loop *loop, rtx replacement,
                    unsigned int regno)
{
  rtx insn;
  rtx set = NULL_RTX;
  unsigned int new_regno;

  new_regno = REGNO (replacement);

  for (insn = next_insn_in_loop (loop, loop->scan_start);
       insn != NULL_RTX;
       insn = next_insn_in_loop (loop, insn))
    {
      /* Search for the insn that copies REGNO to NEW_REGNO.  */
      if (INSN_P (insn)
          && (set = single_set (insn))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) == new_regno
          && REG_P (SET_SRC (set))
          && REGNO (SET_SRC (set)) == regno)
        break;
    }

  if (insn != NULL_RTX)
    {
      rtx prev_insn;
      rtx prev_set;

      /* Some DEF-USE info would come in handy here to make this
         function more general.  For now, just check the previous insn
         which is the most likely candidate for setting REGNO.  */

      prev_insn = PREV_INSN (insn);

      if (INSN_P (prev_insn)
          && (prev_set = single_set (prev_insn))
          && REG_P (SET_DEST (prev_set))
          && REGNO (SET_DEST (prev_set)) == regno)
        {
          /* We have:
             (set (reg regno) (expr))
             (set (reg new_regno) (reg regno))

             so try converting this to:
             (set (reg new_regno) (expr))
             (set (reg regno) (reg new_regno))

             The former construct is often generated when a global
             variable used for an induction variable is shadowed by a
             register (NEW_REGNO).  The latter construct improves the
             chances of GIV replacement and BIV elimination.  */
          validate_change (prev_insn, &SET_DEST (prev_set),
                           replacement, 1);
          validate_change (insn, &SET_DEST (set),
                           SET_SRC (set), 1);
          validate_change (insn, &SET_SRC (set),
                           replacement, 1);

          if (apply_change_group ())
            {
              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         " Swapped set of reg %d at %d with reg %d at %d.\n",
                         regno, INSN_UID (insn),
                         new_regno, INSN_UID (prev_insn));

              /* Update first use of REGNO.  */
              if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
                REGNO_FIRST_UID (regno) = INSN_UID (insn);

              /* Now perform copy propagation to hopefully
                 remove all uses of REGNO within the loop.  */
              try_copy_prop (loop, replacement, regno);
            }
        }
    }
}

/* Worker function for find_mem_in_note, called via for_each_rtx.  */

static int
find_mem_in_note_1 (rtx *x, void *data)
{
  if (*x != NULL_RTX && MEM_P (*x))
    {
      rtx *res = (rtx *) data;
      *res = *x;
      return 1;
    }
  return 0;
}

/* Returns the first MEM found in NOTE by depth-first search.  */

static rtx
find_mem_in_note (rtx note)
{
  if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
    return note;
  return NULL_RTX;
}

/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually a pointer
   to a structure describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (rtx *mem, void *data)
{
  loop_replace_args *args = (loop_replace_args *) data;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
         CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  if (!rtx_equal_p (args->match, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  /* Actually replace the MEM.  */
  validate_change (args->insn, mem, args->replacement, 1);

  return 0;
}

static void
replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
{
  loop_replace_args args;

  args.insn = insn;
  args.match = mem;
  args.replacement = reg;

  for_each_rtx (&insn, replace_loop_mem, &args);

  /* If we hoist a mem write out of the loop, then REG_EQUAL
     notes referring to the mem are no longer valid.  */
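  /* E.g. a REG_EQUAL note claiming that some register equals
     (plus (mem X) (const_int 1)) describes a value read from X inside
     the loop; once stores to X are deferred until after the loop, that
     claim may no longer hold.  (The expression here is only an
     illustration.)  */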
  if (written)
    {
      rtx note, sub;
      rtx *link;

      for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
        {
          if (REG_NOTE_KIND (note) == REG_EQUAL
              && (sub = find_mem_in_note (note))
              && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
            {
              /* Remove the note.  */
              validate_change (NULL_RTX, link, XEXP (note, 1), 1);
              break;
            }
        }
    }
}

/* Replace one register with another.  Called through for_each_rtx; PX
   points to the rtx being scanned.  DATA is actually a pointer to a
   structure of arguments.  */

static int
replace_loop_reg (rtx *px, void *data)
{
  rtx x = *px;
  loop_replace_args *args = (loop_replace_args *) data;

  if (x == NULL_RTX)
    return 0;

  if (x == args->match)
    validate_change (args->insn, px, args->replacement, 1);

  return 0;
}

static void
replace_loop_regs (rtx insn, rtx reg, rtx replacement)
{
  loop_replace_args args;

  args.insn = insn;
  args.match = reg;
  args.replacement = replacement;

  for_each_rtx (&insn, replace_loop_reg, &args);
}

/* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
   (ignored in the interim).  */

static rtx
loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
                      basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
                      rtx pattern)
{
  return emit_insn_after (pattern, where_insn);
}


/* If WHERE_INSN is nonzero, emit insn for PATTERN before WHERE_INSN
   in basic block WHERE_BB (ignored in the interim) within the loop;
   otherwise hoist PATTERN into the loop pre-header.  */

static rtx
loop_insn_emit_before (const struct loop *loop,
                       basic_block where_bb ATTRIBUTE_UNUSED,
                       rtx where_insn, rtx pattern)
{
  if (! where_insn)
    return loop_insn_hoist (loop, pattern);
  return emit_insn_before (pattern, where_insn);
}


/* Emit call insn for PATTERN before WHERE_INSN in basic block
   WHERE_BB (ignored in the interim) within the loop.  */

static rtx
loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
                            basic_block where_bb ATTRIBUTE_UNUSED,
                            rtx where_insn, rtx pattern)
{
  return emit_call_insn_before (pattern, where_insn);
}


/* Hoist insn for PATTERN into the loop pre-header.  */

static rtx
loop_insn_hoist (const struct loop *loop, rtx pattern)
{
  return loop_insn_emit_before (loop, 0, loop->start, pattern);
}


/* Hoist call insn for PATTERN into the loop pre-header.  */

static rtx
loop_call_insn_hoist (const struct loop *loop, rtx pattern)
{
  return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
}


/* Sink insn for PATTERN after the loop end.  */

static rtx
loop_insn_sink (const struct loop *loop, rtx pattern)
{
  return loop_insn_emit_before (loop, 0, loop->sink, pattern);
}

/* bl->final_value can be either a general_operand or a PLUS of a
   general_operand and a constant.  Emit a sequence of instructions
   to load it into REG.  */
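/* E.g. FINAL_VALUE might be (plus (reg 50) (const_int 4)) (made-up
   operands); force_operand emits whatever insn sequence the target
   needs to compute that sum into REG.  */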
static rtx
gen_load_of_final_value (rtx reg, rtx final_value)
{
  rtx seq;
  start_sequence ();
  final_value = force_operand (final_value, reg);
  if (final_value != reg)
    emit_move_insn (reg, final_value);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* If the loop has multiple exits, emit insn for PATTERN before the
   loop to ensure that it will always be executed no matter how the
   loop exits.  Otherwise, emit the insn for PATTERN after the loop,
   since this is slightly more efficient.  */

static rtx
loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
{
  if (loop->exit_count)
    return loop_insn_hoist (loop, pattern);
  else
    return loop_insn_sink (loop, pattern);
}

static void
loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
{
  struct iv_class *bl;
  int iv_num = 0;

  if (! loop || ! file)
    return;

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    iv_num++;

  fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    {
      loop_iv_class_dump (bl, file, verbose);
      fputc ('\n', file);
    }
}


static void
loop_iv_class_dump (const struct iv_class *bl, FILE *file,
                    int verbose ATTRIBUTE_UNUSED)
{
  struct induction *v;
  rtx incr;
  int i;

  if (! bl || ! file)
    return;

  fprintf (file, "IV class for reg %d, benefit %d\n",
           bl->regno, bl->total_benefit);

  fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
  if (bl->initial_value)
    {
      fprintf (file, ", init val: ");
      print_simple_rtl (file, bl->initial_value);
    }
  if (bl->initial_test)
    {
      fprintf (file, ", init test: ");
      print_simple_rtl (file, bl->initial_test);
    }
  fputc ('\n', file);

  if (bl->final_value)
    {
      fprintf (file, " Final val: ");
      print_simple_rtl (file, bl->final_value);
      fputc ('\n', file);
    }

  if ((incr = biv_total_increment (bl)))
    {
      fprintf (file, " Total increment: ");
      print_simple_rtl (file, incr);
      fputc ('\n', file);
    }

  /* List the increments.  */
  for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
      print_simple_rtl (file, v->add_val);
      fputc ('\n', file);
    }

  /* List the givs.  */
  for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Giv%d: insn %d, benefit %d, ",
               i, INSN_UID (v->insn), v->benefit);
      if (v->giv_type == DEST_ADDR)
        print_simple_rtl (file, v->mem);
      else
        print_simple_rtl (file, single_set (v->insn));
      fputc ('\n', file);
    }
}

static void
loop_biv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  fprintf (file,
           "Biv %d: insn %d",
           REGNO (v->dest_reg), INSN_UID (v->insn));
  fprintf (file, " const ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}


static void
loop_giv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  if (v->giv_type == DEST_REG)
    fprintf (file, "Giv %d: insn %d",
             REGNO (v->dest_reg), INSN_UID (v->insn));
  else
    fprintf (file, "Dest address: insn %d",
             INSN_UID (v->insn));

  fprintf (file, " src reg %d benefit %d",
           REGNO (v->src_reg), v->benefit);
  fprintf (file, " lifetime %d",
           v->lifetime);

  if (v->replaceable)
    fprintf (file, " replaceable");

  if (v->no_const_addval)
    fprintf (file, " ncav");

  if (v->ext_dependent)
    {
      switch (GET_CODE (v->ext_dependent))
        {
        case SIGN_EXTEND:
          fprintf (file, " ext se");
          break;
        case ZERO_EXTEND:
          fprintf (file, " ext ze");
          break;
        case TRUNCATE:
          fprintf (file, " ext tr");
          break;
        default:
          gcc_unreachable ();
        }
    }

  fputc ('\n', file);
  fprintf (file, " mult ");
  print_simple_rtl (file, v->mult_val);

  fputc ('\n', file);
  fprintf (file, " add ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}


void
debug_ivs (const struct loop *loop)
{
  loop_ivs_dump (loop, stderr, 1);
}


void
debug_iv_class (const struct iv_class *bl)
{
  loop_iv_class_dump (bl, stderr, 1);
}


void
debug_biv (const struct induction *v)
{
  loop_biv_dump (v, stderr, 1);
}


void
debug_giv (const struct induction *v)
{
  loop_giv_dump (v, stderr, 1);
}

#define LOOP_BLOCK_NUM_1(INSN) \
((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)

/* The notes do not have an assigned block, so look at the next insn.  */
#define LOOP_BLOCK_NUM(INSN) \
((INSN) ? (NOTE_P (INSN) \
           ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
           : LOOP_BLOCK_NUM_1 (INSN)) \
 : -1)

#define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)

static void
loop_dump_aux (const struct loop *loop, FILE *file,
               int verbose ATTRIBUTE_UNUSED)
{
  rtx label;

  if (! loop || ! file || !BB_HEAD (loop->first))
    return;

  /* Print diagnostics to compare our concept of a loop with
     what the loop notes say.  */
  if (! PREV_INSN (BB_HEAD (loop->first))
      || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
      || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
         != NOTE_INSN_LOOP_BEG)
    fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
             INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
  if (! NEXT_INSN (BB_END (loop->last))
      || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
      || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
         != NOTE_INSN_LOOP_END)
    fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
             INSN_UID (NEXT_INSN (BB_END (loop->last))));

  if (loop->start)
    {
      fprintf (file,
               ";; start %d (%d), end %d (%d)\n",
               LOOP_BLOCK_NUM (loop->start),
               LOOP_INSN_UID (loop->start),
               LOOP_BLOCK_NUM (loop->end),
               LOOP_INSN_UID (loop->end));
      fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
               LOOP_BLOCK_NUM (loop->top),
               LOOP_INSN_UID (loop->top),
               LOOP_BLOCK_NUM (loop->scan_start),
               LOOP_INSN_UID (loop->scan_start));
      fprintf (file, ";; exit_count %d", loop->exit_count);
      if (loop->exit_count)
        {
          fputs (", labels:", file);
          for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
            {
              fprintf (file, " %d ",
                       LOOP_INSN_UID (XEXP (label, 0)));
            }
        }
      fputs ("\n", file);
    }
}

/* Call this function from the debugger to dump LOOP.  */

void
debug_loop (const struct loop *loop)
{
  flow_loop_dump (loop, stderr, loop_dump_aux, 1);
}

/* Call this function from the debugger to dump LOOPS.  */

void
debug_loops (const struct loops *loops)
{
  flow_loops_dump (loops, stderr, loop_dump_aux, 1);
}