/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995,
   1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.

   Basic induction variables (BIVs) are pseudo registers that are set
   within a loop only by incrementing or decrementing their value.
   General induction variables (GIVs) are pseudo registers whose value
   is a linear function of a basic induction variable.  BIVs are
   recognized by `basic_induction_var'; GIVs by `general_induction_var'.

   Once induction variables are identified, strength reduction is
   applied to the general induction variables, and induction variable
   elimination is applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value, and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */

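/* Illustration only, not part of the original sources: in a source
   loop such as

	for (i = 0; i < n; i++)
	  q[4 * i + 2] = 0;

   the counter `i' is a biv, since the loop changes it only by
   incrementing it, while the address expression `4 * i + 2' is a giv,
   since it is a linear function of the biv `i'.  Strength reduction
   replaces the per-iteration multiply with a pointer that starts at
   `q + 2' before the loop and is bumped by 4 each time around.  */
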
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"
#include "ggc.h"

/* Get the loop info pointer of a loop.  */
#define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux)

/* Get a pointer to the loop movables structure.  */
#define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables)

/* Get a pointer to the loop registers structure.  */
#define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs)

/* Get a pointer to the loop induction variables structure.  */
#define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs)

/* Get the luid of an insn.  Catch the error of trying to reference the LUID
   of an insn added during loop optimization, since these don't have LUIDs.  */

#define INSN_LUID(INSN) \
  (gcc_assert (INSN_UID (INSN) < max_uid_for_loop), uid_luid[INSN_UID (INSN)])

#define REGNO_FIRST_LUID(REGNO) \
  (REGNO_FIRST_UID (REGNO) < max_uid_for_loop \
   ? uid_luid[REGNO_FIRST_UID (REGNO)] \
   : 0)
#define REGNO_LAST_LUID(REGNO) \
  (REGNO_LAST_UID (REGNO) < max_uid_for_loop \
   ? uid_luid[REGNO_LAST_UID (REGNO)] \
   : INT_MAX)

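/* Note that for a register whose first or last use is in an insn
   created during loop optimization (uid >= max_uid_for_loop), the two
   macros above fall back to 0 and INT_MAX respectively, i.e. the
   conservative assumption that the register is live across the whole
   function.  */
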
/* A "basic induction variable" or biv is a pseudo reg that is set
   (within this loop) only by incrementing or decrementing it.  */
/* A "general induction variable" or giv is a pseudo reg whose
   value is a linear function of a biv.  */

/* Bivs are recognized by `basic_induction_var';
   Givs by `general_induction_var'.  */

/* An enum for the two different types of givs, those that are used
   as memory addresses and those that are calculated into registers.  */
enum g_types
{
  DEST_ADDR,
  DEST_REG
};

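/* Illustration only, not part of the original sources: for the source
   statement `a[i] = 0', the address computation `a + 4 * i' appearing
   inside the MEM is a DEST_ADDR giv; for `j = 4 * i + 2', the register
   holding `j' is a DEST_REG giv.  */
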
/* A `struct induction' is created for every instruction that sets
   an induction variable (either a biv or a giv).  */

struct induction
{
  rtx insn;			/* The insn that sets a biv or giv */
  rtx new_reg;			/* New register, containing strength reduced
				   version of this giv.  */
  rtx src_reg;			/* Biv from which this giv is computed.
				   (If this is a biv, then this is the biv.)  */
  enum g_types giv_type;	/* Indicate whether DEST_ADDR or DEST_REG */
  rtx dest_reg;			/* Destination register for insn: this is the
				   register which was the biv or giv.
				   For a biv, this equals src_reg.
				   For a DEST_ADDR type giv, this is 0.  */
  rtx *location;		/* Place in the insn where this giv occurs.
				   If GIV_TYPE is DEST_REG, this is 0.  */
				/* For a biv, this is the place where add_val
				   was found.  */
  enum machine_mode mode;	/* The mode of this biv or giv */
  rtx mem;			/* For DEST_ADDR, the memory object.  */
  rtx mult_val;			/* Multiplicative factor for src_reg.  */
  rtx add_val;			/* Additive constant for that product.  */
  int benefit;			/* Gain from eliminating this insn.  */
  rtx final_value;		/* If the giv is used outside the loop, and its
				   final value could be calculated, it is put
				   here, and the giv is made replaceable.  Set
				   the giv to this value before the loop.  */
  unsigned combined_with;	/* The number of givs this giv has been
				   combined with.  If nonzero, this giv
				   cannot combine with any other giv.  */
  unsigned replaceable : 1;	/* 1 if we can substitute the strength-reduced
				   variable for the original variable.
				   0 means they must be kept separate and the
				   new one must be copied into the old pseudo
				   reg each time the old one is set.  */
  unsigned not_replaceable : 1;	/* Used to prevent duplicating work.  This is
				   1 if we know that the giv definitely can
				   not be made replaceable, in which case we
				   don't bother checking the variable again
				   even if further info is available.
				   Both this and the above can be zero.  */
  unsigned ignore : 1;		/* 1 prohibits further processing of giv */
  unsigned always_computable : 1;/* 1 if this value is computable every
				    iteration.  */
  unsigned always_executed : 1; /* 1 if this set occurs each iteration.  */
  unsigned maybe_multiple : 1;	/* Only used for a biv and 1 if this biv
				   update may be done multiple times per
				   iteration.  */
  unsigned cant_derive : 1;	/* For giv's, 1 if this giv cannot derive
				   another giv.  This occurs in many cases
				   where a giv's lifetime spans an update to
				   a biv.  */
  unsigned maybe_dead : 1;	/* 1 if this giv might be dead.  In that case,
				   we won't use it to eliminate a biv, it
				   would probably lose.  */
  unsigned auto_inc_opt : 1;	/* 1 if this giv had its increment output next
				   to it to try to form an auto-inc address.  */
  unsigned shared : 1;
  unsigned no_const_addval : 1; /* 1 if add_val does not contain a const.  */
  int lifetime;			/* Length of life of this giv */
  rtx derive_adjustment;	/* If nonzero, is an adjustment to be
				   subtracted from add_val when this giv
				   derives another.  This occurs when the
				   giv spans a biv update by incrementation.  */
  rtx ext_dependent;		/* If nonzero, is a sign or zero extension
				   of a biv on which this giv is dependent.  */
  struct induction *next_iv;	/* For givs, links together all givs that are
				   based on the same biv.  For bivs, links
				   together all biv entries that refer to the
				   same biv register.  */
  struct induction *same;	/* For givs, if the giv has been combined with
				   another giv, this points to the base giv.
				   The base giv will have COMBINED_WITH nonzero.
				   For bivs, if the biv has the same LOCATION
				   as another biv, this points to the base
				   biv.  */
  struct induction *same_insn;	/* If there are multiple identical givs in
				   the same insn, then all but one have this
				   field set, and they all point to the giv
				   that doesn't have this field set.  */
  rtx last_use;			/* For a giv made from a biv increment, this is
				   a substitute for the lifetime information.  */
};

/* A `struct iv_class' is created for each biv.  */

struct iv_class
{
  unsigned int regno;		/* Pseudo reg which is the biv.  */
  int biv_count;		/* Number of insns setting this reg.  */
  struct induction *biv;	/* List of all insns that set this reg.  */
  int giv_count;		/* Number of DEST_REG givs computed from this
				   biv.  The resulting count is only used in
				   check_dbra_loop.  */
  struct induction *giv;	/* List of all insns that compute a giv
				   from this reg.  */
  int total_benefit;		/* Sum of BENEFITs of all those givs.  */
  rtx initial_value;		/* Value of reg at loop start.  */
  rtx initial_test;		/* Test performed on BIV before loop.  */
  rtx final_value;		/* Value of reg at loop end, if known.  */
  struct iv_class *next;	/* Links all class structures together.  */
  rtx init_insn;		/* insn which initializes biv, 0 if none.  */
  rtx init_set;			/* SET of INIT_INSN, if any.  */
  unsigned incremented : 1;	/* 1 if somewhere incremented/decremented */
  unsigned eliminable : 1;	/* 1 if plausible candidate for
				   elimination.  */
  unsigned nonneg : 1;		/* 1 if we added a REG_NONNEG note for
				   this.  */
  unsigned reversed : 1;	/* 1 if we reversed the loop that this
				   biv controls.  */
  unsigned all_reduced : 1;	/* 1 if all givs using this biv have
				   been reduced.  */
};

/* Definitions used by the basic induction variable discovery code.  */
enum iv_mode
{
  UNKNOWN_INDUCT,
  BASIC_INDUCT,
  NOT_BASIC_INDUCT,
  GENERAL_INDUCT
};

/* A `struct iv' is created for every register.  */

struct iv
{
  enum iv_mode type;
  union
  {
    struct iv_class *class;
    struct induction *info;
  } iv;
};

#define REG_IV_TYPE(ivs, n) ivs->regs[n].type
#define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info
#define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class

struct loop_ivs
{
  /* Indexed by register number, contains pointer to `struct
     iv' if register is an induction variable.  */
  struct iv *regs;

  /* Size of regs array.  */
  unsigned int n_regs;

  /* The head of a list which links together (via the next field)
     every iv class for the current loop.  */
  struct iv_class *list;
};

typedef struct loop_mem_info
{
  rtx mem;	/* The MEM itself.  */
  rtx reg;	/* Corresponding pseudo, if any.  */
  int optimize;	/* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;

struct loop_reg
{
  /* Number of times the reg is set during the loop being scanned.
     During code motion, a negative value indicates a reg that has
     been made a candidate; in particular -2 means that it is a
     candidate that we know is equal to a constant and -1 means that
     it is a candidate not known equal to a constant.  After code
     motion, regs moved have 0 (which is accurate now) while the
     failed candidates have the original number of times set.

     Therefore, at all times, == 0 indicates an invariant register;
     < 0 a conditionally invariant one.  */
  int set_in_loop;

  /* Original value of set_in_loop; same except that this value
     is not set negative for a reg whose sets have been made candidates
     and not set to 0 for a reg that is moved.  */
  int n_times_set;

  /* Contains the insn in which a register was used if it was used
     exactly once; contains const0_rtx if it was used more than once.  */
  rtx single_usage;

  /* Nonzero indicates that the register cannot be moved or strength
     reduced.  */
  char may_not_optimize;

  /* Nonzero means reg N has already been moved out of one loop.
     This reduces the desire to move it out of another.  */
  char moved_once;
};

struct loop_regs
{
  int num;			/* Number of regs used in table.  */
  int size;			/* Size of table.  */
  struct loop_reg *array;	/* Register usage info array.  */
  int multiple_uses;		/* Nonzero if a reg has multiple uses.  */
};

struct loop_movables
{
  /* Head of movable chain.  */
  struct movable *head;
  /* Last movable in chain.  */
  struct movable *last;
};

/* Information pertaining to a loop.  */

struct loop_info
{
  /* Nonzero if there is a subroutine call in the current loop.  */
  int has_call;
  /* Nonzero if there is a libcall in the current loop.  */
  int has_libcall;
  /* Nonzero if there is a non-constant call in the current loop.  */
  int has_nonconst_call;
  /* Nonzero if there is a prefetch instruction in the current loop.  */
  int has_prefetch;
  /* Nonzero if there is a volatile memory reference in the current
     loop.  */
  int has_volatile;
  /* Nonzero if there is a tablejump in the current loop.  */
  int has_tablejump;
  /* Nonzero if there are ways to leave the loop other than falling
     off the end.  */
  int has_multiple_exit_targets;
  /* Nonzero if there is an indirect jump in the current function.  */
  int has_indirect_jump;
  /* Register or constant initial loop value.  */
  rtx initial_value;
  /* Register or constant value used for comparison test.  */
  rtx comparison_value;
  /* Register or constant approximate final value.  */
  rtx final_value;
  /* Register or constant initial loop value with term common to
     final_value removed.  */
  rtx initial_equiv_value;
  /* Register or constant final loop value with term common to
     initial_value removed.  */
  rtx final_equiv_value;
  /* Register corresponding to iteration variable.  */
  rtx iteration_var;
  /* Constant loop increment.  */
  rtx increment;
  enum rtx_code comparison_code;
  /* Holds the number of loop iterations.  It is zero if the number
     could not be calculated.  Must be unsigned since the number of
     iterations can be as high as 2^wordsize - 1.  For loops with a
     wider iterator, this number will be zero if the number of loop
     iterations is too large for an unsigned integer to hold.  */
  unsigned HOST_WIDE_INT n_iterations;
  int used_count_register;
  /* The loop iterator induction variable.  */
  struct iv_class *iv;
  /* List of MEMs that are stored in this loop.  */
  rtx store_mems;
  /* Array of MEMs that are used (read or written) in this loop, but
     cannot be aliased by anything in this loop, except perhaps
     themselves.  In other words, if mems[i] is altered during
     the loop, it is altered by an expression that is rtx_equal_p to
     it.  */
  loop_mem_info *mems;
  /* The index of the next available slot in MEMS.  */
  int mems_idx;
  /* The number of elements allocated in MEMS.  */
  int mems_allocated;
  /* Nonzero if we don't know what MEMs were changed in the current
     loop.  This happens if the loop contains a call (in which case
     `has_call' will also be set) or if we store into more than
     NUM_STORES MEMs.  */
  int unknown_address_altered;
  /* The above doesn't count any readonly memory locations that are
     stored.  This does.  */
  int unknown_constant_address_altered;
  /* Count of memory write instructions discovered in the loop.  */
  int num_mem_sets;
  /* The insn where the first of these was found.  */
  rtx first_loop_store_insn;
  /* The chain of movable insns in the loop.  */
  struct loop_movables movables;
  /* The registers used in the loop.  */
  struct loop_regs regs;
  /* The induction variable information in the loop.  */
  struct loop_ivs ivs;
  /* Nonzero if call is in pre_header extended basic block.  */
  int pre_header_has_call;
};

/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (gcc_unreachable (), NULL_RTX)
#endif

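/* The stubs above let the prefetch code in this file compile on
   targets that do not provide a prefetch pattern; since HAVE_prefetch
   is 0 on such targets, gen_prefetch can never actually be called,
   which is why its expansion is gcc_unreachable.  */
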
/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum percentage of memory references that are worth prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif

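/* With the default of 220, that threshold is 220/256, i.e. roughly 86%
   of the references must be dense for prefetching to be worthwhile.  */
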
/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
  ((REGNO) < FIRST_PSEUDO_REGISTER \
   ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

static int *uid_luid;

/* Indexed by INSN_UID, contains a pointer to the (innermost) loop that
   the insn is contained in.  */

static struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

static int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
static unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;  /* Nonzero means it is a mode for
				   a low part that we should avoid changing when
				   clearing the rest of the reg.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
				   the original insn with a copy from that
				   pseudo, rather than deleting it.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};

static FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static rtx find_regs_nested (rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static rtx libcall_other_reg (rtx, rtx);
static void record_excess_regs (rtx, rtx, rtx *);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static HOST_WIDE_INT get_monotonic_increment (struct iv_class *);
static bool biased_biv_fits_mode_p (const struct loop *, struct iv_class *,
				    HOST_WIDE_INT, enum machine_mode,
				    unsigned HOST_WIDE_INT);
static bool biv_fits_mode_p (const struct loop *, struct iv_class *,
			     HOST_WIDE_INT, enum machine_mode, bool);
static bool extension_within_bounds_p (const struct loop *, struct iv_class *,
				       HOST_WIDE_INT, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
static int loop_invariant_p (const struct loop *, rtx);
static rtx loop_insn_hoist (const struct loop *, rtx);
static void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx,
					  rtx, basic_block, rtx);
static rtx loop_insn_emit_before (const struct loop *, basic_block,
				  rtx, rtx);
static int loop_insn_first_p (rtx, rtx);
static rtx get_condition_for_loop (const struct loop *, rtx);
static void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx);
static void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx);
static rtx extend_value_for_giv (struct induction *, rtx);
static rtx loop_insn_sink (const struct loop *, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))

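/* Note that the INSN_UID bound check above also protects the
   INSN_LUID lookups: an insn created during loop optimization has a
   uid >= max_uid_for_loop and no luid entry, so the macro simply
   treats it as out of range instead of tripping INSN_LUID's
   assertion.  */
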
/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}

/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (!NOTE_P (insn)
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  return i + 1;
}

/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (NOTE_P (insn)
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  gcc_assert (get_max_uid () <= max_uid_for_loop);
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	{
	  scan_loop (loop, flags);
	  ggc_collect ();
	}
    }

  end_alias_analysis ();

  /* Clean up.  */
  for (i = 0; i < (int) loops->num; i++)
    free (loops_info[i].mems);

  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}

/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}

/* Find any register references hidden inside X and add them to
   the dependency list DEPS.  This is used to look inside CLOBBER (MEM ...)
   expressions when checking whether a PARALLEL can be pulled out of a loop.  */

static rtx
find_regs_nested (rtx deps, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  if (code == REG)
    deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
  else
    {
      const char *fmt = GET_RTX_FORMAT (code);
      int i, j;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    deps = find_regs_nested (deps, XEXP (x, i));
	  else if (fmt[i] == 'E')
	    for (j = 0; j < XVECLEN (x, i); j++)
	      deps = find_regs_nested (deps, XVECEXP (x, i, j));
	}
    }
  return deps;
}

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && !LABEL_P (p) && ! INSN_P (p)
	 && (!NOTE_P (p)
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (JUMP_P (p)
      /* Loop entry must be unconditional jump (and not a RETURN)  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
	 jumps out of the loop (meaning it's no loop).
	 This case can happen for things like
	 do {..} while (0).  If this label was generated previously
	 by loop, we can't tell anything about it and have to reject
	 the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || !LABEL_P (loop->scan_start))
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	     INSN_UID (loop_start), INSN_UID (loop_end), insn_count);

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall--;
      if (NONJUMP_INSN_P (p))
	{
	  /* Do not scan past an optimization barrier.  */
	  if (GET_CODE (PATTERN (p)) == ASM_INPUT)
	    break;
	  temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
	  if (temp)
	    in_libcall++;
	  if (! in_libcall
	      && (set = single_set (p))
	      && REG_P (SET_DEST (set))
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	      && SET_DEST (set) != pic_offset_table_rtx
#endif
	      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	    {
	      int tem1 = 0;
	      int tem2 = 0;
	      int move_insn = 0;
	      int insert_temp = 0;
	      rtx src = SET_SRC (set);
	      rtx dependencies = 0;

	      /* Figure out what to use as a source of this insn.  If a
		 REG_EQUIV note is given or if a REG_EQUAL note with a
		 constant operand is specified, use it as the source and
		 mark that we should move this insn by calling
		 emit_move_insn rather than duplicating the insn.

		 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
		 note is present.  */
	      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	      if (temp)
		src = XEXP (temp, 0), move_insn = 1;
	      else
		{
		  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		  if (temp && CONSTANT_P (XEXP (temp, 0)))
		    src = XEXP (temp, 0), move_insn = 1;
		  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		    {
		      src = XEXP (temp, 0);
		      /* A libcall block can use regs that don't appear in
			 the equivalent expression.  To move the libcall,
			 we must move those regs too.  */
		      dependencies = libcall_other_reg (p, src);
		    }
		}

	      /* For parallels, add any possible uses to the dependencies, as
		 we can't move the insn without resolving them first.
		 MEMs inside CLOBBERs may also reference registers; these
		 count as implicit uses.  */
	      if (GET_CODE (PATTERN (p)) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		    {
		      rtx x = XVECEXP (PATTERN (p), 0, i);
		      if (GET_CODE (x) == USE)
			dependencies
			  = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					       dependencies);
		      else if (GET_CODE (x) == CLOBBER
			       && MEM_P (XEXP (x, 0)))
			dependencies = find_regs_nested (dependencies,
						XEXP (XEXP (x, 0), 0));
		    }
		}

	      if (/* The register is used in basic blocks other
		     than the one where it is set (meaning that
		     something after this point in the loop might
		     depend on its value before the set).  */
		  ! reg_in_basic_block_p (p, SET_DEST (set))
		  /* And the set is not guaranteed to be executed once
		     the loop starts, or the value before the set is
		     needed before the set occurs...

		     ??? Note we have quadratic behavior here, mitigated
		     by the fact that the previous test will often fail for
		     large loops.  Rather than re-scanning the entire loop
		     each time for register usage, we should build tables
		     of the register usage and use them here instead.  */
		  && (maybe_never
		      || loop_reg_used_before_p (loop, set, p)))
		/* It is unsafe to move the set.  However, it may be OK to
		   move the source into a new pseudo, and substitute a
		   reg-to-reg copy for the original insn.

		   This code used to consider it OK to move a set of a variable
		   which was not created by the user and not used in an exit
		   test.
		   That behavior is incorrect and was removed.  */
		insert_temp = 1;

	      /* Don't try to optimize a MODE_CC set with a constant
		 source.  It probably will be combined with a conditional
		 jump.  */
	      if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
		  && CONSTANT_P (src))
		;
	      /* Don't try to optimize a register that was made
		 by loop-optimization for an inner loop.
		 We don't know its life-span, so we can't compute
		 the benefit.  */
	      else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
		;
	      /* Don't move the source and add a reg-to-reg copy:
		 - with -Os (this certainly increases size),
		 - if the mode doesn't support copy operations (obviously),
		 - if the source is already a reg (the motion will gain nothing),
		 - if the source is a legitimate constant (likewise).  */
	      else if (insert_temp
		       && (optimize_size
			   || ! can_copy_p (GET_MODE (SET_SRC (set)))
			   || REG_P (SET_SRC (set))
			   || (CONSTANT_P (SET_SRC (set))
			       && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
		;
	      else if ((tem = loop_invariant_p (loop, src))
		       && (dependencies == 0
			   || (tem2
			       = loop_invariant_p (loop, dependencies)) != 0)
		       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
			   || (tem1
			       = consec_sets_invariant_p
			       (loop, SET_DEST (set),
				regs->array[REGNO (SET_DEST (set))].set_in_loop,
				p)))
		       /* If the insn can cause a trap (such as divide by zero),
			  can't move it unless it's guaranteed to be executed
			  once loop is entered.  Even a function call might
			  prevent the trap insn from being reached
			  (since it might exit!)  */
		       && ! ((maybe_never || call_passed)
			     && may_trap_p (src)))
		{
		  struct movable *m;
		  int regno = REGNO (SET_DEST (set));

		  /* A potential lossage is a case where two insns
		     can be combined as long as they are both in the loop, but
		     we move one of them outside the loop.  For large loops,
		     this can lose.  The most common case of this is the address
		     of a function being called.

		     Therefore, if this register is marked as being used
		     exactly once and we are in a loop with calls
		     (a "large loop"), see if we can replace the usage of
		     this register with the source of this SET.  If we can,
		     delete this insn.

		     Don't do this if P has a REG_RETVAL note or if we have
		     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

		  if (loop_info->has_call
		      && regs->array[regno].single_usage != 0
		      && regs->array[regno].single_usage != const0_rtx
		      && REGNO_FIRST_UID (regno) == INSN_UID (p)
		      && (REGNO_LAST_UID (regno)
			  == INSN_UID (regs->array[regno].single_usage))
		      && regs->array[regno].set_in_loop == 1
		      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		      && ! side_effects_p (SET_SRC (set))
		      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		      && (! SMALL_REGISTER_CLASSES
			  || (! (REG_P (SET_SRC (set))
				 && (REGNO (SET_SRC (set))
				     < FIRST_PSEUDO_REGISTER))))
		      && regno >= FIRST_PSEUDO_REGISTER
		      /* This test is not redundant; SET_SRC (set) might be
			 a call-clobbered register and the life of REGNO
			 might span a call.  */
		      && ! modified_between_p (SET_SRC (set), p,
					       regs->array[regno].single_usage)
		      && no_labels_between_p (p,
					      regs->array[regno].single_usage)
		      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					       regs->array[regno].single_usage))
		    {
		      /* Replace any usage in a REG_EQUAL note.  Must copy
			 the new source, so that we don't get rtx sharing
			 between the SET_SOURCE and REG_NOTES of insn p.  */
		      REG_NOTES (regs->array[regno].single_usage)
			= (replace_rtx
			   (REG_NOTES (regs->array[regno].single_usage),
			    SET_DEST (set), copy_rtx (SET_SRC (set))));

		      delete_insn (p);
		      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = 0;
		      continue;
		    }

		  m = xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_src = src;
		  m->dependencies = dependencies;
		  m->set_dest = SET_DEST (set);
		  m->force = 0;
		  m->consec
		    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
		  m->done = 0;
		  m->forces = 0;
		  m->partial = 0;
		  m->move_insn = move_insn;
		  m->move_insn_first = 0;
		  m->insert_temp = insert_temp;
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		  m->savemode = VOIDmode;
		  m->regno = regno;
		  /* Set M->cond if either loop_invariant_p
		     or consec_sets_invariant_p returned 2
		     (only conditionally invariant).  */
		  m->cond = ((tem | tem1 | tem2) > 1);
		  m->global = LOOP_REG_GLOBAL_P (loop, regno);
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = regs->array[regno].n_times_set;
		  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		    m->savings += libcall_benefit (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);

		  if (m->consec > 0)
		    {
		      /* It is possible for the first instruction to have a
			 REG_EQUAL note but a non-invariant SET_SRC, so we must
			 remember the status of the first instruction in case
			 the last instruction doesn't have a REG_EQUAL note.  */
		      m->move_insn_first = m->move_insn;

		      /* Skip this insn, not checking REG_LIBCALL notes.  */
		      p = next_nonnote_insn (p);
		      /* Skip the consecutive insns, if there are any.  */
		      p = skip_consec_insns (p, m->consec);
		      /* Back up to the last insn of the consecutive group.  */
		      p = prev_nonnote_insn (p);

		      /* We must now reset m->move_insn, m->is_equiv, and
			 possibly m->set_src to correspond to the effects of
			 all the insns.  */
		      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		      if (temp)
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			{
			  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
			  if (temp && CONSTANT_P (XEXP (temp, 0)))
			    m->set_src = XEXP (temp, 0), m->move_insn = 1;
			  else
			    m->move_insn = 0;
			}

		      m->is_equiv
			= (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		    }
		}
	      /* If this register is always set within a STRICT_LOW_PART
		 or set to zero, then its high bytes are constant.
		 So clear them outside the loop and within the loop
		 just load the low bytes.
		 We must check that the machine has an instruction to do so.
		 Also, if the value loaded into the register
		 depends on the same register, this cannot be done.  */
	      else if (SET_SRC (set) == const0_rtx
		       && NONJUMP_INSN_P (NEXT_INSN (p))
		       && (set1 = single_set (NEXT_INSN (p)))
		       && GET_CODE (set1) == SET
		       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
			   == SET_DEST (set))
		       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
		{
		  int regno = REGNO (SET_DEST (set));
		  if (regs->array[regno].set_in_loop == 2)
		    {
		      struct movable *m;
		      m = xmalloc (sizeof (struct movable));
		      m->next = 0;
		      m->insn = p;
		      m->set_dest = SET_DEST (set);
		      m->dependencies = 0;
		      m->force = 0;
		      m->consec = 0;
		      m->done = 0;
		      m->forces = 0;
		      m->move_insn = 0;
		      m->move_insn_first = 0;
		      m->insert_temp = insert_temp;
		      m->partial = 1;
		      /* If the insn may not be executed on some cycles,
			 we can't clear the whole reg; clear just high part.
			 Not even if the reg is used only within this loop.
			 Consider this:
			 while (1)
			   while (s != t) {
			     if (foo ()) x = *s;
			     use (x);
			   }
			 Clearing x before the inner loop could clobber a value
			 being saved from the last time around the outer loop.
			 However, if the reg is not used outside this loop
			 and all uses of the register are in the same
			 basic block as the store, there is no problem.

			 If this insn was made by loop, we don't know its
			 INSN_LUID and hence must make a conservative
			 assumption.  */
		      m->global = (INSN_UID (p) >= max_uid_for_loop
				   || LOOP_REG_GLOBAL_P (loop, regno)
				   || (labels_in_range_p
				       (p, REGNO_FIRST_LUID (regno))));
		      if (maybe_never && m->global)
			m->savemode = GET_MODE (SET_SRC (set1));
		      else
			m->savemode = VOIDmode;
		      m->regno = regno;
		      m->cond = 0;
		      m->match = 0;
		      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		      m->savings = 1;
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = -1;
		      /* Add M to the end of the chain MOVABLES.  */
		      loop_movables_add (movables, m);
		    }
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((LABEL_P (p) || JUMP_P (p))
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop_end.  */
	       && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
    }

1487 /* If one movable subsumes another, ignore that other. */
1489 ignore_some_movables (movables);
1491 /* For each movable insn, see if the reg that it loads
1492 leads when it dies right into another conditionally movable insn.
1493 If so, record that the second insn "forces" the first one,
1494 since the second can be moved only if the first is. */
1496 force_movables (movables);
1498 /* See if there are multiple movable insns that load the same value.
1499 If there are, make all but the first point at the first one
1500 through the `match' field, and add the priorities of them
1501 all together as the priority of the first. */
1503 combine_movables (movables, regs);
1505 /* Now consider each movable insn to decide whether it is worth moving.
1506 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1508 For machines with few registers this increases code size, so do not
1509 move movables when optimizing for code size on such machines.
1510 (The 18 below is the value for i386.) */
1512 if (!optimize_size
1513 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1515 move_movables (loop, movables, threshold, insn_count);
1517 /* Recalculate regs->array if move_movables has created new
1518 registers. */
1519 if (max_reg_num () > regs->num)
1521 loop_regs_scan (loop, 0);
1522 for (update_start = loop_start;
1523 PREV_INSN (update_start)
1524 && !LABEL_P (PREV_INSN (update_start));
1525 update_start = PREV_INSN (update_start))
1526 ;
1527 update_end = NEXT_INSN (loop_end);
1529 reg_scan_update (update_start, update_end, loop_max_reg);
1530 loop_max_reg = max_reg_num ();
1534 /* Now the candidates whose set_in_loop is still negative are those
1535 that were not moved. Change regs->array[I].set_in_loop to indicate
1536 that those registers are not actually invariant. */
1537 for (i = 0; i < regs->num; i++)
1538 if (regs->array[i].set_in_loop < 0)
1539 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1541 /* Now that we've moved some things out of the loop, we might be able to
1542 hoist even more memory references. */
1543 load_mems (loop);
1545 /* Recalculate regs->array if load_mems has created new registers. */
1546 if (max_reg_num () > regs->num)
1547 loop_regs_scan (loop, 0);
1549 for (update_start = loop_start;
1550 PREV_INSN (update_start)
1551 && !LABEL_P (PREV_INSN (update_start));
1552 update_start = PREV_INSN (update_start))
1553 ;
1554 update_end = NEXT_INSN (loop_end);
1556 reg_scan_update (update_start, update_end, loop_max_reg);
1557 loop_max_reg = max_reg_num ();
1559 if (flag_strength_reduce)
1561 if (update_end && LABEL_P (update_end))
1562 /* Ensure our label doesn't go away. */
1563 LABEL_NUSES (update_end)++;
1565 strength_reduce (loop, flags);
1567 reg_scan_update (update_start, update_end, loop_max_reg);
1568 loop_max_reg = max_reg_num ();
1570 if (update_end && LABEL_P (update_end)
1571 && --LABEL_NUSES (update_end) == 0)
1572 delete_related_insns (update_end);
1576 /* The movable information was needed above for strength reduction; it can be freed now. */
1577 loop_movables_free (movables);
1579 free (regs->array);
1580 regs->array = 0;
1581 regs->num = 0;
1584 /* Add elements to *OUTPUT to record all the pseudo-regs
1585 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1587 static void
1588 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1590 enum rtx_code code;
1591 const char *fmt;
1592 int i;
1594 code = GET_CODE (in_this);
1596 switch (code)
1598 case PC:
1599 case CC0:
1600 case CONST_INT:
1601 case CONST_DOUBLE:
1602 case CONST:
1603 case SYMBOL_REF:
1604 case LABEL_REF:
1605 return;
1607 case REG:
1608 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1609 && ! reg_mentioned_p (in_this, not_in_this))
1610 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1611 return;
1613 default:
1614 break;
1617 fmt = GET_RTX_FORMAT (code);
1618 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1620 int j;
1622 switch (fmt[i])
1624 case 'E':
1625 for (j = 0; j < XVECLEN (in_this, i); j++)
1626 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1627 break;
1629 case 'e':
1630 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1631 break;
1636 /* Check what regs are referred to in the libcall block ending with INSN,
1637 aside from those mentioned in the equivalent value.
1638 If there are none, return 0.
1639 If there are one or more, return an EXPR_LIST containing all of them. */
1641 static rtx
1642 libcall_other_reg (rtx insn, rtx equiv)
1644 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1645 rtx p = XEXP (note, 0);
1646 rtx output = 0;
1648 /* First, find all the regs used in the libcall block
1649 that are not mentioned as inputs to the result. */
1651 while (p != insn)
1653 if (INSN_P (p))
1654 record_excess_regs (PATTERN (p), equiv, &output);
1655 p = NEXT_INSN (p);
1658 return output;
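/* A hypothetical example (editorial, not from the sources): given a
   libcall block

	insn 10: (set (reg 101) (const_int 4))	 ;; REG_LIBCALL -> insn 11
	insn 11: (set (reg 102) (mult (reg 100) (reg 101)))
						 ;; REG_RETVAL -> insn 10,
						 ;; REG_EQUAL (mult (reg 100) (const_int 4))

   libcall_other_reg would return an EXPR_LIST containing (reg 101),
   the one pseudo used inside the block but absent from the
   equivalent value.  */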
1661 /* Return 1 if all uses of REG
1662 are between INSN and the end of the basic block. */
1664 static int
1665 reg_in_basic_block_p (rtx insn, rtx reg)
1667 int regno = REGNO (reg);
1668 rtx p;
1670 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1671 return 0;
1673 /* Search this basic block for the already recorded last use of the reg. */
1674 for (p = insn; p; p = NEXT_INSN (p))
1676 switch (GET_CODE (p))
1678 case NOTE:
1679 break;
1681 case INSN:
1682 case CALL_INSN:
1683 /* Ordinary insn: if this is the last use, we win. */
1684 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1685 return 1;
1686 break;
1688 case JUMP_INSN:
1689 /* Jump insn: if this is the last use, we win. */
1690 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1691 return 1;
1692 /* Otherwise, it's the end of the basic block, so we lose. */
1693 return 0;
1695 case CODE_LABEL:
1696 case BARRIER:
1697 /* It's the end of the basic block, so we lose. */
1698 return 0;
1700 default:
1701 break;
1705 /* The "last use" that was recorded can't be found after the first
1706 use. This can happen when the last use was deleted while
1707 processing an inner loop; that inner loop was then completely
1708 unrolled, and the outer loop is always exited after the inner loop,
1709 so that everything after the first use becomes a single basic block. */
1710 return 1;
1713 /* Compute the benefit of eliminating the insns in the block whose
1714 last insn is LAST. This may be a group of insns that compute a
1715 value directly, or it may contain a library call. */
1717 static int
1718 libcall_benefit (rtx last)
1720 rtx insn;
1721 int benefit = 0;
1723 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1724 insn != last; insn = NEXT_INSN (insn))
1726 if (CALL_P (insn))
1727 benefit += 10; /* Assume at least this many insns in a library
1728 routine. */
1729 else if (NONJUMP_INSN_P (insn)
1730 && GET_CODE (PATTERN (insn)) != USE
1731 && GET_CODE (PATTERN (insn)) != CLOBBER)
1732 benefit++;
1735 return benefit;
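/* For instance (hypothetical): a libcall block whose scanned body
   consists of one CALL_INSN and two ordinary moves would be credited
   a benefit of 10 + 1 + 1 = 12 by the loop above.  */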
1738 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1740 static rtx
1741 skip_consec_insns (rtx insn, int count)
1743 for (; count > 0; count--)
1745 rtx temp;
1747 /* If first insn of libcall sequence, skip to end. */
1748 /* Do this at start of loop, since INSN is guaranteed to
1749 be an insn here. */
1750 if (!NOTE_P (insn)
1751 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1752 insn = XEXP (temp, 0);
1754 do
1755 insn = NEXT_INSN (insn);
1756 while (NOTE_P (insn));
1759 return insn;
1762 /* Ignore any movable whose insn falls within a libcall
1763 which is part of another movable.
1764 We make use of the fact that the movable for the libcall value
1765 was made later and so appears later on the chain. */
1767 static void
1768 ignore_some_movables (struct loop_movables *movables)
1770 struct movable *m, *m1;
1772 for (m = movables->head; m; m = m->next)
1774 /* Is this a movable for the value of a libcall? */
1775 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1776 if (note)
1778 rtx insn;
1779 /* Check for earlier movables inside that range,
1780 and mark them invalid. We cannot use LUIDs here because
1781 insns created by loop.c for prior loops don't have LUIDs.
1782 Rather than reject all such insns from movables, we just
1783 explicitly check each insn in the libcall (since invariant
1784 libcalls aren't that common). */
1785 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1786 for (m1 = movables->head; m1 != m; m1 = m1->next)
1787 if (m1->insn == insn)
1788 m1->done = 1;
1793 /* For each movable insn, see if the reg that it loads
1794 dies, at its last use, feeding right into another conditionally
1795 movable insn. If so, record that the second insn "forces" the
1796 first one, since the second can be moved only if the first is. */
1798 static void
1799 force_movables (struct loop_movables *movables)
1801 struct movable *m, *m1;
1803 for (m1 = movables->head; m1; m1 = m1->next)
1804 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1805 if (!m1->partial && !m1->done)
1807 int regno = m1->regno;
1808 for (m = m1->next; m; m = m->next)
1809 /* ??? Could this be a bug? What if CSE caused the
1810 register of M1 to be used after this insn?
1811 Since CSE does not update regno_last_uid,
1812 this insn M->insn might not be where it dies.
1813 But very likely this doesn't matter; what matters is
1814 that M's reg is computed from M1's reg. */
1815 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1816 && !m->done)
1817 break;
1818 if (m != 0 && m->set_src == m1->set_dest
1819 /* If m->consec, m->set_src isn't valid. */
1820 && m->consec == 0)
1821 m = 0;
1823 /* Increase the priority of moving the first insn,
1824 since it permits the second to be moved as well.
1825 Likewise for insns already forced by the first insn. */
1826 if (m != 0)
1828 struct movable *m2;
1830 m->forces = m1;
1831 for (m2 = m1; m2; m2 = m2->forces)
1833 m2->lifetime += m->lifetime;
1834 m2->savings += m->savings;
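/* Hypothetical illustration: if movable A sets (reg 100) and a later,
   only conditionally movable B computes (reg 101) from (reg 100) at
   the insn where (reg 100) dies, then B "forces" A: B->forces = A,
   and A (plus anything A already forces) absorbs B's lifetime and
   savings, making A more attractive to move.  */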
1840 /* Find invariant expressions that are equal and can be combined into
1841 one register. */
1843 static void
1844 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1846 struct movable *m;
1847 char *matched_regs = xmalloc (regs->num);
1848 enum machine_mode mode;
1850 /* Regs that are set more than once are not allowed to match
1851 or be matched. I'm no longer sure why not. */
1852 /* Only pseudo registers are allowed to match or be matched,
1853 since move_movables does not validate the change. */
1854 /* Perhaps testing m->consec_sets would be more appropriate here? */
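/* For instance (an editorial sketch): two movables

	(set (reg:SI 100) (const_int 10))
	(set (reg:SI 101) (const_int 10))

   load equal invariant values, so the loop below would mark the
   second as matching the first, and only one register would need to
   be initialized before the loop.  */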
1856 for (m = movables->head; m; m = m->next)
1857 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1858 && m->regno >= FIRST_PSEUDO_REGISTER
1859 && !m->insert_temp
1860 && !m->partial)
1862 struct movable *m1;
1863 int regno = m->regno;
1865 memset (matched_regs, 0, regs->num);
1866 matched_regs[regno] = 1;
1868 /* We want later insns to match the first one. Don't make the first
1869 one match any later ones. So start this loop at m->next. */
1870 for (m1 = m->next; m1; m1 = m1->next)
1871 if (m != m1 && m1->match == 0
1872 && !m1->insert_temp
1873 && regs->array[m1->regno].n_times_set == 1
1874 && m1->regno >= FIRST_PSEUDO_REGISTER
1875 /* A reg used outside the loop mustn't be eliminated. */
1876 && !m1->global
1877 /* A reg used for zero-extending mustn't be eliminated. */
1878 && !m1->partial
1879 && (matched_regs[m1->regno]
1880 ||
1881 (
1882 /* Can combine regs with different modes loaded from the
1883 same constant only if the modes are the same or
1884 if both are integer modes with M wider or the same
1885 width as M1. The check for integer is redundant, but
1886 safe, since the only case of differing destination
1887 modes with equal sources is when both sources are
1888 VOIDmode, i.e., CONST_INT. */
1889 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1890 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1891 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1892 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1893 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1894 /* See if the source of M1 says it matches M. */
1895 && ((REG_P (m1->set_src)
1896 && matched_regs[REGNO (m1->set_src)])
1897 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1898 movables, regs))))
1899 && ((m->dependencies == m1->dependencies)
1900 || rtx_equal_p (m->dependencies, m1->dependencies)))
1902 m->lifetime += m1->lifetime;
1903 m->savings += m1->savings;
1904 m1->done = 1;
1905 m1->match = m;
1906 matched_regs[m1->regno] = 1;
1910 /* Now combine the regs used for zero-extension.
1911 This can be done for those not marked `global'
1912 provided their lives don't overlap. */
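/* Illustrative example (hypothetical): two partial movables that
   clear (reg:SI 100) and (reg:SI 101) for zero-extension from HImode
   can share a single register provided the [first luid, last luid]
   ranges of the two regs do not overlap, which is what the scan
   below checks.  */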
1914 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1915 mode = GET_MODE_WIDER_MODE (mode))
1917 struct movable *m0 = 0;
1919 /* Combine all the registers for extension from mode MODE.
1920 Don't combine any that are used outside this loop. */
1921 for (m = movables->head; m; m = m->next)
1922 if (m->partial && ! m->global
1923 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1925 struct movable *m1;
1927 int first = REGNO_FIRST_LUID (m->regno);
1928 int last = REGNO_LAST_LUID (m->regno);
1930 if (m0 == 0)
1932 /* First one: don't check for overlap, just record it. */
1933 m0 = m;
1934 continue;
1937 /* Make sure they extend to the same mode.
1938 (Almost always true.) */
1939 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1940 continue;
1942 /* We already have one: check for overlap with those
1943 already combined together. */
1944 for (m1 = movables->head; m1 != m; m1 = m1->next)
1945 if (m1 == m0 || (m1->partial && m1->match == m0))
1946 if (! (REGNO_FIRST_LUID (m1->regno) > last
1947 || REGNO_LAST_LUID (m1->regno) < first))
1948 goto overlap;
1950 /* No overlap: we can combine this with the others. */
1951 m0->lifetime += m->lifetime;
1952 m0->savings += m->savings;
1953 m->done = 1;
1954 m->match = m0;
1956 overlap:
1957 ;
1961 /* Clean up. */
1962 free (matched_regs);
1965 /* Returns the number of movable instructions in LOOP that were not
1966 moved outside the loop. */
1968 static int
1969 num_unmoved_movables (const struct loop *loop)
1971 int num = 0;
1972 struct movable *m;
1974 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1975 if (!m->done)
1976 ++num;
1978 return num;
1982 /* Return 1 if regs X and Y will become the same if moved. */
1984 static int
1985 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1987 unsigned int xn = REGNO (x);
1988 unsigned int yn = REGNO (y);
1989 struct movable *mx, *my;
1991 for (mx = movables->head; mx; mx = mx->next)
1992 if (mx->regno == xn)
1993 break;
1995 for (my = movables->head; my; my = my->next)
1996 if (my->regno == yn)
1997 break;
1999 return (mx && my
2000 && ((mx->match == my->match && mx->match != 0)
2001 || mx->match == my
2002 || mx == my->match));
2005 /* Return 1 if X and Y are identical-looking rtx's.
2006 This is the Lisp function EQUAL for rtx arguments.
2008 If two registers are matching movables or a movable register and an
2009 equivalent constant, consider them equal. */
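/* For example (hypothetical): if (reg 100) is a movable scheduled to
   be replaced by the constant it loads, say (const_int 10), then
   (plus:SI (reg:SI 101) (reg:SI 100)) and
   (plus:SI (reg:SI 101) (const_int 10)) are considered equal here.  */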
2011 static int
2012 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
2013 struct loop_regs *regs)
2015 int i;
2016 int j;
2017 struct movable *m;
2018 enum rtx_code code;
2019 const char *fmt;
2021 if (x == y)
2022 return 1;
2023 if (x == 0 || y == 0)
2024 return 0;
2026 code = GET_CODE (x);
2028 /* If we have a register and a constant, they may sometimes be
2029 equal. */
2030 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
2031 && CONSTANT_P (y))
2033 for (m = movables->head; m; m = m->next)
2034 if (m->move_insn && m->regno == REGNO (x)
2035 && rtx_equal_p (m->set_src, y))
2036 return 1;
2038 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
2039 && CONSTANT_P (x))
2041 for (m = movables->head; m; m = m->next)
2042 if (m->move_insn && m->regno == REGNO (y)
2043 && rtx_equal_p (m->set_src, x))
2044 return 1;
2047 /* Otherwise, rtx's of different codes cannot be equal. */
2048 if (code != GET_CODE (y))
2049 return 0;
2051 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
2052 (REG:SI x) and (REG:HI x) are NOT equivalent. */
2054 if (GET_MODE (x) != GET_MODE (y))
2055 return 0;
2057 /* These three types of rtx's can be compared nonrecursively. */
2058 if (code == REG)
2059 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
2061 if (code == LABEL_REF)
2062 return XEXP (x, 0) == XEXP (y, 0);
2063 if (code == SYMBOL_REF)
2064 return XSTR (x, 0) == XSTR (y, 0);
2066 /* Compare the elements. If any pair of corresponding elements
2067 fails to match, return 0 for the whole thing. */
2069 fmt = GET_RTX_FORMAT (code);
2070 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2072 switch (fmt[i])
2074 case 'w':
2075 if (XWINT (x, i) != XWINT (y, i))
2076 return 0;
2077 break;
2079 case 'i':
2080 if (XINT (x, i) != XINT (y, i))
2081 return 0;
2082 break;
2084 case 'E':
2085 /* Two vectors must have the same length. */
2086 if (XVECLEN (x, i) != XVECLEN (y, i))
2087 return 0;
2089 /* And the corresponding elements must match. */
2090 for (j = 0; j < XVECLEN (x, i); j++)
2091 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2092 movables, regs) == 0)
2093 return 0;
2094 break;
2096 case 'e':
2097 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
2098 == 0)
2099 return 0;
2100 break;
2102 case 's':
2103 if (strcmp (XSTR (x, i), XSTR (y, i)))
2104 return 0;
2105 break;
2107 case 'u':
2108 /* These are just backpointers, so they don't matter. */
2109 break;
2111 case '0':
2112 break;
2114 /* It is believed that rtx's at this level will never
2115 contain anything but integers and other rtx's,
2116 except for within LABEL_REFs and SYMBOL_REFs. */
2117 default:
2118 gcc_unreachable ();
2121 return 1;
2124 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
2125 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
2126 references is incremented once for each added note. */
2128 static void
2129 add_label_notes (rtx x, rtx insns)
2131 enum rtx_code code = GET_CODE (x);
2132 int i, j;
2133 const char *fmt;
2134 rtx insn;
2136 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2138 /* This code used to ignore labels that referred to dispatch tables to
2139 avoid flow generating (slightly) worse code.
2141 We no longer ignore such label references (see LABEL_REF handling in
2142 mark_jump_label for additional information). */
2143 for (insn = insns; insn; insn = NEXT_INSN (insn))
2144 if (reg_mentioned_p (XEXP (x, 0), insn))
2146 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
2147 REG_NOTES (insn));
2148 if (LABEL_P (XEXP (x, 0)))
2149 LABEL_NUSES (XEXP (x, 0))++;
2153 fmt = GET_RTX_FORMAT (code);
2154 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2156 if (fmt[i] == 'e')
2157 add_label_notes (XEXP (x, i), insns);
2158 else if (fmt[i] == 'E')
2159 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2160 add_label_notes (XVECEXP (x, i, j), insns);
2164 /* Scan MOVABLES, and move the insns that deserve to be moved.
2165 If two matching movables are combined, replace one reg with the
2166 other throughout. */
2168 static void
2169 move_movables (struct loop *loop, struct loop_movables *movables,
2170 int threshold, int insn_count)
2172 struct loop_regs *regs = LOOP_REGS (loop);
2173 int nregs = regs->num;
2174 rtx new_start = 0;
2175 struct movable *m;
2176 rtx p;
2177 rtx loop_start = loop->start;
2178 rtx loop_end = loop->end;
2179 /* Map of pseudo-register replacements to handle combining
2180 when we move several insns that load the same value
2181 into different pseudo-registers. */
2182 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
2183 char *already_moved = xcalloc (nregs, sizeof (char));
2185 for (m = movables->head; m; m = m->next)
2187 /* Describe this movable insn. */
2189 if (loop_dump_stream)
2191 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
2192 INSN_UID (m->insn), m->regno, m->lifetime);
2193 if (m->consec > 0)
2194 fprintf (loop_dump_stream, "consec %d, ", m->consec);
2195 if (m->cond)
2196 fprintf (loop_dump_stream, "cond ");
2197 if (m->force)
2198 fprintf (loop_dump_stream, "force ");
2199 if (m->global)
2200 fprintf (loop_dump_stream, "global ");
2201 if (m->done)
2202 fprintf (loop_dump_stream, "done ");
2203 if (m->move_insn)
2204 fprintf (loop_dump_stream, "move-insn ");
2205 if (m->match)
2206 fprintf (loop_dump_stream, "matches %d ",
2207 INSN_UID (m->match->insn));
2208 if (m->forces)
2209 fprintf (loop_dump_stream, "forces %d ",
2210 INSN_UID (m->forces->insn));
2213 /* Ignore the insn if it's already done (it matched something else).
2214 Otherwise, see if it is now safe to move. */
2216 if (!m->done
2217 && (! m->cond
2218 || (1 == loop_invariant_p (loop, m->set_src)
2219 && (m->dependencies == 0
2220 || 1 == loop_invariant_p (loop, m->dependencies))
2221 && (m->consec == 0
2222 || 1 == consec_sets_invariant_p (loop, m->set_dest,
2223 m->consec + 1,
2224 m->insn))))
2225 && (! m->forces || m->forces->done))
2227 int regno;
2228 rtx p;
2229 int savings = m->savings;
2231 /* We have an insn that is safe to move.
2232 Compute its desirability. */
2234 p = m->insn;
2235 regno = m->regno;
2237 if (loop_dump_stream)
2238 fprintf (loop_dump_stream, "savings %d ", savings);
2240 if (regs->array[regno].moved_once && loop_dump_stream)
2241 fprintf (loop_dump_stream, "halved since already moved ");
2243 /* An insn MUST be moved if we already moved something else
2244 which is safe only if this one is moved too: that is,
2245 if already_moved[REGNO] is nonzero. */
2247 /* An insn is desirable to move if the new lifetime of the
2248 register is no more than THRESHOLD times the old lifetime.
2249 If it's not desirable, it means the loop is so big
2250 that moving won't speed things up much,
2251 and it is liable to make register usage worse. */
2253 /* It is also desirable to move if it can be moved at no
2254 extra cost because something else was already moved. */
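/* A worked instance of the test below (hypothetical numbers): with
   threshold 6, savings 2 and lifetime 10, an insn in a loop of 100
   insns is moved, since 6 * 2 * 10 = 120 >= 100; if its reg had
   already been moved out of another loop, the requirement doubles to
   200 and it would not be.  */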
2256 if (already_moved[regno]
2257 || (threshold * savings * m->lifetime) >=
2258 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
2259 || (m->forces && m->forces->done
2260 && regs->array[m->forces->regno].n_times_set == 1))
2262 int count;
2263 struct movable *m1;
2264 rtx first = NULL_RTX;
2265 rtx newreg = NULL_RTX;
2267 if (m->insert_temp)
2268 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
2270 /* Now move the insns that set the reg. */
2272 if (m->partial && m->match)
2274 rtx newpat, i1;
2275 rtx r1, r2;
2276 /* Find the end of this chain of matching regs.
2277 Thus, we load each reg in the chain from that one reg.
2278 And that reg is loaded with 0 directly,
2279 since it has ->match == 0. */
2280 for (m1 = m; m1->match; m1 = m1->match);
2281 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
2282 SET_DEST (PATTERN (m1->insn)));
2283 i1 = loop_insn_hoist (loop, newpat);
2285 /* Mark the moved, invariant reg as being allowed to
2286 share a hard reg with the other matching invariant. */
2287 REG_NOTES (i1) = REG_NOTES (m->insn);
2288 r1 = SET_DEST (PATTERN (m->insn));
2289 r2 = SET_DEST (PATTERN (m1->insn));
2290 regs_may_share
2291 = gen_rtx_EXPR_LIST (VOIDmode, r1,
2292 gen_rtx_EXPR_LIST (VOIDmode, r2,
2293 regs_may_share));
2294 delete_insn (m->insn);
2296 if (new_start == 0)
2297 new_start = i1;
2299 if (loop_dump_stream)
2300 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2302 /* If we are to re-generate the item being moved with a
2303 new move insn, first delete what we have and then emit
2304 the move insn before the loop. */
2305 else if (m->move_insn)
2307 rtx i1, temp, seq;
2309 for (count = m->consec; count >= 0; count--)
2311 if (!NOTE_P (p))
2313 /* If this is the first insn of a library
2314 call sequence, something is very
2315 wrong. */
2316 gcc_assert (!find_reg_note
2317 (p, REG_LIBCALL, NULL_RTX));
2319 /* If this is the last insn of a libcall
2320 sequence, then delete every insn in the
2321 sequence except the last. The last insn
2322 is handled in the normal manner. */
2323 temp = find_reg_note (p, REG_RETVAL, NULL_RTX);
2325 if (temp)
2327 temp = XEXP (temp, 0);
2328 while (temp != p)
2329 temp = delete_insn (temp);
2333 temp = p;
2334 p = delete_insn (p);
2336 /* simplify_giv_expr expects that it can walk the insns
2337 at m->insn forwards and see this old sequence we are
2338 tossing here. delete_insn does preserve the next
2339 pointers, but when we skip over a NOTE we must fix
2340 it up. Otherwise that code walks into the non-deleted
2341 insn stream. */
2342 while (p && NOTE_P (p))
2343 p = NEXT_INSN (temp) = NEXT_INSN (p);
2345 if (m->insert_temp)
2347 /* Replace the original insn with a move from
2348 our newly created temp. */
2349 start_sequence ();
2350 emit_move_insn (m->set_dest, newreg);
2351 seq = get_insns ();
2352 end_sequence ();
2353 emit_insn_before (seq, p);
2357 start_sequence ();
2358 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2359 m->set_src);
2360 seq = get_insns ();
2361 end_sequence ();
2363 add_label_notes (m->set_src, seq);
2365 i1 = loop_insn_hoist (loop, seq);
2366 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2367 set_unique_reg_note (i1,
2368 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2369 m->set_src);
2371 if (loop_dump_stream)
2372 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2374 /* The more regs we move, the less we like moving them. */
2375 threshold -= 3;
2377 else
2379 for (count = m->consec; count >= 0; count--)
2381 rtx i1, temp;
2383 /* If first insn of libcall sequence, skip to end. */
2384 /* Do this at start of loop, since p is guaranteed to
2385 be an insn here. */
2386 if (!NOTE_P (p)
2387 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2388 p = XEXP (temp, 0);
2390 /* If last insn of libcall sequence, move all
2391 insns except the last before the loop. The last
2392 insn is handled in the normal manner. */
2393 if (!NOTE_P (p)
2394 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2396 rtx fn_address = 0;
2397 rtx fn_reg = 0;
2398 rtx fn_address_insn = 0;
2400 first = 0;
2401 for (temp = XEXP (temp, 0); temp != p;
2402 temp = NEXT_INSN (temp))
2404 rtx body;
2405 rtx n;
2406 rtx next;
2408 if (NOTE_P (temp))
2409 continue;
2411 body = PATTERN (temp);
2413 /* Find the next insn after TEMP,
2414 not counting USE or NOTE insns. */
2415 for (next = NEXT_INSN (temp); next != p;
2416 next = NEXT_INSN (next))
2417 if (! (NONJUMP_INSN_P (next)
2418 && GET_CODE (PATTERN (next)) == USE)
2419 && !NOTE_P (next))
2420 break;
2422 /* If that is the call, this may be the insn
2423 that loads the function address.
2425 Extract the function address from the insn
2426 that loads it into a register.
2427 If this insn was cse'd, we get incorrect code.
2429 So emit a new move insn that copies the
2430 function address into the register that the
2431 call insn will use. flow.c will delete any
2432 redundant stores that we have created. */
2433 if (CALL_P (next)
2434 && GET_CODE (body) == SET
2435 && REG_P (SET_DEST (body))
2436 && (n = find_reg_note (temp, REG_EQUAL,
2437 NULL_RTX)))
2439 fn_reg = SET_SRC (body);
2440 if (!REG_P (fn_reg))
2441 fn_reg = SET_DEST (body);
2442 fn_address = XEXP (n, 0);
2443 fn_address_insn = temp;
2445 /* We have the call insn.
2446 If it uses the register we suspect it might,
2447 load it with the correct address directly. */
2448 if (CALL_P (temp)
2449 && fn_address != 0
2450 && reg_referenced_p (fn_reg, body))
2451 loop_insn_emit_after (loop, 0, fn_address_insn,
2452 gen_move_insn
2453 (fn_reg, fn_address));
2455 if (CALL_P (temp))
2457 i1 = loop_call_insn_hoist (loop, body);
2458 /* Because the USAGE information potentially
2459 contains objects other than hard registers
2460 we need to copy it. */
2461 if (CALL_INSN_FUNCTION_USAGE (temp))
2462 CALL_INSN_FUNCTION_USAGE (i1)
2463 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2465 else
2466 i1 = loop_insn_hoist (loop, body);
2467 if (first == 0)
2468 first = i1;
2469 if (temp == fn_address_insn)
2470 fn_address_insn = i1;
2471 REG_NOTES (i1) = REG_NOTES (temp);
2472 REG_NOTES (temp) = NULL;
2473 delete_insn (temp);
2475 if (new_start == 0)
2476 new_start = first;
2478 if (m->savemode != VOIDmode)
2480 /* P sets REG to zero; but we should clear only
2481 the bits that are not covered by the mode
2482 m->savemode. */
2483 rtx reg = m->set_dest;
2484 rtx sequence;
2485 rtx tem;
2487 start_sequence ();
2488 tem = expand_simple_binop
2489 (GET_MODE (reg), AND, reg,
2490 GEN_INT ((((HOST_WIDE_INT) 1
2491 << GET_MODE_BITSIZE (m->savemode)))
2492 - 1),
2493 reg, 1, OPTAB_LIB_WIDEN);
2494 gcc_assert (tem);
2495 if (tem != reg)
2496 emit_move_insn (reg, tem);
2497 sequence = get_insns ();
2498 end_sequence ();
2499 i1 = loop_insn_hoist (loop, sequence);
2501 else if (CALL_P (p))
2503 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2504 /* Because the USAGE information potentially
2505 contains objects other than hard registers
2506 we need to copy it. */
2507 if (CALL_INSN_FUNCTION_USAGE (p))
2508 CALL_INSN_FUNCTION_USAGE (i1)
2509 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2511 else if (count == m->consec && m->move_insn_first)
2513 rtx seq;
2514 /* The SET_SRC might not be invariant, so we must
2515 use the REG_EQUAL note. */
2516 start_sequence ();
2517 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2518 m->set_src);
2519 seq = get_insns ();
2520 end_sequence ();
2522 add_label_notes (m->set_src, seq);
2524 i1 = loop_insn_hoist (loop, seq);
2525 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2526 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2527 : REG_EQUAL, m->set_src);
2529 else if (m->insert_temp)
2531 rtx *reg_map2 = xcalloc (REGNO (newreg),
2532 sizeof(rtx));
2533 reg_map2 [m->regno] = newreg;
2535 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2536 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2537 free (reg_map2);
2539 else
2540 i1 = loop_insn_hoist (loop, PATTERN (p));
2542 if (REG_NOTES (i1) == 0)
2544 REG_NOTES (i1) = REG_NOTES (p);
2545 REG_NOTES (p) = NULL;
2547 /* If there is a REG_EQUAL note present whose value
2548 is not loop invariant, then delete it, since it
2549 may cause problems with later optimization passes.
2550 It is possible for cse to create such notes
2551 as a result of record_jump_cond. */
2553 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2554 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2555 remove_note (i1, temp);
2558 if (new_start == 0)
2559 new_start = i1;
2561 if (loop_dump_stream)
2562 fprintf (loop_dump_stream, " moved to %d",
2563 INSN_UID (i1));
2565 /* If library call, now fix the REG_NOTES that contain
2566 insn pointers, namely REG_LIBCALL on FIRST
2567 and REG_RETVAL on I1. */
2568 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2570 XEXP (temp, 0) = first;
2571 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2572 XEXP (temp, 0) = i1;
2575 temp = p;
2576 delete_insn (p);
2577 p = NEXT_INSN (p);
2579 /* simplify_giv_expr expects that it can walk the insns
2580 at m->insn forwards and see this old sequence we are
2581 tossing here. delete_insn does preserve the next
2582 pointers, but when we skip over a NOTE we must fix
2583 it up. Otherwise that code walks into the non-deleted
2584 insn stream. */
2585 while (p && NOTE_P (p))
2586 p = NEXT_INSN (temp) = NEXT_INSN (p);
2588 if (m->insert_temp)
2590 rtx seq;
2591 /* Replace the original insn with a move from
2592 our newly created temp. */
2593 start_sequence ();
2594 emit_move_insn (m->set_dest, newreg);
2595 seq = get_insns ();
2596 end_sequence ();
2597 emit_insn_before (seq, p);
2601 /* The more regs we move, the less we like moving them. */
2602 threshold -= 3;
2605 m->done = 1;
2607 if (!m->insert_temp)
2609 /* Any other movable that loads the same register
2610 MUST be moved. */
2611 already_moved[regno] = 1;
2613 /* This reg has been moved out of one loop. */
2614 regs->array[regno].moved_once = 1;
2616 /* The reg set here is now invariant. */
2617 if (! m->partial)
2619 int i;
2620 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2621 regs->array[regno+i].set_in_loop = 0;
2624 /* Change the length-of-life info for the register
2625 to say it lives at least the full length of this loop.
2626 This will help guide optimizations in outer loops. */
2628 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2629 /* This is the old insn before all the moved insns.
2630 We can't use the moved insn because it is out of range
2631 in uid_luid. Only the old insns have luids. */
2632 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2633 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2634 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2637 /* Combine with this moved insn any other matching movables. */
2639 if (! m->partial)
2640 for (m1 = movables->head; m1; m1 = m1->next)
2641 if (m1->match == m)
2643 rtx temp;
2645 /* Schedule the reg loaded by M1
2646 for replacement so that it shares the reg of M.
2647 If the modes differ (only possible in restricted
2648 circumstances), make a SUBREG.
2650 Note this assumes that the target dependent files
2651 treat REG and SUBREG equally, including within
2652 GO_IF_LEGITIMATE_ADDRESS and in all the
2653 predicates since we never verify that replacing the
2654 original register with a SUBREG results in a
2655 recognizable insn. */
2656 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2657 reg_map[m1->regno] = m->set_dest;
2658 else
2659 reg_map[m1->regno]
2660 = gen_lowpart_common (GET_MODE (m1->set_dest),
2661 m->set_dest);
2663 /* Get rid of the matching insn
2664 and prevent further processing of it. */
2665 m1->done = 1;
2667 /* If library call, delete all insns. */
2668 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2669 NULL_RTX)))
2670 delete_insn_chain (XEXP (temp, 0), m1->insn);
2671 else
2672 delete_insn (m1->insn);
2674 /* Any other movable that loads the same register
2675 MUST be moved. */
2676 already_moved[m1->regno] = 1;
2678 /* The reg merged here is now invariant,
2679 if the reg it matches is invariant. */
2680 if (! m->partial)
2682 int i;
2683 for (i = 0;
2684 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2685 i++)
2686 regs->array[m1->regno+i].set_in_loop = 0;
2690 else if (loop_dump_stream)
2691 fprintf (loop_dump_stream, "not desirable");
2693 else if (loop_dump_stream && !m->match)
2694 fprintf (loop_dump_stream, "not safe");
2696 if (loop_dump_stream)
2697 fprintf (loop_dump_stream, "\n");
2700 if (new_start == 0)
2701 new_start = loop_start;
2703 /* Go through all the instructions in the loop, making
2704 all the register substitutions scheduled in REG_MAP. */
2705 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2706 if (INSN_P (p))
2708 replace_regs (PATTERN (p), reg_map, nregs, 0);
2709 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2710 INSN_CODE (p) = -1;
2713 /* Clean up. */
2714 free (reg_map);
2715 free (already_moved);
2719 static void
2720 loop_movables_add (struct loop_movables *movables, struct movable *m)
2722 if (movables->head == 0)
2723 movables->head = m;
2724 else
2725 movables->last->next = m;
2726 movables->last = m;
2730 static void
2731 loop_movables_free (struct loop_movables *movables)
2733 struct movable *m;
2734 struct movable *m_next;
2736 for (m = movables->head; m; m = m_next)
2738 m_next = m->next;
2739 free (m);
2743 #if 0
2744 /* Scan X and replace the address of any MEM in it with ADDR.
2745 REG is the address that MEM should have before the replacement. */
2747 static void
2748 replace_call_address (rtx x, rtx reg, rtx addr)
2750 enum rtx_code code;
2751 int i;
2752 const char *fmt;
2754 if (x == 0)
2755 return;
2756 code = GET_CODE (x);
2757 switch (code)
2759 case PC:
2760 case CC0:
2761 case CONST_INT:
2762 case CONST_DOUBLE:
2763 case CONST:
2764 case SYMBOL_REF:
2765 case LABEL_REF:
2766 case REG:
2767 return;
2769 case SET:
2770 /* Short cut for very common case. */
2771 replace_call_address (XEXP (x, 1), reg, addr);
2772 return;
2774 case CALL:
2775 /* Short cut for very common case. */
2776 replace_call_address (XEXP (x, 0), reg, addr);
2777 return;
2779 case MEM:
2780 /* If this MEM uses a reg other than the one we expected,
2781 something is wrong. */
2782 gcc_assert (XEXP (x, 0) == reg);
2783 XEXP (x, 0) = addr;
2784 return;
2786 default:
2787 break;
2790 fmt = GET_RTX_FORMAT (code);
2791 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2793 if (fmt[i] == 'e')
2794 replace_call_address (XEXP (x, i), reg, addr);
2795 else if (fmt[i] == 'E')
2797 int j;
2798 for (j = 0; j < XVECLEN (x, i); j++)
2799 replace_call_address (XVECEXP (x, i, j), reg, addr);
2803 #endif
2805 /* Return the number of memory refs to addresses that vary
2806 in the rtx X. */
2808 static int
2809 count_nonfixed_reads (const struct loop *loop, rtx x)
2811 enum rtx_code code;
2812 int i;
2813 const char *fmt;
2814 int value;
2816 if (x == 0)
2817 return 0;
2819 code = GET_CODE (x);
2820 switch (code)
2822 case PC:
2823 case CC0:
2824 case CONST_INT:
2825 case CONST_DOUBLE:
2826 case CONST:
2827 case SYMBOL_REF:
2828 case LABEL_REF:
2829 case REG:
2830 return 0;
2832 case MEM:
2833 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2834 + count_nonfixed_reads (loop, XEXP (x, 0)));
2836 default:
2837 break;
2840 value = 0;
2841 fmt = GET_RTX_FORMAT (code);
2842 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2844 if (fmt[i] == 'e')
2845 value += count_nonfixed_reads (loop, XEXP (x, i));
2846 if (fmt[i] == 'E')
2848 int j;
2849 for (j = 0; j < XVECLEN (x, i); j++)
2850 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2853 return value;
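/* For instance (hypothetical): for
   X = (mem:SI (plus:SI (reg:SI 100) (const_int 4))) inside a loop
   that modifies reg 100, the address is not invariant, so the
   function returns 1.  */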
2856 /* Scan a loop setting the elements `loops_enclosed',
2857 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2858 `unknown_address_altered', `unknown_constant_address_altered', and
2859 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2860 list `store_mems' in LOOP. */
2862 static void
2863 prescan_loop (struct loop *loop)
2865 int level = 1;
2866 rtx insn;
2867 struct loop_info *loop_info = LOOP_INFO (loop);
2868 rtx start = loop->start;
2869 rtx end = loop->end;
2870 /* The label after END. Jumping here is just like falling off the
2871 end of the loop. We use next_nonnote_insn instead of next_label
2872 as a hedge against the (pathological) case where some actual insn
2873 might end up between the two. */
2874 rtx exit_target = next_nonnote_insn (end);
2876 loop_info->has_indirect_jump = indirect_jump_in_function;
2877 loop_info->pre_header_has_call = 0;
2878 loop_info->has_call = 0;
2879 loop_info->has_nonconst_call = 0;
2880 loop_info->has_prefetch = 0;
2881 loop_info->has_volatile = 0;
2882 loop_info->has_tablejump = 0;
2883 loop_info->has_multiple_exit_targets = 0;
2884 loop->level = 1;
2886 loop_info->unknown_address_altered = 0;
2887 loop_info->unknown_constant_address_altered = 0;
2888 loop_info->store_mems = NULL_RTX;
2889 loop_info->first_loop_store_insn = NULL_RTX;
2890 loop_info->mems_idx = 0;
2891 loop_info->num_mem_sets = 0;
2893 for (insn = start; insn && !LABEL_P (insn);
2894 insn = PREV_INSN (insn))
2896 if (CALL_P (insn))
2898 loop_info->pre_header_has_call = 1;
2899 break;
2903 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2904 insn = NEXT_INSN (insn))
2906 switch (GET_CODE (insn))
2908 case NOTE:
2909 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2911 ++level;
2912 /* Count number of loops contained in this one. */
2913 loop->level++;
2915 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2916 --level;
2917 break;
2919 case CALL_INSN:
2920 if (! CONST_OR_PURE_CALL_P (insn))
2922 loop_info->unknown_address_altered = 1;
2923 loop_info->has_nonconst_call = 1;
2925 else if (pure_call_p (insn))
2926 loop_info->has_nonconst_call = 1;
2927 loop_info->has_call = 1;
2928 if (can_throw_internal (insn))
2929 loop_info->has_multiple_exit_targets = 1;
2930 break;
2932 case JUMP_INSN:
2933 if (! loop_info->has_multiple_exit_targets)
2935 rtx set = pc_set (insn);
2937 if (set)
2939 rtx src = SET_SRC (set);
2940 rtx label1, label2;
2942 if (GET_CODE (src) == IF_THEN_ELSE)
2944 label1 = XEXP (src, 1);
2945 label2 = XEXP (src, 2);
2947 else
2949 label1 = src;
2950 label2 = NULL_RTX;
2953 do
2955 if (label1 && label1 != pc_rtx)
2957 if (GET_CODE (label1) != LABEL_REF)
2959 /* Something tricky. */
2960 loop_info->has_multiple_exit_targets = 1;
2961 break;
2963 else if (XEXP (label1, 0) != exit_target
2964 && LABEL_OUTSIDE_LOOP_P (label1))
2966 /* A jump outside the current loop. */
2967 loop_info->has_multiple_exit_targets = 1;
2968 break;
2972 label1 = label2;
2973 label2 = NULL_RTX;
2975 while (label1);
2977 else
2979 /* A return, or something tricky. */
2980 loop_info->has_multiple_exit_targets = 1;
2983 /* Fall through. */
2985 case INSN:
2986 if (volatile_refs_p (PATTERN (insn)))
2987 loop_info->has_volatile = 1;
2989 if (JUMP_P (insn)
2990 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2991 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2992 loop_info->has_tablejump = 1;
2994 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2995 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2996 loop_info->first_loop_store_insn = insn;
2998 if (flag_non_call_exceptions && can_throw_internal (insn))
2999 loop_info->has_multiple_exit_targets = 1;
3000 break;
3002 default:
3003 break;
3007 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
3008 if (/* An exception thrown by a called function might land us
3009 anywhere. */
3010 ! loop_info->has_nonconst_call
3011 /* We don't want loads for MEMs moved to a location before the
3012 one at which their stack memory becomes allocated. (Note
3013 that this is not a problem for malloc, etc., since those
3014 require actual function calls.) */
3015 && ! current_function_calls_alloca
3016 /* There are ways to leave the loop other than falling off the
3017 end. */
3018 && ! loop_info->has_multiple_exit_targets)
3019 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
3020 insn = NEXT_INSN (insn))
3021 for_each_rtx (&insn, insert_loop_mem, loop_info);
3023 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
3024 that loop_invariant_p and load_mems can use true_dependence
3025 to determine what is really clobbered. */
3026 if (loop_info->unknown_address_altered)
3028 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3030 loop_info->store_mems
3031 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3033 if (loop_info->unknown_constant_address_altered)
3035 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3036 MEM_READONLY_P (mem) = 1;
3037 loop_info->store_mems
3038 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3042 /* Invalidate all loops containing LABEL. */
3044 static void
3045 invalidate_loops_containing_label (rtx label)
3047 struct loop *loop;
3048 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
3049 loop->invalid = 1;
3052 /* Scan the function looking for loops. Record the start and end of each loop.
3053 Also mark as invalid loops any loops that contain a setjmp or are branched
3054 to from outside the loop. */
3056 static void
3057 find_and_verify_loops (rtx f, struct loops *loops)
3059 rtx insn;
3060 rtx label;
3061 int num_loops;
3062 struct loop *current_loop;
3063 struct loop *next_loop;
3064 struct loop *loop;
3066 num_loops = loops->num;
3068 compute_luids (f, NULL_RTX, 0);
3070 /* If there are jumps to undefined labels,
3071 treat them as jumps out of any/all loops.
3072 This also avoids writing past end of tables when there are no loops. */
3073 uid_loop[0] = NULL;
3075 /* Find boundaries of loops, mark which loops are contained within
3076 loops, and invalidate loops that have setjmp. */
3078 num_loops = 0;
3079 current_loop = NULL;
3080 for (insn = f; insn; insn = NEXT_INSN (insn))
3082 if (NOTE_P (insn))
3083 switch (NOTE_LINE_NUMBER (insn))
3085 case NOTE_INSN_LOOP_BEG:
3086 next_loop = loops->array + num_loops;
3087 next_loop->num = num_loops;
3088 num_loops++;
3089 next_loop->start = insn;
3090 next_loop->outer = current_loop;
3091 current_loop = next_loop;
3092 break;
3094 case NOTE_INSN_LOOP_END:
3095 gcc_assert (current_loop);
3097 current_loop->end = insn;
3098 current_loop = current_loop->outer;
3099 break;
3101 default:
3102 break;
3105 if (CALL_P (insn)
3106 && find_reg_note (insn, REG_SETJMP, NULL))
3108 /* In this case, we must invalidate our current loop and any
3109 enclosing loop. */
3110 for (loop = current_loop; loop; loop = loop->outer)
3112 loop->invalid = 1;
3113 if (loop_dump_stream)
3114 fprintf (loop_dump_stream,
3115 "\nLoop at %d ignored due to setjmp.\n",
3116 INSN_UID (loop->start));
3120 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
3121 enclosing loop, but this doesn't matter. */
3122 uid_loop[INSN_UID (insn)] = current_loop;
3125 /* Any loop containing a label used in an initializer must be invalidated,
3126 because it can be jumped into from anywhere. */
3127 for (label = forced_labels; label; label = XEXP (label, 1))
3128 invalidate_loops_containing_label (XEXP (label, 0));
3130 /* Any loop containing a label used for an exception handler must be
3131 invalidated, because it can be jumped into from anywhere. */
3132 for_each_eh_label (invalidate_loops_containing_label);
3134 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
3135 loop that it is not contained within, that loop is marked invalid.
3136 If any INSN or CALL_INSN uses a label's address, then the loop containing
3137 that label is marked invalid, because it could be jumped into from
3138 anywhere.
3140 Also look for blocks of code ending in an unconditional branch that
3141 exits the loop. If such a block is surrounded by a conditional
3142 branch around the block, move the block elsewhere (see below) and
3143 invert the jump to point to the code block. This may eliminate a
3144 label in our loop and will simplify processing by both us and a
3145 possible second cse pass. */
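/* An editorial sketch of the transformation (schematic, not real RTL):

	   if (cond) goto L1;	(the conditional jump P)
	   block B		(ends in a jump out of the loop)
	L1:

   becomes

	   if (!cond) goto Lnew;
	L1:
	   ...
	Lnew:			(placed after a BARRIER outside the loop)
	   block B

   moving the rarely executed block B out of the loop body.  */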
3147 for (insn = f; insn; insn = NEXT_INSN (insn))
3148 if (INSN_P (insn))
3150 struct loop *this_loop = uid_loop[INSN_UID (insn)];
3152 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
3154 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
3155 if (note)
3156 invalidate_loops_containing_label (XEXP (note, 0));
3159 if (!JUMP_P (insn))
3160 continue;
3162 mark_loop_jump (PATTERN (insn), this_loop);
3164 /* See if this is an unconditional branch outside the loop. */
3165 if (this_loop
3166 && (GET_CODE (PATTERN (insn)) == RETURN
3167 || (any_uncondjump_p (insn)
3168 && onlyjump_p (insn)
3169 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
3170 != this_loop)))
3171 && get_max_uid () < max_uid_for_loop)
3173 rtx p;
3174 rtx our_next = next_real_insn (insn);
3175 rtx last_insn_to_move = NEXT_INSN (insn);
3176 struct loop *dest_loop;
3177 struct loop *outer_loop = NULL;
3179 /* Go backwards until we reach the start of the loop, a label,
3180 or a JUMP_INSN. */
3181 for (p = PREV_INSN (insn);
3182 !LABEL_P (p)
3183 && ! (NOTE_P (p)
3184 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3185 && !JUMP_P (p);
3186 p = PREV_INSN (p))
3187 ;
3189 /* Check for the case where we have a jump to an inner nested
3190 loop, and do not perform the optimization in that case. */
3192 if (JUMP_LABEL (insn))
3194 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
3195 if (dest_loop)
3197 for (outer_loop = dest_loop; outer_loop;
3198 outer_loop = outer_loop->outer)
3199 if (outer_loop == this_loop)
3200 break;
3204 /* Make sure that the target of P is within the current loop. */
3206 if (JUMP_P (p) && JUMP_LABEL (p)
3207 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
3208 outer_loop = this_loop;
3210 /* If we stopped on a JUMP_INSN to the next insn after INSN,
3211 we have a block of code to try to move.
3213 We look backward and then forward from the target of INSN
3214 to find a BARRIER at the same loop depth as the target.
3215 If we find such a BARRIER, we make a new label for the start
3216 of the block, invert the jump in P and point it to that label,
3217 and move the block of code to the spot we found. */
3219 if (! outer_loop
3220 && JUMP_P (p)
3221 && JUMP_LABEL (p) != 0
3222 /* Just ignore jumps to labels that were never emitted.
3223 These always indicate compilation errors. */
3224 && INSN_UID (JUMP_LABEL (p)) != 0
3225 && any_condjump_p (p) && onlyjump_p (p)
3226 && next_real_insn (JUMP_LABEL (p)) == our_next
3227 /* If it's not safe to move the sequence, then we
3228 mustn't try. */
3229 && insns_safe_to_move_p (p, NEXT_INSN (insn),
3230 &last_insn_to_move))
3232 rtx target
3233 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
3234 struct loop *target_loop = uid_loop[INSN_UID (target)];
3235 rtx loc, loc2;
3236 rtx tmp;
3238 /* Search for possible garbage past the conditional jumps
3239 and look for the last barrier. */
3240 for (tmp = last_insn_to_move;
3241 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
3242 if (BARRIER_P (tmp))
3243 last_insn_to_move = tmp;
3245 for (loc = target; loc; loc = PREV_INSN (loc))
3246 if (BARRIER_P (loc)
3247 /* Don't move things inside a tablejump. */
3248 && ((loc2 = next_nonnote_insn (loc)) == 0
3249 || !LABEL_P (loc2)
3250 || (loc2 = next_nonnote_insn (loc2)) == 0
3251 || !JUMP_P (loc2)
3252 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3253 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3254 && uid_loop[INSN_UID (loc)] == target_loop)
3255 break;
3257 if (loc == 0)
3258 for (loc = target; loc; loc = NEXT_INSN (loc))
3259 if (BARRIER_P (loc)
3260 /* Don't move things inside a tablejump. */
3261 && ((loc2 = next_nonnote_insn (loc)) == 0
3262 || !LABEL_P (loc2)
3263 || (loc2 = next_nonnote_insn (loc2)) == 0
3264 || !JUMP_P (loc2)
3265 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3266 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3267 && uid_loop[INSN_UID (loc)] == target_loop)
3268 break;
3270 if (loc)
3272 rtx cond_label = JUMP_LABEL (p);
3273 rtx new_label = get_label_after (p);
3275 /* Ensure our label doesn't go away. */
3276 LABEL_NUSES (cond_label)++;
3278 /* Verify that uid_loop is large enough and that
3279 we can invert P. */
3280 if (invert_jump (p, new_label, 1))
3282 rtx q, r;
3283 bool only_notes;
3285 /* If no suitable BARRIER was found, create a suitable
3286 one before TARGET. Since TARGET is a fall through
3287 path, we'll need to insert a jump around our block
3288 and add a BARRIER before TARGET.
3290 This creates an extra unconditional jump outside
3291 the loop. However, the benefits of removing rarely
3292 executed instructions from inside the loop usually
3293 outweighs the cost of the extra unconditional jump
3294 outside the loop. */
3295 if (loc == 0)
3297 rtx temp;
3299 temp = gen_jump (JUMP_LABEL (insn));
3300 temp = emit_jump_insn_before (temp, target);
3301 JUMP_LABEL (temp) = JUMP_LABEL (insn);
3302 LABEL_NUSES (JUMP_LABEL (insn))++;
3303 loc = emit_barrier_before (target);
3306 /* Include the BARRIER after INSN and copy the
3307 block after LOC. */
3308 only_notes = squeeze_notes (&new_label,
3309 &last_insn_to_move);
3310 gcc_assert (!only_notes);
3312 reorder_insns (new_label, last_insn_to_move, loc);
3314 /* All those insns are now in TARGET_LOOP. */
3315 for (q = new_label;
3316 q != NEXT_INSN (last_insn_to_move);
3317 q = NEXT_INSN (q))
3318 uid_loop[INSN_UID (q)] = target_loop;
3320 /* The label jumped to by INSN is no longer a loop
3321 exit. Unless INSN does not have a label (e.g.,
3322 it is a RETURN insn), search loop->exit_labels
3323 to find its label_ref, and remove it. Also turn
3324 off LABEL_OUTSIDE_LOOP_P bit. */
3325 if (JUMP_LABEL (insn))
3327 for (q = 0, r = this_loop->exit_labels;
3328 r;
3329 q = r, r = LABEL_NEXTREF (r))
3330 if (XEXP (r, 0) == JUMP_LABEL (insn))
3332 LABEL_OUTSIDE_LOOP_P (r) = 0;
3333 if (q)
3334 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3335 else
3336 this_loop->exit_labels = LABEL_NEXTREF (r);
3337 break;
3340 for (loop = this_loop; loop && loop != target_loop;
3341 loop = loop->outer)
3342 loop->exit_count--;
3344 /* If we didn't find it, then something is
3345 wrong. */
3346 gcc_assert (r);
3349 /* P is now a jump outside the loop, so it must be put
3350 in loop->exit_labels, and marked as such.
3351 The easiest way to do this is to just call
3352 mark_loop_jump again for P. */
3353 mark_loop_jump (PATTERN (p), this_loop);
3355 /* If INSN now jumps to the insn after it,
3356 delete INSN. */
3357 if (JUMP_LABEL (insn) != 0
3358 && (next_real_insn (JUMP_LABEL (insn))
3359 == next_real_insn (insn)))
3360 delete_related_insns (insn);
3363 /* Continue the loop after where the conditional
3364 branch used to jump, since the only branch insn
3365 in the block (if it still remains) is an inter-loop
3366 branch and hence needs no processing. */
3367 insn = NEXT_INSN (cond_label);
3369 if (--LABEL_NUSES (cond_label) == 0)
3370 delete_related_insns (cond_label);
3372 /* This loop will be continued with NEXT_INSN (insn). */
3373 insn = PREV_INSN (insn);
3380 /* If any label in X jumps to a loop different from LOOP and any of the
3381 loops it is contained in, mark the target loop invalid.
3383 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3385 static void
3386 mark_loop_jump (rtx x, struct loop *loop)
3388 struct loop *dest_loop;
3389 struct loop *outer_loop;
3390 int i;
3392 switch (GET_CODE (x))
3394 case PC:
3395 case USE:
3396 case CLOBBER:
3397 case REG:
3398 case MEM:
3399 case CONST_INT:
3400 case CONST_DOUBLE:
3401 case RETURN:
3402 return;
3404 case CONST:
3405 /* There could be a label reference in here. */
3406 mark_loop_jump (XEXP (x, 0), loop);
3407 return;
3409 case PLUS:
3410 case MINUS:
3411 case MULT:
3412 mark_loop_jump (XEXP (x, 0), loop);
3413 mark_loop_jump (XEXP (x, 1), loop);
3414 return;
3416 case LO_SUM:
3417 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3418 mark_loop_jump (XEXP (x, 1), loop);
3419 return;
3421 case SIGN_EXTEND:
3422 case ZERO_EXTEND:
3423 mark_loop_jump (XEXP (x, 0), loop);
3424 return;
3426 case LABEL_REF:
3427 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3429 /* Link together all labels that branch outside the loop. This
3430 is used by final_[bg]iv_value and the loop unrolling code. Also
3431 mark this LABEL_REF so we know that this branch should predict
3432 false. */
3434 /* A check to make sure the label is not in an inner nested loop,
3435 since this does not count as a loop exit. */
3436 if (dest_loop)
3438 for (outer_loop = dest_loop; outer_loop;
3439 outer_loop = outer_loop->outer)
3440 if (outer_loop == loop)
3441 break;
3443 else
3444 outer_loop = NULL;
3446 if (loop && ! outer_loop)
3448 LABEL_OUTSIDE_LOOP_P (x) = 1;
3449 LABEL_NEXTREF (x) = loop->exit_labels;
3450 loop->exit_labels = x;
3452 for (outer_loop = loop;
3453 outer_loop && outer_loop != dest_loop;
3454 outer_loop = outer_loop->outer)
3455 outer_loop->exit_count++;
3458 /* If this is inside a loop, but not in the current loop or one enclosed
3459 by it, it invalidates at least one loop. */
3461 if (! dest_loop)
3462 return;
3464 /* We must invalidate every nested loop containing the target of this
3465 label, except those that also contain the jump insn. */
3467 for (; dest_loop; dest_loop = dest_loop->outer)
3469 /* Stop when we reach a loop that also contains the jump insn. */
3470 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3471 if (dest_loop == outer_loop)
3472 return;
3474 /* If we get here, we know we need to invalidate a loop. */
3475 if (loop_dump_stream && ! dest_loop->invalid)
3476 fprintf (loop_dump_stream,
3477 "\nLoop at %d ignored due to multiple entry points.\n",
3478 INSN_UID (dest_loop->start));
3480 dest_loop->invalid = 1;
3482 return;
3484 case SET:
3485 /* If this is not setting pc, ignore. */
3486 if (SET_DEST (x) == pc_rtx)
3487 mark_loop_jump (SET_SRC (x), loop);
3488 return;
3490 case IF_THEN_ELSE:
3491 mark_loop_jump (XEXP (x, 1), loop);
3492 mark_loop_jump (XEXP (x, 2), loop);
3493 return;
3495 case PARALLEL:
3496 case ADDR_VEC:
3497 for (i = 0; i < XVECLEN (x, 0); i++)
3498 mark_loop_jump (XVECEXP (x, 0, i), loop);
3499 return;
3501 case ADDR_DIFF_VEC:
3502 for (i = 0; i < XVECLEN (x, 1); i++)
3503 mark_loop_jump (XVECEXP (x, 1, i), loop);
3504 return;
3506 default:
3507 /* Strictly speaking this is not a jump into the loop, only a possible
3508 jump out of the loop. However, we have no way to link the destination
3509 of this jump onto the list of exit labels. To be safe we mark this
3510 loop and any containing loops as invalid. */
3511 if (loop)
3513 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3515 if (loop_dump_stream && ! outer_loop->invalid)
3516 fprintf (loop_dump_stream,
3517 "\nLoop at %d ignored due to unknown exit jump.\n",
3518 INSN_UID (outer_loop->start));
3519 outer_loop->invalid = 1;
3522 return;
3526 /* Return nonzero if there is a label in the range from
3527 insn INSN to and including the insn whose luid is END.
3528 INSN must have an assigned luid (i.e., it must not have
3529 been previously created by loop.c). */
3531 static int
3532 labels_in_range_p (rtx insn, int end)
3534 while (insn && INSN_LUID (insn) <= end)
3536 if (LABEL_P (insn))
3537 return 1;
3538 insn = NEXT_INSN (insn);
3541 return 0;
3544 /* Record that a memory reference X is being set. */
3546 static void
3547 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3548 void *data ATTRIBUTE_UNUSED)
3550 struct loop_info *loop_info = data;
3552 if (x == 0 || !MEM_P (x))
3553 return;
3555 /* Count number of memory writes.
3556 This affects heuristics in strength_reduce. */
3557 loop_info->num_mem_sets++;
3559 /* BLKmode MEM means all memory is clobbered. */
3560 if (GET_MODE (x) == BLKmode)
3562 if (MEM_READONLY_P (x))
3563 loop_info->unknown_constant_address_altered = 1;
3564 else
3565 loop_info->unknown_address_altered = 1;
3567 return;
3570 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3571 loop_info->store_mems);
3574 /* X is a value modified by an INSN that references a biv inside a loop
3575 exit test (i.e., X is somehow related to the value of the biv). If X
3576 is a pseudo that is used more than once, then the biv is (effectively)
3577 used more than once. DATA is a pointer to a loop_regs structure. */
3579 static void
3580 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3582 struct loop_regs *regs = (struct loop_regs *) data;
3584 if (x == 0)
3585 return;
3587 while (GET_CODE (x) == STRICT_LOW_PART
3588 || GET_CODE (x) == SIGN_EXTRACT
3589 || GET_CODE (x) == ZERO_EXTRACT
3590 || GET_CODE (x) == SUBREG)
3591 x = XEXP (x, 0);
3593 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3594 return;
3596 /* If we do not have usage information, or if we know the register
3597 is used more than once, note that fact for check_dbra_loop. */
3598 if (REGNO (x) >= max_reg_before_loop
3599 || ! regs->array[REGNO (x)].single_usage
3600 || regs->array[REGNO (x)].single_usage == const0_rtx)
3601 regs->multiple_uses = 1;
3604 /* Return nonzero if the rtx X is invariant over the current loop.
3606 The value is 2 if we refer to something only conditionally invariant.
3608 A memory ref is invariant if it is not volatile and does not conflict
3609 with anything stored in `loop_info->store_mems'. */
3611 static int
3612 loop_invariant_p (const struct loop *loop, rtx x)
3614 struct loop_info *loop_info = LOOP_INFO (loop);
3615 struct loop_regs *regs = LOOP_REGS (loop);
3616 int i;
3617 enum rtx_code code;
3618 const char *fmt;
3619 int conditional = 0;
3620 rtx mem_list_entry;
3622 if (x == 0)
3623 return 1;
3624 code = GET_CODE (x);
3625 switch (code)
3627 case CONST_INT:
3628 case CONST_DOUBLE:
3629 case SYMBOL_REF:
3630 case CONST:
3631 return 1;
3633 case LABEL_REF:
3634 return 1;
3636 case PC:
3637 case CC0:
3638 case UNSPEC_VOLATILE:
3639 return 0;
3641 case REG:
3642 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3643 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3644 && ! current_function_has_nonlocal_goto)
3645 return 1;
3647 if (LOOP_INFO (loop)->has_call
3648 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3649 return 0;
3651 /* Out-of-range regs can occur when we are called from unrolling.
3652 These registers created by the unroller are set in the loop,
3653 hence are never invariant.
3654 Other out-of-range regs can be generated by load_mems; those that
3655 are written to in the loop are not invariant, while those that are
3656 not written to are invariant. It would be easy for load_mems
3657 to set n_times_set correctly for these registers; however, there
3658 is no easy way to distinguish them from registers created by the
3659 unroller. */
3661 if (REGNO (x) >= (unsigned) regs->num)
3662 return 0;
3664 if (regs->array[REGNO (x)].set_in_loop < 0)
3665 return 2;
3667 return regs->array[REGNO (x)].set_in_loop == 0;
3669 case MEM:
3670 /* Volatile memory references must be rejected. Do this before
3671 checking for read-only items, so that volatile read-only items
3672 will be rejected also. */
3673 if (MEM_VOLATILE_P (x))
3674 return 0;
3676 /* See if there is any dependence between a store and this load. */
3677 mem_list_entry = loop_info->store_mems;
3678 while (mem_list_entry)
3680 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3681 x, rtx_varies_p))
3682 return 0;
3684 mem_list_entry = XEXP (mem_list_entry, 1);
3687 /* It's not invalidated by a store in memory
3688 but we must still verify the address is invariant. */
3689 break;
3691 case ASM_OPERANDS:
3692 /* Don't mess with insns declared volatile. */
3693 if (MEM_VOLATILE_P (x))
3694 return 0;
3695 break;
3697 default:
3698 break;
3701 fmt = GET_RTX_FORMAT (code);
3702 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3704 if (fmt[i] == 'e')
3706 int tem = loop_invariant_p (loop, XEXP (x, i));
3707 if (tem == 0)
3708 return 0;
3709 if (tem == 2)
3710 conditional = 1;
3712 else if (fmt[i] == 'E')
3714 int j;
3715 for (j = 0; j < XVECLEN (x, i); j++)
3717 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3718 if (tem == 0)
3719 return 0;
3720 if (tem == 2)
3721 conditional = 1;
3727 return 1 + conditional;
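/* A source-level sketch (hypothetical example) of what this computes: in

	t = x * y;
	for (i = 0; i < n; i++)
	  a[i] = a[i] + t;

   the reg holding T is invariant (value 1), since it is never set inside
   the loop, whereas the MEM a[i] is not, because it may conflict with the
   stores recorded in loop_info->store_mems.  A value of 2 (conditionally
   invariant) arises e.g. for regs with set_in_loop < 0, whose motion
   depends on that of other movables.  */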
3730 /* Return nonzero if all the insns in the loop that set REG
3731 are INSN and the immediately following insns,
3732 and if each of those insns sets REG in an invariant way
3733 (not counting uses of REG in them).
3735 The value is 2 if some of these insns are only conditionally invariant.
3737 We assume that INSN itself is the first set of REG
3738 and that its source is invariant. */
3740 static int
3741 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3742 rtx insn)
3744 struct loop_regs *regs = LOOP_REGS (loop);
3745 rtx p = insn;
3746 unsigned int regno = REGNO (reg);
3747 rtx temp;
3748 /* Number of sets we have to insist on finding after INSN. */
3749 int count = n_sets - 1;
3750 int old = regs->array[regno].set_in_loop;
3751 int value = 0;
3752 int this;
3754 /* If N_SETS hit the limit, we can't rely on its value. */
3755 if (n_sets == 127)
3756 return 0;
3758 regs->array[regno].set_in_loop = 0;
3760 while (count > 0)
3762 enum rtx_code code;
3763 rtx set;
3765 p = NEXT_INSN (p);
3766 code = GET_CODE (p);
3768 /* If library call, skip to end of it. */
3769 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3770 p = XEXP (temp, 0);
3772 this = 0;
3773 if (code == INSN
3774 && (set = single_set (p))
3775 && REG_P (SET_DEST (set))
3776 && REGNO (SET_DEST (set)) == regno)
3778 this = loop_invariant_p (loop, SET_SRC (set));
3779 if (this != 0)
3780 value |= this;
3781 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3783 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3784 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3785 notes are OK. */
3786 this = (CONSTANT_P (XEXP (temp, 0))
3787 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3788 && loop_invariant_p (loop, XEXP (temp, 0))));
3789 if (this != 0)
3790 value |= this;
3793 if (this != 0)
3794 count--;
3795 else if (code != NOTE)
3797 regs->array[regno].set_in_loop = old;
3798 return 0;
3802 regs->array[regno].set_in_loop = old;
3803 /* If loop_invariant_p ever returned 2, we return 2. */
3804 return 1 + (value & 2);
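/* For example (hypothetical rtl), a value built up by two consecutive
   insns setting the same pseudo,

	(set (reg 100) (reg 90))
	(set (reg 100) (plus (reg 100) (const_int 4)))

   is handled here with N_SETS == 2.  Temporarily zeroing
   regs->array[regno].set_in_loop above is what lets the use of (reg 100)
   in the second source pass loop_invariant_p.  */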
3807 /* Look at all uses (not sets) of registers in X. For each, if it is
3808 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3809 a different insn, set USAGE[REGNO] to const0_rtx. */
3811 static void
3812 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3814 enum rtx_code code = GET_CODE (x);
3815 const char *fmt = GET_RTX_FORMAT (code);
3816 int i, j;
3818 if (code == REG)
3819 regs->array[REGNO (x)].single_usage
3820 = (regs->array[REGNO (x)].single_usage != 0
3821 && regs->array[REGNO (x)].single_usage != insn)
3822 ? const0_rtx : insn;
3824 else if (code == SET)
3826 /* Don't count SET_DEST if it is a REG; otherwise count things
3827 in SET_DEST because if a register is partially modified, it won't
3828 show up as a potential movable, so we don't care how USAGE is set
3829 for it. */
3830 if (!REG_P (SET_DEST (x)))
3831 find_single_use_in_loop (regs, insn, SET_DEST (x));
3832 find_single_use_in_loop (regs, insn, SET_SRC (x));
3834 else
3835 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3837 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3838 find_single_use_in_loop (regs, insn, XEXP (x, i));
3839 else if (fmt[i] == 'E')
3840 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3841 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3845 /* Count and record any set in X which is contained in INSN. Update
3846 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3847 in X. */
3849 static void
3850 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3852 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3853 /* Don't move a reg that has an explicit clobber.
3854 It's not worth the pain to try to do it correctly. */
3855 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3857 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3859 rtx dest = SET_DEST (x);
3860 while (GET_CODE (dest) == SUBREG
3861 || GET_CODE (dest) == ZERO_EXTRACT
3862 || GET_CODE (dest) == STRICT_LOW_PART)
3863 dest = XEXP (dest, 0);
3864 if (REG_P (dest))
3866 int i;
3867 int regno = REGNO (dest);
3868 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3870 /* If this is the first setting of this reg
3871 in current basic block, and it was set before,
3872 it must be set in two basic blocks, so it cannot
3873 be moved out of the loop. */
3874 if (regs->array[regno].set_in_loop > 0
3875 && last_set[regno] == 0)
3876 regs->array[regno+i].may_not_optimize = 1;
3877 /* If this is not the first setting in the current basic block,
3878 see if the reg was used between the previous setting and this one.
3879 If so, neither one can be moved. */
3880 if (last_set[regno] != 0
3881 && reg_used_between_p (dest, last_set[regno], insn))
3882 regs->array[regno+i].may_not_optimize = 1;
3883 if (regs->array[regno+i].set_in_loop < 127)
3884 ++regs->array[regno+i].set_in_loop;
3885 last_set[regno+i] = insn;
3891 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3892 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3893 contained in insn INSN is used by any insn that precedes INSN in
3894 cyclic order starting from the loop entry point.
3896 We don't want to use INSN_LUID here because if we restrict INSN to those
3897 that have a valid INSN_LUID, it means we cannot move an invariant out
3898 from an inner loop past two loops. */
3900 static int
3901 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3903 rtx reg = SET_DEST (set);
3904 rtx p;
3906 /* Scan forward checking for register usage. If we hit INSN, we
3907 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3908 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3910 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3911 return 1;
3913 if (p == loop->end)
3914 p = loop->start;
3917 return 0;
3921 /* Information we collect about arrays that we might want to prefetch. */
3922 struct prefetch_info
3924 struct iv_class *class; /* Class this prefetch is based on. */
3925 struct induction *giv; /* GIV this prefetch is based on. */
3926 rtx base_address; /* Start prefetching from this address plus
3927 index. */
3928 HOST_WIDE_INT index;
3929 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3930 iteration. */
3931 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3932 prefetch area in one iteration. */
3933 unsigned int total_bytes; /* Total bytes loop will access in this block.
3934 This is set only for loops with known
3935 iteration counts and is 0xffffffff
3936 otherwise. */
3937 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3938 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3939 unsigned int write : 1; /* 1 for read/write prefetches. */
3942 /* Data used by check_store function. */
3943 struct check_store_data
3945 rtx mem_address;
3946 int mem_write;
3949 static void check_store (rtx, rtx, void *);
3950 static void emit_prefetch_instructions (struct loop *);
3951 static int rtx_equal_for_prefetch_p (rtx, rtx);
3953 /* Set mem_write when mem_address is found. Used as callback to
3954 note_stores. */
3955 static void
3956 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3958 struct check_store_data *d = (struct check_store_data *) data;
3960 if (MEM_P (x) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3961 d->mem_write = 1;
3964 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3965 important to get some addresses combined. Later more sophisticated
3966 transformations can be added when necessary.
3968 ??? The same trick of swapping operands is done at several other places.
3969 It would be nice to develop some common way to handle this. */
3971 static int
3972 rtx_equal_for_prefetch_p (rtx x, rtx y)
3974 int i;
3975 int j;
3976 enum rtx_code code = GET_CODE (x);
3977 const char *fmt;
3979 if (x == y)
3980 return 1;
3981 if (code != GET_CODE (y))
3982 return 0;
3984 if (COMMUTATIVE_ARITH_P (x))
3986 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3987 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3988 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3989 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3992 /* Compare the elements. If any pair of corresponding elements fails to
3993 match, return 0 for the whole thing. */
3995 fmt = GET_RTX_FORMAT (code);
3996 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3998 switch (fmt[i])
4000 case 'w':
4001 if (XWINT (x, i) != XWINT (y, i))
4002 return 0;
4003 break;
4005 case 'i':
4006 if (XINT (x, i) != XINT (y, i))
4007 return 0;
4008 break;
4010 case 'E':
4011 /* Two vectors must have the same length. */
4012 if (XVECLEN (x, i) != XVECLEN (y, i))
4013 return 0;
4015 /* And the corresponding elements must match. */
4016 for (j = 0; j < XVECLEN (x, i); j++)
4017 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
4018 XVECEXP (y, i, j)) == 0)
4019 return 0;
4020 break;
4022 case 'e':
4023 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
4024 return 0;
4025 break;
4027 case 's':
4028 if (strcmp (XSTR (x, i), XSTR (y, i)))
4029 return 0;
4030 break;
4032 case 'u':
4033 /* These are just backpointers, so they don't matter. */
4034 break;
4036 case '0':
4037 break;
4039 /* It is believed that rtx's at this level will never
4040 contain anything but integers and other rtx's,
4041 except within LABEL_REFs and SYMBOL_REFs. */
4042 default:
4043 gcc_unreachable ();
4046 return 1;
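/* E.g. (plus (reg 100) (reg 101)) and (plus (reg 101) (reg 100)) compare
   equal here, though rtx_equal_p would reject them, so two givs that walk
   the same array with operands in opposite order can still have their
   prefetches merged.  */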
4049 /* Remove constant addition value from the expression X (when present)
4050 and return it. */
4052 static HOST_WIDE_INT
4053 remove_constant_addition (rtx *x)
4055 HOST_WIDE_INT addval = 0;
4056 rtx exp = *x;
4058 /* Avoid clobbering a shared CONST expression. */
4059 if (GET_CODE (exp) == CONST)
4061 if (GET_CODE (XEXP (exp, 0)) == PLUS
4062 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
4063 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
4065 *x = XEXP (XEXP (exp, 0), 0);
4066 return INTVAL (XEXP (XEXP (exp, 0), 1));
4068 return 0;
4071 if (GET_CODE (exp) == CONST_INT)
4073 addval = INTVAL (exp);
4074 *x = const0_rtx;
4077 /* For a PLUS expression, recurse on both operands. */
4078 else if (GET_CODE (exp) == PLUS)
4080 addval += remove_constant_addition (&XEXP (exp, 0));
4081 addval += remove_constant_addition (&XEXP (exp, 1));
4083 /* If either operand turned out to be constant, remove the resulting
4084 zero from the expression. */
4085 if (XEXP (exp, 0) == const0_rtx)
4086 *x = XEXP (exp, 1);
4087 else if (XEXP (exp, 1) == const0_rtx)
4088 *x = XEXP (exp, 0);
4091 return addval;
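/* For instance, given *X == (plus (reg 100) (const_int 12)) this returns
   12 and rewrites *X to (reg 100); for a shared
   (const (plus (symbol_ref "s") (const_int 8))) it returns 8 and leaves
   just the SYMBOL_REF, without clobbering the CONST.  (The symbol name
   "s" is hypothetical.)  */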
4094 /* Attempt to identify accesses to arrays that are most likely to cause cache
4095 misses, and emit prefetch instructions a few prefetch blocks forward.
4097 To detect the arrays we use the GIV information that was collected by the
4098 strength reduction pass.
4100 The prefetch instructions are generated after the GIV information is done
4101 and before the strength reduction process. The new GIVs are injected into
4102 the strength reduction tables, so the prefetch addresses are optimized as
4103 well.
4105 GIVs are split into base address, stride, and constant addition values.
4106 GIVs with the same address, stride and close addition values are combined
4107 into a single prefetch. Also writes to GIVs are detected, so that prefetch
4108 for write instructions can be used for the block we write to, on machines
4109 that support write prefetches.
4111 Several heuristics are used to determine when to prefetch. They are
4112 controlled by defined symbols that can be overridden for each target. */
4114 static void
4115 emit_prefetch_instructions (struct loop *loop)
4117 int num_prefetches = 0;
4118 int num_real_prefetches = 0;
4119 int num_real_write_prefetches = 0;
4120 int num_prefetches_before = 0;
4121 int num_write_prefetches_before = 0;
4122 int ahead = 0;
4123 int i;
4124 struct iv_class *bl;
4125 struct induction *iv;
4126 struct prefetch_info info[MAX_PREFETCHES];
4127 struct loop_ivs *ivs = LOOP_IVS (loop);
4129 if (!HAVE_prefetch || PREFETCH_BLOCK == 0)
4130 return;
4132 /* Consider only loops without calls. When a call is made, the loop is
4133 probably slow enough that the memory reads have time to complete anyway. */
4134 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
4136 if (loop_dump_stream)
4137 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
4139 return;
4142 /* Don't prefetch in loops known to have few iterations. */
4143 if (PREFETCH_NO_LOW_LOOPCNT
4144 && LOOP_INFO (loop)->n_iterations
4145 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
4147 if (loop_dump_stream)
4148 fprintf (loop_dump_stream,
4149 "Prefetch: ignoring loop: not enough iterations.\n");
4150 return;
4153 /* Search all induction variables and pick those interesting for the prefetch
4154 machinery. */
4155 for (bl = ivs->list; bl; bl = bl->next)
4157 struct induction *biv = bl->biv, *biv1;
4158 int basestride = 0;
4160 biv1 = biv;
4162 /* Expect all BIVs to be executed in each iteration. This makes our
4163 analysis more conservative. */
4164 while (biv1)
4166 /* Discard non-constant additions that we can't handle well yet, and
4167 BIVs that are executed multiple times; such BIVs ought to be
4168 handled in the nested loop. We accept not_every_iteration BIVs,
4169 since these only result in larger strides and make our
4170 heuristics more conservative. */
4171 if (GET_CODE (biv->add_val) != CONST_INT)
4173 if (loop_dump_stream)
4175 fprintf (loop_dump_stream,
4176 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
4177 REGNO (biv->src_reg), INSN_UID (biv->insn));
4178 print_rtl (loop_dump_stream, biv->add_val);
4179 fprintf (loop_dump_stream, "\n");
4181 break;
4184 if (biv->maybe_multiple)
4186 if (loop_dump_stream)
4188 fprintf (loop_dump_stream,
4189 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
4190 REGNO (biv->src_reg), INSN_UID (biv->insn));
4191 print_rtl (loop_dump_stream, biv->add_val);
4192 fprintf (loop_dump_stream, "\n");
4194 break;
4197 basestride += INTVAL (biv1->add_val);
4198 biv1 = biv1->next_iv;
4201 if (biv1 || !basestride)
4202 continue;
4204 for (iv = bl->giv; iv; iv = iv->next_iv)
4206 rtx address;
4207 rtx temp;
4208 HOST_WIDE_INT index = 0;
4209 int add = 1;
4210 HOST_WIDE_INT stride = 0;
4211 int stride_sign = 1;
4212 struct check_store_data d;
4213 const char *ignore_reason = NULL;
4214 int size = GET_MODE_SIZE (GET_MODE (iv));
4216 /* See whether an induction variable is interesting to us and if
4217 not, report the reason. */
4218 if (iv->giv_type != DEST_ADDR)
4219 ignore_reason = "giv is not a destination address";
4221 /* We are interested only in constant stride memory references
4222 in order to be able to compute density easily. */
4223 else if (GET_CODE (iv->mult_val) != CONST_INT)
4224 ignore_reason = "stride is not constant";
4226 else
4228 stride = INTVAL (iv->mult_val) * basestride;
4229 if (stride < 0)
4231 stride = -stride;
4232 stride_sign = -1;
4235 /* On some targets, reversed order prefetches are not
4236 worthwhile. */
4237 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
4238 ignore_reason = "reversed order stride";
4240 /* Prefetch of accesses with an extreme stride might not be
4241 worthwhile, either. */
4242 else if (PREFETCH_NO_EXTREME_STRIDE
4243 && stride > PREFETCH_EXTREME_STRIDE)
4244 ignore_reason = "extreme stride";
4246 /* Ignore GIVs with varying add values; we can't predict the
4247 value for the next iteration. */
4248 else if (!loop_invariant_p (loop, iv->add_val))
4249 ignore_reason = "giv has varying add value";
4251 /* Ignore GIVs in the nested loops; they ought to have been
4252 handled already. */
4253 else if (iv->maybe_multiple)
4254 ignore_reason = "giv is in nested loop";
4257 if (ignore_reason != NULL)
4259 if (loop_dump_stream)
4260 fprintf (loop_dump_stream,
4261 "Prefetch: ignoring giv at %d: %s.\n",
4262 INSN_UID (iv->insn), ignore_reason);
4263 continue;
4266 /* Determine the pointer to the basic array we are examining. It is
4267 the sum of the BIV's initial value and the GIV's add_val. */
4268 address = copy_rtx (iv->add_val);
4269 temp = copy_rtx (bl->initial_value);
4271 address = simplify_gen_binary (PLUS, Pmode, temp, address);
4272 index = remove_constant_addition (&address);
4274 d.mem_write = 0;
4275 d.mem_address = *iv->location;
4277 /* When the GIV is not always executed, we might be better off by
4278 not dirtying the cache pages. */
4279 if (PREFETCH_CONDITIONAL || iv->always_executed)
4280 note_stores (PATTERN (iv->insn), check_store, &d);
4281 else
4283 if (loop_dump_stream)
4284 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
4285 INSN_UID (iv->insn), "in conditional code.");
4286 continue;
4289 /* Attempt to find another prefetch to the same array and see if we
4290 can merge this one. */
4291 for (i = 0; i < num_prefetches; i++)
4292 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
4293 && stride == info[i].stride)
4295 /* If both access the same array (the same location, differing
4296 only by a small constant index), merge the prefetches. Just
4297 keep the later one; the earlier access will be prefetched
4298 by the previous iteration. The artificial threshold should
4299 not be too small, but also not bigger than the small portion
4300 of memory usually traversed by a single loop. */
4302 if (index >= info[i].index
4303 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4305 info[i].write |= d.mem_write;
4306 info[i].bytes_accessed += size;
4307 info[i].index = index;
4308 info[i].giv = iv;
4309 info[i].class = bl;
4310 info[i].base_address = address;
4311 add = 0;
4312 break;
4315 if (index < info[i].index
4316 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4318 info[i].write |= d.mem_write;
4319 info[i].bytes_accessed += size;
4320 add = 0;
4321 break;
4325 /* Merging failed. */
4326 if (add)
4328 info[num_prefetches].giv = iv;
4329 info[num_prefetches].class = bl;
4330 info[num_prefetches].index = index;
4331 info[num_prefetches].stride = stride;
4332 info[num_prefetches].base_address = address;
4333 info[num_prefetches].write = d.mem_write;
4334 info[num_prefetches].bytes_accessed = size;
4335 num_prefetches++;
4336 if (num_prefetches >= MAX_PREFETCHES)
4338 if (loop_dump_stream)
4339 fprintf (loop_dump_stream,
4340 "Maximal number of prefetches exceeded.\n");
4341 return;
4347 for (i = 0; i < num_prefetches; i++)
4349 int density;
4351 /* Attempt to calculate the total number of bytes fetched by all
4352 iterations of the loop. Avoid overflow. */
4353 if (LOOP_INFO (loop)->n_iterations
4354 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4355 >= LOOP_INFO (loop)->n_iterations))
4356 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4357 else
4358 info[i].total_bytes = 0xffffffff;
4360 density = info[i].bytes_accessed * 100 / info[i].stride;
4362 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4363 if (PREFETCH_ONLY_DENSE_MEM)
4364 if (density * 256 > PREFETCH_DENSE_MEM * 100
4365 && (info[i].total_bytes / PREFETCH_BLOCK
4366 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4368 info[i].prefetch_before_loop = 1;
4369 info[i].prefetch_in_loop
4370 = (info[i].total_bytes / PREFETCH_BLOCK
4371 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4373 else
4375 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4376 if (loop_dump_stream)
4377 fprintf (loop_dump_stream,
4378 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4379 INSN_UID (info[i].giv->insn), density);
4381 else
4382 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4384 /* Find how many prefetch instructions we'll use within the loop. */
4385 if (info[i].prefetch_in_loop != 0)
4387 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4388 / PREFETCH_BLOCK);
4389 num_real_prefetches += info[i].prefetch_in_loop;
4390 if (info[i].write)
4391 num_real_write_prefetches += info[i].prefetch_in_loop;
4395 /* Determine how many iterations ahead to prefetch within the loop, based
4396 on how many prefetches we currently expect to do within the loop. */
4397 if (num_real_prefetches != 0)
4399 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4401 if (loop_dump_stream)
4402 fprintf (loop_dump_stream,
4403 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4404 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4405 num_real_prefetches = 0, num_real_write_prefetches = 0;
4408 /* We'll also use AHEAD to determine how many prefetch instructions to
4409 emit before a loop, so don't leave it zero. */
4410 if (ahead == 0)
4411 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4413 for (i = 0; i < num_prefetches; i++)
4415 /* Update if we've decided not to prefetch anything within the loop. */
4416 if (num_real_prefetches == 0)
4417 info[i].prefetch_in_loop = 0;
4419 /* Find how many prefetch instructions we'll use before the loop. */
4420 if (info[i].prefetch_before_loop != 0)
4422 int n = info[i].total_bytes / PREFETCH_BLOCK;
4423 if (n > ahead)
4424 n = ahead;
4425 info[i].prefetch_before_loop = n;
4426 num_prefetches_before += n;
4427 if (info[i].write)
4428 num_write_prefetches_before += n;
4431 if (loop_dump_stream)
4433 if (info[i].prefetch_in_loop == 0
4434 && info[i].prefetch_before_loop == 0)
4435 continue;
4436 fprintf (loop_dump_stream, "Prefetch insn: %d",
4437 INSN_UID (info[i].giv->insn));
4438 fprintf (loop_dump_stream,
4439 "; in loop: %d; before: %d; %s\n",
4440 info[i].prefetch_in_loop,
4441 info[i].prefetch_before_loop,
4442 info[i].write ? "read/write" : "read only");
4443 fprintf (loop_dump_stream,
4444 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4445 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4446 info[i].bytes_accessed, info[i].total_bytes);
4447 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4448 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4449 info[i].index, info[i].stride);
4450 print_rtl (loop_dump_stream, info[i].base_address);
4451 fprintf (loop_dump_stream, "\n");
4455 if (num_real_prefetches + num_prefetches_before > 0)
4457 /* Record that this loop uses prefetch instructions. */
4458 LOOP_INFO (loop)->has_prefetch = 1;
4460 if (loop_dump_stream)
4462 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4463 num_real_prefetches, num_real_write_prefetches);
4464 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4465 num_prefetches_before, num_write_prefetches_before);
4469 for (i = 0; i < num_prefetches; i++)
4471 int y;
4473 for (y = 0; y < info[i].prefetch_in_loop; y++)
4475 rtx loc = copy_rtx (*info[i].giv->location);
4476 rtx insn;
4477 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4478 rtx before_insn = info[i].giv->insn;
4479 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4480 rtx seq;
4482 /* We can save some effort by offsetting the address on
4483 architectures with offsettable memory references. */
4484 if (offsettable_address_p (0, VOIDmode, loc))
4485 loc = plus_constant (loc, bytes_ahead);
4486 else
4488 rtx reg = gen_reg_rtx (Pmode);
4489 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4490 GEN_INT (bytes_ahead), reg,
4491 0, before_insn);
4492 loc = reg;
4495 start_sequence ();
4496 /* Make sure the address operand is valid for prefetch. */
4497 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4498 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4499 loc = force_reg (Pmode, loc);
4500 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4501 GEN_INT (3)));
4502 seq = get_insns ();
4503 end_sequence ();
4504 emit_insn_before (seq, before_insn);
4506 /* Check all insns emitted and record the new GIV
4507 information. */
4508 insn = NEXT_INSN (prev_insn);
4509 while (insn != before_insn)
4511 insn = check_insn_for_givs (loop, insn,
4512 info[i].giv->always_executed,
4513 info[i].giv->maybe_multiple);
4514 insn = NEXT_INSN (insn);
4518 if (PREFETCH_BEFORE_LOOP)
4520 /* Emit insns before the loop to fetch the first cache lines or,
4521 if we're not prefetching within the loop, everything we expect
4522 to need. */
4523 for (y = 0; y < info[i].prefetch_before_loop; y++)
4525 rtx reg = gen_reg_rtx (Pmode);
4526 rtx loop_start = loop->start;
4527 rtx init_val = info[i].class->initial_value;
4528 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4529 info[i].giv->add_val,
4530 GEN_INT (y * PREFETCH_BLOCK));
4532 /* Functions called by loop_iv_add_mult_emit_before expect a
4533 non-constant INIT_VAL to have the same mode as REG, which
4534 in this case we know to be Pmode. */
4535 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4537 rtx seq;
4539 start_sequence ();
4540 init_val = convert_to_mode (Pmode, init_val, 0);
4541 seq = get_insns ();
4542 end_sequence ();
4543 loop_insn_emit_before (loop, 0, loop_start, seq);
4545 loop_iv_add_mult_emit_before (loop, init_val,
4546 info[i].giv->mult_val,
4547 add_val, reg, 0, loop_start);
4548 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4549 GEN_INT (3)),
4550 loop_start);
4555 return;
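/* A rough sketch of the net effect of the above on a dense copy loop
   (hypothetical source):

	for (i = 0; i < n; i++)
	  dst[i] = src[i];

   conceptually becomes

	for (i = 0; i < n; i++)
	  {
	    __builtin_prefetch (&src[i + ahead], 0, 3);
	    __builtin_prefetch (&dst[i + ahead], 1, 3);
	    dst[i] = src[i];
	  }

   with a few more prefetches emitted ahead of the loop to cover the first
   cache lines; the second and third arguments correspond to info[i].write
   and the GEN_INT (3) locality hint used above.  */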
4558 /* Communication with routines called via `note_stores'. */
4560 static rtx note_insn;
4562 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4564 static rtx addr_placeholder;
4566 /* ??? Unfinished optimizations, and possible future optimizations,
4567 for the strength reduction code. */
4569 /* ??? The interaction of biv elimination and recognition of 'constant'
4570 bivs may cause problems. */
4572 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4573 performance problems.
4575 Perhaps don't eliminate things that can be combined with an addressing
4576 mode. Find all givs that have the same biv, mult_val, and add_val;
4577 then for each giv, check to see if its only use dies in a following
4578 memory address. If so, generate a new memory address and check to see
4579 if it is valid. If it is valid, then store the modified memory address,
4580 otherwise, mark the giv as not done so that it will get its own iv. */
4582 /* ??? Could try to optimize branches when it is known that a biv is always
4583 positive. */
4585 /* ??? When replacing a biv in a compare insn, we should replace it with the
4586 closest giv so that an optimized branch can still be recognized by the
4587 combiner, e.g. the VAX acb insn. */
4589 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4590 was rerun in loop_optimize whenever a register was added or moved.
4591 Also, some of the optimizations could be a little less conservative. */
4593 /* Searches the insns between INSN and LOOP->END. Returns 1 if there
4594 is a backward branch in that range that branches to somewhere between
4595 LOOP->START and INSN. Returns 0 otherwise. */
4597 /* ??? This is a quadratic algorithm. It could be rewritten to be linear.
4598 In practice, this is not a problem, because this function is seldom called,
4599 and uses a negligible amount of CPU time on average. */
4601 static int
4602 back_branch_in_range_p (const struct loop *loop, rtx insn)
4604 rtx p, q, target_insn;
4605 rtx loop_start = loop->start;
4606 rtx loop_end = loop->end;
4607 rtx orig_loop_end = loop->end;
4609 /* Stop before we get to the backward branch at the end of the loop. */
4610 loop_end = prev_nonnote_insn (loop_end);
4611 if (BARRIER_P (loop_end))
4612 loop_end = PREV_INSN (loop_end);
4614 /* In case insn has been deleted, search forward for the first
4615 non-deleted insn following it. */
4616 while (INSN_DELETED_P (insn))
4617 insn = NEXT_INSN (insn);
4619 /* Check for the case where insn is the last insn in the loop. Deal
4620 with the case where INSN was a deleted loop test insn, in which case
4621 it will now be the NOTE_LOOP_END. */
4622 if (insn == loop_end || insn == orig_loop_end)
4623 return 0;
4625 for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
4627 if (JUMP_P (p))
4629 target_insn = JUMP_LABEL (p);
4631 /* Search from loop_start to insn, to see if one of them is
4632 the target_insn. We can't use INSN_LUID comparisons here,
4633 since insn may not have an LUID entry. */
4634 for (q = loop_start; q != insn; q = NEXT_INSN (q))
4635 if (q == target_insn)
4636 return 1;
4640 return 0;
4643 /* Scan the loop body and call FNCALL for each insn. In addition to the
4644 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4645 callback.
4647 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed at
4648 least once for every loop iteration except for the last one.
4650 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4651 every loop iteration. */
4653 typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int);
4654 static void
4655 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4657 int not_every_iteration = 0;
4658 int maybe_multiple = 0;
4659 int past_loop_latch = 0;
4660 bool exit_test_is_entry = false;
4661 rtx p;
4663 /* If loop_scan_start points to the loop exit test, the loop body
4664 cannot be counted on to run on every iteration, and we have to
4665 be wary of subversive use of gotos inside expression
4666 statements. */
4667 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4669 exit_test_is_entry = true;
4670 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4673 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4674 for (p = next_insn_in_loop (loop, loop->scan_start);
4675 p != NULL_RTX;
4676 p = next_insn_in_loop (loop, p))
4678 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4680 /* Past CODE_LABEL, we get to insns that may be executed multiple
4681 times. The only way we can be sure that they can't is if every
4682 jump insn between here and the end of the loop either
4683 returns, exits the loop, is a jump to a location that is still
4684 behind the label, or is a jump to the loop start. */
4686 if (LABEL_P (p))
4688 rtx insn = p;
4690 maybe_multiple = 0;
4692 while (1)
4694 insn = NEXT_INSN (insn);
4695 if (insn == loop->scan_start)
4696 break;
4697 if (insn == loop->end)
4699 if (loop->top != 0)
4700 insn = loop->top;
4701 else
4702 break;
4703 if (insn == loop->scan_start)
4704 break;
4707 if (JUMP_P (insn)
4708 && GET_CODE (PATTERN (insn)) != RETURN
4709 && (!any_condjump_p (insn)
4710 || (JUMP_LABEL (insn) != 0
4711 && JUMP_LABEL (insn) != loop->scan_start
4712 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4714 maybe_multiple = 1;
4715 break;
4720 /* Past a jump, we get to insns for which we can't count
4721 on whether they will be executed during each iteration. */
4722 /* This code appears twice in strength_reduce. There is also similar
4723 code in scan_loop. */
4724 if (JUMP_P (p)
4725 /* If we enter the loop in the middle, and scan around to the
4726 beginning, don't set not_every_iteration for that.
4727 This can be any kind of jump, since we want to know if insns
4728 will be executed if the loop is executed. */
4729 && (exit_test_is_entry
4730 || !(JUMP_LABEL (p) == loop->top
4731 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4732 && any_uncondjump_p (p))
4733 || (NEXT_INSN (p) == loop->end
4734 && any_condjump_p (p))))))
4736 rtx label = 0;
4738 /* If this is a jump outside the loop, then it also doesn't
4739 matter. Check to see if the target of this branch is on the
4740 loop->exits_labels list. */
4742 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4743 if (XEXP (label, 0) == JUMP_LABEL (p))
4744 break;
4746 if (!label)
4747 not_every_iteration = 1;
4750 /* Note if we pass a loop latch. If we do, then we cannot clear
4751 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4752 a loop since a jump before the last CODE_LABEL may have started
4753 a new loop iteration.
4755 Note that LOOP_TOP is only set for rotated loops and we need
4756 this check for all loops, so compare against the CODE_LABEL
4757 which immediately follows LOOP_START. */
4758 if (JUMP_P (p)
4759 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4760 past_loop_latch = 1;
4762 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4763 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4764 or not an insn is known to be executed each iteration of the
4765 loop, whether or not any iterations are known to occur.
4767 Therefore, if we have just passed a label and have no more labels
4768 between here and the test insn of the loop, and we have not passed
4769 a jump to the top of the loop, then we know these insns will be
4770 executed each iteration. */
4772 if (not_every_iteration
4773 && !past_loop_latch
4774 && LABEL_P (p)
4775 && no_labels_between_p (p, loop->end))
4776 not_every_iteration = 0;
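/* A hypothetical source loop illustrating the two flags:

	for (i = 0; i < n; i++)
	  {
	    a[i] = 0;		-- runs exactly once per iteration
	    if (p (i))
	      b[i] = 1;		-- NOT_EVERY_ITERATION is 1 here
	  back:
	    c[i]++;		-- MAYBE_MULTIPLE is 1 from the label on,
	    if (q (i))		   because of the backward branch
	      goto back;
	  }  */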
4780 static void
4781 loop_bivs_find (struct loop *loop)
4783 struct loop_regs *regs = LOOP_REGS (loop);
4784 struct loop_ivs *ivs = LOOP_IVS (loop);
4785 /* Temporary list pointers for traversing ivs->list. */
4786 struct iv_class *bl, **backbl;
4788 ivs->list = 0;
4790 for_each_insn_in_loop (loop, check_insn_for_bivs);
4792 /* Scan ivs->list to remove all regs that proved not to be bivs.
4793 Make a sanity check against regs->n_times_set. */
4794 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4796 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4797 /* Above happens if register modified by subreg, etc. */
4798 /* Make sure it is not recognized as a basic induction var: */
4799 || regs->array[bl->regno].n_times_set != bl->biv_count
4800 /* If never incremented, it is invariant that we decided not to
4801 move. So leave it alone. */
4802 || ! bl->incremented)
4804 if (loop_dump_stream)
4805 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4806 bl->regno,
4807 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4808 ? "not induction variable"
4809 : (! bl->incremented ? "never incremented"
4810 : "count error")));
4812 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4813 *backbl = bl->next;
4815 else
4817 backbl = &bl->next;
4819 if (loop_dump_stream)
4820 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4826 /* Determine how BIVs are initialized by looking through the pre-header's
4827 extended basic block. */
4828 static void
4829 loop_bivs_init_find (struct loop *loop)
4831 struct loop_ivs *ivs = LOOP_IVS (loop);
4832 /* Temporary list pointers for traversing ivs->list. */
4833 struct iv_class *bl;
4834 int call_seen;
4835 rtx p;
4837 /* Find initial value for each biv by searching backwards from loop_start,
4838 halting at first label. Also record any test condition. */
4840 call_seen = 0;
4841 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4843 rtx test;
4845 note_insn = p;
4847 if (CALL_P (p))
4848 call_seen = 1;
4850 if (INSN_P (p))
4851 note_stores (PATTERN (p), record_initial, ivs);
4853 /* Record any test of a biv that branches around the loop if there is no
4854 store between it and the start of the loop. We only care about tests
4855 with constants and registers, and only certain of those. */
4856 if (JUMP_P (p)
4857 && JUMP_LABEL (p) != 0
4858 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4859 && (test = get_condition_for_loop (loop, p)) != 0
4860 && REG_P (XEXP (test, 0))
4861 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4862 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4863 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4864 && bl->init_insn == 0)
4866 /* If an NE test, we have an initial value! */
4867 if (GET_CODE (test) == NE)
4869 bl->init_insn = p;
4870 bl->init_set = gen_rtx_SET (VOIDmode,
4871 XEXP (test, 0), XEXP (test, 1));
4873 else
4874 bl->initial_test = test;
4880 /* Look at each biv and see if we can say anything better about its
4881 initial value from any initializing insns set up above. (This is done
4882 in two passes to avoid missing SETs in a PARALLEL.) */
4883 static void
4884 loop_bivs_check (struct loop *loop)
4886 struct loop_ivs *ivs = LOOP_IVS (loop);
4887 /* Temporary list pointers for traversing ivs->list. */
4888 struct iv_class *bl;
4889 struct iv_class **backbl;
4891 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4893 rtx src;
4894 rtx note;
4896 if (! bl->init_insn)
4897 continue;
4899 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4900 is a constant, use the value of that. */
4901 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4902 && CONSTANT_P (XEXP (note, 0)))
4903 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4904 && CONSTANT_P (XEXP (note, 0))))
4905 src = XEXP (note, 0);
4906 else
4907 src = SET_SRC (bl->init_set);
4909 if (loop_dump_stream)
4910 fprintf (loop_dump_stream,
4911 "Biv %d: initialized at insn %d: initial value ",
4912 bl->regno, INSN_UID (bl->init_insn));
4914 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4915 || GET_MODE (src) == VOIDmode)
4916 && valid_initial_value_p (src, bl->init_insn,
4917 LOOP_INFO (loop)->pre_header_has_call,
4918 loop->start))
4920 bl->initial_value = src;
4922 if (loop_dump_stream)
4924 print_simple_rtl (loop_dump_stream, src);
4925 fputc ('\n', loop_dump_stream);
4928 /* If we can't make it a giv,
4929 let the biv keep the register itself as its initial value. */
4930 else if (loop_dump_stream)
4931 fprintf (loop_dump_stream, "is complex\n");
4936 /* Search the loop for general induction variables. */
4938 static void
4939 loop_givs_find (struct loop* loop)
4941 for_each_insn_in_loop (loop, check_insn_for_givs);
4945 /* For each giv for which we still don't know whether or not it is
4946 replaceable, check to see if it is replaceable because its final value
4947 can be calculated. */
4949 static void
4950 loop_givs_check (struct loop *loop)
4952 struct loop_ivs *ivs = LOOP_IVS (loop);
4953 struct iv_class *bl;
4955 for (bl = ivs->list; bl; bl = bl->next)
4957 struct induction *v;
4959 for (v = bl->giv; v; v = v->next_iv)
4960 if (! v->replaceable && ! v->not_replaceable)
4961 check_final_value (loop, v);
4965 /* Try to generate the simplest rtx for the expression
4966 (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
4967 value of giv's. */
4969 static rtx
4970 fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode)
4972 rtx temp, mult_res;
4973 rtx result;
4975 /* The modes must all be the same. This should always be true. For now,
4976 check to make sure. */
4977 gcc_assert (GET_MODE (mult1) == mode || GET_MODE (mult1) == VOIDmode);
4978 gcc_assert (GET_MODE (mult2) == mode || GET_MODE (mult2) == VOIDmode);
4979 gcc_assert (GET_MODE (add1) == mode || GET_MODE (add1) == VOIDmode);
4981 /* Ensure that if at least one of mult1/mult2 are constant, then mult2
4982 will be a constant. */
4983 if (GET_CODE (mult1) == CONST_INT)
4985 temp = mult2;
4986 mult2 = mult1;
4987 mult1 = temp;
4990 mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
4991 if (! mult_res)
4992 mult_res = gen_rtx_MULT (mode, mult1, mult2);
4994 /* Again, put the constant second. */
4995 if (GET_CODE (add1) == CONST_INT)
4997 temp = add1;
4998 add1 = mult_res;
4999 mult_res = temp;
5002 result = simplify_binary_operation (PLUS, mode, add1, mult_res);
5003 if (! result)
5004 result = gen_rtx_PLUS (mode, add1, mult_res);
5006 return result;
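/* E.g. fold_rtx_mult_add (const_int 5, const_int 4, const_int 6, SImode)
   folds all the way down to (const_int 26), while with a register for
   MULT1 the result is the canonical
   (plus:SI (mult:SI (reg) (const_int 4)) (const_int 6)).  */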
5009 /* Search the list of induction structs for the biv BL, trying to calculate
5010 the total increment value for one iteration of the loop as a constant.
5012 Returns the increment value as an rtx, simplified as much as possible,
5013 if it can be calculated. Otherwise, returns 0. */
5015 static rtx
5016 biv_total_increment (const struct iv_class *bl)
5018 struct induction *v;
5019 rtx result;
5021 /* For increment, must check every instruction that sets it. Each
5022 instruction must be executed only once each time through the loop.
5023 To verify this, we check that the insn is always executed, and that
5024 there are no backward branches after the insn that branch to before it.
5025 Also, the insn must have a mult_val of one (to make sure it really is
5026 an increment). */
5028 result = const0_rtx;
5029 for (v = bl->biv; v; v = v->next_iv)
5031 if (v->always_computable && v->mult_val == const1_rtx
5032 && ! v->maybe_multiple
5033 && SCALAR_INT_MODE_P (v->mode))
5035 /* If we have already counted it, skip it. */
5036 if (v->same)
5037 continue;
5039 result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
5041 else
5042 return 0;
5045 return result;
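/* E.g. for a biv incremented twice in the body (hypothetical),

	for (;;) { ...; i += 2; ...; i += 3; ... }

   the result is (const_int 5); if any increment is conditional or may
   execute more than once per iteration, 0 is returned instead.  */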
5048 /* Try to prove that the register is dead after the loop exits. Trace every
5049 loop exit looking for an insn that will always be executed, which sets
5050 the register to some value, and appears before the first use of the register
5051 is found. If successful, then return 1, otherwise return 0. */
5053 /* ?? Could be made more intelligent in the handling of jumps, so that
5054 it can search past if statements and other similar structures. */
5056 static int
5057 reg_dead_after_loop (const struct loop *loop, rtx reg)
5059 rtx insn, label;
5060 int jump_count = 0;
5061 int label_count = 0;
5063 /* In addition to checking all exits of this loop, we must also check
5064 all exits of inner nested loops that would exit this loop. We don't
5065 have any way to identify those, so we just give up if there are any
5066 such inner loop exits. */
5068 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
5069 label_count++;
5071 if (label_count != loop->exit_count)
5072 return 0;
5074 /* HACK: Must also search the loop fall through exit, create a label_ref
5075 here which points to the loop->end, and append the loop_number_exit_labels
5076 list to it. */
5077 label = gen_rtx_LABEL_REF (Pmode, loop->end);
5078 LABEL_NEXTREF (label) = loop->exit_labels;
5080 for (; label; label = LABEL_NEXTREF (label))
5082 /* Succeed if we find an insn which sets the biv or if we reach the end
5083 of the function. Fail if we find an insn that uses the biv, or if we
5084 come to a conditional jump. */
5086 insn = NEXT_INSN (XEXP (label, 0));
5087 while (insn)
5089 if (INSN_P (insn))
5091 rtx set, note;
5093 if (reg_referenced_p (reg, PATTERN (insn)))
5094 return 0;
5096 note = find_reg_equal_equiv_note (insn);
5097 if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0)))
5098 return 0;
5100 set = single_set (insn);
5101 if (set && rtx_equal_p (SET_DEST (set), reg))
5102 break;
5104 if (JUMP_P (insn))
5106 if (GET_CODE (PATTERN (insn)) == RETURN)
5107 break;
5108 else if (!any_uncondjump_p (insn)
5109 /* Prevent infinite loop following infinite loops. */
5110 || jump_count++ > 20)
5111 return 0;
5112 else
5113 insn = JUMP_LABEL (insn);
5117 insn = NEXT_INSN (insn);
5121 /* Success, the register is dead on all loop exits. */
5122 return 1;
5125 /* Try to calculate the final value of the biv, the value it will have at
5126 the end of the loop. If we can do it, return that value. */
5128 static rtx
5129 final_biv_value (const struct loop *loop, struct iv_class *bl)
5131 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
5132 rtx increment, tem;
5134 /* ??? This only works for MODE_INT biv's. Reject all others for now. */
5136 if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
5137 return 0;
5139 /* The final value for reversed bivs must be calculated differently than
5140 for ordinary bivs. In this case, there is already an insn after the
5141 loop which sets this biv's final value (if necessary), and there are
5142 no other loop exits, so we can return any value. */
5143 if (bl->reversed)
5145 if (loop_dump_stream)
5146 fprintf (loop_dump_stream,
5147 "Final biv value for %d, reversed biv.\n", bl->regno);
5149 return const0_rtx;
5152 /* Try to calculate the final value as initial value + (number of iterations
5153 * increment). For this to work, increment must be invariant, the only
5154 exit from the loop must be the fall through at the bottom (otherwise
5155 it may not have its final value when the loop exits), and the initial
5156 value of the biv must be invariant. */
5158 if (n_iterations != 0
5159 && ! loop->exit_count
5160 && loop_invariant_p (loop, bl->initial_value))
5162 increment = biv_total_increment (bl);
5164 if (increment && loop_invariant_p (loop, increment))
5166 /* Can calculate the loop exit value, emit insns after loop
5167 end to calculate this value into a temporary register in
5168 case it is needed later. */
5170 tem = gen_reg_rtx (bl->biv->mode);
5171 record_base_value (REGNO (tem), bl->biv->add_val, 0);
5172 loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations),
5173 bl->initial_value, tem);
5175 if (loop_dump_stream)
5176 fprintf (loop_dump_stream,
5177 "Final biv value for %d, calculated.\n", bl->regno);
5179 return tem;
5183 /* Check to see if the biv is dead at all loop exits. */
5184 if (reg_dead_after_loop (loop, bl->biv->src_reg))
5186 if (loop_dump_stream)
5187 fprintf (loop_dump_stream,
5188 "Final biv value for %d, biv dead after loop exit.\n",
5189 bl->regno);
5191 return const0_rtx;
5194 return 0;
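/* E.g. for the counter of

	for (i = 0; i < 100; i++) ...

   with initial value 0, total increment 1 and 100 iterations, the insns
   emitted after the loop compute 0 + 100 * 1, i.e. 100.  */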
5197 /* Return nonzero if it is possible to eliminate the biv BL provided
5198 all givs are reduced. This is possible if either the reg is not
5199 used outside the loop, or we can compute what its final value will
5200 be. */
5202 static int
5203 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
5204 int threshold, int insn_count)
5206 /* For architectures with a decrement_and_branch_until_zero insn,
5207 don't do this if we put a REG_NONNEG note on the endtest for this
5208 biv. */
5210 #ifdef HAVE_decrement_and_branch_until_zero
5211 if (bl->nonneg)
5213 if (loop_dump_stream)
5214 fprintf (loop_dump_stream,
5215 "Cannot eliminate nonneg biv %d.\n", bl->regno);
5216 return 0;
5218 #endif
5220 /* Check that biv is used outside loop or if it has a final value.
5221 Compare against bl->init_insn rather than loop->start. We aren't
5222 concerned with any uses of the biv between init_insn and
5223 loop->start since these won't be affected by the value of the biv
5224 elsewhere in the function, so long as init_insn doesn't use the
5225 biv itself. */
5227 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
5228 && bl->init_insn
5229 && INSN_UID (bl->init_insn) < max_uid_for_loop
5230 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
5231 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
5232 || (bl->final_value = final_biv_value (loop, bl)))
5233 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
5235 if (loop_dump_stream)
5237 fprintf (loop_dump_stream,
5238 "Cannot eliminate biv %d.\n",
5239 bl->regno);
5240 fprintf (loop_dump_stream,
5241 "First use: insn %d, last use: insn %d.\n",
5242 REGNO_FIRST_UID (bl->regno),
5243 REGNO_LAST_UID (bl->regno));
5245 return 0;
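/* A sketch of why this pays off (hypothetical source): in

	for (i = 0; i < n; i++)
	  a[i] = 0;

   once the address giv derived from I is reduced to a pointer P, the exit
   test can be rewritten by maybe_eliminate_biv as a comparison of P
   against a precomputed bound, and the biv I disappears from the loop
   entirely.  */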
5249 /* Reduce each giv of BL that we have decided to reduce. */
5251 static void
5252 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
5254 struct induction *v;
5256 for (v = bl->giv; v; v = v->next_iv)
5258 struct induction *tv;
5259 if (! v->ignore && v->same == 0)
5261 int auto_inc_opt = 0;
5263 /* If the code for derived givs immediately below has already
5264 allocated a new_reg, we must keep it. */
5265 if (! v->new_reg)
5266 v->new_reg = gen_reg_rtx (v->mode);
5268 #ifdef AUTO_INC_DEC
5269 /* If the target has auto-increment addressing modes, and
5270 this is an address giv, then try to put the increment
5271 immediately after its use, so that flow can create an
5272 auto-increment addressing mode. */
5273 /* Don't do this for loops entered at the bottom, to avoid
5274 this invalid transformation:
5275 jmp L;        ->      jmp L;
5276 TOP:                  TOP:
5277 use giv                 use giv
5278 L:                      inc giv
5279 inc biv               L:
5280 test biv                test giv
5281 cbr TOP                 cbr TOP  */
5283 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
5284 && bl->biv->always_executed && ! bl->biv->maybe_multiple
5285 /* We don't handle reversed biv's because bl->biv->insn
5286 does not have a valid INSN_LUID. */
5287 && ! bl->reversed
5288 && v->always_executed && ! v->maybe_multiple
5289 && INSN_UID (v->insn) < max_uid_for_loop
5290 && !loop->top)
5292 /* If other giv's have been combined with this one, then
5293 this will work only if all uses of the other giv's occur
5294 before this giv's insn. This is difficult to check.
5296 We simplify this by looking for the common case where
5297 there is one DEST_REG giv, and this giv's insn is the
5298 last use of the dest_reg of that DEST_REG giv. If the
5299 increment occurs after the address giv, then we can
5300 perform the optimization. (Otherwise, the increment
5301 would have to go before other_giv, and we would not be
5302 able to combine it with the address giv to get an
5303 auto-inc address.) */
5304 if (v->combined_with)
5306 struct induction *other_giv = 0;
5308 for (tv = bl->giv; tv; tv = tv->next_iv)
5309 if (tv->same == v)
5311 if (other_giv)
5312 break;
5313 else
5314 other_giv = tv;
5316 if (! tv && other_giv
5317 && REGNO (other_giv->dest_reg) < max_reg_before_loop
5318 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
5319 == INSN_UID (v->insn))
5320 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
5321 auto_inc_opt = 1;
5323 /* Check for case where increment is before the address
5324 giv. Do this test in "loop order". */
5325 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
5326 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5327 || (INSN_LUID (bl->biv->insn)
5328 > INSN_LUID (loop->scan_start))))
5329 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5330 && (INSN_LUID (loop->scan_start)
5331 < INSN_LUID (bl->biv->insn))))
5332 auto_inc_opt = -1;
5333 else
5334 auto_inc_opt = 1;
5336 #ifdef HAVE_cc0
5338 rtx prev;
5340 /* We can't put an insn immediately after one setting
5341 cc0, or immediately before one using cc0. */
5342 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
5343 || (auto_inc_opt == -1
5344 && (prev = prev_nonnote_insn (v->insn)) != 0
5345 && INSN_P (prev)
5346 && sets_cc0_p (PATTERN (prev))))
5347 auto_inc_opt = 0;
5349 #endif
5351 if (auto_inc_opt)
5352 v->auto_inc_opt = 1;
5354 #endif
5356 /* For each place where the biv is incremented, add an insn
5357 to increment the new, reduced reg for the giv. */
5358 for (tv = bl->biv; tv; tv = tv->next_iv)
5360 rtx insert_before;
5362 /* Skip if location is the same as a previous one. */
5363 if (tv->same)
5364 continue;
5365 if (! auto_inc_opt)
5366 insert_before = NEXT_INSN (tv->insn);
5367 else if (auto_inc_opt == 1)
5368 insert_before = NEXT_INSN (v->insn);
5369 else
5370 insert_before = v->insn;
5372 if (tv->mult_val == const1_rtx)
5373 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5374 v->new_reg, v->new_reg,
5375 0, insert_before);
5376 else /* tv->mult_val == const0_rtx */
5377 /* A multiply is acceptable here
5378 since this is presumed to be seldom executed. */
5379 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5380 v->add_val, v->new_reg,
5381 0, insert_before);
5384 /* Add code at loop start to initialize giv's reduced reg. */
5386 loop_iv_add_mult_hoist (loop,
5387 extend_value_for_giv (v, bl->initial_value),
5388 v->mult_val, v->add_val, v->new_reg);
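/* A sketch of the overall effect of the reduction (hypothetical source):
   for

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the address giv a + 4*i (assuming 4-byte elements) gets a reduced
   register, initialized to a in the preheader by the
   loop_iv_add_mult_hoist call above and bumped by 4 next to each
   increment of the biv I, so no multiply remains in the loop body.  */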
5394 /* Check for givs whose first use is their definition and whose
5395 last use is the definition of another giv. If so, it is likely
5396 dead and should not be used to derive another giv nor to
5397 eliminate a biv. */
5399 static void
5400 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
5402 struct induction *v;
5404 for (v = bl->giv; v; v = v->next_iv)
5406 if (v->ignore
5407 || (v->same && v->same->ignore))
5408 continue;
5410 if (v->giv_type == DEST_REG
5411 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
5413 struct induction *v1;
5415 for (v1 = bl->giv; v1; v1 = v1->next_iv)
5416 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
5417 v->maybe_dead = 1;
5423 static void
5424 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
5426 struct induction *v;
5428 for (v = bl->giv; v; v = v->next_iv)
5430 if (v->same && v->same->ignore)
5431 v->ignore = 1;
5433 if (v->ignore)
5434 continue;
5436 /* Update expression if this was combined, in case other giv was
5437 replaced. */
5438 if (v->same)
5439 v->new_reg = replace_rtx (v->new_reg,
5440 v->same->dest_reg, v->same->new_reg);
5442 /* See if this register is known to be a pointer to something. If
5443 so, see if we can find the alignment. First see if there is a
5444 destination register that is a pointer. If so, this shares the
5445 alignment too. Next see if we can deduce anything from the
5446 computational information. If not, and this is a DEST_ADDR
5447 giv, at least we know that it's a pointer, though we don't know
5448 the alignment. */
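/* For instance, if the source pointer is known to be 8-byte aligned,
then adding a multiple of 8 (say 16) preserves that alignment, while
adding 4 does not, and we would have to fall back to alignment 0. */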
5449 if (REG_P (v->new_reg)
5450 && v->giv_type == DEST_REG
5451 && REG_POINTER (v->dest_reg))
5452 mark_reg_pointer (v->new_reg,
5453 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
5454 else if (REG_P (v->new_reg)
5455 && REG_POINTER (v->src_reg))
5457 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
5459 if (align == 0
5460 || GET_CODE (v->add_val) != CONST_INT
5461 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
5462 align = 0;
5464 mark_reg_pointer (v->new_reg, align);
5466 else if (REG_P (v->new_reg)
5467 && REG_P (v->add_val)
5468 && REG_POINTER (v->add_val))
5470 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
5472 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
5473 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
5474 align = 0;
5476 mark_reg_pointer (v->new_reg, align);
5478 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
5479 mark_reg_pointer (v->new_reg, 0);
5481 if (v->giv_type == DEST_ADDR)
5483 /* Store reduced reg as the address in the memref where we found
5484 this giv. */
5485 if (validate_change_maybe_volatile (v->insn, v->location,
5486 v->new_reg))
5487 /* Yay, it worked! */;
5488 /* Not replaceable; emit an insn to set the original
5489 giv reg from the reduced giv. */
5490 else if (REG_P (*v->location))
5491 loop_insn_emit_before (loop, 0, v->insn,
5492 gen_move_insn (*v->location,
5493 v->new_reg));
5494 else if (GET_CODE (*v->location) == PLUS
5495 && REG_P (XEXP (*v->location, 0))
5496 && CONSTANT_P (XEXP (*v->location, 1)))
5497 loop_insn_emit_before (loop, 0, v->insn,
5498 gen_move_insn (XEXP (*v->location, 0),
5499 gen_rtx_MINUS
5500 (GET_MODE (*v->location),
5501 v->new_reg,
5502 XEXP (*v->location, 1))));
5503 else
5505 /* If it wasn't a reg, create a pseudo and use that. */
5506 rtx reg, seq;
5507 start_sequence ();
5508 reg = force_reg (v->mode, *v->location);
5509 seq = get_insns ();
5510 end_sequence ();
5511 loop_insn_emit_before (loop, 0, v->insn, seq);
5512 if (!validate_change_maybe_volatile (v->insn, v->location, reg))
5513 gcc_unreachable ();
5516 else if (v->replaceable)
5518 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5520 else
5522 rtx original_insn = v->insn;
5523 rtx note;
5525 /* Not replaceable; emit an insn to set the original giv reg from
5526 the reduced giv, same as above. */
5527 v->insn = loop_insn_emit_after (loop, 0, original_insn,
5528 gen_move_insn (v->dest_reg,
5529 v->new_reg));
5531 /* The original insn may have a REG_EQUAL note. This note is
5532 now incorrect and may result in invalid substitutions later.
5533 The original insn is dead, but may be part of a libcall
5534 sequence, which doesn't seem worth the bother of handling. */
5535 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
5536 if (note)
5537 remove_note (original_insn, note);
5540 /* When a loop is reversed, givs which depend on the reversed
5541 biv, and which are live outside the loop, must be set to their
5542 correct final value. This insn is only needed if the giv is
5543 not replaceable. The correct final value is the same as the
5544 value that the giv starts the reversed loop with. */
5545 if (bl->reversed && ! v->replaceable)
5546 loop_iv_add_mult_sink (loop,
5547 extend_value_for_giv (v, bl->initial_value),
5548 v->mult_val, v->add_val, v->dest_reg);
5549 else if (v->final_value)
5550 loop_insn_sink_or_swim (loop,
5551 gen_load_of_final_value (v->dest_reg,
5552 v->final_value));
5554 if (loop_dump_stream)
5556 fprintf (loop_dump_stream, "giv at %d reduced to ",
5557 INSN_UID (v->insn));
5558 print_simple_rtl (loop_dump_stream, v->new_reg);
5559 fprintf (loop_dump_stream, "\n");
5565 static int
5566 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
5567 struct iv_class *bl, struct induction *v,
5568 rtx test_reg)
5570 int add_cost;
5571 int benefit;
5573 benefit = v->benefit;
5574 PUT_MODE (test_reg, v->mode);
5575 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
5576 test_reg, test_reg);
5578 /* Reduce benefit if not replaceable, since we will insert a
5579 move-insn to replace the insn that calculates this giv. Don't do
5580 this unless the giv is a user variable, since it will often be
5581 marked non-replaceable because of the duplication of the exit
5582 code outside the loop. In such a case, the copies we insert are
5583 dead and will be deleted. So they don't have a cost. Similar
5584 situations exist. */
5585 /* ??? The new final_[bg]iv_value code does a much better job of
5586 finding replaceable giv's, and hence this code may no longer be
5587 necessary. */
5588 if (! v->replaceable && ! bl->eliminable
5589 && REG_USERVAR_P (v->dest_reg))
5590 benefit -= copy_cost;
5592 /* Decrease the benefit to count the add-insns that we will insert
5593 to increment the reduced reg for the giv. ??? This can
5594 overestimate the run-time cost of the additional insns, e.g. if
5595 there are multiple basic blocks that increment the biv, but only
5596 one of these blocks is executed during each iteration. There is
5597 no good way to detect cases like this with the current structure
5598 of the loop optimizer. This code is more accurate for
5599 determining code size than run-time benefits. */
5600 benefit -= add_cost * bl->biv_count;
5602 /* Decide whether to strength-reduce this giv or to leave the code
5603 unchanged (recompute it from the biv each time it is used). This
5604 decision can be made independently for each giv. */
5606 #ifdef AUTO_INC_DEC
5607 /* Attempt to guess whether autoincrement will handle some of the
5608 new add insns; if so, increase BENEFIT (undo the subtraction of
5609 add_cost that was done above). */
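/* For instance, on a target with post-increment addressing, a
DEST_ADDR giv that steps through 4-byte elements (mult_val equal to
the GET_MODE_SIZE of the memory reference) lets the add be folded
into the access, much like *p++ in C, so the add insns counted
above would cost nothing. */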
5610 if (v->giv_type == DEST_ADDR
5611 /* Increasing the benefit is risky, since this is only a guess.
5612 Avoid increasing register pressure in cases where there would
5613 be no other benefit from reducing this giv. */
5614 && benefit > 0
5615 && GET_CODE (v->mult_val) == CONST_INT)
5617 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5619 if (HAVE_POST_INCREMENT
5620 && INTVAL (v->mult_val) == size)
5621 benefit += add_cost * bl->biv_count;
5622 else if (HAVE_PRE_INCREMENT
5623 && INTVAL (v->mult_val) == size)
5624 benefit += add_cost * bl->biv_count;
5625 else if (HAVE_POST_DECREMENT
5626 && -INTVAL (v->mult_val) == size)
5627 benefit += add_cost * bl->biv_count;
5628 else if (HAVE_PRE_DECREMENT
5629 && -INTVAL (v->mult_val) == size)
5630 benefit += add_cost * bl->biv_count;
5632 #endif
5634 return benefit;
5638 /* Free IV structures for LOOP. */
5640 static void
5641 loop_ivs_free (struct loop *loop)
5643 struct loop_ivs *ivs = LOOP_IVS (loop);
5644 struct iv_class *iv = ivs->list;
5646 free (ivs->regs);
5648 while (iv)
5650 struct iv_class *next = iv->next;
5651 struct induction *induction;
5652 struct induction *next_induction;
5654 for (induction = iv->biv; induction; induction = next_induction)
5656 next_induction = induction->next_iv;
5657 free (induction);
5659 for (induction = iv->giv; induction; induction = next_induction)
5661 next_induction = induction->next_iv;
5662 free (induction);
5665 free (iv);
5666 iv = next;
5670 /* Look back before LOOP->START for the insn that sets REG and return
5671 the equivalent constant if there is a REG_EQUAL note otherwise just
5672 the SET_SRC of REG. */
5674 static rtx
5675 loop_find_equiv_value (const struct loop *loop, rtx reg)
5677 rtx loop_start = loop->start;
5678 rtx insn, set;
5679 rtx ret;
5681 ret = reg;
5682 for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn))
5684 if (LABEL_P (insn))
5685 break;
5687 else if (INSN_P (insn) && reg_set_p (reg, insn))
5689 /* We found the last insn before the loop that sets the register.
5690 If it sets the entire register, and has a REG_EQUAL note,
5691 then use the value of the REG_EQUAL note. */
5692 if ((set = single_set (insn))
5693 && (SET_DEST (set) == reg))
5695 rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5697 /* Only use the REG_EQUAL note if it is a constant.
5698 Other things, divide in particular, will cause
5699 problems later if we use them. */
5700 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
5701 && CONSTANT_P (XEXP (note, 0)))
5702 ret = XEXP (note, 0);
5703 else
5704 ret = SET_SRC (set);
5706 /* We cannot do this if it changes between the
5707 assignment and loop start though. */
5708 if (modified_between_p (ret, insn, loop_start))
5709 ret = reg;
5711 break;
5714 return ret;
5717 /* Find and return register term common to both expressions OP0 and
5718 OP1 or NULL_RTX if no such term exists. Each expression must be a
5719 REG or a PLUS of a REG. */
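/* E.g., (plus (reg 100) (const_int 4)) and (reg 100) share the
common term (reg 100), whereas (reg 100) and (reg 101) share
nothing. */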
5721 static rtx
5722 find_common_reg_term (rtx op0, rtx op1)
5724 if ((REG_P (op0) || GET_CODE (op0) == PLUS)
5725 && (REG_P (op1) || GET_CODE (op1) == PLUS))
5727 rtx op00;
5728 rtx op01;
5729 rtx op10;
5730 rtx op11;
5732 if (GET_CODE (op0) == PLUS)
5733 op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
5734 else
5735 op01 = const0_rtx, op00 = op0;
5737 if (GET_CODE (op1) == PLUS)
5738 op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
5739 else
5740 op11 = const0_rtx, op10 = op1;
5742 /* Find and return common register term if present. */
5743 if (REG_P (op00) && (op00 == op10 || op00 == op11))
5744 return op00;
5745 else if (REG_P (op01) && (op01 == op10 || op01 == op11))
5746 return op01;
5749 /* No common register term found. */
5750 return NULL_RTX;
5753 /* Determine the loop iterator and calculate the number of loop
5754 iterations. Returns the exact number of loop iterations if it can
5755 be calculated, otherwise returns zero. */
5757 static unsigned HOST_WIDE_INT
5758 loop_iterations (struct loop *loop)
5760 struct loop_info *loop_info = LOOP_INFO (loop);
5761 struct loop_ivs *ivs = LOOP_IVS (loop);
5762 rtx comparison, comparison_value;
5763 rtx iteration_var, initial_value, increment, final_value;
5764 enum rtx_code comparison_code;
5765 HOST_WIDE_INT inc;
5766 unsigned HOST_WIDE_INT abs_inc;
5767 unsigned HOST_WIDE_INT abs_diff;
5768 int off_by_one;
5769 int increment_dir;
5770 int unsigned_p, compare_dir, final_larger;
5771 rtx last_loop_insn;
5772 struct iv_class *bl;
5774 loop_info->n_iterations = 0;
5775 loop_info->initial_value = 0;
5776 loop_info->initial_equiv_value = 0;
5777 loop_info->comparison_value = 0;
5778 loop_info->final_value = 0;
5779 loop_info->final_equiv_value = 0;
5780 loop_info->increment = 0;
5781 loop_info->iteration_var = 0;
5782 loop_info->iv = 0;
5784 /* We used to use prev_nonnote_insn here, but that fails because it might
5785 accidentally get the branch for a contained loop if the branch for this
5786 loop was deleted. We can only trust branches immediately before the
5787 loop_end. */
5788 last_loop_insn = PREV_INSN (loop->end);
5790 /* ??? We should probably try harder to find the jump insn
5791 at the end of the loop. The following code assumes that
5792 the last loop insn is a jump to the top of the loop. */
5793 if (!JUMP_P (last_loop_insn))
5795 if (loop_dump_stream)
5796 fprintf (loop_dump_stream,
5797 "Loop iterations: No final conditional branch found.\n");
5798 return 0;
5801 /* If there is more than a single jump to the top of the loop
5802 we cannot (easily) determine the iteration count. */
5803 if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1)
5805 if (loop_dump_stream)
5806 fprintf (loop_dump_stream,
5807 "Loop iterations: Loop has multiple back edges.\n");
5808 return 0;
5811 /* Find the iteration variable. If the last insn is a conditional
5812 branch, and the insn before tests a register value, make that the
5813 iteration variable. */
5815 comparison = get_condition_for_loop (loop, last_loop_insn);
5816 if (comparison == 0)
5818 if (loop_dump_stream)
5819 fprintf (loop_dump_stream,
5820 "Loop iterations: No final comparison found.\n");
5821 return 0;
5824 /* ??? Get_condition may switch position of induction variable and
5825 invariant register when it canonicalizes the comparison. */
5827 comparison_code = GET_CODE (comparison);
5828 iteration_var = XEXP (comparison, 0);
5829 comparison_value = XEXP (comparison, 1);
5831 if (!REG_P (iteration_var))
5833 if (loop_dump_stream)
5834 fprintf (loop_dump_stream,
5835 "Loop iterations: Comparison not against register.\n");
5836 return 0;
5839 /* The only new registers that are created before loop iterations
5840 are givs made from biv increments or registers created by
5841 load_mems. In the latter case, it is possible that try_copy_prop
5842 will propagate a new pseudo into the old iteration register but
5843 this will be marked by having the REG_USERVAR_P bit set. */
5845 gcc_assert ((unsigned) REGNO (iteration_var) < ivs->n_regs
5846 || REG_USERVAR_P (iteration_var));
5848 /* Determine the initial value of the iteration variable, and the amount
5849 that it is incremented each loop. Use the tables constructed by
5850 the strength reduction pass to calculate these values. */
5852 /* Clear the result values, in case no answer can be found. */
5853 initial_value = 0;
5854 increment = 0;
5856 /* The iteration variable can be either a giv or a biv. Check to see
5857 which it is, and compute the variable's initial value, and increment
5858 value if possible. */
5860 /* If this is a new register, can't handle it since we don't have any
5861 reg_iv_type entry for it. */
5862 if ((unsigned) REGNO (iteration_var) >= ivs->n_regs)
5864 if (loop_dump_stream)
5865 fprintf (loop_dump_stream,
5866 "Loop iterations: No reg_iv_type entry for iteration var.\n");
5867 return 0;
5870 /* Reject iteration variables larger than the host wide int size, since they
5871 could result in a number of iterations greater than the range of our
5872 `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
5873 else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
5874 > HOST_BITS_PER_WIDE_INT))
5876 if (loop_dump_stream)
5877 fprintf (loop_dump_stream,
5878 "Loop iterations: Iteration var rejected because mode too large.\n");
5879 return 0;
5881 else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
5883 if (loop_dump_stream)
5884 fprintf (loop_dump_stream,
5885 "Loop iterations: Iteration var not an integer.\n");
5886 return 0;
5889 /* Try swapping the comparison to identify a suitable iv. */
5890 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT
5891 && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT
5892 && REG_P (comparison_value)
5893 && REGNO (comparison_value) < ivs->n_regs)
5895 rtx temp = comparison_value;
5896 comparison_code = swap_condition (comparison_code);
5897 comparison_value = iteration_var;
5898 iteration_var = temp;
5901 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT)
5903 gcc_assert (REGNO (iteration_var) < ivs->n_regs);
5905 /* Grab initial value, only useful if it is a constant. */
5906 bl = REG_IV_CLASS (ivs, REGNO (iteration_var));
5907 initial_value = bl->initial_value;
5908 if (!bl->biv->always_executed || bl->biv->maybe_multiple)
5910 if (loop_dump_stream)
5911 fprintf (loop_dump_stream,
5912 "Loop iterations: Basic induction var not set once in each iteration.\n");
5913 return 0;
5916 increment = biv_total_increment (bl);
5918 else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT)
5920 HOST_WIDE_INT offset = 0;
5921 struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var));
5922 rtx biv_initial_value;
5924 gcc_assert (REGNO (v->src_reg) < ivs->n_regs);
5926 if (!v->always_executed || v->maybe_multiple)
5928 if (loop_dump_stream)
5929 fprintf (loop_dump_stream,
5930 "Loop iterations: General induction var not set once in each iteration.\n");
5931 return 0;
5934 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5936 /* Increment value is mult_val times the increment value of the biv. */
5938 increment = biv_total_increment (bl);
5939 if (increment)
5941 struct induction *biv_inc;
5943 increment = fold_rtx_mult_add (v->mult_val,
5944 extend_value_for_giv (v, increment),
5945 const0_rtx, v->mode);
5946 /* The caller assumes that one full increment has occurred at the
5947 first loop test. But that's not true when the biv is incremented
5948 after the giv is set (which is the usual case), e.g.:
5949 i = 6; do {;} while (i++ < 9).
5950 Therefore, we bias the initial value by subtracting the amount of
5951 the increment that occurs between the giv set and the giv test. */
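/* In the example above, with one increment of 1 occurring between
the giv set and the giv test, the bias would be -1. */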
5952 for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv)
5954 if (loop_insn_first_p (v->insn, biv_inc->insn))
5956 if (REG_P (biv_inc->add_val))
5958 if (loop_dump_stream)
5959 fprintf (loop_dump_stream,
5960 "Loop iterations: Basic induction var add_val is REG %d.\n",
5961 REGNO (biv_inc->add_val));
5962 return 0;
5965 /* If we have already counted it, skip it. */
5966 if (biv_inc->same)
5967 continue;
5969 offset -= INTVAL (biv_inc->add_val);
5973 if (loop_dump_stream)
5974 fprintf (loop_dump_stream,
5975 "Loop iterations: Giv iterator, initial value bias %ld.\n",
5976 (long) offset);
5978 /* Initial value is mult_val times the biv's initial value plus
5979 add_val. Only useful if it is a constant. */
5980 biv_initial_value = extend_value_for_giv (v, bl->initial_value);
5981 initial_value
5982 = fold_rtx_mult_add (v->mult_val,
5983 plus_constant (biv_initial_value, offset),
5984 v->add_val, v->mode);
5986 else
5988 if (loop_dump_stream)
5989 fprintf (loop_dump_stream,
5990 "Loop iterations: Not basic or general induction var.\n");
5991 return 0;
5994 if (initial_value == 0)
5995 return 0;
5997 unsigned_p = 0;
5998 off_by_one = 0;
5999 switch (comparison_code)
6001 case LEU:
6002 unsigned_p = 1;
6003 case LE:
6004 compare_dir = 1;
6005 off_by_one = 1;
6006 break;
6007 case GEU:
6008 unsigned_p = 1;
6009 case GE:
6010 compare_dir = -1;
6011 off_by_one = -1;
6012 break;
6013 case EQ:
6014 /* Cannot determine loop iterations with this case. */
6015 compare_dir = 0;
6016 break;
6017 case LTU:
6018 unsigned_p = 1;
6019 case LT:
6020 compare_dir = 1;
6021 break;
6022 case GTU:
6023 unsigned_p = 1;
6024 case GT:
6025 compare_dir = -1;
6026 break;
6027 case NE:
6028 compare_dir = 0;
6029 break;
6030 default:
6031 gcc_unreachable ();
6034 /* If the comparison value is an invariant register, then try to find
6035 its value from the insns before the start of the loop. */
6037 final_value = comparison_value;
6038 if (REG_P (comparison_value)
6039 && loop_invariant_p (loop, comparison_value))
6041 final_value = loop_find_equiv_value (loop, comparison_value);
6043 /* If we don't get an invariant final value, we are better
6044 off with the original register. */
6045 if (! loop_invariant_p (loop, final_value))
6046 final_value = comparison_value;
6049 /* Calculate the approximate final value of the induction variable
6050 (on the last successful iteration). The exact final value
6051 depends on the branch operator, and increment sign. It will be
6052 wrong if the iteration variable is not incremented by one each
6053 time through the loop and (comparison_value + off_by_one -
6054 initial_value) % increment != 0.
6055 ??? Note that the final_value may overflow and thus final_larger
6056 will be bogus. A potentially infinite loop will be classified
6057 as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */
6058 if (off_by_one)
6059 final_value = plus_constant (final_value, off_by_one);
6061 /* Save the calculated values describing this loop's bounds, in case
6062 precondition_loop_p will need them later. These values can not be
6063 recalculated inside precondition_loop_p because strength reduction
6064 optimizations may obscure the loop's structure.
6066 These values are only required by precondition_loop_p and insert_bct
6067 whenever the number of iterations cannot be computed at compile time.
6068 Only the difference between final_value and initial_value is
6069 important. Note that final_value is only approximate. */
6070 loop_info->initial_value = initial_value;
6071 loop_info->comparison_value = comparison_value;
6072 loop_info->final_value = plus_constant (comparison_value, off_by_one);
6073 loop_info->increment = increment;
6074 loop_info->iteration_var = iteration_var;
6075 loop_info->comparison_code = comparison_code;
6076 loop_info->iv = bl;
6078 /* Try to determine the iteration count for loops such
6079 as (for i = init; i < init + const; i++). When running the
6080 loop optimization twice, the first pass often converts simple
6081 loops into this form. */
6083 if (REG_P (initial_value))
6085 rtx reg1;
6086 rtx reg2;
6087 rtx const2;
6089 reg1 = initial_value;
6090 if (GET_CODE (final_value) == PLUS)
6091 reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
6092 else
6093 reg2 = final_value, const2 = const0_rtx;
6095 /* Check for initial_value = reg1, final_value = reg2 + const2,
6096 where reg1 != reg2. */
6097 if (REG_P (reg2) && reg2 != reg1)
6099 rtx temp;
6101 /* Find what reg1 is equivalent to. Hopefully it will
6102 either be reg2 or reg2 plus a constant. */
6103 temp = loop_find_equiv_value (loop, reg1);
6105 if (find_common_reg_term (temp, reg2))
6106 initial_value = temp;
6107 else if (loop_invariant_p (loop, reg2))
6109 /* Find what reg2 is equivalent to. Hopefully it will
6110 either be reg1 or reg1 plus a constant. Let's ignore
6111 the latter case for now since it is not so common. */
6112 temp = loop_find_equiv_value (loop, reg2);
6114 if (temp == loop_info->iteration_var)
6115 temp = initial_value;
6116 if (temp == reg1)
6117 final_value = (const2 == const0_rtx)
6118 ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
6123 loop_info->initial_equiv_value = initial_value;
6124 loop_info->final_equiv_value = final_value;
6126 /* For EQ comparison loops, we don't have a valid final value.
6127 Check this now so that we won't leave an invalid value if we
6128 return early for any other reason. */
6129 if (comparison_code == EQ)
6130 loop_info->final_equiv_value = loop_info->final_value = 0;
6132 if (increment == 0)
6134 if (loop_dump_stream)
6135 fprintf (loop_dump_stream,
6136 "Loop iterations: Increment value can't be calculated.\n");
6137 return 0;
6140 if (GET_CODE (increment) != CONST_INT)
6142 /* If we have a REG, check to see if REG holds a constant value. */
6143 /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't
6144 clear if it is worthwhile to try to handle such RTL. */
6145 if (REG_P (increment) || GET_CODE (increment) == SUBREG)
6146 increment = loop_find_equiv_value (loop, increment);
6148 if (GET_CODE (increment) != CONST_INT)
6150 if (loop_dump_stream)
6152 fprintf (loop_dump_stream,
6153 "Loop iterations: Increment value not constant ");
6154 print_simple_rtl (loop_dump_stream, increment);
6155 fprintf (loop_dump_stream, ".\n");
6157 return 0;
6159 loop_info->increment = increment;
6162 if (GET_CODE (initial_value) != CONST_INT)
6164 if (loop_dump_stream)
6166 fprintf (loop_dump_stream,
6167 "Loop iterations: Initial value not constant ");
6168 print_simple_rtl (loop_dump_stream, initial_value);
6169 fprintf (loop_dump_stream, ".\n");
6171 return 0;
6173 else if (GET_CODE (final_value) != CONST_INT)
6175 if (loop_dump_stream)
6177 fprintf (loop_dump_stream,
6178 "Loop iterations: Final value not constant ");
6179 print_simple_rtl (loop_dump_stream, final_value);
6180 fprintf (loop_dump_stream, ".\n");
6182 return 0;
6184 else if (comparison_code == EQ)
6186 rtx inc_once;
6188 if (loop_dump_stream)
6189 fprintf (loop_dump_stream, "Loop iterations: EQ comparison loop.\n");
6191 inc_once = gen_int_mode (INTVAL (initial_value) + INTVAL (increment),
6192 GET_MODE (iteration_var));
6194 if (inc_once == final_value)
6196 /* The iterator value once through the loop is equal to the
6197 comparison value. Either we have an infinite loop, or
6198 we'll loop twice. */
6199 if (increment == const0_rtx)
6200 return 0;
6201 loop_info->n_iterations = 2;
6203 else
6204 loop_info->n_iterations = 1;
6206 if (GET_CODE (loop_info->initial_value) == CONST_INT)
6207 loop_info->final_value
6208 = gen_int_mode ((INTVAL (loop_info->initial_value)
6209 + loop_info->n_iterations * INTVAL (increment)),
6210 GET_MODE (iteration_var));
6211 else
6212 loop_info->final_value
6213 = plus_constant (loop_info->initial_value,
6214 loop_info->n_iterations * INTVAL (increment));
6215 loop_info->final_equiv_value
6216 = gen_int_mode ((INTVAL (initial_value)
6217 + loop_info->n_iterations * INTVAL (increment)),
6218 GET_MODE (iteration_var));
6219 return loop_info->n_iterations;
6222 /* Final_larger is 1 if the final value is larger, 0 if they are equal, otherwise -1. */
6223 if (unsigned_p)
6224 final_larger
6225 = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6226 > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
6227 - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6228 < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
6229 else
6230 final_larger = (INTVAL (final_value) > INTVAL (initial_value))
6231 - (INTVAL (final_value) < INTVAL (initial_value));
6233 if (INTVAL (increment) > 0)
6234 increment_dir = 1;
6235 else if (INTVAL (increment) == 0)
6236 increment_dir = 0;
6237 else
6238 increment_dir = -1;
6240 /* There are 27 different cases: compare_dir = -1, 0, 1;
6241 final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
6242 There are 4 normal cases, 4 reverse cases (where the iteration variable
6243 will overflow before the loop exits), 4 infinite loop cases, and 15
6244 immediate exit (0 or 1 iteration depending on loop type) cases.
6245 Only try to optimize the normal cases. */
6247 /* (compare_dir/final_larger/increment_dir)
6248 Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
6249 Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
6250 Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
6251 Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
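/* E.g., for (i = 0; i < 10; i++) gives (1/1/1), a normal case,
whereas for (i = 0; i < 10; i--) gives (1/1/-1), a reverse case
that exits only after the iteration variable wraps around. */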
6253 /* ?? Since the meaning of reverse loops (where the iteration variable
6254 will overflow before the loop exits) is undefined, we could
6255 eliminate all of these special checks, and just always assume
6256 the loops are normal/immediate/infinite. Note that this means
6257 the sign of increment_dir would not have to be known. Also,
6258 since it does not really hurt if immediate exit loops or infinite loops
6259 are optimized, those cases could be ignored as well, and hence all
6260 loops could be optimized.
6262 According to the ANSI C spec, the result of the reverse loop case is
6263 undefined, because the action on overflow is undefined.
6265 See also the special test for NE loops below. */
6267 if (final_larger == increment_dir && final_larger != 0
6268 && (final_larger == compare_dir || compare_dir == 0))
6269 /* Normal case. */
6270 ;
6271 else
6273 if (loop_dump_stream)
6274 fprintf (loop_dump_stream, "Loop iterations: Not normal loop.\n");
6275 return 0;
6278 /* Calculate the number of iterations, final_value is only an approximation,
6279 so correct for that. Note that abs_diff and n_iterations are
6280 unsigned, because they can be as large as 2^n - 1. */
6282 inc = INTVAL (increment);
6283 gcc_assert (inc);
6284 if (inc > 0)
6286 abs_diff = INTVAL (final_value) - INTVAL (initial_value);
6287 abs_inc = inc;
6289 else
6291 abs_diff = INTVAL (initial_value) - INTVAL (final_value);
6292 abs_inc = -inc;
6295 /* Given that iteration_var is going to iterate over its own mode,
6296 not HOST_WIDE_INT, disregard higher bits that might have come
6297 into the picture due to sign extension of initial and final
6298 values. */
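/* The mask is built with two shifts because a single shift by the
full GET_MODE_BITSIZE would be undefined when the mode is exactly
HOST_BITS_PER_WIDE_INT wide. For a 32-bit mode this yields
0xffffffff. */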
6299 abs_diff &= ((unsigned HOST_WIDE_INT) 1
6300 << (GET_MODE_BITSIZE (GET_MODE (iteration_var)) - 1)
6301 << 1) - 1;
6303 /* For NE tests, make sure that the iteration variable won't miss
6304 the final value. If abs_diff mod abs_incr is not zero, then the
6305 iteration variable will overflow before the loop exits, and we
6306 can not calculate the number of iterations. */
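/* E.g., for (i = 0; i != 7; i += 2) steps past 7 without ever
testing equal: abs_diff % abs_inc == 7 % 2 == 1, so we give up
rather than return a bogus count. */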
6307 if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
6308 return 0;
6310 /* Note that the number of iterations could be calculated using
6311 (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
6312 handle potential overflow of the summation. */
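/* E.g., abs_diff == 10 and abs_inc == 3 give 10 / 3 + 1 == 4
iterations, rounding up because the last step overshoots. */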
6313 loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
6314 return loop_info->n_iterations;
6317 /* Perform strength reduction and induction variable elimination.
6319 Pseudo registers created during this function will be beyond the
6320 last valid index in several tables including
6321 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
6322 problem here, because the added registers cannot be givs outside of
6323 their loop, and hence will never be reconsidered. But scan_loop
6324 must check regnos to make sure they are in bounds. */
6326 static void
6327 strength_reduce (struct loop *loop, int flags)
6329 struct loop_info *loop_info = LOOP_INFO (loop);
6330 struct loop_regs *regs = LOOP_REGS (loop);
6331 struct loop_ivs *ivs = LOOP_IVS (loop);
6332 rtx p;
6333 /* Temporary list pointer for traversing ivs->list. */
6334 struct iv_class *bl;
6335 /* Ratio of extra register life span we can justify
6336 for saving an instruction. More if loop doesn't call subroutines
6337 since in that case saving an insn makes more difference
6338 and more registers are available. */
6339 /* ??? Could set this to the last value of threshold in move_movables. */
6340 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
6341 /* Map of pseudo-register replacements. */
6342 rtx *reg_map = NULL;
6343 int reg_map_size;
6344 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
6345 int insn_count = count_insns_in_loop (loop);
6347 addr_placeholder = gen_reg_rtx (Pmode);
6349 ivs->n_regs = max_reg_before_loop;
6350 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
6352 /* Find all BIVs in loop. */
6353 loop_bivs_find (loop);
6355 /* Exit if there are no bivs. */
6356 if (! ivs->list)
6358 loop_ivs_free (loop);
6359 return;
6362 /* Determine how BIVS are initialized by looking through the pre-header
6363 extended basic block. */
6364 loop_bivs_init_find (loop);
6366 /* Look at each biv and see if we can say anything better about its
6367 initial value from any initializing insns set up above. */
6368 loop_bivs_check (loop);
6370 /* Search the loop for general induction variables. */
6371 loop_givs_find (loop);
6373 /* Try to calculate and save the number of loop iterations. This is
6374 set to zero if the actual number can not be calculated. This must
6375 be called after all giv's have been identified, since otherwise it may
6376 fail if the iteration variable is a giv. */
6377 loop_iterations (loop);
6379 #ifdef HAVE_prefetch
6380 if (flags & LOOP_PREFETCH)
6381 emit_prefetch_instructions (loop);
6382 #endif
6384 /* Now for each giv for which we still don't know whether or not it is
6385 replaceable, check to see if it is replaceable because its final value
6386 can be calculated. This must be done after loop_iterations is called,
6387 so that final_giv_value will work correctly. */
6388 loop_givs_check (loop);
6390 /* Try to prove that the loop counter variable (if any) is always
6391 nonnegative; if so, record that fact with a REG_NONNEG note
6392 so that "decrement and branch until zero" insn can be used. */
6393 check_dbra_loop (loop, insn_count);
6395 /* Create reg_map to hold substitutions for replaceable giv regs.
6396 Some givs might have been made from biv increments, so look at
6397 ivs->reg_iv_type for a suitable size. */
6398 reg_map_size = ivs->n_regs;
6399 reg_map = xcalloc (reg_map_size, sizeof (rtx));
6401 /* Examine each iv class for feasibility of strength reduction/induction
6402 variable elimination. */
6404 for (bl = ivs->list; bl; bl = bl->next)
6406 struct induction *v;
6407 int benefit;
6409 /* Test whether it will be possible to eliminate this biv
6410 provided all givs are reduced. */
6411 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
6413 /* This will be true at the end, if all givs which depend on this
6414 biv have been strength reduced.
6415 We can't (currently) eliminate the biv unless this is so. */
6416 bl->all_reduced = 1;
6418 /* Check each extension dependent giv in this class to see if its
6419 root biv is safe from wrapping in the interior mode. */
6420 check_ext_dependent_givs (loop, bl);
6422 /* Combine all giv's for this iv_class. */
6423 combine_givs (regs, bl);
6425 for (v = bl->giv; v; v = v->next_iv)
6427 struct induction *tv;
6429 if (v->ignore || v->same)
6430 continue;
6432 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
6434 /* If an insn is not to be strength reduced, then set its ignore
6435 flag, and clear bl->all_reduced. */
6437 /* A giv that depends on a reversed biv must be reduced if it is
6438 used after the loop exit, otherwise, it would have the wrong
6439 value after the loop exit. To make it simple, just reduce all
6440 of such giv's whether or not we know they are used after the loop
6441 exit. */
6443 if (v->lifetime * threshold * benefit < insn_count
6444 && ! bl->reversed)
6446 if (loop_dump_stream)
6447 fprintf (loop_dump_stream,
6448 "giv of insn %d not worth while, %d vs %d.\n",
6449 INSN_UID (v->insn),
6450 v->lifetime * threshold * benefit, insn_count);
6451 v->ignore = 1;
6452 bl->all_reduced = 0;
6454 else
6456 /* Check that we can increment the reduced giv without a
6457 multiply insn. If not, reject it. */
6459 for (tv = bl->biv; tv; tv = tv->next_iv)
6460 if (tv->mult_val == const1_rtx
6461 && ! product_cheap_p (tv->add_val, v->mult_val))
6463 if (loop_dump_stream)
6464 fprintf (loop_dump_stream,
6465 "giv of insn %d: would need a multiply.\n",
6466 INSN_UID (v->insn));
6467 v->ignore = 1;
6468 bl->all_reduced = 0;
6469 break;
6474 /* Check for givs whose first use is their definition and whose
6475 last use is the definition of another giv. If so, it is likely
6476 dead and should not be used to derive another giv nor to
6477 eliminate a biv. */
6478 loop_givs_dead_check (loop, bl);
6480 /* Reduce each giv that we decided to reduce. */
6481 loop_givs_reduce (loop, bl);
6483 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
6484 as not reduced.
6486 For each giv register that can be reduced now: if replaceable,
6487 substitute reduced reg wherever the old giv occurs;
6488 else add new move insn "giv_reg = reduced_reg". */
6489 loop_givs_rescan (loop, bl, reg_map);
6491 /* All the givs based on the biv bl have been reduced if they
6492 merit it. */
6494 /* For each giv not marked as maybe dead that has been combined with a
6495 second giv, clear any "maybe dead" mark on that second giv.
6496 v->new_reg will either be or refer to the register of the giv it
6497 combined with.
6499 Doing this clearing avoids problems in biv elimination where
6500 a giv's new_reg is a complex value that can't be put in the
6501 insn but the giv combined with (with a reg as new_reg) is
6502 marked maybe_dead. Since the register will be used in either
6503 case, we'd prefer it be used from the simpler giv. */
6505 for (v = bl->giv; v; v = v->next_iv)
6506 if (! v->maybe_dead && v->same)
6507 v->same->maybe_dead = 0;
6509 /* Try to eliminate the biv, if it is a candidate.
6510 This won't work if ! bl->all_reduced,
6511 since the givs we planned to use might not have been reduced.
6513 We have to be careful that we didn't initially think we could
6514 eliminate this biv because of a giv that we now think may be
6515 dead and shouldn't be used as a biv replacement.
6517 Also, there is the possibility that we may have a giv that looks
6518 like it can be used to eliminate a biv, but the resulting insn
6519 isn't valid. This can happen, for example, on the 88k, where a
6520 JUMP_INSN can compare a register only with zero. Attempts to
6521 replace it with a compare with a constant will fail.
6523 Note that in cases where this call fails, we may have replaced some
6524 of the occurrences of the biv with a giv, but no harm was done in
6525 doing so in the rare cases where it can occur. */
6527 if (bl->all_reduced == 1 && bl->eliminable
6528 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
6530 /* ?? If we created a new test to bypass the loop entirely,
6531 or otherwise drop straight in, based on this test, then
6532 we might want to rewrite it also. This way some later
6533 pass has more hope of removing the initialization of this
6534 biv entirely. */
6536 /* If final_value != 0, then the biv may be used after loop end
6537 and we must emit an insn to set it just in case.
6539 Reversed bivs already have an insn after the loop setting their
6540 value, so we don't need another one. We can't calculate the
6541 proper final value for such a biv here anyways. */
6542 if (bl->final_value && ! bl->reversed)
6543 loop_insn_sink_or_swim (loop,
6544 gen_load_of_final_value (bl->biv->dest_reg,
6545 bl->final_value));
6547 if (loop_dump_stream)
6548 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
6549 bl->regno);
6551 /* See above note wrt final_value. But since we couldn't eliminate
6552 the biv, we must set the value after the loop instead of before. */
6553 else if (bl->final_value && ! bl->reversed)
6554 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
6555 bl->final_value));
6558 /* Go through all the instructions in the loop, making all the
6559 register substitutions scheduled in REG_MAP. */
6561 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
6562 if (INSN_P (p))
6564 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
6565 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
6566 INSN_CODE (p) = -1;
6569 if (loop_dump_stream)
6570 fprintf (loop_dump_stream, "\n");
6572 loop_ivs_free (loop);
6573 if (reg_map)
6574 free (reg_map);
6577 /* Record all basic induction variables calculated in the insn. */
6578 static rtx
6579 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
6580 int maybe_multiple)
6582 struct loop_ivs *ivs = LOOP_IVS (loop);
6583 rtx set;
6584 rtx dest_reg;
6585 rtx inc_val;
6586 rtx mult_val;
6587 rtx *location;
6589 if (NONJUMP_INSN_P (p)
6590 && (set = single_set (p))
6591 && REG_P (SET_DEST (set)))
6593 dest_reg = SET_DEST (set);
6594 if (REGNO (dest_reg) < max_reg_before_loop
6595 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
6596 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
6598 if (basic_induction_var (loop, SET_SRC (set),
6599 GET_MODE (SET_SRC (set)),
6600 dest_reg, p, &inc_val, &mult_val,
6601 &location))
6603 /* It is a possible basic induction variable.
6604 Create and initialize an induction structure for it. */
6606 struct induction *v = xmalloc (sizeof (struct induction));
6608 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
6609 not_every_iteration, maybe_multiple);
6610 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
6612 else if (REGNO (dest_reg) < ivs->n_regs)
6613 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
6616 return p;
6619 /* Record all givs calculated in the insn.
6620 A register is a giv if: it is only set once, it is a function of a
6621 biv and a constant (or invariant), and it is not a biv. */
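/* E.g., given a biv i, an insn computing j = 4 * i + 10 makes j a
giv with mult_val 4 and add_val 10, provided this is the only
place j is set. */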
6622 static rtx
6623 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
6624 int maybe_multiple)
6626 struct loop_regs *regs = LOOP_REGS (loop);
6628 rtx set;
6629 /* Look for a general induction variable in a register. */
6630 if (NONJUMP_INSN_P (p)
6631 && (set = single_set (p))
6632 && REG_P (SET_DEST (set))
6633 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
6635 rtx src_reg;
6636 rtx dest_reg;
6637 rtx add_val;
6638 rtx mult_val;
6639 rtx ext_val;
6640 int benefit;
6641 rtx regnote = 0;
6642 rtx last_consec_insn;
6644 dest_reg = SET_DEST (set);
6645 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
6646 return p;
6648 if (/* SET_SRC is a giv. */
6649 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
6650 &mult_val, &ext_val, 0, &benefit, VOIDmode)
6651 /* Equivalent expression is a giv. */
6652 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
6653 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
6654 &add_val, &mult_val, &ext_val, 0,
6655 &benefit, VOIDmode)))
6656 /* Don't try to handle any regs made by loop optimization.
6657 We have nothing on them in regno_first_uid, etc. */
6658 && REGNO (dest_reg) < max_reg_before_loop
6659 /* Don't recognize a BASIC_INDUCT_VAR here. */
6660 && dest_reg != src_reg
6661 /* This must be the only place where the register is set. */
6662 && (regs->array[REGNO (dest_reg)].n_times_set == 1
6663 /* or all sets must be consecutive and make a giv. */
6664 || (benefit = consec_sets_giv (loop, benefit, p,
6665 src_reg, dest_reg,
6666 &add_val, &mult_val, &ext_val,
6667 &last_consec_insn))))
6669 struct induction *v = xmalloc (sizeof (struct induction));
6671 /* If this is a library call, increase benefit. */
6672 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6673 benefit += libcall_benefit (p);
6675 /* Skip the consecutive insns, if there are any. */
6676 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
6677 p = last_consec_insn;
6679 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
6680 ext_val, benefit, DEST_REG, not_every_iteration,
6681 maybe_multiple, (rtx*) 0);
6686 /* Look for givs which are memory addresses. */
6687 if (NONJUMP_INSN_P (p))
6688 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
6689 maybe_multiple);
6691 /* Update the status of whether giv can derive other givs. This can
6692 change when we pass a label or an insn that updates a biv. */
6693 if (INSN_P (p) || LABEL_P (p))
6694 update_giv_derive (loop, p);
6695 return p;
6698 /* Return 1 if X is a valid source for an initial value (or as value being
6699 compared against in an initial test).
6701 X must be either a register or constant and must not be clobbered between
6702 the current insn and the start of the loop.
6704 INSN is the insn containing X. */
6706 static int
6707 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
6709 if (CONSTANT_P (x))
6710 return 1;
6712 /* Only consider pseudos we know about, initialized in insns whose luids
6713 we know. */
6714 if (!REG_P (x)
6715 || REGNO (x) >= max_reg_before_loop)
6716 return 0;
6718 /* Don't use call-clobbered registers across a call which clobbers them. On
6719 some machines, don't use any hard registers at all. */
6720 if (REGNO (x) < FIRST_PSEUDO_REGISTER
6721 && (SMALL_REGISTER_CLASSES
6722 || (call_used_regs[REGNO (x)] && call_seen)))
6723 return 0;
6725 /* Don't use registers that have been clobbered before the start of the
6726 loop. */
6727 if (reg_set_between_p (x, insn, loop_start))
6728 return 0;
6730 return 1;
6733 /* Scan X for memory refs and check each memory address
6734 as a possible giv. INSN is the insn whose pattern X comes from.
6735 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
6736 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
6737 more than once in each loop iteration. */
6739 static void
6740 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
6741 int not_every_iteration, int maybe_multiple)
6743 int i, j;
6744 enum rtx_code code;
6745 const char *fmt;
6747 if (x == 0)
6748 return;
6750 code = GET_CODE (x);
6751 switch (code)
6753 case REG:
6754 case CONST_INT:
6755 case CONST:
6756 case CONST_DOUBLE:
6757 case SYMBOL_REF:
6758 case LABEL_REF:
6759 case PC:
6760 case CC0:
6761 case ADDR_VEC:
6762 case ADDR_DIFF_VEC:
6763 case USE:
6764 case CLOBBER:
6765 return;
6767 case MEM:
6769 rtx src_reg;
6770 rtx add_val;
6771 rtx mult_val;
6772 rtx ext_val;
6773 int benefit;
6775 /* This code used to disable creating GIVs with mult_val == 1 and
6776 add_val == 0. However, this leads to lost optimizations when
6777 it comes time to combine a set of related DEST_ADDR GIVs, since
6778 this one would not be seen. */
6780 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
6781 &mult_val, &ext_val, 1, &benefit,
6782 GET_MODE (x)))
6784 /* Found one; record it. */
6785 struct induction *v = xmalloc (sizeof (struct induction));
6787 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
6788 add_val, ext_val, benefit, DEST_ADDR,
6789 not_every_iteration, maybe_multiple, &XEXP (x, 0));
6791 v->mem = x;
6794 return;
6796 default:
6797 break;
6800 /* Recursively scan the subexpressions for other mem refs. */
6802 fmt = GET_RTX_FORMAT (code);
6803 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6804 if (fmt[i] == 'e')
6805 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
6806 maybe_multiple);
6807 else if (fmt[i] == 'E')
6808 for (j = 0; j < XVECLEN (x, i); j++)
6809 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
6810 maybe_multiple);
6813 /* Fill in the data about one biv update.
6814 V is the `struct induction' in which we record the biv. (It is
6815 allocated by the caller, with xmalloc.)
6816 INSN is the insn that sets it.
6817 DEST_REG is the biv's reg.
6819 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
6820 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
6821 being set to INC_VAL.
6823 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
6824 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
6825 can be executed more than once per iteration. If MAYBE_MULTIPLE
6826 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
6827 executed exactly once per iteration. */
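/* E.g., i = i + 4 is recorded with MULT_VAL == const1_rtx and
INC_VAL == (const_int 4), whereas an assignment i = 10 inside the
loop would be recorded with MULT_VAL == const0_rtx and
INC_VAL == (const_int 10). */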
6829 static void
6830 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
6831 rtx inc_val, rtx mult_val, rtx *location,
6832 int not_every_iteration, int maybe_multiple)
6834 struct loop_ivs *ivs = LOOP_IVS (loop);
6835 struct iv_class *bl;
6837 v->insn = insn;
6838 v->src_reg = dest_reg;
6839 v->dest_reg = dest_reg;
6840 v->mult_val = mult_val;
6841 v->add_val = inc_val;
6842 v->ext_dependent = NULL_RTX;
6843 v->location = location;
6844 v->mode = GET_MODE (dest_reg);
6845 v->always_computable = ! not_every_iteration;
6846 v->always_executed = ! not_every_iteration;
6847 v->maybe_multiple = maybe_multiple;
6848 v->same = 0;
6850 /* Add this to the reg's iv_class, creating a class
6851 if this is the first incrementation of the reg. */
6853 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
6854 if (bl == 0)
6856 /* Create and initialize new iv_class. */
6858 bl = xmalloc (sizeof (struct iv_class));
6860 bl->regno = REGNO (dest_reg);
6861 bl->biv = 0;
6862 bl->giv = 0;
6863 bl->biv_count = 0;
6864 bl->giv_count = 0;
6866 /* Set initial value to the reg itself. */
6867 bl->initial_value = dest_reg;
6868 bl->final_value = 0;
6869 /* We haven't seen the initializing insn yet. */
6870 bl->init_insn = 0;
6871 bl->init_set = 0;
6872 bl->initial_test = 0;
6873 bl->incremented = 0;
6874 bl->eliminable = 0;
6875 bl->nonneg = 0;
6876 bl->reversed = 0;
6877 bl->total_benefit = 0;
6879 /* Add this class to ivs->list. */
6880 bl->next = ivs->list;
6881 ivs->list = bl;
6883 /* Put it in the array of biv register classes. */
6884 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
6886 else
6888 /* Check if location is the same as a previous one. */
6889 struct induction *induction;
6890 for (induction = bl->biv; induction; induction = induction->next_iv)
6891 if (location == induction->location)
6893 v->same = induction;
6894 break;
6898 /* Update IV_CLASS entry for this biv. */
6899 v->next_iv = bl->biv;
6900 bl->biv = v;
6901 bl->biv_count++;
6902 if (mult_val == const1_rtx)
6903 bl->incremented = 1;
6905 if (loop_dump_stream)
6906 loop_biv_dump (v, loop_dump_stream, 0);
6909 /* Fill in the data about one giv.
6910 V is the `struct induction' in which we record the giv. (It is
6911 allocated by the caller, with xmalloc.)
6912 INSN is the insn that sets it.
6913 BENEFIT estimates the savings from deleting this insn.
6914 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
6915 into a register or is used as a memory address.
6917 SRC_REG is the biv reg which the giv is computed from.
6918 DEST_REG is the giv's reg (if the giv is stored in a reg).
6919 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
6920 LOCATION points to the place where this giv's value appears in INSN. */
6922 static void
6923 record_giv (const struct loop *loop, struct induction *v, rtx insn,
6924 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
6925 rtx ext_val, int benefit, enum g_types type,
6926 int not_every_iteration, int maybe_multiple, rtx *location)
6928 struct loop_ivs *ivs = LOOP_IVS (loop);
6929 struct induction *b;
6930 struct iv_class *bl;
6931 rtx set = single_set (insn);
6932 rtx temp;
6934 /* Attempt to prove constantness of the values. Don't let simplify_rtx
6935 undo the MULT canonicalization that we performed earlier. */
6936 temp = simplify_rtx (add_val);
6937 if (temp
6938 && ! (GET_CODE (add_val) == MULT
6939 && GET_CODE (temp) == ASHIFT))
6940 add_val = temp;
6942 v->insn = insn;
6943 v->src_reg = src_reg;
6944 v->giv_type = type;
6945 v->dest_reg = dest_reg;
6946 v->mult_val = mult_val;
6947 v->add_val = add_val;
6948 v->ext_dependent = ext_val;
6949 v->benefit = benefit;
6950 v->location = location;
6951 v->cant_derive = 0;
6952 v->combined_with = 0;
6953 v->maybe_multiple = maybe_multiple;
6954 v->maybe_dead = 0;
6955 v->derive_adjustment = 0;
6956 v->same = 0;
6957 v->ignore = 0;
6958 v->new_reg = 0;
6959 v->final_value = 0;
6960 v->same_insn = 0;
6961 v->auto_inc_opt = 0;
6962 v->shared = 0;
6964 /* The v->always_computable field is used in update_giv_derive, to
6965 determine whether a giv can be used to derive another giv. For a
6966 DEST_REG giv, INSN computes a new value for the giv, so its value
6967 isn't computable if INSN isn't executed every iteration.
6968 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
6969 it does not compute a new value. Hence the value is always computable
6970 regardless of whether INSN is executed each iteration. */
6972 if (type == DEST_ADDR)
6973 v->always_computable = 1;
6974 else
6975 v->always_computable = ! not_every_iteration;
6977 v->always_executed = ! not_every_iteration;
6979 if (type == DEST_ADDR)
6981 v->mode = GET_MODE (*location);
6982 v->lifetime = 1;
6984 else /* type == DEST_REG */
6986 v->mode = GET_MODE (SET_DEST (set));
6988 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
6990 /* If the lifetime is zero, it means that this register is
6991 really a dead store. So mark this as a giv that can be
6992 ignored. This will not prevent the biv from being eliminated. */
6993 if (v->lifetime == 0)
6994 v->ignore = 1;
6996 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6997 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7000 /* Add the giv to the class of givs computed from one biv. */
7002 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
7003 gcc_assert (bl);
7004 v->next_iv = bl->giv;
7005 bl->giv = v;
7007 /* Don't count DEST_ADDR. This is supposed to count the number of
7008 insns that calculate givs. */
7009 if (type == DEST_REG)
7010 bl->giv_count++;
7011 bl->total_benefit += benefit;
7013 if (type == DEST_ADDR)
7015 v->replaceable = 1;
7016 v->not_replaceable = 0;
7018 else
7020 /* The giv can be replaced outright by the reduced register only if all
7021 of the following conditions are true:
7022 - the insn that sets the giv is always executed on any iteration
7023 on which the giv is used at all
7024 (there are two ways to deduce this:
7025 either the insn is executed on every iteration,
7026 or all uses follow that insn in the same basic block),
7027 - the giv is not used outside the loop
7028 - no assignments to the biv occur during the giv's lifetime. */
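/* E.g., a giv used only within the basic block that computes it
satisfies the first condition even if that block is not executed
on every iteration; a giv whose register is live after the loop
fails the second. */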
7030 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
7031 /* Previous line always fails if INSN was moved by loop opt. */
7032 && REGNO_LAST_LUID (REGNO (dest_reg))
7033 < INSN_LUID (loop->end)
7034 && (! not_every_iteration
7035 || last_use_this_basic_block (dest_reg, insn)))
7037 /* Now check that there are no assignments to the biv within the
7038 giv's lifetime. This requires two separate checks. */
7040 /* Check each biv update, and fail if any are between the first
7041 and last use of the giv.
7043 If this loop contains an inner loop that was unrolled, then
7044 the insn modifying the biv may have been emitted by the loop
7045 unrolling code, and hence does not have a valid luid. Just
7046 mark the biv as not replaceable in this case. It is not very
7047 useful as a biv, because it is used in two different loops.
7048 It is very unlikely that we would be able to optimize the giv
7049 using this biv anyways. */
7051 v->replaceable = 1;
7052 v->not_replaceable = 0;
7053 for (b = bl->biv; b; b = b->next_iv)
7055 if (INSN_UID (b->insn) >= max_uid_for_loop
7056 || ((INSN_LUID (b->insn)
7057 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
7058 && (INSN_LUID (b->insn)
7059 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
7061 v->replaceable = 0;
7062 v->not_replaceable = 1;
7063 break;
7067 /* If there are any backwards branches that go from after the
7068 biv update to before it, then this giv is not replaceable. */
7069 if (v->replaceable)
7070 for (b = bl->biv; b; b = b->next_iv)
7071 if (back_branch_in_range_p (loop, b->insn))
7073 v->replaceable = 0;
7074 v->not_replaceable = 1;
7075 break;
7078 else
7080 /* May still be replaceable, we don't have enough info here to
7081 decide. */
7082 v->replaceable = 0;
7083 v->not_replaceable = 0;
7087 /* Record whether the add_val contains a const_int, for later use by
7088 combine_givs. */
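/* E.g., an add_val of (plus (reg 100) (const_int 8)) contains a
const_int, so no_const_addval is cleared below; a plain (reg 100)
leaves it set. */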
7090 rtx tem = add_val;
7092 v->no_const_addval = 1;
7093 if (tem == const0_rtx)
7094 ;
7095 else if (CONSTANT_P (add_val))
7096 v->no_const_addval = 0;
7097 if (GET_CODE (tem) == PLUS)
7099 while (1)
7101 if (GET_CODE (XEXP (tem, 0)) == PLUS)
7102 tem = XEXP (tem, 0);
7103 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
7104 tem = XEXP (tem, 1);
7105 else
7106 break;
7108 if (CONSTANT_P (XEXP (tem, 1)))
7109 v->no_const_addval = 0;
7113 if (loop_dump_stream)
7114 loop_giv_dump (v, loop_dump_stream, 0);
7117 /* Try to calculate the final value of the giv, the value it will have at
7118 the end of the loop. If we can do it, return that value. */
7120 static rtx
7121 final_giv_value (const struct loop *loop, struct induction *v)
7123 struct loop_ivs *ivs = LOOP_IVS (loop);
7124 struct iv_class *bl;
7125 rtx insn;
7126 rtx increment, tem;
7127 rtx seq;
7128 rtx loop_end = loop->end;
7129 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
7131 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
7133 /* The final value for givs which depend on reversed bivs must be calculated
7134 differently than for ordinary givs. In this case, there is already an
7135 insn after the loop which sets this giv's final value (if necessary),
7136 and there are no other loop exits, so we can return any value. */
7137 if (bl->reversed)
7139 if (loop_dump_stream)
7140 fprintf (loop_dump_stream,
7141 "Final giv value for %d, depends on reversed biv\n",
7142 REGNO (v->dest_reg));
7143 return const0_rtx;
7146 /* Try to calculate the final value as a function of the biv it depends
7147 upon. The only exit from the loop must be the fall through at the bottom
7148 and the insn that sets the giv must be executed on every iteration
7149 (otherwise the giv may not have its final value when the loop exits). */
7151 /* ??? Can calculate the final giv value by subtracting off the
7152 extra biv increments times the giv's mult_val. The loop must have
7153 only one exit for this to work, but the number of loop iterations does not need
7154 to be known. */
7156 if (n_iterations != 0
7157 && ! loop->exit_count
7158 && v->always_executed)
7160 /* ?? It is tempting to use the biv's value here since these insns will
7161 be put after the loop, and hence the biv will have its final value
7162 then. However, this fails if the biv is subsequently eliminated.
7163 Perhaps determine whether biv's are eliminable before trying to
7164 determine whether giv's are replaceable so that we can use the
7165 biv value here if it is not eliminable. */
7167 /* We are emitting code after the end of the loop, so we must make
7168 sure that bl->initial_value is still valid then. It will still
7169 be valid if it is invariant. */
7171 increment = biv_total_increment (bl);
7173 if (increment && loop_invariant_p (loop, increment)
7174 && loop_invariant_p (loop, bl->initial_value))
7176 /* Can calculate the loop exit value of its biv as
7177 (n_iterations * increment) + initial_value */
7179 /* The loop exit value of the giv is then
7180 (final_biv_value - extra increments) * mult_val + add_val.
7181 The extra increments are any increments to the biv which
7182 occur in the loop after the giv's value is calculated.
7183 We must search from the insn that sets the giv to the end
7184 of the loop to calculate this value. */
7186 /* Put the final biv value in tem. */
7187 tem = gen_reg_rtx (v->mode);
7188 record_base_value (REGNO (tem), bl->biv->add_val, 0);
7189 loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment),
7190 GEN_INT (n_iterations),
7191 extend_value_for_giv (v, bl->initial_value),
7192 tem);
7194 /* Subtract off extra increments as we find them. */
7195 for (insn = NEXT_INSN (v->insn); insn != loop_end;
7196 insn = NEXT_INSN (insn))
7198 struct induction *biv;
7200 for (biv = bl->biv; biv; biv = biv->next_iv)
7201 if (biv->insn == insn)
7203 start_sequence ();
7204 tem = expand_simple_binop (GET_MODE (tem), MINUS, tem,
7205 biv->add_val, NULL_RTX, 0,
7206 OPTAB_LIB_WIDEN);
7207 seq = get_insns ();
7208 end_sequence ();
7209 loop_insn_sink (loop, seq);
7213 /* Now calculate the giv's final value. */
7214 loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem);
7216 if (loop_dump_stream)
7217 fprintf (loop_dump_stream,
7218 "Final giv value for %d, calc from biv's value.\n",
7219 REGNO (v->dest_reg));
7221 return tem;
7225 /* Replaceable giv's should never reach here. */
7226 gcc_assert (!v->replaceable);
7228 /* Check to see if the biv is dead at all loop exits. */
7229 if (reg_dead_after_loop (loop, v->dest_reg))
7231 if (loop_dump_stream)
7232 fprintf (loop_dump_stream,
7233 "Final giv value for %d, giv dead after loop exit.\n",
7234 REGNO (v->dest_reg));
7236 return const0_rtx;
7239 return 0;
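/* Editorial sketch (not part of loop.c): the arithmetic performed by
   final_giv_value above, with hypothetical scalar stand-ins for the rtx
   quantities.  The biv's exit value is n_iterations * increment +
   initial_value; the giv's exit value subtracts the biv increments that
   occur after the giv's insn before applying the linear function.  */
static long
final_giv_value_sketch (long initial_value, long increment,
                        long n_iterations, long extra_increments,
                        long mult_val, long add_val)
{
  /* Loop exit value of the biv.  */
  long final_biv = n_iterations * increment + initial_value;
  /* Loop exit value of the giv.  */
  return (final_biv - extra_increments) * mult_val + add_val;
}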
7242 /* All this does is determine whether a giv can be made replaceable because
7243 its final value can be calculated. This code can not be part of record_giv
7244 above, because final_giv_value requires that the number of loop iterations
7245 be known, and that can not be accurately calculated until after all givs
7246 have been identified. */
7248 static void
7249 check_final_value (const struct loop *loop, struct induction *v)
7251 rtx final_value = 0;
7253 /* DEST_ADDR givs will never reach here, because they are always marked
7254 replaceable above in record_giv. */
7256 /* The giv can be replaced outright by the reduced register only if all
7257 of the following conditions are true:
7258 - the insn that sets the giv is always executed on any iteration
7259 on which the giv is used at all
7260 (there are two ways to deduce this:
7261 either the insn is executed on every iteration,
7262 or all uses follow that insn in the same basic block),
7263 - its final value can be calculated (this condition is different
7264 than the one above in record_giv)
7265 - it's not used before it's set
7266 - no assignments to the biv occur during the giv's lifetime. */
7268 #if 0
7269 /* This is only called now when replaceable is known to be false. */
7270 /* Clear replaceable, so that it won't confuse final_giv_value. */
7271 v->replaceable = 0;
7272 #endif
7274 if ((final_value = final_giv_value (loop, v))
7275 && (v->always_executed
7276 || last_use_this_basic_block (v->dest_reg, v->insn)))
7278 int biv_increment_seen = 0, before_giv_insn = 0;
7279 rtx p = v->insn;
7280 rtx last_giv_use;
7282 v->replaceable = 1;
7283 v->not_replaceable = 0;
7285 /* When trying to determine whether or not a biv increment occurs
7286 during the lifetime of the giv, we can ignore uses of the variable
7287 outside the loop because final_value is true. Hence we can not
7288 use regno_last_uid and regno_first_uid as above in record_giv. */
7290 /* Search the loop to determine whether any assignments to the
7291 biv occur during the giv's lifetime. Start with the insn
7292 that sets the giv, and search around the loop until we come
7293 back to that insn again.
7295 Also fail if there is a jump within the giv's lifetime that jumps
7296 to somewhere outside the lifetime but still within the loop. This
7297 catches spaghetti code where the execution order is not linear, and
7298 hence the above test fails. Here we assume that the giv lifetime
7299 does not extend from one iteration of the loop to the next, so as
7300 to make the test easier. Since the lifetime isn't known yet,
7301 this requires two loops. See also record_giv above. */
7303 last_giv_use = v->insn;
7305 while (1)
7307 p = NEXT_INSN (p);
7308 if (p == loop->end)
7310 before_giv_insn = 1;
7311 p = NEXT_INSN (loop->start);
7313 if (p == v->insn)
7314 break;
7316 if (INSN_P (p))
7318 /* It is possible for the BIV increment to use the GIV if we
7319 have a cycle. Thus we must be sure to check each insn for
7320 both BIV and GIV uses, and we must check for BIV uses
7321 first. */
7323 if (! biv_increment_seen
7324 && reg_set_p (v->src_reg, PATTERN (p)))
7325 biv_increment_seen = 1;
7327 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
7329 if (biv_increment_seen || before_giv_insn)
7331 v->replaceable = 0;
7332 v->not_replaceable = 1;
7333 break;
7335 last_giv_use = p;
7340 /* Now that the lifetime of the giv is known, check for branches
7341 from within the lifetime to outside the lifetime if it is still
7342 replaceable. */
7344 if (v->replaceable)
7346 p = v->insn;
7347 while (1)
7349 p = NEXT_INSN (p);
7350 if (p == loop->end)
7351 p = NEXT_INSN (loop->start);
7352 if (p == last_giv_use)
7353 break;
7355 if (JUMP_P (p) && JUMP_LABEL (p)
7356 && LABEL_NAME (JUMP_LABEL (p))
7357 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
7358 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
7359 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
7360 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
7362 v->replaceable = 0;
7363 v->not_replaceable = 1;
7365 if (loop_dump_stream)
7366 fprintf (loop_dump_stream,
7367 "Found branch outside giv lifetime.\n");
7369 break;
7374 /* If it is replaceable, then save the final value. */
7375 if (v->replaceable)
7376 v->final_value = final_value;
7379 if (loop_dump_stream && v->replaceable)
7380 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
7381 INSN_UID (v->insn), REGNO (v->dest_reg));
7384 /* Update the status of whether a giv can derive other givs.
7386 We need to do something special if there is or may be an update to the biv
7387 between the time the giv is defined and the time it is used to derive
7388 another giv.
7390 In addition, a giv that is only conditionally set is not allowed to
7391 derive another giv once a label has been passed.
7393 The cases we look at are when a label or an update to a biv is passed. */
7395 static void
7396 update_giv_derive (const struct loop *loop, rtx p)
7398 struct loop_ivs *ivs = LOOP_IVS (loop);
7399 struct iv_class *bl;
7400 struct induction *biv, *giv;
7401 rtx tem;
7402 int dummy;
7404 /* Search all IV classes, then all bivs, and finally all givs.
7406 There are three cases we are concerned with. First we have the situation
7407 of a giv that is only updated conditionally. In that case, it may not
7408 derive any givs after a label is passed.
7410 The second case is when a biv update occurs, or may occur, after the
7411 definition of a giv. For certain biv updates (see below) that are
7412 known to occur between the giv definition and use, we can adjust the
7413 giv definition. For others, or when the biv update is conditional,
7414 we must prevent the giv from deriving any other givs. There are two
7415 sub-cases within this case.
7417 If this is a label, we are concerned with any biv update that is done
7418 conditionally, since it may be done after the giv is defined followed by
7419 a branch here (actually, we need to pass both a jump and a label, but
7420 this extra tracking doesn't seem worth it).
7422 If this is a jump, we are concerned about any biv update that may be
7423 executed multiple times. We are actually only concerned about
7424 backward jumps, but it is probably not worth performing the test
7425 on the jump again here.
7427 If this is a biv update, we must adjust the giv status to show that a
7428 subsequent biv update was performed. If this adjustment cannot be done,
7429 the giv cannot derive further givs. */
7431 for (bl = ivs->list; bl; bl = bl->next)
7432 for (biv = bl->biv; biv; biv = biv->next_iv)
7433 if (LABEL_P (p) || JUMP_P (p)
7434 || biv->insn == p)
7436 /* Skip if location is the same as a previous one. */
7437 if (biv->same)
7438 continue;
7440 for (giv = bl->giv; giv; giv = giv->next_iv)
7442 /* If cant_derive is already true, there is no point in
7443 checking all of these conditions again. */
7444 if (giv->cant_derive)
7445 continue;
7447 /* If this giv is conditionally set and we have passed a label,
7448 it cannot derive anything. */
7449 if (LABEL_P (p) && ! giv->always_computable)
7450 giv->cant_derive = 1;
7452 /* Skip givs that have mult_val == 0, since
7453 they are really invariants. Also skip those that are
7454 replaceable, since we know their lifetime doesn't contain
7455 any biv update. */
7456 else if (giv->mult_val == const0_rtx || giv->replaceable)
7457 continue;
7459 /* The only way we can allow this giv to derive another
7460 is if this is a biv increment and we can form the product
7461 of biv->add_val and giv->mult_val. In this case, we will
7462 be able to compute a compensation. */
7463 else if (biv->insn == p)
7465 rtx ext_val_dummy;
7467 tem = 0;
7468 if (biv->mult_val == const1_rtx)
7469 tem = simplify_giv_expr (loop,
7470 gen_rtx_MULT (giv->mode,
7471 biv->add_val,
7472 giv->mult_val),
7473 &ext_val_dummy, &dummy);
7475 if (tem && giv->derive_adjustment)
7476 tem = simplify_giv_expr
7477 (loop,
7478 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
7479 &ext_val_dummy, &dummy);
7481 if (tem)
7482 giv->derive_adjustment = tem;
7483 else
7484 giv->cant_derive = 1;
7486 else if ((LABEL_P (p) && ! biv->always_computable)
7487 || (JUMP_P (p) && biv->maybe_multiple))
7488 giv->cant_derive = 1;
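/* Editorial sketch (hypothetical values, not part of loop.c): why the
   compensation recorded above is biv->add_val * giv->mult_val.  Suppose
   the giv g = i*3 + 1 is computed, and the biv update i += 2 executes
   afterwards; in terms of the updated biv, g equals i*3 + 1 - 6, so a
   derivation from g must compensate by add_val * mult_val = 2*3 = 6.  */
static long
derive_adjustment_sketch (long i)
{
  long g = (i - 2) * 3 + 1;	/* giv computed before the update i += 2 */
  return g + 2 * 3;		/* with the adjustment, equals i*3 + 1 */
}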
7493 /* Check whether an insn is an increment legitimate for a basic induction var.
7494 X is the source of insn P, or a part of it.
7495 MODE is the mode in which X should be interpreted.
7497 DEST_REG is the putative biv, also the destination of the insn.
7498 We accept patterns of these forms:
7499 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
7500 REG = INVARIANT + REG
7502 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
7503 store the additive term into *INC_VAL, and store the place where
7504 we found the additive term into *LOCATION.
7506 If X is an assignment of an invariant into DEST_REG, we set
7507 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
7509 We also want to detect a BIV when it corresponds to a variable
7510 whose mode was promoted. In that case, an increment
7511 of the variable may be a PLUS that adds a SUBREG of that variable to
7512 an invariant and then sign- or zero-extends the result of the PLUS
7513 into the variable.
7515 Most GIVs in such cases will be in the promoted mode, since that is
7516 probably the natural computation mode (and almost certainly the mode
7517 used for addresses) on the machine. So we view the pseudo-reg containing
7518 the variable as the BIV, as if it were simply incremented.
7520 Note that treating the entire pseudo as a BIV will result in making
7521 simple increments to any GIVs based on it. However, if the variable
7522 overflows in its declared mode but not its promoted mode, the result will
7523 be incorrect. This is acceptable if the variable is signed, since
7524 overflows in such cases are undefined, but not if it is unsigned, since
7525 those overflows are defined. So we only check for SIGN_EXTEND and
7526 not ZERO_EXTEND.
7528 If we cannot find a biv, we return 0. */
7530 static int
7531 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
7532 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
7533 rtx **location)
7535 enum rtx_code code;
7536 rtx *argp, arg;
7537 rtx insn, set = 0, last, inc;
7539 code = GET_CODE (x);
7540 *location = NULL;
7541 switch (code)
7543 case PLUS:
7544 if (rtx_equal_p (XEXP (x, 0), dest_reg)
7545 || (GET_CODE (XEXP (x, 0)) == SUBREG
7546 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
7547 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
7549 argp = &XEXP (x, 1);
7551 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
7552 || (GET_CODE (XEXP (x, 1)) == SUBREG
7553 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
7554 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
7556 argp = &XEXP (x, 0);
7558 else
7559 return 0;
7561 arg = *argp;
7562 if (loop_invariant_p (loop, arg) != 1)
7563 return 0;
7565 /* convert_modes can emit new instructions, e.g. when arg is a loop
7566 invariant MEM and dest_reg has a different mode.
7567 These instructions would be emitted after the end of the function
7568 and then *inc_val would be an uninitialized pseudo.
7569 Detect this and bail in this case.
7570 Other alternatives to solve this would be: introduce a convert_modes
7571 variant which is allowed to fail but not allowed to emit new
7572 instructions; emit these instructions before the loop start and let
7573 them be garbage collected if *inc_val is never used; or save the
7574 *inc_val initialization sequence generated here and, when *inc_val
7575 is actually used, emit it at some suitable place. */
7576 last = get_last_insn ();
7577 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
7578 if (get_last_insn () != last)
7580 delete_insns_since (last);
7581 return 0;
7584 *inc_val = inc;
7585 *mult_val = const1_rtx;
7586 *location = argp;
7587 return 1;
7589 case SUBREG:
7590 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
7591 handle addition of promoted variables.
7592 ??? The comment at the start of this function is wrong: promoted
7593 variable increments don't look like it says they do. */
7594 return basic_induction_var (loop, SUBREG_REG (x),
7595 GET_MODE (SUBREG_REG (x)),
7596 dest_reg, p, inc_val, mult_val, location);
7598 case REG:
7599 /* If this register is assigned in a previous insn, look at its
7600 source, but don't go outside the loop or past a label. */
7602 /* If this sets a register to itself, we would repeat any previous
7603 biv increment if we applied this strategy blindly. */
7604 if (rtx_equal_p (dest_reg, x))
7605 return 0;
7607 insn = p;
7608 while (1)
7610 rtx dest;
7611 do
7613 insn = PREV_INSN (insn);
7615 while (insn && NOTE_P (insn)
7616 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7618 if (!insn)
7619 break;
7620 set = single_set (insn);
7621 if (set == 0)
7622 break;
7623 dest = SET_DEST (set);
7624 if (dest == x
7625 || (GET_CODE (dest) == SUBREG
7626 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
7627 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
7628 && SUBREG_REG (dest) == x))
7629 return basic_induction_var (loop, SET_SRC (set),
7630 (GET_MODE (SET_SRC (set)) == VOIDmode
7631 ? GET_MODE (x)
7632 : GET_MODE (SET_SRC (set))),
7633 dest_reg, insn,
7634 inc_val, mult_val, location);
7636 while (GET_CODE (dest) == SUBREG
7637 || GET_CODE (dest) == ZERO_EXTRACT
7638 || GET_CODE (dest) == STRICT_LOW_PART)
7639 dest = XEXP (dest, 0);
7640 if (dest == x)
7641 break;
7643 /* Fall through. */
7645 /* Can accept constant setting of biv only when inside innermost loop.
7646 Otherwise, a biv of an inner loop may be incorrectly recognized
7647 as a biv of the outer loop,
7648 causing code to be moved INTO the inner loop. */
7649 case MEM:
7650 if (loop_invariant_p (loop, x) != 1)
7651 return 0;
7652 case CONST_INT:
7653 case SYMBOL_REF:
7654 case CONST:
7655 /* convert_modes dies if we try to convert to or from CCmode, so just
7656 exclude that case. It is very unlikely that a condition code value
7657 would be a useful iterator anyway. convert_modes also dies if we try
7658 to convert a float mode to non-float or vice versa. */
7659 if (loop->level == 1
7660 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
7661 && GET_MODE_CLASS (mode) != MODE_CC)
7663 /* Possible bug here? Perhaps we don't know the mode of X. */
7664 last = get_last_insn ();
7665 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
7666 if (get_last_insn () != last)
7668 delete_insns_since (last);
7669 return 0;
7672 *inc_val = inc;
7673 *mult_val = const0_rtx;
7674 return 1;
7676 else
7677 return 0;
7679 case SIGN_EXTEND:
7680 /* Ignore this BIV if signed arithmetic overflow is defined. */
7681 if (flag_wrapv)
7682 return 0;
7683 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
7684 dest_reg, p, inc_val, mult_val, location);
7686 case ASHIFTRT:
7687 /* Similar, since this can be a sign extension. */
7688 for (insn = PREV_INSN (p);
7689 (insn && NOTE_P (insn)
7690 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7691 insn = PREV_INSN (insn))
7694 if (insn)
7695 set = single_set (insn);
7697 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
7698 && set && SET_DEST (set) == XEXP (x, 0)
7699 && GET_CODE (XEXP (x, 1)) == CONST_INT
7700 && INTVAL (XEXP (x, 1)) >= 0
7701 && GET_CODE (SET_SRC (set)) == ASHIFT
7702 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
7703 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
7704 GET_MODE (XEXP (x, 0)),
7705 dest_reg, insn, inc_val, mult_val,
7706 location);
7707 return 0;
7709 default:
7710 return 0;
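/* Editorial sketch (hypothetical loops, not part of loop.c): source-level
   shapes whose counter updates match the patterns basic_induction_var
   accepts above.  */
void
biv_pattern_examples (int n, int invar)
{
  int i, j;

  for (i = 0; i < n; i = i + 4)		/* REG = REG + INVARIANT */
    ;
  for (j = 0; j < n; j = invar + j)	/* REG = INVARIANT + REG */
    ;
}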
7714 /* A general induction variable (giv) is any quantity that is a linear
7715 function of a basic induction variable,
7716 i.e. giv = biv * mult_val + add_val.
7717 The coefficients can be any loop invariant quantity.
7718 A giv need not be computed directly from the biv;
7719 it can be computed by way of other givs. */
7721 /* Determine whether X computes a giv.
7722 If it does, return a nonzero value
7723 which is the benefit from eliminating the computation of X;
7724 set *SRC_REG to the register of the biv that it is computed from;
7725 set *ADD_VAL and *MULT_VAL to the coefficients,
7726 such that the value of X is biv * mult + add; */
7728 static int
7729 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
7730 rtx *add_val, rtx *mult_val, rtx *ext_val,
7731 int is_addr, int *pbenefit,
7732 enum machine_mode addr_mode)
7734 struct loop_ivs *ivs = LOOP_IVS (loop);
7735 rtx orig_x = x;
7737 /* If this is an invariant, forget it, it isn't a giv. */
7738 if (loop_invariant_p (loop, x) == 1)
7739 return 0;
7741 *pbenefit = 0;
7742 *ext_val = NULL_RTX;
7743 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
7744 if (x == 0)
7745 return 0;
7747 switch (GET_CODE (x))
7749 case USE:
7750 case CONST_INT:
7751 /* Since this is now an invariant and wasn't before, it must be a giv
7752 with MULT_VAL == 0. It doesn't matter which BIV we associate this
7753 with. */
7754 *src_reg = ivs->list->biv->dest_reg;
7755 *mult_val = const0_rtx;
7756 *add_val = x;
7757 break;
7759 case REG:
7760 /* This is equivalent to a BIV. */
7761 *src_reg = x;
7762 *mult_val = const1_rtx;
7763 *add_val = const0_rtx;
7764 break;
7766 case PLUS:
7767 /* Either (plus (biv) (invar)) or
7768 (plus (mult (biv) (invar_1)) (invar_2)). */
7769 if (GET_CODE (XEXP (x, 0)) == MULT)
7771 *src_reg = XEXP (XEXP (x, 0), 0);
7772 *mult_val = XEXP (XEXP (x, 0), 1);
7774 else
7776 *src_reg = XEXP (x, 0);
7777 *mult_val = const1_rtx;
7779 *add_val = XEXP (x, 1);
7780 break;
7782 case MULT:
7783 /* ADD_VAL is zero. */
7784 *src_reg = XEXP (x, 0);
7785 *mult_val = XEXP (x, 1);
7786 *add_val = const0_rtx;
7787 break;
7789 default:
7790 gcc_unreachable ();
7793 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
7794 unless they are CONST_INT). */
7795 if (GET_CODE (*add_val) == USE)
7796 *add_val = XEXP (*add_val, 0);
7797 if (GET_CODE (*mult_val) == USE)
7798 *mult_val = XEXP (*mult_val, 0);
7800 if (is_addr)
7801 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
7802 else
7803 *pbenefit += rtx_cost (orig_x, SET);
7805 /* Always return true if this is a giv so it will be detected as such,
7806 even if the benefit is zero or negative. This allows elimination
7807 of bivs that might otherwise not be eliminated. */
7808 return 1;
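/* Editorial sketch (hypothetical names, not part of loop.c): the triple
   general_induction_var produces.  For x = i * 4 + base with biv i, it
   would set *SRC_REG = i, *MULT_VAL = 4 and *ADD_VAL = base.  */
static long
giv_decomposition_example (long i, long base)
{
  long mult_val = 4;
  long add_val = base;
  return i * mult_val + add_val;	/* the giv's value: biv * mult + add */
}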
7811 /* Given an expression, X, try to form it as a linear function of a biv.
7812 We will canonicalize it to be of the form
7813 (plus (mult (BIV) (invar_1))
7814 (invar_2))
7815 with possible degeneracies.
7817 The invariant expressions must each be of a form that can be used as a
7818 machine operand. We surround them with a USE rtx (a hack, but localized
7819 and certainly unambiguous!) if not a CONST_INT for simplicity in this
7820 routine; it is the caller's responsibility to strip them.
7822 If no such canonicalization is possible (i.e., two biv's are used or an
7823 expression that is neither invariant nor a biv or giv), this routine
7824 returns 0.
7826 For a nonzero return, the result will have a code of CONST_INT, USE,
7827 REG (for a BIV), PLUS, or MULT. No other codes will occur.
7829 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
7831 static rtx sge_plus (enum machine_mode, rtx, rtx);
7832 static rtx sge_plus_constant (rtx, rtx);
7834 static rtx
7835 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
7837 struct loop_ivs *ivs = LOOP_IVS (loop);
7838 struct loop_regs *regs = LOOP_REGS (loop);
7839 enum machine_mode mode = GET_MODE (x);
7840 rtx arg0, arg1;
7841 rtx tem;
7843 /* If this is not an integer mode, or if we cannot do arithmetic in this
7844 mode, this can't be a giv. */
7845 if (mode != VOIDmode
7846 && (GET_MODE_CLASS (mode) != MODE_INT
7847 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
7848 return NULL_RTX;
7850 switch (GET_CODE (x))
7852 case PLUS:
7853 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7854 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7855 if (arg0 == 0 || arg1 == 0)
7856 return NULL_RTX;
7858 /* Put constant last, CONST_INT last if both constant. */
7859 if ((GET_CODE (arg0) == USE
7860 || GET_CODE (arg0) == CONST_INT)
7861 && ! ((GET_CODE (arg0) == USE
7862 && GET_CODE (arg1) == USE)
7863 || GET_CODE (arg1) == CONST_INT))
7864 tem = arg0, arg0 = arg1, arg1 = tem;
7866 /* Handle addition of zero, then addition of an invariant. */
7867 if (arg1 == const0_rtx)
7868 return arg0;
7869 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
7870 switch (GET_CODE (arg0))
7872 case CONST_INT:
7873 case USE:
7874 /* Adding two invariants must result in an invariant, so enclose
7875 addition operation inside a USE and return it. */
7876 if (GET_CODE (arg0) == USE)
7877 arg0 = XEXP (arg0, 0);
7878 if (GET_CODE (arg1) == USE)
7879 arg1 = XEXP (arg1, 0);
7881 if (GET_CODE (arg0) == CONST_INT)
7882 tem = arg0, arg0 = arg1, arg1 = tem;
7883 if (GET_CODE (arg1) == CONST_INT)
7884 tem = sge_plus_constant (arg0, arg1);
7885 else
7886 tem = sge_plus (mode, arg0, arg1);
7888 if (GET_CODE (tem) != CONST_INT)
7889 tem = gen_rtx_USE (mode, tem);
7890 return tem;
7892 case REG:
7893 case MULT:
7894 /* biv + invar or mult + invar. Return sum. */
7895 return gen_rtx_PLUS (mode, arg0, arg1);
7897 case PLUS:
7898 /* (a + invar_1) + invar_2. Associate. */
7899 return
7900 simplify_giv_expr (loop,
7901 gen_rtx_PLUS (mode,
7902 XEXP (arg0, 0),
7903 gen_rtx_PLUS (mode,
7904 XEXP (arg0, 1),
7905 arg1)),
7906 ext_val, benefit);
7908 default:
7909 gcc_unreachable ();
7912 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
7913 MULT to reduce cases. */
7914 if (REG_P (arg0))
7915 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
7916 if (REG_P (arg1))
7917 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
7919 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
7920 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
7921 Recurse to associate the second PLUS. */
7922 if (GET_CODE (arg1) == MULT)
7923 tem = arg0, arg0 = arg1, arg1 = tem;
7925 if (GET_CODE (arg1) == PLUS)
7926 return
7927 simplify_giv_expr (loop,
7928 gen_rtx_PLUS (mode,
7929 gen_rtx_PLUS (mode, arg0,
7930 XEXP (arg1, 0)),
7931 XEXP (arg1, 1)),
7932 ext_val, benefit);
7934 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
7935 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
7936 return NULL_RTX;
7938 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
7939 return NULL_RTX;
7941 return simplify_giv_expr (loop,
7942 gen_rtx_MULT (mode,
7943 XEXP (arg0, 0),
7944 gen_rtx_PLUS (mode,
7945 XEXP (arg0, 1),
7946 XEXP (arg1, 1))),
7947 ext_val, benefit);
7949 case MINUS:
7950 /* Handle "a - b" as "a + b * (-1)". */
7951 return simplify_giv_expr (loop,
7952 gen_rtx_PLUS (mode,
7953 XEXP (x, 0),
7954 gen_rtx_MULT (mode,
7955 XEXP (x, 1),
7956 constm1_rtx)),
7957 ext_val, benefit);
7959 case MULT:
7960 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7961 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7962 if (arg0 == 0 || arg1 == 0)
7963 return NULL_RTX;
7965 /* Put constant last, CONST_INT last if both constant. */
7966 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
7967 && GET_CODE (arg1) != CONST_INT)
7968 tem = arg0, arg0 = arg1, arg1 = tem;
7970 /* If second argument is not now constant, not giv. */
7971 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
7972 return NULL_RTX;
7974 /* Handle multiply by 0 or 1. */
7975 if (arg1 == const0_rtx)
7976 return const0_rtx;
7978 else if (arg1 == const1_rtx)
7979 return arg0;
7981 switch (GET_CODE (arg0))
7983 case REG:
7984 /* biv * invar. Done. */
7985 return gen_rtx_MULT (mode, arg0, arg1);
7987 case CONST_INT:
7988 /* Product of two constants. */
7989 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
7991 case USE:
7992 /* invar * invar is a giv, but attempt to simplify it somehow. */
7993 if (GET_CODE (arg1) != CONST_INT)
7994 return NULL_RTX;
7996 arg0 = XEXP (arg0, 0);
7997 if (GET_CODE (arg0) == MULT)
7999 /* (invar_0 * invar_1) * invar_2. Associate. */
8000 return simplify_giv_expr (loop,
8001 gen_rtx_MULT (mode,
8002 XEXP (arg0, 0),
8003 gen_rtx_MULT (mode,
8004 XEXP (arg0,
8005 1),
8006 arg1)),
8007 ext_val, benefit);
8009 /* Propagate the MULT expressions to the innermost nodes. */
8010 else if (GET_CODE (arg0) == PLUS)
8012 /* (invar_0 + invar_1) * invar_2. Distribute. */
8013 return simplify_giv_expr (loop,
8014 gen_rtx_PLUS (mode,
8015 gen_rtx_MULT (mode,
8016 XEXP (arg0,
8017 0),
8018 arg1),
8019 gen_rtx_MULT (mode,
8020 XEXP (arg0,
8021 1),
8022 arg1)),
8023 ext_val, benefit);
8025 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
8027 case MULT:
8028 /* (a * invar_1) * invar_2. Associate. */
8029 return simplify_giv_expr (loop,
8030 gen_rtx_MULT (mode,
8031 XEXP (arg0, 0),
8032 gen_rtx_MULT (mode,
8033 XEXP (arg0, 1),
8034 arg1)),
8035 ext_val, benefit);
8037 case PLUS:
8038 /* (a + invar_1) * invar_2. Distribute. */
8039 return simplify_giv_expr (loop,
8040 gen_rtx_PLUS (mode,
8041 gen_rtx_MULT (mode,
8042 XEXP (arg0, 0),
8043 arg1),
8044 gen_rtx_MULT (mode,
8045 XEXP (arg0, 1),
8046 arg1)),
8047 ext_val, benefit);
8049 default:
8050 gcc_unreachable ();
8053 case ASHIFT:
8054 /* Shift by constant is multiply by power of two. */
8055 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8056 return 0;
8058 return
8059 simplify_giv_expr (loop,
8060 gen_rtx_MULT (mode,
8061 XEXP (x, 0),
8062 GEN_INT ((HOST_WIDE_INT) 1
8063 << INTVAL (XEXP (x, 1)))),
8064 ext_val, benefit);
8066 case NEG:
8067 /* "-a" is "a * (-1)" */
8068 return simplify_giv_expr (loop,
8069 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
8070 ext_val, benefit);
8072 case NOT:
8073 /* "~a" is "-a - 1". Silly, but easy. */
8074 return simplify_giv_expr (loop,
8075 gen_rtx_MINUS (mode,
8076 gen_rtx_NEG (mode, XEXP (x, 0)),
8077 const1_rtx),
8078 ext_val, benefit);
8080 case USE:
8081 /* Already in proper form for invariant. */
8082 return x;
8084 case SIGN_EXTEND:
8085 case ZERO_EXTEND:
8086 case TRUNCATE:
8087 /* Conditionally recognize extensions of simple IVs. After we've
8088 computed loop traversal counts and verified the range of the
8089 source IV, we'll reevaluate this as a GIV. */
8090 if (*ext_val == NULL_RTX)
8092 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
8093 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
8095 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
8096 return arg0;
8099 goto do_default;
8101 case REG:
8102 /* If this is a new register, we can't deal with it. */
8103 if (REGNO (x) >= max_reg_before_loop)
8104 return 0;
8106 /* Check for biv or giv. */
8107 switch (REG_IV_TYPE (ivs, REGNO (x)))
8109 case BASIC_INDUCT:
8110 return x;
8111 case GENERAL_INDUCT:
8113 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
8115 /* Form expression from giv and add benefit. Ensure this giv
8116 can derive another and subtract any needed adjustment if so. */
8118 /* Increasing the benefit here is risky. The only case in which it
8119 is arguably correct is if this is the only use of V. In other
8120 cases, this will artificially inflate the benefit of the current
8121 giv, and lead to suboptimal code. Thus, it is disabled, since
8122 potentially not reducing an only marginally beneficial giv is
8123 less harmful than reducing many givs that are not really
8124 beneficial. */
8126 rtx single_use = regs->array[REGNO (x)].single_usage;
8127 if (single_use && single_use != const0_rtx)
8128 *benefit += v->benefit;
8131 if (v->cant_derive)
8132 return 0;
8134 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
8135 v->src_reg, v->mult_val),
8136 v->add_val);
8138 if (v->derive_adjustment)
8139 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
8140 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
8141 if (*ext_val)
8142 {
8143 if (!v->ext_dependent)
8144 return arg0;
8145 }
8146 else
8147 {
8148 *ext_val = v->ext_dependent;
8149 return arg0;
8150 }
8151 return 0;
8154 default:
8155 do_default:
8156 /* If it isn't an induction variable, and it is invariant, we
8157 may be able to simplify things further by looking through
8158 the bits we just moved outside the loop. */
8159 if (loop_invariant_p (loop, x) == 1)
8161 struct movable *m;
8162 struct loop_movables *movables = LOOP_MOVABLES (loop);
8164 for (m = movables->head; m; m = m->next)
8165 if (rtx_equal_p (x, m->set_dest))
8167 /* Ok, we found a match. Substitute and simplify. */
8169 /* If we match another movable, we must use that, as
8170 this one is going away. */
8171 if (m->match)
8172 return simplify_giv_expr (loop, m->match->set_dest,
8173 ext_val, benefit);
8175 /* If consec is nonzero, this is a member of a group of
8176 instructions that were moved together. We handle this
8177 case only to the point of seeking to the last insn and
8178 looking for a REG_EQUAL. Fail if we don't find one. */
8179 if (m->consec != 0)
8181 int i = m->consec;
8182 tem = m->insn;
8183 do
8185 tem = NEXT_INSN (tem);
8187 while (--i > 0);
8189 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
8190 if (tem)
8191 tem = XEXP (tem, 0);
8193 else
8195 tem = single_set (m->insn);
8196 if (tem)
8197 tem = SET_SRC (tem);
8200 if (tem)
8202 /* What we are most interested in is pointer
8203 arithmetic on invariants -- only take
8204 patterns we may be able to do something with. */
8205 if (GET_CODE (tem) == PLUS
8206 || GET_CODE (tem) == MULT
8207 || GET_CODE (tem) == ASHIFT
8208 || GET_CODE (tem) == CONST_INT
8209 || GET_CODE (tem) == SYMBOL_REF)
8211 tem = simplify_giv_expr (loop, tem, ext_val,
8212 benefit);
8213 if (tem)
8214 return tem;
8216 else if (GET_CODE (tem) == CONST
8217 && GET_CODE (XEXP (tem, 0)) == PLUS
8218 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
8219 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
8221 tem = simplify_giv_expr (loop, XEXP (tem, 0),
8222 ext_val, benefit);
8223 if (tem)
8224 return tem;
8227 break;
8230 break;
8233 /* Fall through to general case. */
8234 default:
8235 /* If invariant, return as USE (unless CONST_INT).
8236 Otherwise, not giv. */
8237 if (GET_CODE (x) == USE)
8238 x = XEXP (x, 0);
8240 if (loop_invariant_p (loop, x) == 1)
8242 if (GET_CODE (x) == CONST_INT)
8243 return x;
8244 if (GET_CODE (x) == CONST
8245 && GET_CODE (XEXP (x, 0)) == PLUS
8246 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8247 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
8248 x = XEXP (x, 0);
8249 return gen_rtx_USE (mode, x);
8251 else
8252 return 0;
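/* Editorial sketch (not part of loop.c): every expression accepted above
   denotes the linear function mult * biv + add, so the MINUS, NEG, NOT
   and ASHIFT rewrites are plain arithmetic on a (mult, add) pair.  This
   standalone model uses constant coefficients; the real code manipulates
   rtx and wraps invariants in USEs.  */
struct lin_sketch { long mult, add; };	/* denotes mult * biv + add */

static struct lin_sketch
lin_neg (struct lin_sketch a)		/* -a  ==  a * (-1) */
{
  struct lin_sketch r = { -a.mult, -a.add };
  return r;
}

static struct lin_sketch
lin_minus (struct lin_sketch a, struct lin_sketch b)  /* a - b == a + b*(-1) */
{
  struct lin_sketch r = { a.mult - b.mult, a.add - b.add };
  return r;
}

static struct lin_sketch
lin_not (struct lin_sketch a)		/* ~a  ==  -a - 1 */
{
  struct lin_sketch r = lin_neg (a);
  r.add -= 1;
  return r;
}

static struct lin_sketch
lin_ashift (struct lin_sketch a, int s)	/* a << s  ==  a * (1 << s) */
{
  struct lin_sketch r = { a.mult << s, a.add << s };
  return r;
}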
8256 /* This routine folds invariants such that there is only ever one
8257 CONST_INT in the summation. It is only used by simplify_giv_expr. */
8259 static rtx
8260 sge_plus_constant (rtx x, rtx c)
8262 if (GET_CODE (x) == CONST_INT)
8263 return GEN_INT (INTVAL (x) + INTVAL (c));
8264 else if (GET_CODE (x) != PLUS)
8265 return gen_rtx_PLUS (GET_MODE (x), x, c);
8266 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8268 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8269 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
8271 else if (GET_CODE (XEXP (x, 0)) == PLUS
8272 || GET_CODE (XEXP (x, 1)) != PLUS)
8274 return gen_rtx_PLUS (GET_MODE (x),
8275 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
8277 else
8279 return gen_rtx_PLUS (GET_MODE (x),
8280 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
8284 static rtx
8285 sge_plus (enum machine_mode mode, rtx x, rtx y)
8287 while (GET_CODE (y) == PLUS)
8289 rtx a = XEXP (y, 0);
8290 if (GET_CODE (a) == CONST_INT)
8291 x = sge_plus_constant (x, a);
8292 else
8293 x = gen_rtx_PLUS (mode, x, a);
8294 y = XEXP (y, 1);
8296 if (GET_CODE (y) == CONST_INT)
8297 x = sge_plus_constant (x, y);
8298 else
8299 x = gen_rtx_PLUS (mode, x, y);
8300 return x;
8303 /* Help detect a giv that is calculated by several consecutive insns;
8304 for example,
8305 giv = biv * M
8306 giv = giv + A
8307 The caller has already identified the first insn P as having a giv as dest;
8308 we check that all other insns that set the same register follow
8309 immediately after P, that they alter nothing else,
8310 and that the result of the last is still a giv.
8312 The value is 0 if the reg set in P is not really a giv.
8313 Otherwise, the value is the amount gained by eliminating
8314 all the consecutive insns that compute the value.
8316 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
8317 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
8319 The coefficients of the ultimate giv value are stored in
8320 *MULT_VAL and *ADD_VAL. */
8322 static int
8323 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
8324 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
8325 rtx *ext_val, rtx *last_consec_insn)
8327 struct loop_ivs *ivs = LOOP_IVS (loop);
8328 struct loop_regs *regs = LOOP_REGS (loop);
8329 int count;
8330 enum rtx_code code;
8331 int benefit;
8332 rtx temp;
8333 rtx set;
8335 /* Indicate that this is a giv so that we can update the value produced in
8336 each insn of the multi-insn sequence.
8338 This induction structure will be used only by the call to
8339 general_induction_var below, so we can allocate it on our stack.
8340 If this is a giv, our caller will replace the induct var entry with
8341 a new induction structure. */
8342 struct induction *v;
8344 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
8345 return 0;
8347 v = alloca (sizeof (struct induction));
8348 v->src_reg = src_reg;
8349 v->mult_val = *mult_val;
8350 v->add_val = *add_val;
8351 v->benefit = first_benefit;
8352 v->cant_derive = 0;
8353 v->derive_adjustment = 0;
8354 v->ext_dependent = NULL_RTX;
8356 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
8357 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
8359 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
8361 while (count > 0)
8363 p = NEXT_INSN (p);
8364 code = GET_CODE (p);
8366 /* If libcall, skip to end of call sequence. */
8367 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
8368 p = XEXP (temp, 0);
8370 if (code == INSN
8371 && (set = single_set (p))
8372 && REG_P (SET_DEST (set))
8373 && SET_DEST (set) == dest_reg
8374 && (general_induction_var (loop, SET_SRC (set), &src_reg,
8375 add_val, mult_val, ext_val, 0,
8376 &benefit, VOIDmode)
8377 /* Giv created by equivalent expression. */
8378 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
8379 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
8380 add_val, mult_val, ext_val, 0,
8381 &benefit, VOIDmode)))
8382 && src_reg == v->src_reg)
8384 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
8385 benefit += libcall_benefit (p);
8387 count--;
8388 v->mult_val = *mult_val;
8389 v->add_val = *add_val;
8390 v->benefit += benefit;
8392 else if (code != NOTE)
8394 /* Allow insns that set something other than this giv to a
8395 constant. Such insns are needed on machines which cannot
8396 include long constants and should not disqualify a giv. */
8397 if (code == INSN
8398 && (set = single_set (p))
8399 && SET_DEST (set) != dest_reg
8400 && CONSTANT_P (SET_SRC (set)))
8401 continue;
8403 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8404 return 0;
8408 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8409 *last_consec_insn = p;
8410 return v->benefit;
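/* Editorial sketch (hypothetical, not part of loop.c): the multi-insn
   shape consec_sets_giv recognizes -- consecutive sets of one register
   that still compose to a linear function of the biv.  */
static long
consec_giv_example (long i)
{
  long g;
  g = i * 4;		/* first set: mult_val 4, add_val 0 */
  g = g + 16;		/* consecutive set: overall still i * 4 + 16 */
  return g;
}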
8413 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8414 represented by G1. If no such expression can be found, or it is clear that
8415 it cannot possibly be a valid address, 0 is returned.
8417 To perform the computation, we note that
8418 G1 = x * v + a and
8419 G2 = y * v + b
8420 where `v' is the biv.
8422 So G2 = (y/x) * G1 + (b - a*y/x).
8424 Note that MULT = y/x.
8426 Update: A and B are now allowed to be additive expressions such that
8427 B contains all variables in A. That is, computing B-A will not require
8428 subtracting variables. */
8430 static rtx
8431 express_from_1 (rtx a, rtx b, rtx mult)
8433 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
8435 if (mult == const0_rtx)
8436 return b;
8438 /* If MULT is not 1, we cannot handle A with non-constants, since we
8439 would then be required to subtract multiples of the registers in A.
8440 This is theoretically possible, and may even apply to some Fortran
8441 constructs, but it is a lot of work and we do not attempt it here. */
8443 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
8444 return NULL_RTX;
8446 /* In general these structures are sorted top to bottom (down the PLUS
8447 chain), but not left to right across the PLUS. If B is a higher
8448 order giv than A, we can strip one level and recurse. If A is higher
8449 order, we'll eventually bail out, but won't know that until the end.
8450 If they are the same, we'll strip one level around this loop. */
8452 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
8454 rtx ra, rb, oa, ob, tmp;
8456 ra = XEXP (a, 0), oa = XEXP (a, 1);
8457 if (GET_CODE (ra) == PLUS)
8458 tmp = ra, ra = oa, oa = tmp;
8460 rb = XEXP (b, 0), ob = XEXP (b, 1);
8461 if (GET_CODE (rb) == PLUS)
8462 tmp = rb, rb = ob, ob = tmp;
8464 if (rtx_equal_p (ra, rb))
8465 /* We matched: remove one reg completely. */
8466 a = oa, b = ob;
8467 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
8468 /* An alternate match. */
8469 a = oa, b = rb;
8470 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
8471 /* An alternate match. */
8472 a = ra, b = ob;
8473 else
8475 /* Indicates an extra register in B. Strip one level from B and
8476 recurse, hoping B was the higher order expression. */
8477 ob = express_from_1 (a, ob, mult);
8478 if (ob == NULL_RTX)
8479 return NULL_RTX;
8480 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
8484 /* Here we are at the last level of A; go through the cases hoping to
8485 get rid of everything but a constant. */
8487 if (GET_CODE (a) == PLUS)
8489 rtx ra, oa;
8491 ra = XEXP (a, 0), oa = XEXP (a, 1);
8492 if (rtx_equal_p (oa, b))
8493 oa = ra;
8494 else if (!rtx_equal_p (ra, b))
8495 return NULL_RTX;
8497 if (GET_CODE (oa) != CONST_INT)
8498 return NULL_RTX;
8500 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
8502 else if (GET_CODE (a) == CONST_INT)
8504 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
8506 else if (CONSTANT_P (a))
8508 enum machine_mode mode_a = GET_MODE (a);
8509 enum machine_mode mode_b = GET_MODE (b);
8510 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
8511 return simplify_gen_binary (MINUS, mode, b, a);
8513 else if (GET_CODE (b) == PLUS)
8515 if (rtx_equal_p (a, XEXP (b, 0)))
8516 return XEXP (b, 1);
8517 else if (rtx_equal_p (a, XEXP (b, 1)))
8518 return XEXP (b, 0);
8519 else
8520 return NULL_RTX;
8522 else if (rtx_equal_p (a, b))
8523 return const0_rtx;
8525 return NULL_RTX;
8528 static rtx
8529 express_from (struct induction *g1, struct induction *g2)
8531 rtx mult, add;
8533 /* The value that G1 will be multiplied by must be a constant integer. Also,
8534 the only chance we have of getting a valid address is if y/x (see above
8535 for notation) is also an integer. */
8536 if (GET_CODE (g1->mult_val) == CONST_INT
8537 && GET_CODE (g2->mult_val) == CONST_INT)
8539 if (g1->mult_val == const0_rtx
8540 || (g1->mult_val == constm1_rtx
8541 && INTVAL (g2->mult_val)
8542 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
8543 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
8544 return NULL_RTX;
8545 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
8547 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
8548 mult = const1_rtx;
8549 else
8551 /* ??? Find out if the one is a multiple of the other? */
8552 return NULL_RTX;
8555 add = express_from_1 (g1->add_val, g2->add_val, mult);
8556 if (add == NULL_RTX)
8558 /* Failed. If we've got a multiplication factor between G1 and G2,
8559 scale G1's addend and try again. */
8560 if (INTVAL (mult) > 1)
8562 rtx g1_add_val = g1->add_val;
8563 if (GET_CODE (g1_add_val) == MULT
8564 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
8566 HOST_WIDE_INT m;
8567 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
8568 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
8569 XEXP (g1_add_val, 0), GEN_INT (m));
8571 else
8573 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
8574 mult);
8577 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
8580 if (add == NULL_RTX)
8581 return NULL_RTX;
8583 /* Form simplified final result. */
8584 if (mult == const0_rtx)
8585 return add;
8586 else if (mult == const1_rtx)
8587 mult = g1->dest_reg;
8588 else
8589 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
8591 if (add == const0_rtx)
8592 return mult;
8593 else
8595 if (GET_CODE (add) == PLUS
8596 && CONSTANT_P (XEXP (add, 1)))
8598 rtx tem = XEXP (add, 1);
8599 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
8600 add = tem;
8603 return gen_rtx_PLUS (g2->mode, mult, add);
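/* Editorial check (hypothetical scalar names, not part of loop.c): the
   identity behind express_from.  With G1 = x*v + a and MULT = y/x exact,
   MULT * G1 + (b - a*MULT) = y*v + b = G2.  */
static long
express_from_sketch (long g1, long x, long a, long y, long b)
{
  long mult = y / x;			/* assumes x divides y, as checked above */
  return mult * g1 + (b - a * mult);	/* equals y*v + b when g1 = x*v + a */
}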
8607 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8608 represented by G1. This indicates that G2 should be combined with G1 and
8609 that G2 can use (either directly or via an address expression) a register
8610 used to represent G1. */
8612 static rtx
8613 combine_givs_p (struct induction *g1, struct induction *g2)
8615 rtx comb, ret;
8617 /* With the introduction of ext dependent givs, we must be careful about
8618 modes. G2 must not use a wider mode than G1. */
8619 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
8620 return NULL_RTX;
8622 ret = comb = express_from (g1, g2);
8623 if (comb == NULL_RTX)
8624 return NULL_RTX;
8625 if (g1->mode != g2->mode)
8626 ret = gen_lowpart (g2->mode, comb);
8628 /* If these givs are identical, they can be combined. We use the results
8629 of express_from because the addends are not in a canonical form, so
8630 rtx_equal_p is a weaker test. */
8631 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
8632 combination to be the other way round. */
8633 if (comb == g1->dest_reg
8634 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
8636 return ret;
8639 /* If G2 can be expressed as a function of G1 and that function is valid
8640 as an address and no more expensive than using a register for G2,
8641 the expression of G2 in terms of G1 can be used. */
8642 if (ret != NULL_RTX
8643 && g2->giv_type == DEST_ADDR
8644 && memory_address_p (GET_MODE (g2->mem), ret))
8645 return ret;
8647 return NULL_RTX;
8650 /* See if BL is monotonic and has a constant per-iteration increment.
8651 Return the increment if so, otherwise return 0. */
8653 static HOST_WIDE_INT
8654 get_monotonic_increment (struct iv_class *bl)
8656 struct induction *v;
8657 rtx incr;
8659 /* Get the total increment and check that it is constant. */
8660 incr = biv_total_increment (bl);
8661 if (incr == 0 || GET_CODE (incr) != CONST_INT)
8662 return 0;
8664 for (v = bl->biv; v != 0; v = v->next_iv)
8666 if (GET_CODE (v->add_val) != CONST_INT)
8667 return 0;
8669 if (INTVAL (v->add_val) < 0 && INTVAL (incr) >= 0)
8670 return 0;
8672 if (INTVAL (v->add_val) > 0 && INTVAL (incr) <= 0)
8673 return 0;
8675 return INTVAL (incr);
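/* Editorial model (hypothetical, not part of loop.c): the test above in
   scalar form -- the total per-iteration increment is returned only if
   every individual step agrees with it in sign.  */
static long
monotonic_increment_sketch (const long *steps, int n)
{
  long total = 0;
  int i;

  for (i = 0; i < n; i++)
    total += steps[i];
  for (i = 0; i < n; i++)
    if ((steps[i] < 0 && total >= 0) || (steps[i] > 0 && total <= 0))
      return 0;				/* mixed signs: not monotonic */
  return total;
}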
8679 /* Subroutine of biv_fits_mode_p. Return true if biv BL, when biased by
8680 BIAS, will never exceed the unsigned range of MODE. LOOP is the loop
8681 to which the biv belongs and INCR is its per-iteration increment. */
8683 static bool
8684 biased_biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8685 HOST_WIDE_INT incr, enum machine_mode mode,
8686 unsigned HOST_WIDE_INT bias)
8688 unsigned HOST_WIDE_INT initial, maximum, span, delta;
8690 /* We need to be able to manipulate MODE-size constants. */
8691 if (HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode))
8692 return false;
8694 /* The number of loop iterations must be constant. */
8695 if (LOOP_INFO (loop)->n_iterations == 0)
8696 return false;
8698 /* So must the biv's initial value. */
8699 if (bl->initial_value == 0 || GET_CODE (bl->initial_value) != CONST_INT)
8700 return false;
8702 initial = bias + INTVAL (bl->initial_value);
8703 maximum = GET_MODE_MASK (mode);
8705 /* Make sure that the initial value is within range. */
8706 if (initial > maximum)
8707 return false;
8709 /* Set up DELTA and SPAN such that the number of iterations * DELTA
8710 (calculated to arbitrary precision) must be <= SPAN. */
8711 if (incr < 0)
8713 delta = -incr;
8714 span = initial;
8716 else
8718 delta = incr;
8719 /* Handle the special case in which MAXIMUM is the largest
8720 unsigned HOST_WIDE_INT and INITIAL is 0. */
8721 if (maximum + 1 == initial)
8722 span = LOOP_INFO (loop)->n_iterations * delta;
8723 else
8724 span = maximum + 1 - initial;
8726 return (span / LOOP_INFO (loop)->n_iterations >= delta);
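/* Editorial model (hypothetical, not part of loop.c): the unsigned,
   positive-increment case of the test above.  It checks
   n_iterations * incr <= maximum + 1 - initial by division, so the
   product cannot overflow; the real code additionally handles biasing,
   negative increments and the maximum + 1 == initial wrap case.  */
static int
biv_fits_unsigned_sketch (unsigned long initial, unsigned long incr,
                          unsigned long n_iterations, unsigned long maximum)
{
  unsigned long span;

  if (n_iterations == 0 || initial > maximum)
    return 0;
  span = maximum + 1 - initial;		/* head-room before wrapping MODE */
  return span / n_iterations >= incr;
}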
8730 /* Return true if biv BL will never exceed the bounds of MODE. LOOP is
8731 the loop to which BL belongs and INCR is its per-iteration increment.
8732 UNSIGNEDP is true if the biv should be treated as unsigned. */
8734 static bool
8735 biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8736 HOST_WIDE_INT incr, enum machine_mode mode, bool unsignedp)
8738 struct loop_info *loop_info;
8739 unsigned HOST_WIDE_INT bias;
8741 /* A biv's value will always be limited to its natural mode.
8742 Larger modes will observe the same wrap-around. */
8743 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (bl->biv->src_reg)))
8744 mode = GET_MODE (bl->biv->src_reg);
8746 loop_info = LOOP_INFO (loop);
8748 bias = (unsignedp ? 0 : (GET_MODE_MASK (mode) >> 1) + 1);
8749 if (biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8750 return true;
8752 if (mode == GET_MODE (bl->biv->src_reg)
8753 && bl->biv->src_reg == loop_info->iteration_var
8754 && loop_info->comparison_value
8755 && loop_invariant_p (loop, loop_info->comparison_value))
8757 /* If the increment is +1, and the exit test is a <, the BIV
8758 cannot overflow. (For <=, we have the problematic case that
8759 the comparison value might be the maximum value of the range.) */
8760 if (incr == 1)
8762 if (loop_info->comparison_code == LT)
8763 return true;
8764 if (loop_info->comparison_code == LTU && unsignedp)
8765 return true;
8768 /* Likewise for increment -1 and exit test >. */
8769 if (incr == -1)
8771 if (loop_info->comparison_code == GT)
8772 return true;
8773 if (loop_info->comparison_code == GTU && unsignedp)
8774 return true;
8777 return false;
8781 /* Given that X is an extension or truncation of BL, return true
8782 if it is unaffected by overflow. LOOP is the loop to which
8783 BL belongs and INCR is its per-iteration increment. */
8785 static bool
8786 extension_within_bounds_p (const struct loop *loop, struct iv_class *bl,
8787 HOST_WIDE_INT incr, rtx x)
8789 enum machine_mode mode;
8790 bool signedp, unsignedp;
8792 switch (GET_CODE (x))
8794 case SIGN_EXTEND:
8795 case ZERO_EXTEND:
8796 mode = GET_MODE (XEXP (x, 0));
8797 signedp = (GET_CODE (x) == SIGN_EXTEND);
8798 unsignedp = (GET_CODE (x) == ZERO_EXTEND);
8799 break;
8801 case TRUNCATE:
8802 /* We don't know whether this value is being used as signed
8803 or unsigned, so check the conditions for both. */
8804 mode = GET_MODE (x);
8805 signedp = unsignedp = true;
8806 break;
8808 default:
8809 gcc_unreachable ();
8812 return ((!signedp || biv_fits_mode_p (loop, bl, incr, mode, false))
8813 && (!unsignedp || biv_fits_mode_p (loop, bl, incr, mode, true)));
8817 /* Check each extension dependent giv in this class to see if its
8818 root biv is safe from wrapping in the interior mode, which would
8819 make the giv illegal. */
8821 static void
8822 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
8824 struct induction *v;
8825 HOST_WIDE_INT incr;
8827 incr = get_monotonic_increment (bl);
8829 /* Invalidate givs that fail the tests. */
8830 for (v = bl->giv; v; v = v->next_iv)
8831 if (v->ext_dependent)
8833 if (incr != 0
8834 && extension_within_bounds_p (loop, bl, incr, v->ext_dependent))
8836 if (loop_dump_stream)
8837 fprintf (loop_dump_stream,
8838 "Verified ext dependent giv at %d of reg %d\n",
8839 INSN_UID (v->insn), bl->regno);
8841 else
8843 if (loop_dump_stream)
8844 fprintf (loop_dump_stream,
8845 "Failed ext dependent giv at %d\n",
8846 INSN_UID (v->insn));
8848 v->ignore = 1;
8849 bl->all_reduced = 0;
8854 /* Generate a version of VALUE in a mode appropriate for initializing V. */
8856 static rtx
8857 extend_value_for_giv (struct induction *v, rtx value)
8859 rtx ext_dep = v->ext_dependent;
8861 if (! ext_dep)
8862 return value;
8864 /* Recall that check_ext_dependent_givs verified that the known bounds
8865 of a biv did not overflow or wrap with respect to the extension for
8866 the giv. Therefore, constants need no additional adjustment. */
8867 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
8868 return value;
8870 /* Otherwise, we must adjust the value to compensate for the
8871 differing modes of the biv and the giv. */
8872 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
8875 struct combine_givs_stats
8877 int giv_number;
8878 int total_benefit;
8881 static int
8882 cmp_combine_givs_stats (const void *xp, const void *yp)
8884 const struct combine_givs_stats * const x =
8885 (const struct combine_givs_stats *) xp;
8886 const struct combine_givs_stats * const y =
8887 (const struct combine_givs_stats *) yp;
8888 int d;
8889 d = y->total_benefit - x->total_benefit;
8890 /* Stabilize the sort. */
8891 if (!d)
8892 d = x->giv_number - y->giv_number;
8893 return d;
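/* Editorial note (hypothetical usage, not part of loop.c): qsort makes
   no stability guarantee, so equal benefits are tie-broken on giv_number
   above to keep the combination order deterministic across hosts.  */
static void
sort_stats_sketch (struct combine_givs_stats *stats, int giv_count)
{
  qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
  /* stats[0] now names the giv with the highest total_benefit; ties
     appear in ascending giv_number order.  */
}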
8896 /* Check all pairs of givs for iv_class BL and see if any can be combined with
8897 any other. If so, point SAME to the giv combined with and set NEW_REG to
8898 be an expression (in terms of the other giv's DEST_REG) equivalent to the
8899 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
8901 static void
8902 combine_givs (struct loop_regs *regs, struct iv_class *bl)
8904 /* Additional benefit to add for being combined multiple times. */
8905 const int extra_benefit = 3;
8907 struct induction *g1, *g2, **giv_array;
8908 int i, j, k, giv_count;
8909 struct combine_givs_stats *stats;
8910 rtx *can_combine;
8912 /* Count givs, because bl->giv_count is incorrect here. */
8913 giv_count = 0;
8914 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8915 if (!g1->ignore)
8916 giv_count++;
8918 giv_array = alloca (giv_count * sizeof (struct induction *));
8919 i = 0;
8920 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8921 if (!g1->ignore)
8922 giv_array[i++] = g1;
8924 stats = xcalloc (giv_count, sizeof (*stats));
8925 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
8927 for (i = 0; i < giv_count; i++)
8929 int this_benefit;
8930 rtx single_use;
8932 g1 = giv_array[i];
8933 stats[i].giv_number = i;
8935 /* If a DEST_REG GIV is used only once, do not allow it to combine
8936 with anything, for in doing so we will gain nothing that cannot
8937 be had by simply letting the GIV with which we would have combined
8938 be reduced on its own. The lossage shows up in particular with
8939 DEST_ADDR targets on hosts with reg+reg addressing, though it can
8940 be seen elsewhere as well. */
8941 if (g1->giv_type == DEST_REG
8942 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
8943 && single_use != const0_rtx)
8944 continue;
8946 this_benefit = g1->benefit;
8947 /* Add an additional weight for zero addends. */
8948 if (g1->no_const_addval)
8949 this_benefit += 1;
8951 for (j = 0; j < giv_count; j++)
8953 rtx this_combine;
8955 g2 = giv_array[j];
8956 if (g1 != g2
8957 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
8959 can_combine[i * giv_count + j] = this_combine;
8960 this_benefit += g2->benefit + extra_benefit;
8963 stats[i].total_benefit = this_benefit;
8966 /* Iterate, combining until we can't. */
8967 restart:
8968 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
8970 if (loop_dump_stream)
8972 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
8973 for (k = 0; k < giv_count; k++)
8975 g1 = giv_array[stats[k].giv_number];
8976 if (!g1->combined_with && !g1->same)
8977 fprintf (loop_dump_stream, " {%d, %d}",
8978 INSN_UID (giv_array[stats[k].giv_number]->insn),
8979 stats[k].total_benefit);
8981 putc ('\n', loop_dump_stream);
8984 for (k = 0; k < giv_count; k++)
8986 int g1_add_benefit = 0;
8988 i = stats[k].giv_number;
8989 g1 = giv_array[i];
8991 /* If it has already been combined, skip. */
8992 if (g1->combined_with || g1->same)
8993 continue;
8995 for (j = 0; j < giv_count; j++)
8997 g2 = giv_array[j];
8998 if (g1 != g2 && can_combine[i * giv_count + j]
8999 /* If it has already been combined, skip. */
9000 && ! g2->same && ! g2->combined_with)
9002 int l;
9004 g2->new_reg = can_combine[i * giv_count + j];
9005 g2->same = g1;
9006 /* For the destination, we may now replace a register by a mem
9007 expression. This changes the costs considerably, so add the
9008 compensation. */
9009 if (g2->giv_type == DEST_ADDR)
9010 g2->benefit = (g2->benefit + reg_address_cost
9011 - address_cost (g2->new_reg,
9012 GET_MODE (g2->mem)));
9013 g1->combined_with++;
9014 g1->lifetime += g2->lifetime;
9016 g1_add_benefit += g2->benefit;
9018 /* ??? The new final_[bg]iv_value code does a much better job
9019 of finding replaceable giv's, and hence this code may no
9020 longer be necessary. */
9021 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
9022 g1_add_benefit -= copy_cost;
9024 /* To help optimize the next set of combinations, remove
9025 this giv from the benefits of other potential mates. */
9026 for (l = 0; l < giv_count; ++l)
9028 int m = stats[l].giv_number;
9029 if (can_combine[m * giv_count + j])
9030 stats[l].total_benefit -= g2->benefit + extra_benefit;
9033 if (loop_dump_stream)
9034 fprintf (loop_dump_stream,
9035 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
9036 INSN_UID (g2->insn), INSN_UID (g1->insn),
9037 g1->benefit, g1_add_benefit, g1->lifetime);
9041 /* To help optimize the next set of combinations, remove
9042 this giv from the benefits of other potential mates. */
9043 if (g1->combined_with)
9045 for (j = 0; j < giv_count; ++j)
9047 int m = stats[j].giv_number;
9048 if (can_combine[m * giv_count + i])
9049 stats[j].total_benefit -= g1->benefit + extra_benefit;
9052 g1->benefit += g1_add_benefit;
9054 /* We've finished with this giv, and everything it touched.
9055 Restart the combination so that proper weights for the
9056 rest of the givs are properly taken into account. */
9057 /* ??? Ideally we would compact the arrays at this point, so
9058 as to not cover old ground. But sanely compacting
9059 can_combine is tricky. */
9060 goto restart;
9064 /* Clean up. */
9065 free (stats);
9066 free (can_combine);
9069 /* Generate sequence for REG = B * M + A. B is the initial value of
9070 the basic induction variable, M a multiplicative constant, A an
9071 additive constant and REG the destination register. */
9073 static rtx
9074 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
9076 rtx seq;
9077 rtx result;
9079 start_sequence ();
9080 /* Use unsigned arithmetic. */
9081 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9082 if (reg != result)
9083 emit_move_insn (reg, result);
9084 seq = get_insns ();
9085 end_sequence ();
9087 return seq;
/* Update registers created in insn sequence SEQ.  */

static void
loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
{
  rtx insn;

  /* Update register info for alias analysis.  */

  insn = seq;
  while (insn != NULL_RTX)
    {
      rtx set = single_set (insn);

      if (set && REG_P (SET_DEST (set)))
	record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);

      insn = NEXT_INSN (insn);
    }
}

/* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A.  B
   is the initial value of the basic induction variable, M a
   multiplicative constant, A an additive constant and REG the
   destination register.  */

static void
loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
			      rtx reg, basic_block before_bb, rtx before_insn)
{
  rtx seq;

  if (! before_insn)
    {
      loop_iv_add_mult_hoist (loop, b, m, a, reg);
      return;
    }

  /* Use copy_rtx to prevent unexpected sharing of these rtx.  */
  seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);

  /* Increase the lifetime of any invariants moved further in code.  */
  update_reg_last_use (a, before_insn);
  update_reg_last_use (b, before_insn);
  update_reg_last_use (m, before_insn);

  /* It is possible that the expansion created lots of new registers.
     Iterate over the sequence we just created and record them all.  We
     must do this before inserting the sequence.  */
  loop_regs_update (loop, seq);

  loop_insn_emit_before (loop, before_bb, before_insn, seq);
}

/* Emit insns after the loop to set REG = B * M + A.  B is the
   initial value of the basic induction variable, M a multiplicative
   constant, A an additive constant and REG the destination
   register.  */

static void
loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
{
  rtx seq;

  /* Use copy_rtx to prevent unexpected sharing of these rtx.  */
  seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);

  /* Increase the lifetime of any invariants moved further in code.
     ??? Is this really necessary?  */
  update_reg_last_use (a, loop->sink);
  update_reg_last_use (b, loop->sink);
  update_reg_last_use (m, loop->sink);

  /* It is possible that the expansion created lots of new registers.
     Iterate over the sequence we just created and record them all.  We
     must do this before inserting the sequence.  */
  loop_regs_update (loop, seq);

  loop_insn_sink (loop, seq);
}

/* Emit insns in the loop pre-header to set REG = B * M + A.  B is the
   initial value of the basic induction variable, M a multiplicative
   constant, A an additive constant and REG the destination
   register.  */

static void
loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
{
  rtx seq;

  /* Use copy_rtx to prevent unexpected sharing of these rtx.  */
  seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);

  /* It is possible that the expansion created lots of new registers.
     Iterate over the sequence we just created and record them all.  We
     must do this before inserting the sequence.  */
  loop_regs_update (loop, seq);

  loop_insn_hoist (loop, seq);
}

/* Similar to gen_add_mult, but compute the cost rather than generating
   the sequence.  */

static int
iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
{
  int cost = 0;
  rtx last, result;

  start_sequence ();
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
  if (reg != result)
    emit_move_insn (reg, result);
  last = get_last_insn ();
  while (last)
    {
      rtx t = single_set (last);
      if (t)
	cost += rtx_cost (SET_SRC (t), SET);
      last = PREV_INSN (last);
    }
  end_sequence ();
  return cost;
}

/* Test whether A * B can be computed without
   an actual multiply insn.  Value is 1 if so.

   ??? This function stinks because it generates a ton of wasted RTL
   ??? and as a result fragments GC memory to no end.  There are other
   ??? places in the compiler which are invoked a lot and do the same
   ??? thing, generate wasted RTL just to see if something is possible.  */

static int
product_cheap_p (rtx a, rtx b)
{
  rtx tmp;
  int win, n_insns;

  /* If only one is constant, make it B.  */
  if (GET_CODE (a) == CONST_INT)
    tmp = a, a = b, b = tmp;

  /* If A is still a constant, then both were constant, so we don't need
     a multiply.  */
  if (GET_CODE (a) == CONST_INT)
    return 1;

  /* If B is not a constant, then neither operand is, so we would need
     a multiply.  */
  if (GET_CODE (b) != CONST_INT)
    return 0;

  /* One operand is constant, so might not need multiply insn.  Generate the
     code for the multiply and see if a call or multiply, or long sequence
     of insns is generated.  */

  start_sequence ();
  expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
  tmp = get_insns ();
  end_sequence ();

  win = 1;
  if (tmp == NULL_RTX)
    ;
  else if (INSN_P (tmp))
    {
      n_insns = 0;
      while (tmp != NULL_RTX)
	{
	  rtx next = NEXT_INSN (tmp);

	  if (++n_insns > 3
	      || !NONJUMP_INSN_P (tmp)
	      || (GET_CODE (PATTERN (tmp)) == SET
		  && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
	      || (GET_CODE (PATTERN (tmp)) == PARALLEL
		  && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
		  && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
	    {
	      win = 0;
	      break;
	    }

	  tmp = next;
	}
    }
  else if (GET_CODE (tmp) == SET
	   && GET_CODE (SET_SRC (tmp)) == MULT)
    win = 0;
  else if (GET_CODE (tmp) == PARALLEL
	   && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
    win = 0;

  return win;
}
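
/* Illustrative example (not part of the original source): on most
   targets a multiply by 5 expands to something like

       tmp = x << 2;
       res = tmp + x;

   which is a short sequence of cheap insns, so product_cheap_p
   returns 1.  A multiply by a large "awkward" constant typically
   expands to a real mult insn (or a libcall), so it returns 0.  */
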
/* Check to see if loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
   Also try reversing an increment loop to a decrement loop
   to see if the optimization can be performed.
   Value is nonzero if optimization was performed.  */

/* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n into a loop
   which decrements from n to 0.  A loop that decrements to zero is usually
   faster than one that increments from zero.  */

/* ??? This could be rewritten to use some of the loop unrolling procedures,
   such as approx_final_value, biv_total_increment, loop_iterations, and
   final_[bg]iv_value.  */
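
/* For example (illustrative only): a loop

       for (i = 0; i < n; i++)
	 body;

   where I is used for nothing but counting can be rewritten as

       for (i = n; --i >= 0; )
	 body;

   so that the exit test compares against zero, or maps directly onto
   a decrement-and-branch instruction.  */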

static int
check_dbra_loop (struct loop *loop, int insn_count)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;
  rtx reg;
  enum machine_mode mode;
  rtx jump_label;
  rtx final_value;
  rtx start_value;
  rtx new_add_val;
  rtx comparison;
  rtx before_comparison;
  rtx p;
  rtx jump;
  rtx first_compare;
  int compare_and_branch;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;

  /* If last insn is a conditional branch, and the insn before tests a
     register value, try to optimize it.  Otherwise, we can't do anything.  */

  jump = PREV_INSN (loop_end);
  comparison = get_condition_for_loop (loop, jump);
  if (comparison == 0)
    return 0;
  if (!onlyjump_p (jump))
    return 0;

  /* Try to compute whether the compare/branch at the loop end is one or
     two instructions.  */
  get_condition (jump, &first_compare, false, true);
  if (first_compare == jump)
    compare_and_branch = 1;
  else if (first_compare == prev_nonnote_insn (jump))
    compare_and_branch = 2;
  else
    return 0;

  {
    /* If more than one condition is present to control the loop, then
       do not proceed, as this function does not know how to rewrite
       loop tests with more than one condition.

       Look backwards from the first insn in the last comparison
       sequence and see if we've got another comparison sequence.  */

    rtx jump1;
    if ((jump1 = prev_nonnote_insn (first_compare))
	&& JUMP_P (jump1))
      return 0;
  }

  /* Check all of the bivs to see if the compare uses one of them.
     Skip biv's set more than once because we can't guarantee that
     it will be zero on the last iteration.  Also skip if the biv is
     used between its update and the test insn.  */

  for (bl = ivs->list; bl; bl = bl->next)
    {
      if (bl->biv_count == 1
	  && ! bl->biv->maybe_multiple
	  && bl->biv->dest_reg == XEXP (comparison, 0)
	  && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
				   first_compare))
	break;
    }

  /* Try swapping the comparison to identify a suitable biv.  */
  if (!bl)
    for (bl = ivs->list; bl; bl = bl->next)
      if (bl->biv_count == 1
	  && ! bl->biv->maybe_multiple
	  && bl->biv->dest_reg == XEXP (comparison, 1)
	  && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
				   first_compare))
	{
	  comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
				       VOIDmode,
				       XEXP (comparison, 1),
				       XEXP (comparison, 0));
	  break;
	}

  if (! bl)
    return 0;

  /* Look for the case where the basic induction variable is always
     nonnegative, and equals zero on the last iteration.
     In this case, add a reg_note REG_NONNEG, which allows the
     m68k DBRA instruction to be used.  */
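
  /* For example (illustrative only): a biv with initial value 90 and
     add_val -10 takes the values 90, 80, ..., 10, 0; it stays
     nonnegative and hits exactly zero because 90 % 10 == 0.  An
     initial value of 95 would step past zero to -5, so the note could
     not be added.  */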

  if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
       || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
      && GET_CODE (bl->biv->add_val) == CONST_INT
      && INTVAL (bl->biv->add_val) < 0)
    {
      /* Initial value must be greater than 0,
	 init_val % -dec_value == 0 to ensure that it equals zero on
	 the last iteration.  */

      if (GET_CODE (bl->initial_value) == CONST_INT
	  && INTVAL (bl->initial_value) > 0
	  && (INTVAL (bl->initial_value)
	      % (-INTVAL (bl->biv->add_val))) == 0)
	{
	  /* Register always nonnegative, add REG_NOTE to branch.  */
	  if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
	    REG_NOTES (jump)
	      = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
				   REG_NOTES (jump));
	  bl->nonneg = 1;

	  return 1;
	}

      /* If the decrement is 1 and the value was tested as >= 0 before
	 the loop, then we can safely optimize.  */
      for (p = loop_start; p; p = PREV_INSN (p))
	{
	  if (LABEL_P (p))
	    break;
	  if (!JUMP_P (p))
	    continue;

	  before_comparison = get_condition_for_loop (loop, p);
	  if (before_comparison
	      && XEXP (before_comparison, 0) == bl->biv->dest_reg
	      && (GET_CODE (before_comparison) == LT
		  || GET_CODE (before_comparison) == LTU)
	      && XEXP (before_comparison, 1) == const0_rtx
	      && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
	      && INTVAL (bl->biv->add_val) == -1)
	    {
	      if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
		REG_NOTES (jump)
		  = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
				       REG_NOTES (jump));
	      bl->nonneg = 1;

	      return 1;
	    }
	}
    }
  else if (GET_CODE (bl->biv->add_val) == CONST_INT
	   && INTVAL (bl->biv->add_val) > 0)
    {
      /* Try to change the increment to a decrement, so we can apply
	 the above optimization.  */
      /* Can do this if:
	 all registers modified are induction variables or invariant,
	 all memory references have non-overlapping addresses
	 (obviously true if only one write)
	 allow 2 insns for the compare/jump at the end of the loop.  */
      /* Also, we must avoid any instructions which use both the reversed
	 biv and another biv.  Such instructions will fail if the loop is
	 reversed.  We meet this condition by requiring that either
	 no_use_except_counting is true, or else that there is only
	 one biv.  */
      int num_nonfixed_reads = 0;
      /* 1 if the iteration var is used only to count iterations.  */
      int no_use_except_counting = 0;
      /* 1 if the loop has no memory store, or it has a single memory store
	 which is reversible.  */
      int reversible_mem_store = 1;

      if (bl->giv_count == 0
	  && !loop->exit_count
	  && !loop_info->has_multiple_exit_targets)
	{
	  rtx bivreg = regno_reg_rtx[bl->regno];
	  struct iv_class *blt;

	  /* If there are no givs for this biv, and the only exit is the
	     fall through at the end of the loop, then
	     see if perhaps there are no uses except to count.  */
	  no_use_except_counting = 1;
	  for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
	    if (INSN_P (p))
	      {
		rtx set = single_set (p);

		if (set && REG_P (SET_DEST (set))
		    && REGNO (SET_DEST (set)) == bl->regno)
		  /* An insn that sets the biv is okay.  */
		  ;
		else if (!reg_mentioned_p (bivreg, PATTERN (p)))
		  /* An insn that doesn't mention the biv is okay.  */
		  ;
		else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
			 || p == prev_nonnote_insn (loop_end))
		  {
		    /* If either of these insns uses the biv and sets a pseudo
		       that has more than one usage, then the biv has uses
		       other than counting since it's used to derive a value
		       that is used more than one time.  */
		    note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
				 regs);
		    if (regs->multiple_uses)
		      {
			no_use_except_counting = 0;
			break;
		      }
		  }
		else
		  {
		    no_use_except_counting = 0;
		    break;
		  }
	      }

	  /* A biv has uses besides counting if it is used to set
	     another biv.  */
	  for (blt = ivs->list; blt; blt = blt->next)
	    if (blt->init_set
		&& reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
	      {
		no_use_except_counting = 0;
		break;
	      }
	}

      if (no_use_except_counting)
	/* No need to worry about MEMs.  */
	;
      else if (loop_info->num_mem_sets <= 1)
	{
	  for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
	    if (INSN_P (p))
	      num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));

	  /* If the loop has a single store, and the destination address is
	     invariant, then we can't reverse the loop, because this address
	     might then have the wrong value at loop exit.
	     This would work if the source was invariant also; however, in
	     that case the insn should have been moved out of the loop.  */

	  if (loop_info->num_mem_sets == 1)
	    {
	      struct induction *v;

	      /* If we could prove that each of the memory locations
		 written to was different, then we could reverse the
		 store -- but we don't presently have any way of
		 knowing that.  */
	      reversible_mem_store = 0;

	      /* If the store depends on a register that is set after the
		 store, it depends on the initial value, and is thus not
		 reversible.  */
	      for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
		{
		  if (v->giv_type == DEST_REG
		      && reg_mentioned_p (v->dest_reg,
					  PATTERN (loop_info->first_loop_store_insn))
		      && loop_insn_first_p (loop_info->first_loop_store_insn,
					    v->insn))
		    reversible_mem_store = 0;
		}
	    }
	}
      else
	return 0;

      /* This code only acts for innermost loops.  Also it simplifies
	 the memory address check by only reversing loops with
	 zero or one memory access.
	 Two memory accesses could involve parts of the same array,
	 and that can't be reversed.
	 If the biv is used only for counting, then we don't need to worry
	 about all these things.  */

      if ((num_nonfixed_reads <= 1
	   && ! loop_info->has_nonconst_call
	   && ! loop_info->has_prefetch
	   && ! loop_info->has_volatile
	   && reversible_mem_store
	   && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
	       + num_unmoved_movables (loop) + compare_and_branch == insn_count)
	   && (bl == ivs->list && bl->next == 0))
	  || (no_use_except_counting && ! loop_info->has_prefetch))
	{
	  rtx tem;

	  /* Loop can be reversed.  */
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Can reverse loop\n");

	  /* Now check other conditions:

	     The increment must be a constant, as must the initial value,
	     and the comparison code must be LT.

	     This test can probably be improved since +/- 1 in the constant
	     can be obtained by changing LT to LE and vice versa; this is
	     confusing.  */

	  if (comparison
	      /* for constants, LE gets turned into LT */
	      && (GET_CODE (comparison) == LT
		  || (GET_CODE (comparison) == LE
		      && no_use_except_counting)
		  || GET_CODE (comparison) == LTU))
	    {
	      HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
	      rtx initial_value, comparison_value;
	      int nonneg = 0;
	      enum rtx_code cmp_code;
	      int comparison_const_width;
	      unsigned HOST_WIDE_INT comparison_sign_mask;
	      bool keep_first_compare;

	      add_val = INTVAL (bl->biv->add_val);
	      comparison_value = XEXP (comparison, 1);
	      if (GET_MODE (comparison_value) == VOIDmode)
		comparison_const_width
		  = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
	      else
		comparison_const_width
		  = GET_MODE_BITSIZE (GET_MODE (comparison_value));
	      if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
		comparison_const_width = HOST_BITS_PER_WIDE_INT;
	      comparison_sign_mask
		= (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);

	      /* If the comparison value is not a loop invariant, then we
		 cannot reverse this loop.

		 ??? If the insns which initialize the comparison value as
		 a whole compute an invariant result, then we could move
		 them out of the loop and proceed with loop reversal.  */
	      if (! loop_invariant_p (loop, comparison_value))
		return 0;

	      if (GET_CODE (comparison_value) == CONST_INT)
		comparison_val = INTVAL (comparison_value);
	      initial_value = bl->initial_value;

	      /* Normalize the initial value if it is an integer and
		 has no other use except as a counter.  This will allow
		 a few more loops to be reversed.  */
	      if (no_use_except_counting
		  && GET_CODE (comparison_value) == CONST_INT
		  && GET_CODE (initial_value) == CONST_INT)
		{
		  comparison_val = comparison_val - INTVAL (bl->initial_value);
		  /* The code below requires comparison_val to be a multiple
		     of add_val in order to do the loop reversal, so
		     round up comparison_val to a multiple of add_val.
		     Since comparison_value is constant, we know that the
		     current comparison code is LT.  */
		  comparison_val = comparison_val + add_val - 1;
		  comparison_val
		    -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
		  /* We postpone overflow checks for COMPARISON_VAL here;
		     even if there is an overflow, we might still be able to
		     reverse the loop, if converting the loop exit test to
		     NE is possible.  */
		  initial_value = const0_rtx;
		}
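
	      /* Worked example (illustrative only): with initial value 4,
		 add_val 3 and comparison value 17, we get
		 comparison_val = 17 - 4 = 13, which is rounded up to the
		 next multiple of add_val: 13 + 3 - 1 = 15, and
		 15 - (15 % 3) = 15.  The normalized loop counts from 0
		 towards 15 in steps of 3, i.e. it still iterates five
		 times.  */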

	      /* First check if we can do a vanilla loop reversal.  */
	      if (initial_value == const0_rtx
		  && GET_CODE (comparison_value) == CONST_INT
		  /* Now do postponed overflow checks on COMPARISON_VAL.  */
		  && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
			& comparison_sign_mask))
		{
		  /* Register will always be nonnegative, with value
		     0 on last iteration.  */
		  add_adjust = add_val;
		  nonneg = 1;
		  cmp_code = GE;
		}
	      else
		return 0;

	      if (GET_CODE (comparison) == LE)
		add_adjust -= add_val;

	      /* If the initial value is not zero, or if the comparison
		 value is not an exact multiple of the increment, then we
		 cannot reverse this loop.  */
	      if (initial_value == const0_rtx
		  && GET_CODE (comparison_value) == CONST_INT)
		{
		  if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
		    return 0;
		}
	      else
		{
		  if (! no_use_except_counting || add_val != 1)
		    return 0;
		}

	      final_value = comparison_value;

	      /* Reset these in case we normalized the initial value
		 and comparison value above.  */
	      if (GET_CODE (comparison_value) == CONST_INT
		  && GET_CODE (initial_value) == CONST_INT)
		{
		  comparison_value = GEN_INT (comparison_val);
		  final_value
		    = GEN_INT (comparison_val + INTVAL (bl->initial_value));
		}
	      bl->initial_value = initial_value;

	      /* Save some info needed to produce the new insns.  */
	      reg = bl->biv->dest_reg;
	      mode = GET_MODE (reg);
	      jump_label = condjump_label (PREV_INSN (loop_end));
	      new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));

	      /* Set start_value; if this is not a CONST_INT, we need
		 to generate a SUB.
		 Initialize biv to start_value before loop start.
		 The old initializing insn will be deleted as a
		 dead store by flow.c.  */
	      if (initial_value == const0_rtx
		  && GET_CODE (comparison_value) == CONST_INT)
		{
		  start_value
		    = gen_int_mode (comparison_val - add_adjust, mode);
		  loop_insn_hoist (loop, gen_move_insn (reg, start_value));
		}
	      else if (GET_CODE (initial_value) == CONST_INT)
		{
		  rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
		  rtx add_insn = gen_add3_insn (reg, comparison_value, offset);

		  if (add_insn == 0)
		    return 0;

		  start_value
		    = gen_rtx_PLUS (mode, comparison_value, offset);
		  loop_insn_hoist (loop, add_insn);
		  if (GET_CODE (comparison) == LE)
		    final_value = gen_rtx_PLUS (mode, comparison_value,
						GEN_INT (add_val));
		}
	      else if (! add_adjust)
		{
		  rtx sub_insn = gen_sub3_insn (reg, comparison_value,
						initial_value);

		  if (sub_insn == 0)
		    return 0;
		  start_value
		    = gen_rtx_MINUS (mode, comparison_value, initial_value);
		  loop_insn_hoist (loop, sub_insn);
		}
	      else
		/* We could handle the other cases too, but it'll be
		   better to have a testcase first.  */
		return 0;

	      /* We may not have a single insn which can increment a reg, so
		 create a sequence to hold all the insns from expand_inc.  */
	      start_sequence ();
	      expand_inc (reg, new_add_val);
	      tem = get_insns ();
	      end_sequence ();

	      p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
	      delete_insn (bl->biv->insn);

	      /* Update biv info to reflect its new status.  */
	      bl->biv->insn = p;
	      bl->initial_value = start_value;
	      bl->biv->add_val = new_add_val;

	      /* Update loop info.  */
	      loop_info->initial_value = reg;
	      loop_info->initial_equiv_value = reg;
	      loop_info->final_value = const0_rtx;
	      loop_info->final_equiv_value = const0_rtx;
	      loop_info->comparison_value = const0_rtx;
	      loop_info->comparison_code = cmp_code;
	      loop_info->increment = new_add_val;

	      /* Inc LABEL_NUSES so that delete_insn will
		 not delete the label.  */
	      LABEL_NUSES (XEXP (jump_label, 0))++;

	      /* If we have a separate comparison insn that does more
		 than just set cc0, the result of the comparison might
		 be used outside the loop.  */
	      keep_first_compare = (compare_and_branch == 2
#ifdef HAVE_CC0
				    && sets_cc0_p (first_compare) <= 0
#endif
				    );

	      /* Emit an insn after the end of the loop to set the biv's
		 proper exit value if it is used anywhere outside the loop.  */
	      if (keep_first_compare
		  || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
		  || ! bl->init_insn
		  || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
		loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));

	      if (keep_first_compare)
		loop_insn_sink (loop, PATTERN (first_compare));

	      /* Delete compare/branch at end of loop.  */
	      delete_related_insns (PREV_INSN (loop_end));
	      if (compare_and_branch == 2)
		delete_related_insns (first_compare);

	      /* Add new compare/branch insn at end of loop.  */
	      start_sequence ();
	      emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
				       mode, 0,
				       XEXP (jump_label, 0));
	      tem = get_insns ();
	      end_sequence ();
	      emit_jump_insn_before (tem, loop_end);

	      for (tem = PREV_INSN (loop_end);
		   tem && !JUMP_P (tem);
		   tem = PREV_INSN (tem))
		;

	      if (tem)
		JUMP_LABEL (tem) = XEXP (jump_label, 0);

	      if (nonneg)
		{
		  if (tem)
		    {
		      /* Increment of LABEL_NUSES done above.  */
		      /* Register is now always nonnegative,
			 so add REG_NONNEG note to the branch.  */
		      REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
							   REG_NOTES (tem));

		      bl->nonneg = 1;
		    }
		}

	      /* No insn may reference both the reversed and another biv or it
		 will fail (see comment near the top of the loop reversal
		 code).
		 Earlier on, we have verified that the biv has no use except
		 counting, or it is the only biv in this function.
		 However, the code that computes no_use_except_counting does
		 not verify reg notes.  It's possible to have an insn that
		 references another biv, and has a REG_EQUAL note with an
		 expression based on the reversed biv.  To avoid this case,
		 remove all REG_EQUAL notes based on the reversed biv
		 here.  */
	      for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
		if (INSN_P (p))
		  {
		    rtx *pnote;
		    rtx set = single_set (p);
		    /* If this is a set of a GIV based on the reversed biv, any
		       REG_EQUAL notes should still be correct.  */
		    if (! set
			|| !REG_P (SET_DEST (set))
			|| (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
			|| REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
			|| REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
		      for (pnote = &REG_NOTES (p); *pnote;)
			{
			  if (REG_NOTE_KIND (*pnote) == REG_EQUAL
			      && reg_mentioned_p (regno_reg_rtx[bl->regno],
						  XEXP (*pnote, 0)))
			    *pnote = XEXP (*pnote, 1);
			  else
			    pnote = &XEXP (*pnote, 1);
			}
		  }

	      /* Mark that this biv has been reversed.  Each giv which depends
		 on this biv, and which is also live past the end of the loop
		 will have to be fixed up.  */

	      bl->reversed = 1;

	      if (loop_dump_stream)
		{
		  fprintf (loop_dump_stream, "Reversed loop");
		  if (bl->nonneg)
		    fprintf (loop_dump_stream, " and added reg_nonneg\n");
		  else
		    fprintf (loop_dump_stream, "\n");
		}

	      return 1;
	    }
	}
    }

  return 0;
}

/* Verify whether the biv BL appears to be eliminable,
   based on the insns in the loop that refer to it.

   If ELIMINATE_P is nonzero, actually do the elimination.

   THRESHOLD and INSN_COUNT are from loop_optimize and are used to
   determine whether invariant insns should be placed inside or at the
   start of the loop.  */

static int
maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
		     int eliminate_p, int threshold, int insn_count)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  rtx reg = bl->biv->dest_reg;
  rtx p;

  /* Scan all insns in the loop, stopping if we find one that uses the
     biv in a way that we cannot eliminate.  */

  for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
    {
      enum rtx_code code = GET_CODE (p);
      basic_block where_bb = 0;
      rtx where_insn = threshold >= insn_count ? 0 : p;
      rtx note;

      /* If this is a libcall that sets a giv, skip ahead to its end.  */
      if (INSN_P (p))
	{
	  note = find_reg_note (p, REG_LIBCALL, NULL_RTX);

	  if (note)
	    {
	      rtx last = XEXP (note, 0);
	      rtx set = single_set (last);

	      if (set && REG_P (SET_DEST (set)))
		{
		  unsigned int regno = REGNO (SET_DEST (set));

		  if (regno < ivs->n_regs
		      && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
		      && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
		    p = last;
		}
	    }
	}

      /* Closely examine the insn if the biv is mentioned.  */
      if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
	  && reg_mentioned_p (reg, PATTERN (p))
	  && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
				      eliminate_p, where_bb, where_insn))
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream,
		     "Cannot eliminate biv %d: biv used in insn %d.\n",
		     bl->regno, INSN_UID (p));
	  break;
	}

      /* If we are eliminating, kill REG_EQUAL notes mentioning the biv.  */
      if (eliminate_p
	  && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
	  && reg_mentioned_p (reg, XEXP (note, 0)))
	remove_note (p, note);
    }

  if (p == loop->end)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
		 bl->regno, eliminate_p ? "was" : "can be");
      return 1;
    }

  return 0;
}

/* INSN and REFERENCE are instructions in the same insn chain.
   Return nonzero if INSN is first.  */

static int
loop_insn_first_p (rtx insn, rtx reference)
{
  rtx p, q;

  for (p = insn, q = reference;;)
    {
      /* Start with test for not first so that INSN == REFERENCE yields not
	 first.  */
      if (q == insn || ! p)
	return 0;
      if (p == reference || ! q)
	return 1;

      /* Either of P or Q might be a NOTE.  Notes have the same LUID as the
	 previous insn, hence the <= comparison below does not work if
	 P is a note.  */
      if (INSN_UID (p) < max_uid_for_loop
	  && INSN_UID (q) < max_uid_for_loop
	  && !NOTE_P (p))
	return INSN_LUID (p) <= INSN_LUID (q);

      if (INSN_UID (p) >= max_uid_for_loop
	  || NOTE_P (p))
	p = NEXT_INSN (p);
      if (INSN_UID (q) >= max_uid_for_loop)
	q = NEXT_INSN (q);
    }
}

/* We are trying to eliminate BIV in INSN using GIV.  Return nonzero if
   the offset that we have to take into account due to auto-increment /
   giv derivation is zero.  */
static int
biv_elimination_giv_has_0_offset (struct induction *biv,
				  struct induction *giv, rtx insn)
{
  /* If the giv V had the auto-inc address optimization applied
     to it, and INSN occurs between the giv insn and the biv
     insn, then we'd have to adjust the value used here.
     This is rare, so we don't bother to make this possible.  */
  if (giv->auto_inc_opt
      && ((loop_insn_first_p (giv->insn, insn)
	   && loop_insn_first_p (insn, biv->insn))
	  || (loop_insn_first_p (biv->insn, insn)
	      && loop_insn_first_p (insn, giv->insn))))
    return 0;

  return 1;
}

/* If BL appears in X (part of the pattern of INSN), see if we can
   eliminate its use.  If so, return 1.  If not, return 0.

   If BIV does not appear in X, return 1.

   If ELIMINATE_P is nonzero, actually do the elimination.
   WHERE_INSN/WHERE_BB indicate where extra insns should be added.
   Depending on how many items have been moved out of the loop, it
   will either be before INSN (when WHERE_INSN is nonzero) or at the
   start of the loop (when WHERE_INSN is zero).  */

static int
maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
		       struct iv_class *bl, int eliminate_p,
		       basic_block where_bb, rtx where_insn)
{
  enum rtx_code code = GET_CODE (x);
  rtx reg = bl->biv->dest_reg;
  enum machine_mode mode = GET_MODE (reg);
  struct induction *v;
  rtx arg, tem;
#ifdef HAVE_cc0
  rtx new;
#endif
  int arg_operand;
  const char *fmt;
  int i, j;

  switch (code)
    {
    case REG:
      /* If we haven't already been able to do something with this BIV,
	 we can't eliminate it.  */
      if (x == reg)
	return 0;
      return 1;

    case SET:
      /* If this sets the BIV, it is not a problem.  */
      if (SET_DEST (x) == reg)
	return 1;

      /* If this is an insn that defines a giv, it is also ok because
	 it will go away when the giv is reduced.  */
      for (v = bl->giv; v; v = v->next_iv)
	if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
	  return 1;

#ifdef HAVE_cc0
      if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
	{
	  /* Can replace with any giv that was reduced and
	     that has (MULT_VAL != 0) and (ADD_VAL == 0).
	     Require a constant for MULT_VAL, so we know it's nonzero.
	     ??? We disable this optimization to avoid potential
	     overflows.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
		&& v->add_val == const0_rtx
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		&& 0)
	      {
		if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
		  continue;

		if (! eliminate_p)
		  return 1;

		/* If the giv has the opposite direction of change,
		   then reverse the comparison.  */
		if (INTVAL (v->mult_val) < 0)
		  new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
					 const0_rtx, v->new_reg);
		else
		  new = v->new_reg;

		/* We can probably test that giv's reduced reg.  */
		if (validate_change (insn, &SET_SRC (x), new, 0))
		  return 1;
	      }

	  /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
	     replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
	     Require a constant for MULT_VAL, so we know it's nonzero.
	     ??? Do this only if ADD_VAL is a pointer to avoid a potential
	     overflow problem.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (GET_CODE (v->mult_val) == CONST_INT
		&& v->mult_val != const0_rtx
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		&& (GET_CODE (v->add_val) == SYMBOL_REF
		    || GET_CODE (v->add_val) == LABEL_REF
		    || GET_CODE (v->add_val) == CONST
		    || (REG_P (v->add_val)
			&& REG_POINTER (v->add_val))))
	      {
		if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
		  continue;

		if (! eliminate_p)
		  return 1;

		/* If the giv has the opposite direction of change,
		   then reverse the comparison.  */
		if (INTVAL (v->mult_val) < 0)
		  new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
					 v->new_reg);
		else
		  new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
					 copy_rtx (v->add_val));

		/* Replace biv with the giv's reduced register.  */
		update_reg_last_use (v->add_val, insn);
		if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
		  return 1;

		/* Insn doesn't support that constant or invariant.  Copy it
		   into a register (it will be a loop invariant.)  */
		tem = gen_reg_rtx (GET_MODE (v->new_reg));

		loop_insn_emit_before (loop, 0, where_insn,
				       gen_move_insn (tem,
						      copy_rtx (v->add_val)));

		/* Substitute the new register for its invariant value in
		   the compare expression.  */
		XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
		if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
		  return 1;
	      }
	}
#endif
      break;

    case COMPARE:
    case EQ:  case NE:
    case GT:  case GE:  case GTU:  case GEU:
    case LT:  case LE:  case LTU:  case LEU:
      /* See if either argument is the biv.  */
      if (XEXP (x, 0) == reg)
	arg = XEXP (x, 1), arg_operand = 1;
      else if (XEXP (x, 1) == reg)
	arg = XEXP (x, 0), arg_operand = 0;
      else
	break;

      if (CONSTANT_P (arg))
	{
	  /* First try to replace with any giv that has constant positive
	     mult_val and constant add_val.  We might be able to support
	     negative mult_val, but it seems complex to do it in general.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (GET_CODE (v->mult_val) == CONST_INT
		&& INTVAL (v->mult_val) > 0
		&& (GET_CODE (v->add_val) == SYMBOL_REF
		    || GET_CODE (v->add_val) == LABEL_REF
		    || GET_CODE (v->add_val) == CONST
		    || (REG_P (v->add_val)
			&& REG_POINTER (v->add_val)))
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode)
	      {
		if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
		  continue;

		/* Don't eliminate if the linear combination that makes up
		   the giv overflows when it is applied to ARG.  */
		if (GET_CODE (arg) == CONST_INT)
		  {
		    rtx add_val;

		    if (GET_CODE (v->add_val) == CONST_INT)
		      add_val = v->add_val;
		    else
		      add_val = const0_rtx;

		    if (const_mult_add_overflow_p (arg, v->mult_val,
						   add_val, mode, 1))
		      continue;
		  }

		if (! eliminate_p)
		  return 1;

		/* Replace biv with the giv's reduced reg.  */
		validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);

		/* If all constants are actually constant integers and
		   the derived constant can be directly placed in the COMPARE,
		   do so.  */
		if (GET_CODE (arg) == CONST_INT
		    && GET_CODE (v->add_val) == CONST_INT)
		  {
		    tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
					   v->add_val, mode, 1);
		  }
		else
		  {
		    /* Otherwise, load it into a register.  */
		    tem = gen_reg_rtx (mode);
		    loop_iv_add_mult_emit_before (loop, arg,
						  v->mult_val, v->add_val,
						  tem, where_bb, where_insn);
		  }

		validate_change (insn, &XEXP (x, arg_operand), tem, 1);

		if (apply_change_group ())
		  return 1;
	      }

	  /* Look for giv with positive constant mult_val and nonconst add_val.
	     Insert insns to calculate new compare value.
	     ??? Turn this off due to possible overflow.  */

	  for (v = bl->giv; v; v = v->next_iv)
	    if (GET_CODE (v->mult_val) == CONST_INT
		&& INTVAL (v->mult_val) > 0
		&& ! v->ignore && ! v->maybe_dead && v->always_computable
		&& v->mode == mode
		&& 0)
	      {
		rtx tem;

		if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
		  continue;

		if (! eliminate_p)
		  return 1;

		tem = gen_reg_rtx (mode);

		/* Replace biv with giv's reduced register.  */
		validate_change (insn, &XEXP (x, 1 - arg_operand),
				 v->new_reg, 1);

		/* Compute value to compare against.  */
		loop_iv_add_mult_emit_before (loop, arg,
					      v->mult_val, v->add_val,
					      tem, where_bb, where_insn);
		/* Use it in this insn.  */
		validate_change (insn, &XEXP (x, arg_operand), tem, 1);
		if (apply_change_group ())
		  return 1;
	      }
	}
      else if (REG_P (arg) || MEM_P (arg))
	{
	  if (loop_invariant_p (loop, arg) == 1)
	    {
	      /* Look for giv with constant positive mult_val and nonconst
		 add_val.  Insert insns to compute new compare value.
		 ??? Turn this off due to possible overflow.  */

	      for (v = bl->giv; v; v = v->next_iv)
		if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
		    && ! v->ignore && ! v->maybe_dead && v->always_computable
		    && v->mode == mode
		    && 0)
		  {
		    rtx tem;

		    if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
		      continue;

		    if (! eliminate_p)
		      return 1;

		    tem = gen_reg_rtx (mode);

		    /* Replace biv with giv's reduced register.  */
		    validate_change (insn, &XEXP (x, 1 - arg_operand),
				     v->new_reg, 1);

		    /* Compute value to compare against.  */
		    loop_iv_add_mult_emit_before (loop, arg,
						  v->mult_val, v->add_val,
						  tem, where_bb, where_insn);
		    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
		    if (apply_change_group ())
		      return 1;
		  }
	    }

	  /* This code has problems.  Basically, you can't know when
	     seeing if we will eliminate BL, whether a particular giv
	     of ARG will be reduced.  If it isn't going to be reduced,
	     we can't eliminate BL.  We can try forcing it to be reduced,
	     but that can generate poor code.

	     The problem is that the benefit of reducing TV, below should
	     be increased if BL can actually be eliminated, but this means
	     we might have to do a topological sort of the order in which
	     we try to process bivs.  It doesn't seem worthwhile to do
	     this sort of thing now.  */

#if 0
	  /* Otherwise the reg compared with had better be a biv.  */
	  if (!REG_P (arg)
	      || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
	    return 0;

	  /* Look for a pair of givs, one for each biv,
	     with identical coefficients.  */
	  for (v = bl->giv; v; v = v->next_iv)
	    {
	      struct induction *tv;

	      if (v->ignore || v->maybe_dead || v->mode != mode)
		continue;

	      for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
		   tv = tv->next_iv)
		if (! tv->ignore && ! tv->maybe_dead
		    && rtx_equal_p (tv->mult_val, v->mult_val)
		    && rtx_equal_p (tv->add_val, v->add_val)
		    && tv->mode == mode)
		  {
		    if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
		      continue;

		    if (! eliminate_p)
		      return 1;

		    /* Replace biv with its giv's reduced reg.  */
		    XEXP (x, 1 - arg_operand) = v->new_reg;
		    /* Replace other operand with the other giv's
		       reduced reg.  */
		    XEXP (x, arg_operand) = tv->new_reg;
		    return 1;
		  }
	    }
#endif
	}

      /* If we get here, the biv can't be eliminated.  */
      return 0;

    case MEM:
      /* If this address is a DEST_ADDR giv, it doesn't matter if the
	 biv is used in it, since it will be replaced.  */
      for (v = bl->giv; v; v = v->next_iv)
	if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
	  return 1;
      break;

    default:
      break;
    }

  /* See if any subexpression fails elimination.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'e':
	  if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
				       eliminate_p, where_bb, where_insn))
	    return 0;
	  break;

	case 'E':
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
					 eliminate_p, where_bb, where_insn))
	      return 0;
	  break;
	}
    }

  return 1;
}

/* Return nonzero if the last use of REG
   is in an insn following INSN in the same basic block.  */

static int
last_use_this_basic_block (rtx reg, rtx insn)
{
  rtx n;
  for (n = insn;
       n && !LABEL_P (n) && !JUMP_P (n);
       n = NEXT_INSN (n))
    {
      if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
	return 1;
    }
  return 0;
}

/* Called via `note_stores' to record the initial value of a biv.  Here we
   just record the location of the set and process it later.  */

static void
record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
{
  struct loop_ivs *ivs = (struct loop_ivs *) data;
  struct iv_class *bl;

  if (!REG_P (dest)
      || REGNO (dest) >= ivs->n_regs
      || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
    return;

  bl = REG_IV_CLASS (ivs, REGNO (dest));

  /* If this is the first set found, record it.  */
  if (bl->init_insn == 0)
    {
      bl->init_insn = note_insn;
      bl->init_set = set;
    }
}

/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid so we can't
   use it.  X must be a source expression only.  */

static void
update_reg_last_use (rtx x, rtx insn)
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify the regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of x.
     ??? This comment is not correct.  See for example loop_givs_reduce.
     This may insert an insn before another new insn.  */
  if (REG_P (x) && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
    {
      REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
    }
  else
    {
      int i, j;
      const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    update_reg_last_use (XEXP (x, i), insn);
	  else if (fmt[i] == 'E')
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      update_reg_last_use (XVECEXP (x, i, j), insn);
	}
    }
}

/* Similar to rtlanal.c:get_condition, except that we also put an
   invariant last unless both operands are invariants.  */

static rtx
get_condition_for_loop (const struct loop *loop, rtx x)
{
  rtx comparison = get_condition (x, (rtx *) 0, false, true);

  if (comparison == 0
      || ! loop_invariant_p (loop, XEXP (comparison, 0))
      || loop_invariant_p (loop, XEXP (comparison, 1)))
    return comparison;

  return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
			 XEXP (comparison, 1), XEXP (comparison, 0));
}
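
/* Illustrative example (not in the original source): given the
   condition (gt (const_int 100) (reg 60)) where (reg 60) varies inside
   the loop, the operands are swapped and the code reversed, yielding
   (lt (reg 60) (const_int 100)) so that the loop-invariant operand
   comes last.  */
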
/* Scan the function and determine whether it has indirect (computed) jumps.

   This is taken mostly from flow.c; similar code exists elsewhere
   in the compiler.  It may be useful to put this into rtlanal.c.  */
static int
indirect_jump_in_function_p (rtx start)
{
  rtx insn;

  for (insn = start; insn; insn = NEXT_INSN (insn))
    if (computed_jump_p (insn))
      return 1;

  return 0;
}

/* Add MEM to the LOOP_MEMS array, if appropriate.  See the
   documentation for LOOP_MEMS for the definition of `appropriate'.
   This function is called from prescan_loop via for_each_rtx.  */

static int
insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
{
  struct loop_info *loop_info = data;
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CLOBBER:
      /* We're not interested in MEMs that are only clobbered.  */
      return -1;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into this.  */
      return -1;

    case EXPR_LIST:
      /* We're not interested in any MEMs that only appear in notes.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  /* See if we've already seen this MEM.  */
  for (i = 0; i < loop_info->mems_idx; ++i)
    if (rtx_equal_p (m, loop_info->mems[i].mem))
      {
	if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
	  loop_info->mems[i].mem = m;
	if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
	  /* The modes of the two memory accesses are different.  If
	     this happens, something tricky is going on, and we just
	     don't optimize accesses to this MEM.  */
	  loop_info->mems[i].optimize = 0;

	return 0;
      }

  /* Resize the array, if necessary.  */
  if (loop_info->mems_idx == loop_info->mems_allocated)
    {
      if (loop_info->mems_allocated != 0)
	loop_info->mems_allocated *= 2;
      else
	loop_info->mems_allocated = 32;

      loop_info->mems = xrealloc (loop_info->mems,
				  loop_info->mems_allocated * sizeof (loop_mem_info));
    }

  /* Actually insert the MEM.  */
  loop_info->mems[loop_info->mems_idx].mem = m;
  /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
     because we can't put it in a register.  We still store it in the
     table, though, so that if we see the same address later, but in a
     non-BLK mode, we'll not think we can optimize it at that point.  */
  loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
  loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
  ++loop_info->mems_idx;

  return 0;
}

/* Allocate REGS->ARRAY or reallocate it if it is too small.

   Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
   register that is modified by an insn within the loop.  If the
   value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
   more, stop incrementing it, to avoid overflow.

   Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
   register I is used, if it is only used once.  Otherwise, it is set
   to 0 (for no uses) or const0_rtx for more than one use.  This
   parameter may be zero, in which case this processing is not done.

   Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
   optimize register I.  */

static void
loop_regs_scan (const struct loop *loop, int extra_size)
{
  struct loop_regs *regs = LOOP_REGS (loop);
  int old_nregs;
  /* last_set[n] is nonzero iff reg n has been set in the current
     basic block.  In that case, it is the insn that last set reg n.  */
  rtx *last_set;
  rtx insn;
  int i;

  old_nregs = regs->num;
  regs->num = max_reg_num ();

  /* Grow the regs array if not allocated or too small.  */
  if (regs->num >= regs->size)
    {
      regs->size = regs->num + extra_size;

      regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));

      /* Zero the new elements.  */
      memset (regs->array + old_nregs, 0,
	      (regs->size - old_nregs) * sizeof (*regs->array));
    }

  /* Clear previously scanned fields but do not clear n_times_set.  */
  for (i = 0; i < old_nregs; i++)
    {
      regs->array[i].set_in_loop = 0;
      regs->array[i].may_not_optimize = 0;
      regs->array[i].single_usage = NULL_RTX;
    }

  last_set = xcalloc (regs->num, sizeof (rtx));

  /* Scan the loop, recording register usage.  */
  for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  /* Record registers that have exactly one use.  */
	  find_single_use_in_loop (regs, insn, PATTERN (insn));

	  /* Include uses in REG_EQUAL notes.  */
	  if (REG_NOTES (insn))
	    find_single_use_in_loop (regs, insn, REG_NOTES (insn));

	  if (GET_CODE (PATTERN (insn)) == SET
	      || GET_CODE (PATTERN (insn)) == CLOBBER)
	    count_one_set (regs, insn, PATTERN (insn), last_set);
	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
	    {
	      int i;
	      for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
		count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
			       last_set);
	    }
	}

      if (LABEL_P (insn) || JUMP_P (insn))
	memset (last_set, 0, regs->num * sizeof (rtx));

      /* Invalidate all registers used for function argument passing.
	 We check rtx_varies_p for the same reason as below, to allow
	 optimizing PIC calculations.  */
      if (CALL_P (insn))
	{
	  rtx link;
	  for (link = CALL_INSN_FUNCTION_USAGE (insn);
	       link;
	       link = XEXP (link, 1))
	    {
	      rtx op, reg;

	      if (GET_CODE (op = XEXP (link, 0)) == USE
		  && REG_P (reg = XEXP (op, 0))
		  && rtx_varies_p (reg, 1))
		regs->array[REGNO (reg)].may_not_optimize = 1;
	    }
	}
    }

  /* Invalidate all hard registers clobbered by calls.  With one exception:
     a call-clobbered PIC register is still function-invariant for our
     purposes, since we can hoist any PIC calculations out of the loop.
     Thus the call to rtx_varies_p.  */
  if (LOOP_INFO (loop)->has_call)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
	  && rtx_varies_p (regno_reg_rtx[i], 1))
	{
	  regs->array[i].may_not_optimize = 1;
	  regs->array[i].set_in_loop = 1;
	}

#ifdef AVOID_CCMODE_COPIES
  /* Don't try to move insns which set CC registers if we should not
     create CCmode register copies.  */
  for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
      regs->array[i].may_not_optimize = 1;
#endif

  /* Set regs->array[I].n_times_set for the new registers.  */
  for (i = old_nregs; i < regs->num; i++)
    regs->array[i].n_times_set = regs->array[i].set_in_loop;

  free (last_set);
}

/* Returns the number of real INSNs in the LOOP.  */

static int
count_insns_in_loop (const struct loop *loop)
{
  int count = 0;
  rtx insn;

  for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      ++count;

  return count;
}

/* Move MEMs into registers for the duration of the loop.  */

static void
load_mems (const struct loop *loop)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int maybe_never = 0;
  int i;
  rtx p, prev_ebb_head;
  rtx label = NULL_RTX;
  rtx end_label;
  /* Nonzero if the next instruction may never be executed.  */
  int next_maybe_never = 0;
  unsigned int last_max_reg = max_reg_num ();

  if (loop_info->mems_idx == 0)
    return;

  /* We cannot use next_label here because it skips over normal insns.  */
  end_label = next_nonnote_insn (loop->end);
  if (end_label && !LABEL_P (end_label))
    end_label = NULL_RTX;

  /* Check to see if it's possible that some instructions in the loop are
     never executed.  Also check if there is a goto out of the loop other
     than right after the end of the loop.  */
  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (LABEL_P (p))
	maybe_never = 1;
      else if (JUMP_P (p)
	       /* If we enter the loop in the middle, and scan
		  around to the beginning, don't set maybe_never
		  for that.  This must be an unconditional jump,
		  otherwise the code at the top of the loop might
		  never be executed.  Unconditional jumps are
		  followed by a barrier and then the loop end.  */
	       && ! (JUMP_P (p)
		     && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop->end
		     && any_uncondjump_p (p)))
	{
	  /* If this is a jump outside of the loop but not right
	     after the end of the loop, we would have to emit new fixup
	     sequences for each such label.  */
	  if (/* If we can't tell where control might go when this
		 JUMP_INSN is executed, we must be conservative.  */
	      !JUMP_LABEL (p)
	      || (JUMP_LABEL (p) != end_label
		  && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
		      || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
		      || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
	    return;

	  if (!any_condjump_p (p))
	    /* Something complicated.  */
	    maybe_never = 1;
	  else
	    /* If there are any more instructions in the loop, they
	       might not be reached.  */
	    next_maybe_never = 1;
	}
      else if (next_maybe_never)
	maybe_never = 1;
    }

  /* Find start of the extended basic block that enters the loop.  */
  for (p = loop->start;
       PREV_INSN (p) && !LABEL_P (p);
       p = PREV_INSN (p))
    ;
  prev_ebb_head = p;

  cselib_init (true);

  /* Build table of mems that get set to constant values before the
     loop.  */
  for (; p != loop->start; p = NEXT_INSN (p))
    cselib_process_insn (p);
10833 /* Actually move the MEMs. */
10834 for (i = 0; i < loop_info->mems_idx; ++i)
10836 regset_head load_copies;
10837 regset_head store_copies;
10838 int written = 0;
10839 rtx reg;
10840 rtx mem = loop_info->mems[i].mem;
10841 rtx mem_list_entry;
10843 if (MEM_VOLATILE_P (mem)
10844 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
10845 /* There's no telling whether or not MEM is modified. */
10846 loop_info->mems[i].optimize = 0;
10848 /* Go through the MEMs written to in the loop to see if this
10849 one is aliased by one of them. */
10850 mem_list_entry = loop_info->store_mems;
10851 while (mem_list_entry)
10853 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
10854 written = 1;
10855 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
10856 mem, rtx_varies_p))
10858 /* MEM is indeed aliased by this store. */
10859 loop_info->mems[i].optimize = 0;
10860 break;
10862 mem_list_entry = XEXP (mem_list_entry, 1);
10865 if (flag_float_store && written
10866 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
10867 loop_info->mems[i].optimize = 0;
10869 /* If this MEM is written to, we must be sure that there
10870 are no reads from another MEM that aliases this one. */
10871 if (loop_info->mems[i].optimize && written)
10873 int j;
10875 for (j = 0; j < loop_info->mems_idx; ++j)
10877 if (j == i)
10878 continue;
10879 else if (true_dependence (mem,
10880 VOIDmode,
10881 loop_info->mems[j].mem,
10882 rtx_varies_p))
10884 /* It's not safe to hoist loop_info->mems[i] out of
10885 the loop because writes to it might not be
10886 seen by reads from loop_info->mems[j]. */
10887 loop_info->mems[i].optimize = 0;
10888 break;
10893 if (maybe_never && may_trap_p (mem))
10894 /* We can't access the MEM outside the loop; it might
10895 cause a trap that wouldn't have happened otherwise. */
10896 loop_info->mems[i].optimize = 0;
10898 if (!loop_info->mems[i].optimize)
10899 /* We thought we were going to lift this MEM out of the
10900 loop, but later discovered that we could not. */
10901 continue;
10903 INIT_REG_SET (&load_copies);
10904 INIT_REG_SET (&store_copies);
10906 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
10907 order to keep scan_loop from moving stores to this MEM
10908 out of the loop just because this REG is neither a
10909 user-variable nor used in the loop test. */
10910 reg = gen_reg_rtx (GET_MODE (mem));
10911 REG_USERVAR_P (reg) = 1;
10912 loop_info->mems[i].reg = reg;
10914 /* Now, replace all references to the MEM with the
10915 corresponding pseudos. */
10916 maybe_never = 0;
10917 for (p = next_insn_in_loop (loop, loop->scan_start);
10918 p != NULL_RTX;
10919 p = next_insn_in_loop (loop, p))
10921 if (INSN_P (p))
10923 rtx set;
10925 set = single_set (p);
10927 /* See if this copies the mem into a register that isn't
10928 modified afterwards. We'll try to do copy propagation
10929 a little further on. */
10930 if (set
10931 /* @@@ This test is _way_ too conservative. */
10932 && ! maybe_never
10933 && REG_P (SET_DEST (set))
10934 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
10935 && REGNO (SET_DEST (set)) < last_max_reg
10936 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
10937 && rtx_equal_p (SET_SRC (set), mem))
10938 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
10940 /* See if this copies the mem from a register that isn't
10941 modified afterwards. We'll try to remove the
10942 redundant copy later on by doing a little register
10943 renaming and copy propagation. This will help
10944 to untangle things for the BIV detection code. */
10945 if (set
10946 && ! maybe_never
10947 && REG_P (SET_SRC (set))
10948 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
10949 && REGNO (SET_SRC (set)) < last_max_reg
10950 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
10951 && rtx_equal_p (SET_DEST (set), mem))
10952 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
10954 /* If this is a call which uses / clobbers this memory
10955 location, we must not change the interface here. */
10956 if (CALL_P (p)
10957 && reg_mentioned_p (loop_info->mems[i].mem,
10958 CALL_INSN_FUNCTION_USAGE (p)))
10959 {
10960 cancel_changes (0);
10961 loop_info->mems[i].optimize = 0;
10962 break;
10963 }
10964 else
10965 /* Replace the memory reference with the shadow register. */
10966 replace_loop_mems (p, loop_info->mems[i].mem,
10967 loop_info->mems[i].reg, written);
10968 }
10970 if (LABEL_P (p)
10971 || JUMP_P (p))
10972 maybe_never = 1;
10973 }
10975 if (! loop_info->mems[i].optimize)
10976 ; /* We found we couldn't do the replacement, so do nothing. */
10977 else if (! apply_change_group ())
10978 /* We couldn't replace all occurrences of the MEM. */
10979 loop_info->mems[i].optimize = 0;
10980 else
10981 {
10982 /* Load the memory immediately before LOOP->START, which is
10983 the NOTE_LOOP_BEG. */
10984 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
10985 rtx set;
10986 rtx best = mem;
10987 unsigned j;
10988 struct elt_loc_list *const_equiv = 0;
10989 reg_set_iterator rsi;
10991 if (e)
10992 {
10993 struct elt_loc_list *equiv;
10994 struct elt_loc_list *best_equiv = 0;
10995 for (equiv = e->locs; equiv; equiv = equiv->next)
10996 {
10997 if (CONSTANT_P (equiv->loc))
10998 const_equiv = equiv;
10999 else if (REG_P (equiv->loc)
11000 /* Extending hard register lifetimes causes a crash
11001 on SRC targets. Doing so on non-SRC targets is
11002 probably also not a good idea, since we most
11003 probably have a pseudo-register equivalence as
11004 well. */
11005 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
11006 best_equiv = equiv;
11007 }
11008 /* Use the constant equivalence if that is cheap enough. */
11009 if (! best_equiv)
11010 best_equiv = const_equiv;
11011 else if (const_equiv
11012 && (rtx_cost (const_equiv->loc, SET)
11013 <= rtx_cost (best_equiv->loc, SET)))
11014 {
11015 best_equiv = const_equiv;
11016 const_equiv = 0;
11017 }
11019 /* If best_equiv is nonzero, we know that MEM is set to a
11020 constant or register before the loop. We will use this
11021 knowledge to initialize the shadow register with that
11022 constant or reg rather than by loading from MEM. */
11023 if (best_equiv)
11024 best = copy_rtx (best_equiv->loc);
11025 }
11027 set = gen_move_insn (reg, best);
11028 set = loop_insn_hoist (loop, set);
11029 if (REG_P (best))
11030 {
11031 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
11032 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
11033 {
11034 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
11035 break;
11036 }
11037 }
11039 if (const_equiv)
11040 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
11042 if (written)
11043 {
11044 if (label == NULL_RTX)
11045 {
11046 label = gen_label_rtx ();
11047 emit_label_after (label, loop->end);
11048 }
11050 /* Store the memory immediately after END, which is
11051 the NOTE_LOOP_END. */
11052 set = gen_move_insn (copy_rtx (mem), reg);
11053 loop_insn_emit_after (loop, 0, label, set);
11054 }
11056 if (loop_dump_stream)
11057 {
11058 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
11059 REGNO (reg), (written ? "r/w" : "r/o"));
11060 print_rtl (loop_dump_stream, mem);
11061 fputc ('\n', loop_dump_stream);
11062 }
11064 /* Attempt a bit of copy propagation. This helps untangle the
11065 data flow, and enables {basic,general}_induction_var to find
11066 more bivs/givs. */
11067 EXECUTE_IF_SET_IN_REG_SET
11068 (&load_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11069 {
11070 try_copy_prop (loop, reg, j);
11071 }
11072 CLEAR_REG_SET (&load_copies);
11074 EXECUTE_IF_SET_IN_REG_SET
11075 (&store_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11076 {
11077 try_swap_copy_prop (loop, reg, j);
11078 }
11079 CLEAR_REG_SET (&store_copies);
11080 }
11081 }
11083 /* Now, we need to replace all references to the previous exit
11084 label with the new one. */
11085 if (label != NULL_RTX && end_label != NULL_RTX)
11086 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
11087 if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
11088 redirect_jump (p, label, false);
11090 cselib_finish ();
11091 }
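/* For illustration only (the register numbers are made up): the net
   effect of the code above on a loop that reads an invariant memory
   location X is roughly

       before:            loop: (set (reg 60) (mem X)) ...
       after:   (set (reg 70) (mem X))        ;; load hoisted to the pre-header
                loop: (set (reg 60) (reg 70)) ...

   where (reg 70) is the REG_USERVAR_P shadow pseudo allocated above.
   If the MEM is also written in the loop, a store back to X is emitted
   after the loop at the freshly generated exit label.  */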
11093 /* For communication between note_reg_stored and its caller. */
11094 struct note_reg_stored_arg
11095 {
11096 int set_seen;
11097 rtx reg;
11098 };
11100 /* Called via note_stores, record in SET_SEEN whether X, which is written,
11101 is equal to ARG. */
11102 static void
11103 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
11104 {
11105 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
11106 if (t->reg == x)
11107 t->set_seen = 1;
11108 }
11110 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
11111 There must be exactly one insn that sets this pseudo; it will be
11112 deleted if all replacements succeed and we can prove that the register
11113 is not used after the loop. */
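/* For example (illustrative pseudo-RTL, made-up register numbers):
   given the loop body

       (set (reg 60) (reg 70))    ;; the sole insn setting pseudo 60
       ...
       (use (reg 60))

   a call with REPLACEMENT = (reg 70) and REGNO = 60 tries to rewrite
   every later use of (reg 60) in the same extended basic block into
   (reg 70); if the first set was the initializing insn and its last
   use got replaced, the initializing insn (or the whole libcall
   sequence ending in it, see REG_RETVAL below) is deleted.  */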
11115 static void
11116 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
11117 {
11118 /* This is the reg that we are copying from. */
11119 rtx reg_rtx = regno_reg_rtx[regno];
11120 rtx init_insn = 0;
11121 rtx insn;
11122 /* These help keep track of whether we replaced all uses of the reg. */
11123 int replaced_last = 0;
11124 int store_is_first = 0;
11126 for (insn = next_insn_in_loop (loop, loop->scan_start);
11127 insn != NULL_RTX;
11128 insn = next_insn_in_loop (loop, insn))
11129 {
11130 rtx set;
11132 /* Only substitute within one extended basic block from the initializing
11133 insn. */
11134 if (LABEL_P (insn) && init_insn)
11135 break;
11137 if (! INSN_P (insn))
11138 continue;
11140 /* Is this the initializing insn? */
11141 set = single_set (insn);
11142 if (set
11143 && REG_P (SET_DEST (set))
11144 && REGNO (SET_DEST (set)) == regno)
11145 {
11146 gcc_assert (!init_insn);
11148 init_insn = insn;
11149 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
11150 store_is_first = 1;
11151 }
11153 /* Only substitute after seeing the initializing insn. */
11154 if (init_insn && insn != init_insn)
11155 {
11156 struct note_reg_stored_arg arg;
11158 replace_loop_regs (insn, reg_rtx, replacement);
11159 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
11160 replaced_last = 1;
11162 /* Stop replacing when REPLACEMENT is modified. */
11163 arg.reg = replacement;
11164 arg.set_seen = 0;
11165 note_stores (PATTERN (insn), note_reg_stored, &arg);
11166 if (arg.set_seen)
11167 {
11168 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
11170 /* It is possible that we've turned a previously valid REG_EQUAL note
11171 into an invalid one: we changed REGNO to REPLACEMENT, and unlike
11172 REGNO, REPLACEMENT is modified here, so the note no longer holds. */
11173 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
11174 remove_note (insn, note);
11175 break;
11176 }
11177 }
11178 }
11179 gcc_assert (init_insn);
11180 if (apply_change_group ())
11181 {
11182 if (loop_dump_stream)
11183 fprintf (loop_dump_stream, " Replaced reg %d", regno);
11184 if (store_is_first && replaced_last)
11185 {
11186 rtx first;
11187 rtx retval_note;
11189 /* Assume we're just deleting INIT_INSN. */
11190 first = init_insn;
11191 /* Look for REG_RETVAL note. If we're deleting the end of
11192 the libcall sequence, the whole sequence can go. */
11193 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
11194 /* If we found a REG_RETVAL note, find the first instruction
11195 in the sequence. */
11196 if (retval_note)
11197 first = XEXP (retval_note, 0);
11199 /* Delete the instructions. */
11200 loop_delete_insns (first, init_insn);
11201 }
11202 if (loop_dump_stream)
11203 fprintf (loop_dump_stream, ".\n");
11204 }
11205 }
11207 /* Replace all the instructions from FIRST up to and including LAST
11208 with NOTE_INSN_DELETED notes. */
11210 static void
11211 loop_delete_insns (rtx first, rtx last)
11212 {
11213 while (1)
11214 {
11215 if (loop_dump_stream)
11216 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
11217 INSN_UID (first));
11218 delete_insn (first);
11220 /* If this was the LAST instruction we're supposed to delete,
11221 we're done. */
11222 if (first == last)
11223 break;
11225 first = NEXT_INSN (first);
11226 }
11227 }
11229 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
11230 loop LOOP if the order of the sets of these registers can be
11231 swapped. There must be exactly one insn within the loop that sets
11232 this pseudo, followed immediately by a move insn that sets
11233 REPLACEMENT from REGNO. */
11234 static void
11235 try_swap_copy_prop (const struct loop *loop, rtx replacement,
11236 unsigned int regno)
11237 {
11238 rtx insn;
11239 rtx set = NULL_RTX;
11240 unsigned int new_regno;
11242 new_regno = REGNO (replacement);
11244 for (insn = next_insn_in_loop (loop, loop->scan_start);
11245 insn != NULL_RTX;
11246 insn = next_insn_in_loop (loop, insn))
11247 {
11248 /* Search for the insn that copies REGNO to NEW_REGNO. */
11249 if (INSN_P (insn)
11250 && (set = single_set (insn))
11251 && REG_P (SET_DEST (set))
11252 && REGNO (SET_DEST (set)) == new_regno
11253 && REG_P (SET_SRC (set))
11254 && REGNO (SET_SRC (set)) == regno)
11255 break;
11256 }
11258 if (insn != NULL_RTX)
11259 {
11260 rtx prev_insn;
11261 rtx prev_set;
11263 /* Some DEF-USE info would come in handy here to make this
11264 function more general. For now, just check the previous insn
11265 which is the most likely candidate for setting REGNO. */
11267 prev_insn = PREV_INSN (insn);
11269 if (INSN_P (prev_insn)
11270 && (prev_set = single_set (prev_insn))
11271 && REG_P (SET_DEST (prev_set))
11272 && REGNO (SET_DEST (prev_set)) == regno)
11273 {
11274 /* We have:
11275 (set (reg regno) (expr))
11276 (set (reg new_regno) (reg regno))
11278 so try converting this to:
11279 (set (reg new_regno) (expr))
11280 (set (reg regno) (reg new_regno))
11282 The former construct is often generated when a global
11283 variable used for an induction variable is shadowed by a
11284 register (NEW_REGNO). The latter construct improves the
11285 chances of GIV replacement and BIV elimination. */
11287 validate_change (prev_insn, &SET_DEST (prev_set),
11288 replacement, 1);
11289 validate_change (insn, &SET_DEST (set),
11290 SET_SRC (set), 1);
11291 validate_change (insn, &SET_SRC (set),
11292 replacement, 1);
11294 if (apply_change_group ())
11295 {
11296 if (loop_dump_stream)
11297 fprintf (loop_dump_stream,
11298 " Swapped set of reg %d at %d with reg %d at %d.\n",
11299 regno, INSN_UID (insn),
11300 new_regno, INSN_UID (prev_insn));
11302 /* Update first use of REGNO. */
11303 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
11304 REGNO_FIRST_UID (regno) = INSN_UID (insn);
11306 /* Now perform copy propagation to hopefully
11307 remove all uses of REGNO within the loop. */
11308 try_copy_prop (loop, replacement, regno);
11309 }
11310 }
11311 }
11312 }
11314 /* Worker function for find_mem_in_note, called via for_each_rtx. */
11316 static int
11317 find_mem_in_note_1 (rtx *x, void *data)
11318 {
11319 if (*x != NULL_RTX && MEM_P (*x))
11320 {
11321 rtx *res = (rtx *) data;
11322 *res = *x;
11323 return 1;
11324 }
11325 return 0;
11326 }
11328 /* Returns the first MEM found in NOTE by depth-first search. */
11330 static rtx
11331 find_mem_in_note (rtx note)
11332 {
11333 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
11334 return note;
11335 return NULL_RTX;
11336 }
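/* A note on the for_each_rtx protocol used by this walker and the ones
   below: the callback returns 0 to continue the traversal, -1 to skip
   the sub-rtxes of the current expression, and any other nonzero value
   to stop the traversal and make for_each_rtx return that value.
   find_mem_in_note_1 uses the last case, passing the found MEM back
   through the NOTE pointer itself.  */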
11338 /* Replace MEM with its associated pseudo register. This function is
11339 called from load_mems via for_each_rtx. DATA is actually a pointer
11340 to a structure describing the instruction currently being scanned
11341 and the MEM we are currently replacing. */
11343 static int
11344 replace_loop_mem (rtx *mem, void *data)
11345 {
11346 loop_replace_args *args = (loop_replace_args *) data;
11347 rtx m = *mem;
11349 if (m == NULL_RTX)
11350 return 0;
11352 switch (GET_CODE (m))
11353 {
11354 case MEM:
11355 break;
11357 case CONST_DOUBLE:
11358 /* We're not interested in the MEM associated with a
11359 CONST_DOUBLE, so there's no need to traverse into one. */
11360 return -1;
11362 default:
11363 /* This is not a MEM. */
11364 return 0;
11365 }
11367 if (!rtx_equal_p (args->match, m))
11368 /* This is not the MEM we are currently replacing. */
11369 return 0;
11371 /* Actually replace the MEM. */
11372 validate_change (args->insn, mem, args->replacement, 1);
11374 return 0;
11375 }
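/* Replace occurrences of MEM within INSN, including those in REG_EQUAL
   notes, with REG.  WRITTEN is nonzero if MEM is also stored to in the
   loop; in that case REG_EQUAL notes mentioning a MEM that the store
   might alias are queued for removal.  All changes are registered with
   validate_change and are committed (or cancelled) as a group by the
   caller.  */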
11377 static void
11378 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
11379 {
11380 loop_replace_args args;
11382 args.insn = insn;
11383 args.match = mem;
11384 args.replacement = reg;
11386 for_each_rtx (&insn, replace_loop_mem, &args);
11388 /* If we hoist a mem write out of the loop, then REG_EQUAL
11389 notes referring to the mem are no longer valid. */
11390 if (written)
11391 {
11392 rtx note, sub;
11393 rtx *link;
11395 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
11396 {
11397 if (REG_NOTE_KIND (note) == REG_EQUAL
11398 && (sub = find_mem_in_note (note))
11399 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
11400 {
11401 /* Remove the note. */
11402 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
11403 break;
11404 }
11405 }
11406 }
11407 }
11409 /* Replace one register with another. Called through for_each_rtx; PX points
11410 to the rtx being scanned. DATA is actually a pointer to
11411 a structure of arguments. */
11413 static int
11414 replace_loop_reg (rtx *px, void *data)
11415 {
11416 rtx x = *px;
11417 loop_replace_args *args = (loop_replace_args *) data;
11419 if (x == NULL_RTX)
11420 return 0;
11422 if (x == args->match)
11423 validate_change (args->insn, px, args->replacement, 1);
11425 return 0;
11426 }
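/* Replace all occurrences of REG within INSN with REPLACEMENT, again
   queuing the changes via validate_change so that the caller can
   commit or cancel them as a group.  */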
11428 static void
11429 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
11430 {
11431 loop_replace_args args;
11433 args.insn = insn;
11434 args.match = reg;
11435 args.replacement = replacement;
11437 for_each_rtx (&insn, replace_loop_reg, &args);
11438 }
11440 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
11441 (ignored in the interim). */
11443 static rtx
11444 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
11445 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
11446 rtx pattern)
11447 {
11448 return emit_insn_after (pattern, where_insn);
11449 }
11452 /* If WHERE_INSN is nonzero, emit insn for PATTERN before WHERE_INSN
11453 in basic block WHERE_BB (ignored in the interim) within the loop;
11454 otherwise hoist PATTERN into the loop pre-header. */
11456 static rtx
11457 loop_insn_emit_before (const struct loop *loop,
11458 basic_block where_bb ATTRIBUTE_UNUSED,
11459 rtx where_insn, rtx pattern)
11460 {
11461 if (! where_insn)
11462 return loop_insn_hoist (loop, pattern);
11463 return emit_insn_before (pattern, where_insn);
11464 }
11467 /* Emit call insn for PATTERN before WHERE_INSN in basic block
11468 WHERE_BB (ignored in the interim) within the loop. */
11470 static rtx
11471 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
11472 basic_block where_bb ATTRIBUTE_UNUSED,
11473 rtx where_insn, rtx pattern)
11474 {
11475 return emit_call_insn_before (pattern, where_insn);
11476 }
11479 /* Hoist insn for PATTERN into the loop pre-header. */
11481 static rtx
11482 loop_insn_hoist (const struct loop *loop, rtx pattern)
11483 {
11484 return loop_insn_emit_before (loop, 0, loop->start, pattern);
11485 }
11488 /* Hoist call insn for PATTERN into the loop pre-header. */
11490 static rtx
11491 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
11492 {
11493 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
11494 }
11497 /* Sink insn for PATTERN after the loop end. */
11499 static rtx
11500 loop_insn_sink (const struct loop *loop, rtx pattern)
11501 {
11502 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
11503 }
11505 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
11506 and a constant. Emit a sequence of instructions to load it into REG. */
11507 static rtx
11508 gen_load_of_final_value (rtx reg, rtx final_value)
11509 {
11510 rtx seq;
11511 start_sequence ();
11512 final_value = force_operand (final_value, reg);
11513 if (final_value != reg)
11514 emit_move_insn (reg, final_value);
11515 seq = get_insns ();
11516 end_sequence ();
11517 return seq;
11518 }
11520 /* If the loop has multiple exits, emit insn for PATTERN before the
11521 loop to ensure that it will always be executed no matter how the
11522 loop exits. Otherwise, emit the insn for PATTERN after the loop,
11523 since this is slightly more efficient. */
11525 static rtx
11526 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
11527 {
11528 if (loop->exit_count)
11529 return loop_insn_hoist (loop, pattern);
11530 else
11531 return loop_insn_sink (loop, pattern);
11532 }
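/* Print the induction variable classes recorded for LOOP to FILE,
   passing VERBOSE on to loop_iv_class_dump.  */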
11534 static void
11535 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
11536 {
11537 struct iv_class *bl;
11538 int iv_num = 0;
11540 if (! loop || ! file)
11541 return;
11543 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11544 iv_num++;
11546 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
11548 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11549 {
11550 loop_iv_class_dump (bl, file, verbose);
11551 fputc ('\n', file);
11552 }
11553 }
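/* Print biv class BL to FILE: its initial and final values, its total
   increment, and its lists of bivs and givs.  */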
11556 static void
11557 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
11558 int verbose ATTRIBUTE_UNUSED)
11559 {
11560 struct induction *v;
11561 rtx incr;
11562 int i;
11564 if (! bl || ! file)
11565 return;
11567 fprintf (file, "IV class for reg %d, benefit %d\n",
11568 bl->regno, bl->total_benefit);
11570 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
11571 if (bl->initial_value)
11572 {
11573 fprintf (file, ", init val: ");
11574 print_simple_rtl (file, bl->initial_value);
11575 }
11576 if (bl->initial_test)
11577 {
11578 fprintf (file, ", init test: ");
11579 print_simple_rtl (file, bl->initial_test);
11580 }
11581 fputc ('\n', file);
11583 if (bl->final_value)
11584 {
11585 fprintf (file, " Final val: ");
11586 print_simple_rtl (file, bl->final_value);
11587 fputc ('\n', file);
11588 }
11590 if ((incr = biv_total_increment (bl)))
11591 {
11592 fprintf (file, " Total increment: ");
11593 print_simple_rtl (file, incr);
11594 fputc ('\n', file);
11595 }
11597 /* List the increments. */
11598 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
11599 {
11600 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
11601 print_simple_rtl (file, v->add_val);
11602 fputc ('\n', file);
11603 }
11605 /* List the givs. */
11606 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
11607 {
11608 fprintf (file, " Giv%d: insn %d, benefit %d, ",
11609 i, INSN_UID (v->insn), v->benefit);
11610 if (v->giv_type == DEST_ADDR)
11611 print_simple_rtl (file, v->mem);
11612 else
11613 print_simple_rtl (file, single_set (v->insn));
11614 fputc ('\n', file);
11615 }
11616 }
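/* Print a one-line description of the biv V (plus its final value,
   if VERBOSE and known) to FILE.  */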
11619 static void
11620 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
11621 {
11622 if (! v || ! file)
11623 return;
11625 fprintf (file,
11626 "Biv %d: insn %d",
11627 REGNO (v->dest_reg), INSN_UID (v->insn));
11628 fprintf (file, " const ");
11629 print_simple_rtl (file, v->add_val);
11631 if (verbose && v->final_value)
11632 {
11633 fputc ('\n', file);
11634 fprintf (file, " final ");
11635 print_simple_rtl (file, v->final_value);
11636 }
11638 fputc ('\n', file);
11639 }
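/* Print a description of the giv V to FILE: its source and destination
   registers, benefit, lifetime, any extension dependence, and the
   MULT_VAL and ADD_VAL of the linear function it computes.  */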
11642 static void
11643 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
11644 {
11645 if (! v || ! file)
11646 return;
11648 if (v->giv_type == DEST_REG)
11649 fprintf (file, "Giv %d: insn %d",
11650 REGNO (v->dest_reg), INSN_UID (v->insn));
11651 else
11652 fprintf (file, "Dest address: insn %d",
11653 INSN_UID (v->insn));
11655 fprintf (file, " src reg %d benefit %d",
11656 REGNO (v->src_reg), v->benefit);
11657 fprintf (file, " lifetime %d",
11658 v->lifetime);
11660 if (v->replaceable)
11661 fprintf (file, " replaceable");
11663 if (v->no_const_addval)
11664 fprintf (file, " ncav");
11666 if (v->ext_dependent)
11667 {
11668 switch (GET_CODE (v->ext_dependent))
11669 {
11670 case SIGN_EXTEND:
11671 fprintf (file, " ext se");
11672 break;
11673 case ZERO_EXTEND:
11674 fprintf (file, " ext ze");
11675 break;
11676 case TRUNCATE:
11677 fprintf (file, " ext tr");
11678 break;
11679 default:
11680 gcc_unreachable ();
11681 }
11682 }
11684 fputc ('\n', file);
11685 fprintf (file, " mult ");
11686 print_simple_rtl (file, v->mult_val);
11688 fputc ('\n', file);
11689 fprintf (file, " add ");
11690 print_simple_rtl (file, v->add_val);
11692 if (verbose && v->final_value)
11693 {
11694 fputc ('\n', file);
11695 fprintf (file, " final ");
11696 print_simple_rtl (file, v->final_value);
11697 }
11699 fputc ('\n', file);
11700 }
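/* The entry points below dump the various loop data structures to
   stderr; they are meant to be called by hand from the debugger.  */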
11703 void
11704 debug_ivs (const struct loop *loop)
11705 {
11706 loop_ivs_dump (loop, stderr, 1);
11707 }
11710 void
11711 debug_iv_class (const struct iv_class *bl)
11712 {
11713 loop_iv_class_dump (bl, stderr, 1);
11714 }
11717 void
11718 debug_biv (const struct induction *v)
11719 {
11720 loop_biv_dump (v, stderr, 1);
11721 }
11724 void
11725 debug_giv (const struct induction *v)
11726 {
11727 loop_giv_dump (v, stderr, 1);
11728 }
11731 #define LOOP_BLOCK_NUM_1(INSN) \
11732 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
11734 /* The notes do not have an assigned block, so look at the next insn. */
11735 #define LOOP_BLOCK_NUM(INSN) \
11736 ((INSN) ? (NOTE_P (INSN) \
11737 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
11738 : LOOP_BLOCK_NUM_1 (INSN)) \
11739 : -1)
11741 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
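/* Dump loop-specific auxiliary information for LOOP to FILE.  This is
   the callback handed to flow_loop_dump / flow_loops_dump below.  */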
11743 static void
11744 loop_dump_aux (const struct loop *loop, FILE *file,
11745 int verbose ATTRIBUTE_UNUSED)
11746 {
11747 rtx label;
11749 if (! loop || ! file || !BB_HEAD (loop->first))
11750 return;
11752 /* Print diagnostics to compare our concept of a loop with
11753 what the loop notes say. */
11754 if (! PREV_INSN (BB_HEAD (loop->first))
11755 || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
11756 || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
11757 != NOTE_INSN_LOOP_BEG)
11758 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
11759 INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
11760 if (! NEXT_INSN (BB_END (loop->last))
11761 || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
11762 || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
11763 != NOTE_INSN_LOOP_END)
11764 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
11765 INSN_UID (NEXT_INSN (BB_END (loop->last))));
11767 if (loop->start)
11768 {
11769 fprintf (file,
11770 ";; start %d (%d), end %d (%d)\n",
11771 LOOP_BLOCK_NUM (loop->start),
11772 LOOP_INSN_UID (loop->start),
11773 LOOP_BLOCK_NUM (loop->end),
11774 LOOP_INSN_UID (loop->end));
11775 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
11776 LOOP_BLOCK_NUM (loop->top),
11777 LOOP_INSN_UID (loop->top),
11778 LOOP_BLOCK_NUM (loop->scan_start),
11779 LOOP_INSN_UID (loop->scan_start));
11780 fprintf (file, ";; exit_count %d", loop->exit_count);
11781 if (loop->exit_count)
11782 {
11783 fputs (", labels:", file);
11784 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
11785 {
11786 fprintf (file, " %d ",
11787 LOOP_INSN_UID (XEXP (label, 0)));
11788 }
11789 }
11790 fputs ("\n", file);
11791 }
11792 }
11794 /* Call this function from the debugger to dump LOOP. */
11796 void
11797 debug_loop (const struct loop *loop)
11798 {
11799 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
11800 }
11802 /* Call this function from the debugger to dump LOOPS. */
11804 void
11805 debug_loops (const struct loops *loops)
11806 {
11807 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
11808 }