/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995,
   1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.

   Basic induction variables (BIVs) are pseudo registers that are set
   within a loop only by incrementing or decrementing their value.
   General induction variables (GIVs) are pseudo registers whose value
   is a linear function of a basic induction variable.  BIVs are
   recognized by `basic_induction_var'; GIVs by `general_induction_var'.

   Once induction variables are identified, strength reduction is
   applied to the general induction variables, and induction variable
   elimination is applied to the basic induction variables.

   The pass also finds cases where a register is set within the loop by
   zero-extending a narrower value, and changes these to zero the entire
   register once before the loop and merely copy the low part within the
   loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */
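
/* Illustrative example (added commentary, not part of the original
   source): in a loop such as

       for (i = 0; i < n; i++)
	 a[i] = 0;

   `i' is a biv, since it changes only by being incremented, while the
   address computation `a + i*4' (assuming 4-byte elements) is a giv,
   since it is a linear function of `i'.  Strength reduction rewrites
   the giv as a pointer that starts at `a' and is bumped by 4 each
   iteration, removing the multiply from the loop body; if `i' has no
   other uses, induction variable elimination can then rewrite the exit
   test in terms of the pointer and delete `i' entirely.  */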
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"
#include "ggc.h"
#include "timevar.h"
#include "tree-pass.h"
/* Get the loop info pointer of a loop.  */
#define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux)

/* Get a pointer to the loop movables structure.  */
#define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables)

/* Get a pointer to the loop registers structure.  */
#define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs)

/* Get a pointer to the loop induction variables structure.  */
#define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs)
/* Get the luid of an insn.  Catch the error of trying to reference the
   LUID of an insn added during loop optimization, since these don't
   have LUIDs.  */

#define INSN_LUID(INSN) \
  (gcc_assert (INSN_UID (INSN) < max_uid_for_loop), uid_luid[INSN_UID (INSN)])

#define REGNO_FIRST_LUID(REGNO)			\
  (REGNO_FIRST_UID (REGNO) < max_uid_for_loop	\
   ? uid_luid[REGNO_FIRST_UID (REGNO)]		\
   : 0)
#define REGNO_LAST_LUID(REGNO)			\
  (REGNO_LAST_UID (REGNO) < max_uid_for_loop	\
   ? uid_luid[REGNO_LAST_UID (REGNO)]		\
   : INT_MAX)
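
/* Added commentary: the fallback values above are deliberately
   conservative.  When the first or last use of a register is an insn
   created during loop optimization (uid >= max_uid_for_loop, so no
   luid exists), the register is treated as first used at luid 0
   (before everything) and last used at INT_MAX (after everything),
   i.e. as live across the entire region being analyzed.  */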
99 /* A "basic induction variable" or biv is a pseudo reg that is set
100 (within this loop) only by incrementing or decrementing it. */
101 /* A "general induction variable" or giv is a pseudo reg whose
102 value is a linear function of a biv. */
104 /* Bivs are recognized by `basic_induction_var';
105 Givs by `general_induction_var'. */
107 /* An enum for the two different types of givs, those that are used
108 as memory addresses and those that are calculated into registers. */
109 enum g_types
111 DEST_ADDR,
112 DEST_REG

/* A `struct induction' is created for every instruction that sets
   an induction variable (either a biv or a giv).  */

struct induction
{
  rtx insn;		      /* The insn that sets a biv or giv.  */
  rtx new_reg;		      /* New register, containing strength reduced
				 version of this giv.  */
  rtx src_reg;		      /* Biv from which this giv is computed.
				 (If this is a biv, then this is the biv.)  */
  enum g_types giv_type;      /* Indicates whether DEST_ADDR or DEST_REG.  */
  rtx dest_reg;		      /* Destination register for insn: this is the
				 register which was the biv or giv.
				 For a biv, this equals src_reg.
				 For a DEST_ADDR type giv, this is 0.  */
  rtx *location;	      /* Place in the insn where this giv occurs.
				 If GIV_TYPE is DEST_REG, this is 0.  */
			      /* For a biv, this is the place where add_val
				 was found.  */
  enum machine_mode mode;     /* The mode of this biv or giv.  */
  rtx mem;		      /* For DEST_ADDR, the memory object.  */
  rtx mult_val;		      /* Multiplicative factor for src_reg.  */
  rtx add_val;		      /* Additive constant for that product.  */
  int benefit;		      /* Gain from eliminating this insn.  */
  rtx final_value;	      /* If the giv is used outside the loop, and its
				 final value could be calculated, it is put
				 here, and the giv is made replaceable.  Set
				 the giv to this value before the loop.  */
  unsigned combined_with;     /* The number of givs this giv has been
				 combined with.  If nonzero, this giv
				 cannot combine with any other giv.  */
  unsigned replaceable : 1;   /* 1 if we can substitute the strength-reduced
				 variable for the original variable.
				 0 means they must be kept separate and the
				 new one must be copied into the old pseudo
				 reg each time the old one is set.  */
  unsigned not_replaceable : 1; /* Used to prevent duplicating work.  This is
				 1 if we know that the giv definitely can
				 not be made replaceable, in which case we
				 don't bother checking the variable again
				 even if further info is available.
				 Both this and the above can be zero.  */
  unsigned ignore : 1;	      /* 1 prohibits further processing of giv.  */
  unsigned always_computable : 1; /* 1 if this value is computable every
				 iteration.  */
  unsigned always_executed : 1; /* 1 if this set occurs each iteration.  */
  unsigned maybe_multiple : 1; /* Only used for a biv and 1 if this biv
				 update may be done multiple times per
				 iteration.  */
  unsigned cant_derive : 1;   /* For givs, 1 if this giv cannot derive
				 another giv.  This occurs in many cases
				 where a giv's lifetime spans an update to
				 a biv.  */
  unsigned maybe_dead : 1;    /* 1 if this giv might be dead.  In that case,
				 we won't use it to eliminate a biv, it
				 would probably lose.  */
  unsigned auto_inc_opt : 1;  /* 1 if this giv had its increment output next
				 to it to try to form an auto-inc address.  */
  unsigned shared : 1;
  unsigned no_const_addval : 1; /* 1 if add_val does not contain a const.  */
  int lifetime;		      /* Length of life of this giv.  */
  rtx derive_adjustment;      /* If nonzero, is an adjustment to be
				 subtracted from add_val when this giv
				 derives another.  This occurs when the
				 giv spans a biv update by incrementation.  */
  rtx ext_dependent;	      /* If nonzero, is a sign or zero extension
				 of a biv on which this giv is dependent.  */
  struct induction *next_iv;  /* For givs, links together all givs that are
				 based on the same biv.  For bivs, links
				 together all biv entries that refer to the
				 same biv register.  */
  struct induction *same;     /* For givs, if the giv has been combined with
				 another giv, this points to the base giv.
				 The base giv will have COMBINED_WITH nonzero.
				 For bivs, if the biv has the same LOCATION
				 as another biv, this points to the base
				 biv.  */
  struct induction *same_insn; /* If there are multiple identical givs in
				 the same insn, then all but one have this
				 field set, and they all point to the giv
				 that doesn't have this field set.  */
  rtx last_use;		      /* For a giv made from a biv increment, this is
				 a substitute for the lifetime information.  */
};

/* A `struct iv_class' is created for each biv.  */

struct iv_class
{
  unsigned int regno;		/* Pseudo reg which is the biv.  */
  int biv_count;		/* Number of insns setting this reg.  */
  struct induction *biv;	/* List of all insns that set this reg.  */
  int giv_count;		/* Number of DEST_REG givs computed from this
				   biv.  The resulting count is only used in
				   check_dbra_loop.  */
  struct induction *giv;	/* List of all insns that compute a giv
				   from this reg.  */
  int total_benefit;		/* Sum of BENEFITs of all those givs.  */
  rtx initial_value;		/* Value of reg at loop start.  */
  rtx initial_test;		/* Test performed on BIV before loop.  */
  rtx final_value;		/* Value of reg at loop end, if known.  */
  struct iv_class *next;	/* Links all class structures together.  */
  rtx init_insn;		/* insn which initializes biv, 0 if none.  */
  rtx init_set;			/* SET of INIT_INSN, if any.  */
  unsigned incremented : 1;	/* 1 if somewhere incremented/decremented.  */
  unsigned eliminable : 1;	/* 1 if plausible candidate for
				   elimination.  */
  unsigned nonneg : 1;		/* 1 if we added a REG_NONNEG note for
				   this.  */
  unsigned reversed : 1;	/* 1 if we reversed the loop that this
				   biv controls.  */
  unsigned all_reduced : 1;	/* 1 if all givs using this biv have
				   been reduced.  */
};


/* Definitions used by the basic induction variable discovery code.  */
enum iv_mode
{
  UNKNOWN_INDUCT,
  BASIC_INDUCT,
  NOT_BASIC_INDUCT,
  GENERAL_INDUCT
};

/* A `struct iv' is created for every register.  */

struct iv
{
  enum iv_mode type;
  union
  {
    struct iv_class *class;
    struct induction *info;
  } iv;
};
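
/* Added commentary: the union above is discriminated by TYPE.  Judging
   from the accessor macros below, the `class' member is meaningful for
   a BASIC_INDUCT register and the `info' member for a GENERAL_INDUCT
   register.  */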

#define REG_IV_TYPE(ivs, n) ivs->regs[n].type
#define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info
#define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class


struct loop_ivs
{
  /* Indexed by register number, contains pointer to `struct
     iv' if register is an induction variable.  */
  struct iv *regs;

  /* Size of regs array.  */
  unsigned int n_regs;

  /* The head of a list which links together (via the next field)
     every iv class for the current loop.  */
  struct iv_class *list;
};


typedef struct loop_mem_info
{
  rtx mem;	/* The MEM itself.  */
  rtx reg;	/* Corresponding pseudo, if any.  */
  int optimize;	/* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;

struct loop_reg
{
  /* Number of times the reg is set during the loop being scanned.
     During code motion, a negative value indicates a reg that has
     been made a candidate; in particular -2 means that it is a
     candidate that we know is equal to a constant and -1 means that
     it is a candidate not known equal to a constant.  After code
     motion, regs moved have 0 (which is accurate now) while the
     failed candidates have the original number of times set.

     Therefore, at all times, == 0 indicates an invariant register;
     < 0 a conditionally invariant one.  */
  int set_in_loop;

  /* Original value of set_in_loop; same except that this value
     is not set negative for a reg whose sets have been made candidates
     and not set to 0 for a reg that is moved.  */
  int n_times_set;

  /* Contains the insn in which a register was used if it was used
     exactly once; contains const0_rtx if it was used more than once.  */
  rtx single_usage;

  /* Nonzero indicates that the register cannot be moved or strength
     reduced.  */
  char may_not_optimize;

  /* Nonzero means reg N has already been moved out of one loop.
     This reduces the desire to move it out of another.  */
  char moved_once;
};

struct loop_regs
{
  int num;			/* Number of regs used in table.  */
  int size;			/* Size of table.  */
  struct loop_reg *array;	/* Register usage info. array.  */
  int multiple_uses;		/* Nonzero if a reg has multiple uses.  */
};


struct loop_movables
{
  /* Head of movable chain.  */
  struct movable *head;
  /* Last movable in chain.  */
  struct movable *last;
};

/* Information pertaining to a loop.  */

struct loop_info
{
  /* Nonzero if there is a subroutine call in the current loop.  */
  int has_call;
  /* Nonzero if there is a libcall in the current loop.  */
  int has_libcall;
  /* Nonzero if there is a non-constant call in the current loop.  */
  int has_nonconst_call;
  /* Nonzero if there is a prefetch instruction in the current loop.  */
  int has_prefetch;
  /* Nonzero if there is a volatile memory reference in the current
     loop.  */
  int has_volatile;
  /* Nonzero if there is a tablejump in the current loop.  */
  int has_tablejump;
  /* Nonzero if there are ways to leave the loop other than falling
     off the end.  */
  int has_multiple_exit_targets;
  /* Nonzero if there is an indirect jump in the current function.  */
  int has_indirect_jump;
  /* Register or constant initial loop value.  */
  rtx initial_value;
  /* Register or constant value used for comparison test.  */
  rtx comparison_value;
  /* Register or constant approximate final value.  */
  rtx final_value;
  /* Register or constant initial loop value with term common to
     final_value removed.  */
  rtx initial_equiv_value;
  /* Register or constant final loop value with term common to
     initial_value removed.  */
  rtx final_equiv_value;
  /* Register corresponding to iteration variable.  */
  rtx iteration_var;
  /* Constant loop increment.  */
  rtx increment;
  enum rtx_code comparison_code;
  /* Holds the number of loop iterations.  It is zero if the number
     could not be calculated.  Must be unsigned since the number of
     iterations can be as high as 2^wordsize - 1.  For loops with a
     wider iterator, this number will be zero if the number of loop
     iterations is too large for an unsigned integer to hold.  */
  unsigned HOST_WIDE_INT n_iterations;
  int used_count_register;
  /* The loop iterator induction variable.  */
  struct iv_class *iv;
  /* List of MEMs that are stored in this loop.  */
  rtx store_mems;
  /* Array of MEMs that are used (read or written) in this loop, but
     cannot be aliased by anything in this loop, except perhaps
     themselves.  In other words, if mems[i] is altered during
     the loop, it is altered by an expression that is rtx_equal_p to
     it.  */
  loop_mem_info *mems;
  /* The index of the next available slot in MEMS.  */
  int mems_idx;
  /* The number of elements allocated in MEMS.  */
  int mems_allocated;
  /* Nonzero if we don't know what MEMs were changed in the current
     loop.  This happens if the loop contains a call (in which case
     `has_call' will also be set) or if we store into more than
     NUM_STORES MEMs.  */
  int unknown_address_altered;
  /* The above doesn't count any readonly memory locations that are
     stored.  This does.  */
  int unknown_constant_address_altered;
  /* Count of memory write instructions discovered in the loop.  */
  int num_mem_sets;
  /* The insn where the first of these was found.  */
  rtx first_loop_store_insn;
  /* The chain of movable insns in loop.  */
  struct loop_movables movables;
  /* The registers used in the loop.  */
  struct loop_regs regs;
  /* The induction variable information in loop.  */
  struct loop_ivs ivs;
  /* Nonzero if call is in pre_header extended basic block.  */
  int pre_header_has_call;
};

/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (gcc_unreachable (), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum fraction of memory references that is worth prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
  ((REGNO) < FIRST_PSEUDO_REGISTER \
   ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
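
/* Added commentary: LOOP_REG_GLOBAL_P is true when the register's live
   range, measured in luids, extends beyond the loop's start or end
   insns, so the register cannot be treated as a loop-local temporary.
   LOOP_REGNO_NREGS yields the number of hard registers spanned by
   SET_DEST for a hard register, and 1 for a pseudo; callers use it to
   update several consecutive regs->array entries at once.  */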

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically through
   the code.  We use them to see whether a jump comes from outside a
   given loop.  */

static int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

static struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

static int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
static unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn.  */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets.  */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;   /* Nonzero means it is a mode for
				   a low part that we should avoid changing when
				   clearing the rest of the reg.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable.  */
  unsigned int force : 1;	/* 1 means MUST move this insn.  */
  unsigned int global : 1;	/* 1 means reg is live outside this loop.  */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this.  */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1;	/* 1 means we copy to a new pseudo and replace
				    the original insn with a copy from that
				    pseudo, rather than deleting it.  */
  struct movable *match;	/* First entry for same value.  */
  struct movable *forces;	/* An insn that must be moved if this is.  */
  struct movable *next;
};


static FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static rtx find_regs_nested (rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static rtx libcall_other_reg (rtx, rtx);
static void record_excess_regs (rtx, rtx, rtx *);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static HOST_WIDE_INT get_monotonic_increment (struct iv_class *);
static bool biased_biv_fits_mode_p (const struct loop *, struct iv_class *,
				    HOST_WIDE_INT, enum machine_mode,
				    unsigned HOST_WIDE_INT);
static bool biv_fits_mode_p (const struct loop *, struct iv_class *,
			     HOST_WIDE_INT, enum machine_mode, bool);
static bool extension_within_bounds_p (const struct loop *, struct iv_class *,
				       HOST_WIDE_INT, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **,
				enum machine_mode);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
static int loop_invariant_p (const struct loop *, rtx);
static rtx loop_insn_hoist (const struct loop *, rtx);
static void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx,
					  rtx, basic_block, rtx);
static rtx loop_insn_emit_before (const struct loop *, basic_block,
				  rtx, rtx);
static int loop_insn_first_p (rtx, rtx);
static rtx get_condition_for_loop (const struct loop *, rtx);
static void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx);
static void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx);
static rtx extend_value_for_giv (struct induction *, rtx);
static rtx loop_insn_sink (const struct loop *, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))
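
/* Added note: the uid bound check above must come first.  Insns
   created during loop optimization can have uids >= max_uid_for_loop
   and no luid entry, and INSN_LUID would assert on them (see the
   definition of INSN_LUID above).  */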

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}

/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (!NOTE_P (insn)
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  return i + 1;
}
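
/* Illustrative example (added commentary): if the stream between START
   and END holds insns with uids 7 and 3 separated by a line-number
   note, the insns receive luids PREV_LUID+1 and PREV_LUID+2, the note
   shares the luid of the insn before it (PREV_LUID+1), and the
   function returns PREV_LUID+3.  Uids can thus appear in any order
   while luids always follow stream order.  */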

/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (NOTE_P (insn)
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  gcc_assert (get_max_uid () <= max_uid_for_loop);
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	{
	  scan_loop (loop, flags);
	  ggc_collect ();
	}
    }

  end_alias_analysis ();

  /* Clean up.  */
  for (i = 0; i < (int) loops->num; i++)
    free (loops_info[i].mems);

  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}

/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
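
/* Added note: this walks the loop in *execution* order, not stream
   order.  For a loop entered near the bottom the walk wraps from
   LOOP->END back to LOOP->TOP and terminates once it reaches
   LOOP->SCAN_START again, so each insn is visited exactly once.  */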

/* Find any register references hidden inside X and add them to
   the dependency list DEPS.  This is used to look inside the MEM of a
   CLOBBER when checking whether a PARALLEL can be pulled out of a loop.  */

static rtx
find_regs_nested (rtx deps, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  if (code == REG)
    deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
  else
    {
      const char *fmt = GET_RTX_FORMAT (code);
      int i, j;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    deps = find_regs_nested (deps, XEXP (x, i));
	  else if (fmt[i] == 'E')
	    for (j = 0; j < XVECLEN (x, i); j++)
	      deps = find_regs_nested (deps, XVECEXP (x, i, j));
	}
    }
  return deps;
}

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && !LABEL_P (p) && ! INSN_P (p)
	 && (!NOTE_P (p)
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (JUMP_P (p)
      /* Loop entry must be unconditional jump (and not a RETURN).  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
	 jumps out of the loop (meaning it's no loop).
	 This case can happen for things like
	 do {..} while (0).  If this label was generated previously
	 by loop, we can't tell anything about it and have to reject
	 the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || !LABEL_P (loop->scan_start))
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	     INSN_UID (loop_start), INSN_UID (loop_end), insn_count);

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall--;
      if (NONJUMP_INSN_P (p))
	{
	  /* Do not scan past an optimization barrier.  */
	  if (GET_CODE (PATTERN (p)) == ASM_INPUT)
	    break;
	  temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
	  if (temp)
	    in_libcall++;
	  if (! in_libcall
	      && (set = single_set (p))
	      && REG_P (SET_DEST (set))
	      && SET_DEST (set) != frame_pointer_rtx
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	      && SET_DEST (set) != pic_offset_table_rtx
#endif
	      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	    {
	      int tem1 = 0;
	      int tem2 = 0;
	      int move_insn = 0;
	      int insert_temp = 0;
	      rtx src = SET_SRC (set);
	      rtx dependencies = 0;

	      /* Figure out what to use as a source of this insn.  If a
		 REG_EQUIV note is given or if a REG_EQUAL note with a
		 constant operand is specified, use it as the source and
		 mark that we should move this insn by calling
		 emit_move_insn rather than duplicating the insn.

		 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
		 note is present.  */
	      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	      if (temp)
		src = XEXP (temp, 0), move_insn = 1;
	      else
		{
		  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		  if (temp && CONSTANT_P (XEXP (temp, 0)))
		    src = XEXP (temp, 0), move_insn = 1;
		  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		    {
		      src = XEXP (temp, 0);
		      /* A libcall block can use regs that don't appear in
			 the equivalent expression.  To move the libcall,
			 we must move those regs too.  */
		      dependencies = libcall_other_reg (p, src);
		    }
		}

	      /* For parallels, add any possible uses to the dependencies, as
		 we can't move the insn without resolving them first.
		 MEMs inside CLOBBERs may also reference registers; these
		 count as implicit uses.  */
	      if (GET_CODE (PATTERN (p)) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		    {
		      rtx x = XVECEXP (PATTERN (p), 0, i);
		      if (GET_CODE (x) == USE)
			dependencies
			  = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					       dependencies);
		      else if (GET_CODE (x) == CLOBBER
			       && MEM_P (XEXP (x, 0)))
			dependencies = find_regs_nested (dependencies,
						XEXP (XEXP (x, 0), 0));
		    }
		}

	      if (/* The register is used in basic blocks other
		     than the one where it is set (meaning that
		     something after this point in the loop might
		     depend on its value before the set).  */
		  ! reg_in_basic_block_p (p, SET_DEST (set))
		  /* And the set is not guaranteed to be executed once
		     the loop starts, or the value before the set is
		     needed before the set occurs...

		     ??? Note we have quadratic behavior here, mitigated
		     by the fact that the previous test will often fail for
		     large loops.  Rather than re-scanning the entire loop
		     each time for register usage, we should build tables
		     of the register usage and use them here instead.  */
		  && (maybe_never
		      || loop_reg_used_before_p (loop, set, p)))
		/* It is unsafe to move the set.  However, it may be OK to
		   move the source into a new pseudo, and substitute a
		   reg-to-reg copy for the original insn.

		   This code used to consider it OK to move a set of a variable
		   which was not created by the user and not used in an exit
		   test.
		   That behavior is incorrect and was removed.  */
		insert_temp = 1;

	      /* Don't try to optimize a MODE_CC set with a constant
		 source.  It probably will be combined with a conditional
		 jump.  */
	      if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
		  && CONSTANT_P (src))
		;
	      /* Don't try to optimize a register that was made
		 by loop-optimization for an inner loop.
		 We don't know its life-span, so we can't compute
		 the benefit.  */
	      else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
		;
	      /* Don't move the source and add a reg-to-reg copy:
		 - with -Os (this certainly increases size),
		 - if the mode doesn't support copy operations (obviously),
		 - if the source is already a reg (the motion will gain nothing),
		 - if the source is a legitimate constant (likewise),
		 - if the dest is a hard register (may be unrecognizable).  */
	      else if (insert_temp
		       && (optimize_size
			   || ! can_copy_p (GET_MODE (SET_SRC (set)))
			   || REG_P (SET_SRC (set))
			   || (CONSTANT_P (SET_SRC (set))
			       && LEGITIMATE_CONSTANT_P (SET_SRC (set)))
			   || REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER))
		;
	      else if ((tem = loop_invariant_p (loop, src))
		       && (dependencies == 0
			   || (tem2
			       = loop_invariant_p (loop, dependencies)) != 0)
		       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
			   || (tem1
			       = consec_sets_invariant_p
				 (loop, SET_DEST (set),
				  regs->array[REGNO (SET_DEST (set))].set_in_loop,
				  p)))
		       /* If the insn can cause a trap (such as divide by zero),
			  can't move it unless it's guaranteed to be executed
			  once loop is entered.  Even a function call might
			  prevent the trap insn from being reached
			  (since it might exit!)  */
		       && ! ((maybe_never || call_passed)
			     && may_trap_p (src)))
		{
		  struct movable *m;
		  int regno = REGNO (SET_DEST (set));
		  rtx user, user_set;

		  /* A potential lossage is where we have a case where two
		     insns can be combined as long as they are both in the
		     loop, but we move one of them outside the loop.  For
		     large loops, this can lose.  The most common case of
		     this is the address of a function being called.

		     Therefore, if this register is marked as being used
		     exactly once if we are in a loop with calls
		     (a "large loop"), see if we can replace the usage of
		     this register with the source of this SET.  If we can,
		     delete this insn.

		     Don't do this if:
		     (1) P has a REG_RETVAL note or
		     (2) if we have SMALL_REGISTER_CLASSES and
			 (a) SET_SRC is a hard register or
			 (b) the destination of the user is a hard register.  */

		  if (loop_info->has_call
		      && regno >= FIRST_PSEUDO_REGISTER
		      && (user = regs->array[regno].single_usage) != NULL
		      && user != const0_rtx
		      && REGNO_FIRST_UID (regno) == INSN_UID (p)
		      && REGNO_LAST_UID (regno) == INSN_UID (user)
		      && regs->array[regno].set_in_loop == 1
		      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		      && ! side_effects_p (SET_SRC (set))
		      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		      && (!SMALL_REGISTER_CLASSES
			  || !REG_P (SET_SRC (set))
			  || !HARD_REGISTER_P (SET_SRC (set)))
		      && (!SMALL_REGISTER_CLASSES
			  || !NONJUMP_INSN_P (user)
			  || !(user_set = single_set (user))
			  || !REG_P (SET_DEST (user_set))
			  || !HARD_REGISTER_P (SET_DEST (user_set)))
		      /* This test is not redundant; SET_SRC (set) might be
			 a call-clobbered register and the life of REGNO
			 might span a call.  */
		      && ! modified_between_p (SET_SRC (set), p, user)
		      && no_labels_between_p (p, user)
		      && validate_replace_rtx (SET_DEST (set),
					       SET_SRC (set), user))
		    {
		      /* Replace any usage in a REG_EQUAL note.  Must copy
			 the new source, so that we don't get rtx sharing
			 between the SET_SOURCE and REG_NOTES of insn p.  */
		      REG_NOTES (user)
			= replace_rtx (REG_NOTES (user), SET_DEST (set),
				       copy_rtx (SET_SRC (set)));

		      delete_insn (p);
		      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = 0;
		      continue;
		    }

		  m = xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_src = src;
		  m->dependencies = dependencies;
		  m->set_dest = SET_DEST (set);
		  m->force = 0;
		  m->consec
		    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
		  m->done = 0;
		  m->forces = 0;
		  m->partial = 0;
		  m->move_insn = move_insn;
		  m->move_insn_first = 0;
		  m->insert_temp = insert_temp;
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		  m->savemode = VOIDmode;
		  m->regno = regno;
		  /* Set M->cond if either loop_invariant_p
		     or consec_sets_invariant_p returned 2
		     (only conditionally invariant).  */
		  m->cond = ((tem | tem1 | tem2) > 1);
		  m->global = LOOP_REG_GLOBAL_P (loop, regno);
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = regs->array[regno].n_times_set;
		  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		    m->savings += libcall_benefit (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);

		  if (m->consec > 0)
		    {
		      /* It is possible for the first instruction to have a
			 REG_EQUAL note but a non-invariant SET_SRC, so we must
			 remember the status of the first instruction in case
			 the last instruction doesn't have a REG_EQUAL note.  */
		      m->move_insn_first = m->move_insn;

		      /* Skip this insn, not checking REG_LIBCALL notes.  */
		      p = next_nonnote_insn (p);
		      /* Skip the consecutive insns, if there are any.  */
		      p = skip_consec_insns (p, m->consec);
		      /* Back up to the last insn of the consecutive group.  */
		      p = prev_nonnote_insn (p);

		      /* We must now reset m->move_insn, m->is_equiv, and
			 possibly m->set_src to correspond to the effects of
			 all the insns.  */
		      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		      if (temp)
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			{
			  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
			  if (temp && CONSTANT_P (XEXP (temp, 0)))
			    m->set_src = XEXP (temp, 0), m->move_insn = 1;
			  else
			    m->move_insn = 0;
			}
		      m->is_equiv
			= (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		    }
		}
	      /* If this register is always set within a STRICT_LOW_PART
		 or set to zero, then its high bytes are constant.
		 So clear them outside the loop and within the loop
		 just load the low bytes.
		 We must check that the machine has an instruction to do so.
		 Also, if the value loaded into the register
		 depends on the same register, this cannot be done.  */
	      else if (SET_SRC (set) == const0_rtx
		       && NONJUMP_INSN_P (NEXT_INSN (p))
		       && (set1 = single_set (NEXT_INSN (p)))
		       && GET_CODE (set1) == SET
		       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
			   == SET_DEST (set))
		       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
		{
		  int regno = REGNO (SET_DEST (set));
		  if (regs->array[regno].set_in_loop == 2)
		    {
		      struct movable *m;
		      m = xmalloc (sizeof (struct movable));
		      m->next = 0;
		      m->insn = p;
		      m->set_dest = SET_DEST (set);
		      m->dependencies = 0;
		      m->force = 0;
		      m->consec = 0;
		      m->done = 0;
		      m->forces = 0;
		      m->move_insn = 0;
		      m->move_insn_first = 0;
		      m->insert_temp = insert_temp;
		      m->partial = 1;
		      /* If the insn may not be executed on some cycles,
			 we can't clear the whole reg; clear just high part.
			 Not even if the reg is used only within this loop.
			 Consider this:
			 while (1)
			   while (s != t) {
			     if (foo ()) x = *s;
			     use (x);
			   }
			 Clearing x before the inner loop could clobber a value
			 being saved from the last time around the outer loop.
			 However, if the reg is not used outside this loop
			 and all uses of the register are in the same
			 basic block as the store, there is no problem.

			 If this insn was made by loop, we don't know its
			 INSN_LUID and hence must make a conservative
			 assumption.  */
		      m->global = (INSN_UID (p) >= max_uid_for_loop
				   || LOOP_REG_GLOBAL_P (loop, regno)
				   || (labels_in_range_p
				       (p, REGNO_FIRST_LUID (regno))));
		      if (maybe_never && m->global)
			m->savemode = GET_MODE (SET_SRC (set1));
		      else
			m->savemode = VOIDmode;
		      m->regno = regno;
		      m->cond = 0;
		      m->match = 0;
		      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		      m->savings = 1;
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = -1;
		      /* Add M to the end of the chain MOVABLES.  */
		      loop_movables_add (movables, m);
		    }
		}
	    }
	}
1474 /* Past a call insn, we get to insns which might not be executed
1475 because the call might exit. This matters for insns that trap.
1476 Constant and pure call insns always return, so they don't count. */
1477 else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
1478 call_passed = 1;
1479 /* Past a label or a jump, we get to insns for which we
1480 can't count on whether or how many times they will be
1481 executed during each iteration. Therefore, we can
1482 only move out sets of trivial variables
1483 (those not used after the loop). */
1484 /* Similar code appears twice in strength_reduce. */
1485 else if ((LABEL_P (p) || JUMP_P (p))
1486 /* If we enter the loop in the middle, and scan around to the
1487 beginning, don't set maybe_never for that. This must be an
1488 unconditional jump, otherwise the code at the top of the
1489 loop might never be executed. Unconditional jumps are
1490 followed by a barrier then the loop_end. */
1491 && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
1492 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1493 && any_uncondjump_p (p)))
1494 maybe_never = 1;
1497 /* If one movable subsumes another, ignore that other. */
1499 ignore_some_movables (movables);
1501 /* For each movable insn, see if the reg that it loads
1502 leads when it dies right into another conditionally movable insn.
1503 If so, record that the second insn "forces" the first one,
1504 since the second can be moved only if the first is. */
1506 force_movables (movables);
1508 /* See if there are multiple movable insns that load the same value.
1509 If there are, make all but the first point at the first one
1510 through the `match' field, and add the priorities of them
1511 all together as the priority of the first. */
1513 combine_movables (movables, regs);
1515 /* Now consider each movable insn to decide whether it is worth moving.
1516 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1518 For machines with few registers this increases code size, so do not
1519 move movables when optimizing for code size on such machines.
1520 (The 18 below is the value for i386.) */
1522 if (!optimize_size
1523 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1525 move_movables (loop, movables, threshold, insn_count);
1527 /* Recalculate regs->array if move_movables has created new
1528 registers. */
1529 if (max_reg_num () > regs->num)
1531 loop_regs_scan (loop, 0);
1532 for (update_start = loop_start;
1533 PREV_INSN (update_start)
1534 && !LABEL_P (PREV_INSN (update_start));
1535 update_start = PREV_INSN (update_start))
1537 update_end = NEXT_INSN (loop_end);
1539 reg_scan_update (update_start, update_end, loop_max_reg);
1540 loop_max_reg = max_reg_num ();
1544 /* Now the candidates whose set_in_loop is still negative are those not moved.
1545 Change regs->array[I].set_in_loop to indicate that those are not actually
1546 invariant. */
1547 for (i = 0; i < regs->num; i++)
1548 if (regs->array[i].set_in_loop < 0)
1549 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1551 /* Now that we've moved some things out of the loop, we might be able to
1552 hoist even more memory references. */
1553 load_mems (loop);
1555 /* Recalculate regs->array if load_mems has created new registers. */
1556 if (max_reg_num () > regs->num)
1557 loop_regs_scan (loop, 0);
1559 for (update_start = loop_start;
1560 PREV_INSN (update_start)
1561 && !LABEL_P (PREV_INSN (update_start));
1562 update_start = PREV_INSN (update_start))
1564 update_end = NEXT_INSN (loop_end);
1566 reg_scan_update (update_start, update_end, loop_max_reg);
1567 loop_max_reg = max_reg_num ();
1569 if (flag_strength_reduce)
1571 if (update_end && LABEL_P (update_end))
1572 /* Ensure our label doesn't go away. */
1573 LABEL_NUSES (update_end)++;
1575 strength_reduce (loop, flags);
1577 reg_scan_update (update_start, update_end, loop_max_reg);
1578 loop_max_reg = max_reg_num ();
1580 if (update_end && LABEL_P (update_end)
1581 && --LABEL_NUSES (update_end) == 0)
1582 delete_related_insns (update_end);
1586 /* The movable information was required above for strength reduction; free it now. */
1587 loop_movables_free (movables);
1589 free (regs->array);
1590 regs->array = 0;
1591 regs->num = 0;
1594 /* Add elements to *OUTPUT to record all the pseudo-regs
1595 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1597 static void
1598 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1600 enum rtx_code code;
1601 const char *fmt;
1602 int i;
1604 code = GET_CODE (in_this);
1606 switch (code)
1608 case PC:
1609 case CC0:
1610 case CONST_INT:
1611 case CONST_DOUBLE:
1612 case CONST:
1613 case SYMBOL_REF:
1614 case LABEL_REF:
1615 return;
1617 case REG:
1618 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1619 && ! reg_mentioned_p (in_this, not_in_this))
1620 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1621 return;
1623 default:
1624 break;
1627 fmt = GET_RTX_FORMAT (code);
1628 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1630 int j;
1632 switch (fmt[i])
1634 case 'E':
1635 for (j = 0; j < XVECLEN (in_this, i); j++)
1636 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1637 break;
1639 case 'e':
1640 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1641 break;
1646 /* Check what regs are referred to in the libcall block ending with INSN,
1647 aside from those mentioned in the equivalent value.
1648 If there are none, return 0.
1649 If there are one or more, return an EXPR_LIST containing all of them. */
1651 static rtx
1652 libcall_other_reg (rtx insn, rtx equiv)
1654 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1655 rtx p = XEXP (note, 0);
1656 rtx output = 0;
1658 /* First, find all the regs used in the libcall block
1659 that are not mentioned as inputs to the result. */
1661 while (p != insn)
1663 if (INSN_P (p))
1664 record_excess_regs (PATTERN (p), equiv, &output);
1665 p = NEXT_INSN (p);
1668 return output;
1671 /* Return 1 if all uses of REG
1672 are between INSN and the end of the basic block. */
1674 static int
1675 reg_in_basic_block_p (rtx insn, rtx reg)
1677 int regno = REGNO (reg);
1678 rtx p;
1680 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1681 return 0;
1683 /* Search this basic block for the already recorded last use of the reg. */
1684 for (p = insn; p; p = NEXT_INSN (p))
1686 switch (GET_CODE (p))
1688 case NOTE:
1689 break;
1691 case INSN:
1692 case CALL_INSN:
1693 /* Ordinary insn: if this is the last use, we win. */
1694 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1695 return 1;
1696 break;
1698 case JUMP_INSN:
1699 /* Jump insn: if this is the last use, we win. */
1700 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1701 return 1;
1702 /* Otherwise, it's the end of the basic block, so we lose. */
1703 return 0;
1705 case CODE_LABEL:
1706 case BARRIER:
1707 /* It's the end of the basic block, so we lose. */
1708 return 0;
1710 default:
1711 break;
1715 /* The "last use" that was recorded can't be found after the first
1716 use. This can happen when the last use was deleted while
1717 processing an inner loop, that inner loop was then completely
1718 unrolled, and the outer loop is always exited after the inner loop,
1719 so that everything after the first use becomes a single basic block. */
1720 return 1;
1723 /* Compute the benefit of eliminating the insns in the block whose
1724 last insn is LAST. This may be a group of insns used to compute a
1725 value directly or can contain a library call. */
1727 static int
1728 libcall_benefit (rtx last)
1730 rtx insn;
1731 int benefit = 0;
1733 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1734 insn != last; insn = NEXT_INSN (insn))
1736 if (CALL_P (insn))
1737 benefit += 10; /* Assume at least this many insns in a library
1738 routine. */
1739 else if (NONJUMP_INSN_P (insn)
1740 && GET_CODE (PATTERN (insn)) != USE
1741 && GET_CODE (PATTERN (insn)) != CLOBBER)
1742 benefit++;
1745 return benefit;
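/* As a worked illustration (hypothetical counts, not from a real
   dump): a libcall block containing one CALL_INSN and three ordinary
   non-USE, non-CLOBBER insns before the final insn yields a benefit
   of 10 + 3 = 13; USEs, CLOBBERs and notes contribute nothing.  */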
1748 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1750 static rtx
1751 skip_consec_insns (rtx insn, int count)
1753 for (; count > 0; count--)
1755 rtx temp;
1757 /* If first insn of libcall sequence, skip to end. */
1758 /* Do this at start of loop, since INSN is guaranteed to
1759 be an insn here. */
1760 if (!NOTE_P (insn)
1761 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1762 insn = XEXP (temp, 0);
1764 do
1765 insn = NEXT_INSN (insn);
1766 while (NOTE_P (insn));
1769 return insn;
1772 /* Ignore any movable whose insn falls within a libcall
1773 which is part of another movable.
1774 We make use of the fact that the movable for the libcall value
1775 was made later and so appears later on the chain. */
1777 static void
1778 ignore_some_movables (struct loop_movables *movables)
1780 struct movable *m, *m1;
1782 for (m = movables->head; m; m = m->next)
1784 /* Is this a movable for the value of a libcall? */
1785 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1786 if (note)
1788 rtx insn;
1789 /* Check for earlier movables inside that range,
1790 and mark them invalid. We cannot use LUIDs here because
1791 insns created by loop.c for prior loops don't have LUIDs.
1792 Rather than reject all such insns from movables, we just
1793 explicitly check each insn in the libcall (since invariant
1794 libcalls aren't that common). */
1795 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1796 for (m1 = movables->head; m1 != m; m1 = m1->next)
1797 if (m1->insn == insn)
1798 m1->done = 1;
1803 /* For each movable insn, see if the reg that it loads
1804 feeds, where it dies, directly into another conditionally movable insn.
1805 If so, record that the second insn "forces" the first one,
1806 since the second can be moved only if the first is. */
1808 static void
1809 force_movables (struct loop_movables *movables)
1811 struct movable *m, *m1;
1813 for (m1 = movables->head; m1; m1 = m1->next)
1814 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1815 if (!m1->partial && !m1->done)
1817 int regno = m1->regno;
1818 for (m = m1->next; m; m = m->next)
1819 /* ??? Could this be a bug? What if CSE caused the
1820 register of M1 to be used after this insn?
1821 Since CSE does not update regno_last_uid,
1822 this insn M->insn might not be where it dies.
1823 But very likely this doesn't matter; what matters is
1824 that M's reg is computed from M1's reg. */
1825 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1826 && !m->done)
1827 break;
1828 if (m != 0 && m->set_src == m1->set_dest
1829 /* If m->consec, m->set_src isn't valid. */
1830 && m->consec == 0)
1831 m = 0;
1833 /* Increase the priority of moving the first insn,
1834 since it permits the second to be moved as well.
1835 Likewise for insns already forced by the first insn. */
1836 if (m != 0)
1838 struct movable *m2;
1840 m->forces = m1;
1841 for (m2 = m1; m2; m2 = m2->forces)
1843 m2->lifetime += m->lifetime;
1844 m2->savings += m->savings;
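/* For example (a hypothetical chain): if insn A sets r1 to an
   invariant and a conditionally movable insn B computes r2 from r1 at
   the point where r1 dies, then B->forces == A, and the loop above
   credits B's lifetime and savings to A -- and to anything A itself
   forces -- so that A is more likely to be moved, which in turn
   permits moving B.  */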
1850 /* Find invariant expressions that are equal and can be combined into
1851 one register. */
1853 static void
1854 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1856 struct movable *m;
1857 char *matched_regs = xmalloc (regs->num);
1858 enum machine_mode mode;
1860 /* Regs that are set more than once are not allowed to match
1861 or be matched. I'm no longer sure why not. */
1862 /* Only pseudo registers are allowed to match or be matched,
1863 since move_movables does not validate the change. */
1864 /* Perhaps testing m->consec_sets would be more appropriate here? */
1866 for (m = movables->head; m; m = m->next)
1867 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1868 && m->regno >= FIRST_PSEUDO_REGISTER
1869 && !m->insert_temp
1870 && !m->partial)
1872 struct movable *m1;
1873 int regno = m->regno;
1875 memset (matched_regs, 0, regs->num);
1876 matched_regs[regno] = 1;
1878 /* We want later insns to match the first one. Don't make the first
1879 one match any later ones. So start this loop at m->next. */
1880 for (m1 = m->next; m1; m1 = m1->next)
1881 if (m != m1 && m1->match == 0
1882 && !m1->insert_temp
1883 && regs->array[m1->regno].n_times_set == 1
1884 && m1->regno >= FIRST_PSEUDO_REGISTER
1885 /* A reg used outside the loop mustn't be eliminated. */
1886 && !m1->global
1887 /* A reg used for zero-extending mustn't be eliminated. */
1888 && !m1->partial
1889 && (matched_regs[m1->regno]
1890 ||
1891 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1892 /* See if the source of M1 says it matches M. */
1893 && ((REG_P (m1->set_src)
1894 && matched_regs[REGNO (m1->set_src)])
1895 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1896 movables, regs))))
1897 && ((m->dependencies == m1->dependencies)
1898 || rtx_equal_p (m->dependencies, m1->dependencies)))
1900 m->lifetime += m1->lifetime;
1901 m->savings += m1->savings;
1902 m1->done = 1;
1903 m1->match = m;
1904 matched_regs[m1->regno] = 1;
1908 /* Now combine the regs used for zero-extension.
1909 This can be done for those not marked `global'
1910 provided their lives don't overlap. */
1912 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1913 mode = GET_MODE_WIDER_MODE (mode))
1915 struct movable *m0 = 0;
1917 /* Combine all the registers for extension from mode MODE.
1918 Don't combine any that are used outside this loop. */
1919 for (m = movables->head; m; m = m->next)
1920 if (m->partial && ! m->global
1921 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1923 struct movable *m1;
1925 int first = REGNO_FIRST_LUID (m->regno);
1926 int last = REGNO_LAST_LUID (m->regno);
1928 if (m0 == 0)
1930 /* First one: don't check for overlap, just record it. */
1931 m0 = m;
1932 continue;
1935 /* Make sure they extend to the same mode.
1936 (Almost always true.) */
1937 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1938 continue;
1940 /* We already have one: check for overlap with those
1941 already combined together. */
1942 for (m1 = movables->head; m1 != m; m1 = m1->next)
1943 if (m1 == m0 || (m1->partial && m1->match == m0))
1944 if (! (REGNO_FIRST_LUID (m1->regno) > last
1945 || REGNO_LAST_LUID (m1->regno) < first))
1946 goto overlap;
1948 /* No overlap: we can combine this with the others. */
1949 m0->lifetime += m->lifetime;
1950 m0->savings += m->savings;
1951 m->done = 1;
1952 m->match = m0;
1954 overlap:
1959 /* Clean up. */
1960 free (matched_regs);
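/* The non-overlap test above is the usual closed-interval check: two
   live ranges [first1, last1] and [first2, last2] are disjoint iff
   first1 > last2 || last1 < first2.  A minimal sketch of that
   predicate (the name is hypothetical, not part of loop.c):

	static int
	luid_ranges_disjoint_p (int first1, int last1,
				int first2, int last2)
	{
	  return first1 > last2 || last1 < first2;
	}
*/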
1963 /* Returns the number of movable instructions in LOOP that were not
1964 moved outside the loop. */
1966 static int
1967 num_unmoved_movables (const struct loop *loop)
1969 int num = 0;
1970 struct movable *m;
1972 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1973 if (!m->done)
1974 ++num;
1976 return num;
1980 /* Return 1 if regs X and Y will become the same if moved. */
1982 static int
1983 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1985 unsigned int xn = REGNO (x);
1986 unsigned int yn = REGNO (y);
1987 struct movable *mx, *my;
1989 for (mx = movables->head; mx; mx = mx->next)
1990 if (mx->regno == xn)
1991 break;
1993 for (my = movables->head; my; my = my->next)
1994 if (my->regno == yn)
1995 break;
1997 return (mx && my
1998 && ((mx->match == my->match && mx->match != 0)
1999 || mx->match == my
2000 || mx == my->match));
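/* For example (hypothetical regnos): if the movable that sets
   (reg 61) has its `match' field pointing at the movable for
   (reg 60), the pair 60/61 succeeds via the mx == my->match case;
   two regs whose movables both match a third movable succeed via
   mx->match == my->match.  */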
2003 /* Return 1 if X and Y are identical-looking rtx's.
2004 This is the Lisp function EQUAL for rtx arguments.
2006 If two registers are matching movables or a movable register and an
2007 equivalent constant, consider them equal. */
2009 static int
2010 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
2011 struct loop_regs *regs)
2013 int i;
2014 int j;
2015 struct movable *m;
2016 enum rtx_code code;
2017 const char *fmt;
2019 if (x == y)
2020 return 1;
2021 if (x == 0 || y == 0)
2022 return 0;
2024 code = GET_CODE (x);
2026 /* If we have a register and a constant, they may sometimes be
2027 equal. */
2028 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
2029 && CONSTANT_P (y))
2031 for (m = movables->head; m; m = m->next)
2032 if (m->move_insn && m->regno == REGNO (x)
2033 && rtx_equal_p (m->set_src, y))
2034 return 1;
2036 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
2037 && CONSTANT_P (x))
2039 for (m = movables->head; m; m = m->next)
2040 if (m->move_insn && m->regno == REGNO (y)
2041 && rtx_equal_p (m->set_src, x))
2042 return 1;
2045 /* Otherwise, rtx's of different codes cannot be equal. */
2046 if (code != GET_CODE (y))
2047 return 0;
2049 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
2050 (REG:SI x) and (REG:HI x) are NOT equivalent. */
2052 if (GET_MODE (x) != GET_MODE (y))
2053 return 0;
2055 /* These types of rtx's can be compared nonrecursively. */
2056 switch (code)
2058 case PC:
2059 case CC0:
2060 case CONST_INT:
2061 case CONST_DOUBLE:
2062 return 0;
2064 case REG:
2065 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
2067 case LABEL_REF:
2068 return XEXP (x, 0) == XEXP (y, 0);
2069 case SYMBOL_REF:
2070 return XSTR (x, 0) == XSTR (y, 0);
2072 default:
2073 break;
2076 /* Compare the elements. If any pair of corresponding elements
2077 fails to match, return 0 for the whole thing. */
2079 fmt = GET_RTX_FORMAT (code);
2080 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2082 switch (fmt[i])
2084 case 'w':
2085 if (XWINT (x, i) != XWINT (y, i))
2086 return 0;
2087 break;
2089 case 'i':
2090 if (XINT (x, i) != XINT (y, i))
2091 return 0;
2092 break;
2094 case 'E':
2095 /* Two vectors must have the same length. */
2096 if (XVECLEN (x, i) != XVECLEN (y, i))
2097 return 0;
2099 /* And the corresponding elements must match. */
2100 for (j = 0; j < XVECLEN (x, i); j++)
2101 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2102 movables, regs) == 0)
2103 return 0;
2104 break;
2106 case 'e':
2107 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
2108 == 0)
2109 return 0;
2110 break;
2112 case 's':
2113 if (strcmp (XSTR (x, i), XSTR (y, i)))
2114 return 0;
2115 break;
2117 case 'u':
2118 /* These are just backpointers, so they don't matter. */
2119 break;
2121 case '0':
2122 break;
2124 /* It is believed that rtx's at this level will never
2125 contain anything but integers and other rtx's,
2126 except for within LABEL_REFs and SYMBOL_REFs. */
2127 default:
2128 gcc_unreachable ();
2131 return 1;
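/* For example (hypothetical regno and value): if a movable M with
   m->move_insn loads (reg:SI 60) and its REG_EQUAL value is
   (const_int 5), then set_in_loop for reg 60 is -2 and the code above
   treats (reg:SI 60) and (const_int 5) as equal, even though plain
   rtx_equal_p would reject the pair.  */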
2134 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
2135 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
2136 references is incremented once for each added note. */
2138 static void
2139 add_label_notes (rtx x, rtx insns)
2141 enum rtx_code code = GET_CODE (x);
2142 int i, j;
2143 const char *fmt;
2144 rtx insn;
2146 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2148 /* This code used to ignore labels that referred to dispatch tables to
2149 avoid flow generating (slightly) worse code.
2151 We no longer ignore such label references (see LABEL_REF handling in
2152 mark_jump_label for additional information). */
2153 for (insn = insns; insn; insn = NEXT_INSN (insn))
2154 if (reg_mentioned_p (XEXP (x, 0), insn))
2156 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
2157 REG_NOTES (insn));
2158 if (LABEL_P (XEXP (x, 0)))
2159 LABEL_NUSES (XEXP (x, 0))++;
2163 fmt = GET_RTX_FORMAT (code);
2164 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2166 if (fmt[i] == 'e')
2167 add_label_notes (XEXP (x, i), insns);
2168 else if (fmt[i] == 'E')
2169 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2170 add_label_notes (XVECEXP (x, i, j), insns);
2174 /* Scan MOVABLES, and move the insns that deserve to be moved.
2175 If two matching movables are combined, replace one reg with the
2176 other throughout. */
2178 static void
2179 move_movables (struct loop *loop, struct loop_movables *movables,
2180 int threshold, int insn_count)
2182 struct loop_regs *regs = LOOP_REGS (loop);
2183 int nregs = regs->num;
2184 rtx new_start = 0;
2185 struct movable *m;
2186 rtx p;
2187 rtx loop_start = loop->start;
2188 rtx loop_end = loop->end;
2189 /* Map of pseudo-register replacements to handle combining
2190 when we move several insns that load the same value
2191 into different pseudo-registers. */
2192 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
2193 char *already_moved = xcalloc (nregs, sizeof (char));
2195 for (m = movables->head; m; m = m->next)
2197 /* Describe this movable insn. */
2199 if (loop_dump_stream)
2201 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
2202 INSN_UID (m->insn), m->regno, m->lifetime);
2203 if (m->consec > 0)
2204 fprintf (loop_dump_stream, "consec %d, ", m->consec);
2205 if (m->cond)
2206 fprintf (loop_dump_stream, "cond ");
2207 if (m->force)
2208 fprintf (loop_dump_stream, "force ");
2209 if (m->global)
2210 fprintf (loop_dump_stream, "global ");
2211 if (m->done)
2212 fprintf (loop_dump_stream, "done ");
2213 if (m->move_insn)
2214 fprintf (loop_dump_stream, "move-insn ");
2215 if (m->match)
2216 fprintf (loop_dump_stream, "matches %d ",
2217 INSN_UID (m->match->insn));
2218 if (m->forces)
2219 fprintf (loop_dump_stream, "forces %d ",
2220 INSN_UID (m->forces->insn));
2223 /* Ignore the insn if it's already done (it matched something else).
2224 Otherwise, see if it is now safe to move. */
2226 if (!m->done
2227 && (! m->cond
2228 || (1 == loop_invariant_p (loop, m->set_src)
2229 && (m->dependencies == 0
2230 || 1 == loop_invariant_p (loop, m->dependencies))
2231 && (m->consec == 0
2232 || 1 == consec_sets_invariant_p (loop, m->set_dest,
2233 m->consec + 1,
2234 m->insn))))
2235 && (! m->forces || m->forces->done))
2237 int regno;
2238 rtx p;
2239 int savings = m->savings;
2241 /* We have an insn that is safe to move.
2242 Compute its desirability. */
2244 p = m->insn;
2245 regno = m->regno;
2247 if (loop_dump_stream)
2248 fprintf (loop_dump_stream, "savings %d ", savings);
2250 if (regs->array[regno].moved_once && loop_dump_stream)
2251 fprintf (loop_dump_stream, "halved since already moved ");
2253 /* An insn MUST be moved if we already moved something else
2254 which is safe only if this one is moved too: that is,
2255 if already_moved[REGNO] is nonzero. */
2257 /* An insn is desirable to move if the new lifetime of the
2258 register is no more than THRESHOLD times the old lifetime.
2259 If it's not desirable, it means the loop is so big
2260 that moving won't speed things up much,
2261 and it is liable to make register usage worse. */
2263 /* It is also desirable to move if it can be moved at no
2264 extra cost because something else was already moved. */
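/* As a hypothetical illustration of the test below: with threshold 6,
   savings 2 and lifetime 10, the product 6 * 2 * 10 = 120 permits the
   move whenever the loop contains at most 120 insns -- or at most 60
   if the reg was already moved out of another loop, since insn_count
   is doubled in that case.  */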
2266 if (already_moved[regno]
2267 || (threshold * savings * m->lifetime) >=
2268 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
2269 || (m->forces && m->forces->done
2270 && regs->array[m->forces->regno].n_times_set == 1))
2272 int count;
2273 struct movable *m1;
2274 rtx first = NULL_RTX;
2275 rtx newreg = NULL_RTX;
2277 if (m->insert_temp)
2278 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
2280 /* Now move the insns that set the reg. */
2282 if (m->partial && m->match)
2284 rtx newpat, i1;
2285 rtx r1, r2;
2286 /* Find the end of this chain of matching regs.
2287 Thus, we load each reg in the chain from that one reg.
2288 And that reg is loaded with 0 directly,
2289 since it has ->match == 0. */
2290 for (m1 = m; m1->match; m1 = m1->match);
2291 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
2292 SET_DEST (PATTERN (m1->insn)));
2293 i1 = loop_insn_hoist (loop, newpat);
2295 /* Mark the moved, invariant reg as being allowed to
2296 share a hard reg with the other matching invariant. */
2297 REG_NOTES (i1) = REG_NOTES (m->insn);
2298 r1 = SET_DEST (PATTERN (m->insn));
2299 r2 = SET_DEST (PATTERN (m1->insn));
2300 regs_may_share
2301 = gen_rtx_EXPR_LIST (VOIDmode, r1,
2302 gen_rtx_EXPR_LIST (VOIDmode, r2,
2303 regs_may_share));
2304 delete_insn (m->insn);
2306 if (new_start == 0)
2307 new_start = i1;
2309 if (loop_dump_stream)
2310 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2312 /* If we are to re-generate the item being moved with a
2313 new move insn, first delete what we have and then emit
2314 the move insn before the loop. */
2315 else if (m->move_insn)
2317 rtx i1, temp, seq;
2319 for (count = m->consec; count >= 0; count--)
2321 if (!NOTE_P (p))
2323 /* If this is the first insn of a library
2324 call sequence, something is very
2325 wrong. */
2326 gcc_assert (!find_reg_note
2327 (p, REG_LIBCALL, NULL_RTX));
2329 /* If this is the last insn of a libcall
2330 sequence, then delete every insn in the
2331 sequence except the last. The last insn
2332 is handled in the normal manner. */
2333 temp = find_reg_note (p, REG_RETVAL, NULL_RTX);
2335 if (temp)
2337 temp = XEXP (temp, 0);
2338 while (temp != p)
2339 temp = delete_insn (temp);
2343 temp = p;
2344 p = delete_insn (p);
2346 /* simplify_giv_expr expects that it can walk the insns
2347 at m->insn forwards and see this old sequence we are
2348 tossing here. delete_insn does preserve the next
2349 pointers, but when we skip over a NOTE we must fix
2350 it up. Otherwise that code walks into the non-deleted
2351 insn stream. */
2352 while (p && NOTE_P (p))
2353 p = NEXT_INSN (temp) = NEXT_INSN (p);
2355 if (m->insert_temp)
2357 /* Replace the original insn with a move from
2358 our newly created temp. */
2359 start_sequence ();
2360 emit_move_insn (m->set_dest, newreg);
2361 seq = get_insns ();
2362 end_sequence ();
2363 emit_insn_before (seq, p);
2367 start_sequence ();
2368 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2369 m->set_src);
2370 seq = get_insns ();
2371 end_sequence ();
2373 add_label_notes (m->set_src, seq);
2375 i1 = loop_insn_hoist (loop, seq);
2376 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2377 set_unique_reg_note (i1,
2378 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2379 m->set_src);
2381 if (loop_dump_stream)
2382 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2384 /* The more regs we move, the less we like moving them. */
2385 threshold -= 3;
2387 else
2389 for (count = m->consec; count >= 0; count--)
2391 rtx i1, temp;
2393 /* If first insn of libcall sequence, skip to end. */
2394 /* Do this at start of loop, since p is guaranteed to
2395 be an insn here. */
2396 if (!NOTE_P (p)
2397 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2398 p = XEXP (temp, 0);
2400 /* If last insn of libcall sequence, move all
2401 insns except the last before the loop. The last
2402 insn is handled in the normal manner. */
2403 if (!NOTE_P (p)
2404 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2406 rtx fn_address = 0;
2407 rtx fn_reg = 0;
2408 rtx fn_address_insn = 0;
2410 first = 0;
2411 for (temp = XEXP (temp, 0); temp != p;
2412 temp = NEXT_INSN (temp))
2414 rtx body;
2415 rtx n;
2416 rtx next;
2418 if (NOTE_P (temp))
2419 continue;
2421 body = PATTERN (temp);
2423 /* Find the next insn after TEMP,
2424 not counting USE or NOTE insns. */
2425 for (next = NEXT_INSN (temp); next != p;
2426 next = NEXT_INSN (next))
2427 if (! (NONJUMP_INSN_P (next)
2428 && GET_CODE (PATTERN (next)) == USE)
2429 && !NOTE_P (next))
2430 break;
2432 /* If that is the call, this may be the insn
2433 that loads the function address.
2435 Extract the function address from the insn
2436 that loads it into a register.
2437 If this insn was cse'd, we get incorrect code.
2439 So emit a new move insn that copies the
2440 function address into the register that the
2441 call insn will use. flow.c will delete any
2442 redundant stores that we have created. */
2443 if (CALL_P (next)
2444 && GET_CODE (body) == SET
2445 && REG_P (SET_DEST (body))
2446 && (n = find_reg_note (temp, REG_EQUAL,
2447 NULL_RTX)))
2449 fn_reg = SET_SRC (body);
2450 if (!REG_P (fn_reg))
2451 fn_reg = SET_DEST (body);
2452 fn_address = XEXP (n, 0);
2453 fn_address_insn = temp;
2455 /* We have the call insn.
2456 If it uses the register we suspect it might,
2457 load it with the correct address directly. */
2458 if (CALL_P (temp)
2459 && fn_address != 0
2460 && reg_referenced_p (fn_reg, body))
2461 loop_insn_emit_after (loop, 0, fn_address_insn,
2462 gen_move_insn
2463 (fn_reg, fn_address));
2465 if (CALL_P (temp))
2467 i1 = loop_call_insn_hoist (loop, body);
2468 /* Because the USAGE information potentially
2469 contains objects other than hard registers,
2470 we need to copy it. */
2471 if (CALL_INSN_FUNCTION_USAGE (temp))
2472 CALL_INSN_FUNCTION_USAGE (i1)
2473 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2475 else
2476 i1 = loop_insn_hoist (loop, body);
2477 if (first == 0)
2478 first = i1;
2479 if (temp == fn_address_insn)
2480 fn_address_insn = i1;
2481 REG_NOTES (i1) = REG_NOTES (temp);
2482 REG_NOTES (temp) = NULL;
2483 delete_insn (temp);
2485 if (new_start == 0)
2486 new_start = first;
2488 if (m->savemode != VOIDmode)
2490 /* P sets REG to zero; but we should clear only
2491 the bits that are not covered by the mode
2492 m->savemode. */
2493 rtx reg = m->set_dest;
2494 rtx sequence;
2495 rtx tem;
2497 start_sequence ();
2498 tem = expand_simple_binop
2499 (GET_MODE (reg), AND, reg,
2500 GEN_INT ((((HOST_WIDE_INT) 1
2501 << GET_MODE_BITSIZE (m->savemode)))
2502 - 1),
2503 reg, 1, OPTAB_LIB_WIDEN);
2504 gcc_assert (tem);
2505 if (tem != reg)
2506 emit_move_insn (reg, tem);
2507 sequence = get_insns ();
2508 end_sequence ();
2509 i1 = loop_insn_hoist (loop, sequence);
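/* For instance, if m->savemode is QImode (8 bits), the mask computed
   above is (1 << 8) - 1 = 0xff; the hoisted AND then clears every bit
   above the low byte while leaving the low byte -- which may still
   hold a value saved from a previous iteration -- intact.  */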
2511 else if (CALL_P (p))
2513 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2514 /* Because the USAGE information potentially
2515 contains objects other than hard registers,
2516 we need to copy it. */
2517 if (CALL_INSN_FUNCTION_USAGE (p))
2518 CALL_INSN_FUNCTION_USAGE (i1)
2519 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2521 else if (count == m->consec && m->move_insn_first)
2523 rtx seq;
2524 /* The SET_SRC might not be invariant, so we must
2525 use the REG_EQUAL note. */
2526 start_sequence ();
2527 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2528 m->set_src);
2529 seq = get_insns ();
2530 end_sequence ();
2532 add_label_notes (m->set_src, seq);
2534 i1 = loop_insn_hoist (loop, seq);
2535 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2536 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2537 : REG_EQUAL, m->set_src);
2539 else if (m->insert_temp)
2541 rtx *reg_map2 = xcalloc (REGNO (newreg),
2542 sizeof (rtx));
2543 reg_map2[m->regno] = newreg;
2545 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2546 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2547 free (reg_map2);
2549 else
2550 i1 = loop_insn_hoist (loop, PATTERN (p));
2552 if (REG_NOTES (i1) == 0)
2554 REG_NOTES (i1) = REG_NOTES (p);
2555 REG_NOTES (p) = NULL;
2557 /* If there is a REG_EQUAL note present whose value
2558 is not loop invariant, then delete it, since it
2559 may cause problems with later optimization passes.
2560 It is possible for cse to create such notes
2561 like this as a result of record_jump_cond. */
2563 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2564 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2565 remove_note (i1, temp);
2568 if (new_start == 0)
2569 new_start = i1;
2571 if (loop_dump_stream)
2572 fprintf (loop_dump_stream, " moved to %d",
2573 INSN_UID (i1));
2575 /* If library call, now fix the REG_NOTES that contain
2576 insn pointers, namely REG_LIBCALL on FIRST
2577 and REG_RETVAL on I1. */
2578 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2580 XEXP (temp, 0) = first;
2581 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2582 XEXP (temp, 0) = i1;
2585 temp = p;
2586 delete_insn (p);
2587 p = NEXT_INSN (p);
2589 /* simplify_giv_expr expects that it can walk the insns
2590 at m->insn forwards and see this old sequence we are
2591 tossing here. delete_insn does preserve the next
2592 pointers, but when we skip over a NOTE we must fix
2593 it up. Otherwise that code walks into the non-deleted
2594 insn stream. */
2595 while (p && NOTE_P (p))
2596 p = NEXT_INSN (temp) = NEXT_INSN (p);
2598 if (m->insert_temp)
2600 rtx seq;
2601 /* Replace the original insn with a move from
2602 our newly created temp. */
2603 start_sequence ();
2604 emit_move_insn (m->set_dest, newreg);
2605 seq = get_insns ();
2606 end_sequence ();
2607 emit_insn_before (seq, p);
2611 /* The more regs we move, the less we like moving them. */
2612 threshold -= 3;
2615 m->done = 1;
2617 if (!m->insert_temp)
2619 /* Any other movable that loads the same register
2620 MUST be moved. */
2621 already_moved[regno] = 1;
2623 /* This reg has been moved out of one loop. */
2624 regs->array[regno].moved_once = 1;
2626 /* The reg set here is now invariant. */
2627 if (! m->partial)
2629 int i;
2630 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2631 regs->array[regno+i].set_in_loop = 0;
2634 /* Change the length-of-life info for the register
2635 to say it lives at least the full length of this loop.
2636 This will help guide optimizations in outer loops. */
2638 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2639 /* This is the old insn before all the moved insns.
2640 We can't use the moved insn because it is out of range
2641 in uid_luid. Only the old insns have luids. */
2642 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2643 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2644 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2647 /* Combine with this moved insn any other matching movables. */
2649 if (! m->partial)
2650 for (m1 = movables->head; m1; m1 = m1->next)
2651 if (m1->match == m)
2653 rtx temp;
2655 reg_map[m1->regno] = m->set_dest;
2657 /* Get rid of the matching insn
2658 and prevent further processing of it. */
2659 m1->done = 1;
2661 /* If library call, delete all insns. */
2662 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2663 NULL_RTX)))
2664 delete_insn_chain (XEXP (temp, 0), m1->insn);
2665 else
2666 delete_insn (m1->insn);
2668 /* Any other movable that loads the same register
2669 MUST be moved. */
2670 already_moved[m1->regno] = 1;
2672 /* The reg merged here is now invariant,
2673 if the reg it matches is invariant. */
2674 if (! m->partial)
2676 int i;
2677 for (i = 0;
2678 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2679 i++)
2680 regs->array[m1->regno+i].set_in_loop = 0;
2684 else if (loop_dump_stream)
2685 fprintf (loop_dump_stream, "not desirable");
2687 else if (loop_dump_stream && !m->match)
2688 fprintf (loop_dump_stream, "not safe");
2690 if (loop_dump_stream)
2691 fprintf (loop_dump_stream, "\n");
2694 if (new_start == 0)
2695 new_start = loop_start;
2697 /* Go through all the instructions in the loop, making
2698 all the register substitutions scheduled in REG_MAP. */
2699 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2700 if (INSN_P (p))
2702 replace_regs (PATTERN (p), reg_map, nregs, 0);
2703 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2704 INSN_CODE (p) = -1;
2707 /* Clean up. */
2708 free (reg_map);
2709 free (already_moved);
2713 static void
2714 loop_movables_add (struct loop_movables *movables, struct movable *m)
2716 if (movables->head == 0)
2717 movables->head = m;
2718 else
2719 movables->last->next = m;
2720 movables->last = m;
2724 static void
2725 loop_movables_free (struct loop_movables *movables)
2727 struct movable *m;
2728 struct movable *m_next;
2730 for (m = movables->head; m; m = m_next)
2732 m_next = m->next;
2733 free (m);
2737 #if 0
2738 /* Scan X and replace the address of any MEM in it with ADDR.
2739 REG is the address that MEM should have before the replacement. */
2741 static void
2742 replace_call_address (rtx x, rtx reg, rtx addr)
2744 enum rtx_code code;
2745 int i;
2746 const char *fmt;
2748 if (x == 0)
2749 return;
2750 code = GET_CODE (x);
2751 switch (code)
2753 case PC:
2754 case CC0:
2755 case CONST_INT:
2756 case CONST_DOUBLE:
2757 case CONST:
2758 case SYMBOL_REF:
2759 case LABEL_REF:
2760 case REG:
2761 return;
2763 case SET:
2764 /* Short cut for very common case. */
2765 replace_call_address (XEXP (x, 1), reg, addr);
2766 return;
2768 case CALL:
2769 /* Short cut for very common case. */
2770 replace_call_address (XEXP (x, 0), reg, addr);
2771 return;
2773 case MEM:
2774 /* If this MEM uses a reg other than the one we expected,
2775 something is wrong. */
2776 gcc_assert (XEXP (x, 0) == reg);
2777 XEXP (x, 0) = addr;
2778 return;
2780 default:
2781 break;
2784 fmt = GET_RTX_FORMAT (code);
2785 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2787 if (fmt[i] == 'e')
2788 replace_call_address (XEXP (x, i), reg, addr);
2789 else if (fmt[i] == 'E')
2791 int j;
2792 for (j = 0; j < XVECLEN (x, i); j++)
2793 replace_call_address (XVECEXP (x, i, j), reg, addr);
2797 #endif
2799 /* Return the number of memory refs to addresses that vary
2800 in the rtx X. */
2802 static int
2803 count_nonfixed_reads (const struct loop *loop, rtx x)
2805 enum rtx_code code;
2806 int i;
2807 const char *fmt;
2808 int value;
2810 if (x == 0)
2811 return 0;
2813 code = GET_CODE (x);
2814 switch (code)
2816 case PC:
2817 case CC0:
2818 case CONST_INT:
2819 case CONST_DOUBLE:
2820 case CONST:
2821 case SYMBOL_REF:
2822 case LABEL_REF:
2823 case REG:
2824 return 0;
2826 case MEM:
2827 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2828 + count_nonfixed_reads (loop, XEXP (x, 0)));
2830 default:
2831 break;
2834 value = 0;
2835 fmt = GET_RTX_FORMAT (code);
2836 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2838 if (fmt[i] == 'e')
2839 value += count_nonfixed_reads (loop, XEXP (x, i));
2840 if (fmt[i] == 'E')
2842 int j;
2843 for (j = 0; j < XVECLEN (x, i); j++)
2844 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2847 return value;
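/* For example (hypothetical regno): (mem (reg 60)), where reg 60 is
   set inside the loop, counts as 1 nonfixed read; the doubly indirect
   (mem (mem (reg 60))) counts as 2, since the address of the outer
   MEM (itself a MEM whose address varies) also fails
   loop_invariant_p.  */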
2850 /* Scan a loop setting the elements `loops_enclosed',
2851 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2852 `unknown_address_altered', `unknown_constant_address_altered', and
2853 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2854 list `store_mems' in LOOP. */
2856 static void
2857 prescan_loop (struct loop *loop)
2859 int level = 1;
2860 rtx insn;
2861 struct loop_info *loop_info = LOOP_INFO (loop);
2862 rtx start = loop->start;
2863 rtx end = loop->end;
2864 /* The label after END. Jumping here is just like falling off the
2865 end of the loop. We use next_nonnote_insn instead of next_label
2866 as a hedge against the (pathological) case where some actual insn
2867 might end up between the two. */
2868 rtx exit_target = next_nonnote_insn (end);
2870 loop_info->has_indirect_jump = indirect_jump_in_function;
2871 loop_info->pre_header_has_call = 0;
2872 loop_info->has_call = 0;
2873 loop_info->has_nonconst_call = 0;
2874 loop_info->has_prefetch = 0;
2875 loop_info->has_volatile = 0;
2876 loop_info->has_tablejump = 0;
2877 loop_info->has_multiple_exit_targets = 0;
2878 loop->level = 1;
2880 loop_info->unknown_address_altered = 0;
2881 loop_info->unknown_constant_address_altered = 0;
2882 loop_info->store_mems = NULL_RTX;
2883 loop_info->first_loop_store_insn = NULL_RTX;
2884 loop_info->mems_idx = 0;
2885 loop_info->num_mem_sets = 0;
2887 for (insn = start; insn && !LABEL_P (insn);
2888 insn = PREV_INSN (insn))
2890 if (CALL_P (insn))
2892 loop_info->pre_header_has_call = 1;
2893 break;
2897 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2898 insn = NEXT_INSN (insn))
2900 switch (GET_CODE (insn))
2902 case NOTE:
2903 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2905 ++level;
2906 /* Count number of loops contained in this one. */
2907 loop->level++;
2909 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2910 --level;
2911 break;
2913 case CALL_INSN:
2914 if (! CONST_OR_PURE_CALL_P (insn))
2916 loop_info->unknown_address_altered = 1;
2917 loop_info->has_nonconst_call = 1;
2919 else if (pure_call_p (insn))
2920 loop_info->has_nonconst_call = 1;
2921 loop_info->has_call = 1;
2922 if (can_throw_internal (insn))
2923 loop_info->has_multiple_exit_targets = 1;
2924 break;
2926 case JUMP_INSN:
2927 if (! loop_info->has_multiple_exit_targets)
2929 rtx set = pc_set (insn);
2931 if (set)
2933 rtx src = SET_SRC (set);
2934 rtx label1, label2;
2936 if (GET_CODE (src) == IF_THEN_ELSE)
2938 label1 = XEXP (src, 1);
2939 label2 = XEXP (src, 2);
2941 else
2943 label1 = src;
2944 label2 = NULL_RTX;
2947 do
2949 if (label1 && label1 != pc_rtx)
2951 if (GET_CODE (label1) != LABEL_REF)
2953 /* Something tricky. */
2954 loop_info->has_multiple_exit_targets = 1;
2955 break;
2957 else if (XEXP (label1, 0) != exit_target
2958 && LABEL_OUTSIDE_LOOP_P (label1))
2960 /* A jump outside the current loop. */
2961 loop_info->has_multiple_exit_targets = 1;
2962 break;
2966 label1 = label2;
2967 label2 = NULL_RTX;
2969 while (label1);
2971 else
2973 /* A return, or something tricky. */
2974 loop_info->has_multiple_exit_targets = 1;
2977 /* Fall through. */
2979 case INSN:
2980 if (volatile_refs_p (PATTERN (insn)))
2981 loop_info->has_volatile = 1;
2983 if (JUMP_P (insn)
2984 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2985 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2986 loop_info->has_tablejump = 1;
2988 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2989 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2990 loop_info->first_loop_store_insn = insn;
2992 if (flag_non_call_exceptions && can_throw_internal (insn))
2993 loop_info->has_multiple_exit_targets = 1;
2994 break;
2996 default:
2997 break;
3001 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
3002 if (/* An exception thrown by a called function might land us
3003 anywhere. */
3004 ! loop_info->has_nonconst_call
3005 /* We don't want loads for MEMs moved to a location before the
3006 one at which their stack memory becomes allocated. (Note
3007 that this is not a problem for malloc, etc., since those
3008 require actual function calls.) */
3009 && ! current_function_calls_alloca
3010 /* There are ways to leave the loop other than falling off the
3011 end. */
3012 && ! loop_info->has_multiple_exit_targets)
3013 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
3014 insn = NEXT_INSN (insn))
3015 for_each_rtx (&insn, insert_loop_mem, loop_info);
3017 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
3018 that loop_invariant_p and load_mems can use true_dependence
3019 to determine what is really clobbered. */
3020 if (loop_info->unknown_address_altered)
3022 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3024 loop_info->store_mems
3025 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3027 if (loop_info->unknown_constant_address_altered)
3029 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3030 MEM_READONLY_P (mem) = 1;
3031 loop_info->store_mems
3032 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
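/* The BLKmode MEM at address const0_rtx acts as a conservative
   sentinel: true_dependence treats a BLKmode reference as potentially
   conflicting with any other memory reference, so once the sentinel
   is on store_mems, loop_invariant_p will reject every load in the
   loop (the MEM_READONLY_P variant likewise pessimizes loads from
   memory that is otherwise assumed constant).  */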
3036 /* Invalidate all loops containing LABEL. */
3038 static void
3039 invalidate_loops_containing_label (rtx label)
3041 struct loop *loop;
3042 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
3043 loop->invalid = 1;
3046 /* Scan the function looking for loops. Record the start and end of each loop.
3047 Also mark as invalid loops any loops that contain a setjmp or are branched
3048 to from outside the loop. */
3050 static void
3051 find_and_verify_loops (rtx f, struct loops *loops)
3053 rtx insn;
3054 rtx label;
3055 int num_loops;
3056 struct loop *current_loop;
3057 struct loop *next_loop;
3058 struct loop *loop;
3060 num_loops = loops->num;
3062 compute_luids (f, NULL_RTX, 0);
3064 /* If there are jumps to undefined labels,
3065 treat them as jumps out of any/all loops.
3066 This also avoids writing past end of tables when there are no loops. */
3067 uid_loop[0] = NULL;
3069 /* Find boundaries of loops, mark which loops are contained within
3070 loops, and invalidate loops that have setjmp. */
3072 num_loops = 0;
3073 current_loop = NULL;
3074 for (insn = f; insn; insn = NEXT_INSN (insn))
3076 if (NOTE_P (insn))
3077 switch (NOTE_LINE_NUMBER (insn))
3079 case NOTE_INSN_LOOP_BEG:
3080 next_loop = loops->array + num_loops;
3081 next_loop->num = num_loops;
3082 num_loops++;
3083 next_loop->start = insn;
3084 next_loop->outer = current_loop;
3085 current_loop = next_loop;
3086 break;
3088 case NOTE_INSN_LOOP_END:
3089 gcc_assert (current_loop);
3091 current_loop->end = insn;
3092 current_loop = current_loop->outer;
3093 break;
3095 default:
3096 break;
3099 if (CALL_P (insn)
3100 && find_reg_note (insn, REG_SETJMP, NULL))
3102 /* In this case, we must invalidate our current loop and any
3103 enclosing loop. */
3104 for (loop = current_loop; loop; loop = loop->outer)
3106 loop->invalid = 1;
3107 if (loop_dump_stream)
3108 fprintf (loop_dump_stream,
3109 "\nLoop at %d ignored due to setjmp.\n",
3110 INSN_UID (loop->start));
3114 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
3115 enclosing loop, but this doesn't matter. */
3116 uid_loop[INSN_UID (insn)] = current_loop;
3119 /* Any loop containing a label used in an initializer must be invalidated,
3120 because it can be jumped into from anywhere. */
3121 for (label = forced_labels; label; label = XEXP (label, 1))
3122 invalidate_loops_containing_label (XEXP (label, 0));
3124 /* Any loop containing a label used for an exception handler must be
3125 invalidated, because it can be jumped into from anywhere. */
3126 for_each_eh_label (invalidate_loops_containing_label);
3128 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
3129 loop that it is not contained within, that loop is marked invalid.
3130 If any INSN or CALL_INSN uses a label's address, then the loop containing
3131 that label is marked invalid, because it could be jumped into from
3132 anywhere.
3134 Also look for blocks of code ending in an unconditional branch that
3135 exits the loop. If such a block is surrounded by a conditional
3136 branch around the block, move the block elsewhere (see below) and
3137 invert the jump to point to the code block. This may eliminate a
3138 label in our loop and will simplify processing by both us and a
3139 possible second cse pass. */
3141 for (insn = f; insn; insn = NEXT_INSN (insn))
3142 if (INSN_P (insn))
3144 struct loop *this_loop = uid_loop[INSN_UID (insn)];
3146 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
3148 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
3149 if (note)
3150 invalidate_loops_containing_label (XEXP (note, 0));
3153 if (!JUMP_P (insn))
3154 continue;
3156 mark_loop_jump (PATTERN (insn), this_loop);
3158 /* See if this is an unconditional branch outside the loop. */
3159 if (this_loop
3160 && (GET_CODE (PATTERN (insn)) == RETURN
3161 || (any_uncondjump_p (insn)
3162 && onlyjump_p (insn)
3163 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
3164 != this_loop)))
3165 && get_max_uid () < max_uid_for_loop)
3167 rtx p;
3168 rtx our_next = next_real_insn (insn);
3169 rtx last_insn_to_move = NEXT_INSN (insn);
3170 struct loop *dest_loop;
3171 struct loop *outer_loop = NULL;
3173 /* Go backwards until we reach the start of the loop, a label,
3174 or a JUMP_INSN. */
3175 for (p = PREV_INSN (insn);
3176 !LABEL_P (p)
3177 && ! (NOTE_P (p)
3178 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3179 && !JUMP_P (p);
3180 p = PREV_INSN (p))
3183 /* Check for the case where we have a jump to an inner nested
3184 loop, and do not perform the optimization in that case. */
3186 if (JUMP_LABEL (insn))
3188 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
3189 if (dest_loop)
3191 for (outer_loop = dest_loop; outer_loop;
3192 outer_loop = outer_loop->outer)
3193 if (outer_loop == this_loop)
3194 break;
3198 /* Make sure that the target of P is within the current loop. */
3200 if (JUMP_P (p) && JUMP_LABEL (p)
3201 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
3202 outer_loop = this_loop;
3204 /* If we stopped on a JUMP_INSN to the next insn after INSN,
3205 we have a block of code to try to move.
3207 We look backward and then forward from the target of INSN
3208 to find a BARRIER at the same loop depth as the target.
3209 If we find such a BARRIER, we make a new label for the start
3210 of the block, invert the jump in P and point it to that label,
3211 and move the block of code to the spot we found. */
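/* Schematically (a sketch in pseudo-assembly, not real RTL):

	P:    if (cond) jump L1	; conditional branch around the block
	      <block>
	INSN: jump Lout		; unconditional exit from the loop
	L1:   ...		; rest of the loop

   becomes, after inverting P and moving the block past a BARRIER
   outside the loop:

	P:    if (!cond) jump Lnew
	L1:   ...		; rest of the loop
	...
	Lnew: <block>
	      jump Lout
*/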
3213 if (! outer_loop
3214 && JUMP_P (p)
3215 && JUMP_LABEL (p) != 0
3216 /* Just ignore jumps to labels that were never emitted.
3217 These always indicate compilation errors. */
3218 && INSN_UID (JUMP_LABEL (p)) != 0
3219 && any_condjump_p (p) && onlyjump_p (p)
3220 && next_real_insn (JUMP_LABEL (p)) == our_next
3221 /* If it's not safe to move the sequence, then we
3222 mustn't try. */
3223 && insns_safe_to_move_p (p, NEXT_INSN (insn),
3224 &last_insn_to_move))
3226 rtx target
3227 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
3228 struct loop *target_loop = uid_loop[INSN_UID (target)];
3229 rtx loc, loc2;
3230 rtx tmp;
3232 /* Search for possible garbage past the conditional jumps
3233 and look for the last barrier. */
3234 for (tmp = last_insn_to_move;
3235 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
3236 if (BARRIER_P (tmp))
3237 last_insn_to_move = tmp;
3239 for (loc = target; loc; loc = PREV_INSN (loc))
3240 if (BARRIER_P (loc)
3241 /* Don't move things inside a tablejump. */
3242 && ((loc2 = next_nonnote_insn (loc)) == 0
3243 || !LABEL_P (loc2)
3244 || (loc2 = next_nonnote_insn (loc2)) == 0
3245 || !JUMP_P (loc2)
3246 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3247 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3248 && uid_loop[INSN_UID (loc)] == target_loop)
3249 break;
3251 if (loc == 0)
3252 for (loc = target; loc; loc = NEXT_INSN (loc))
3253 if (BARRIER_P (loc)
3254 /* Don't move things inside a tablejump. */
3255 && ((loc2 = next_nonnote_insn (loc)) == 0
3256 || !LABEL_P (loc2)
3257 || (loc2 = next_nonnote_insn (loc2)) == 0
3258 || !JUMP_P (loc2)
3259 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3260 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3261 && uid_loop[INSN_UID (loc)] == target_loop)
3262 break;
3264 if (loc)
3266 rtx cond_label = JUMP_LABEL (p);
3267 rtx new_label = get_label_after (p);
3269 /* Ensure our label doesn't go away. */
3270 LABEL_NUSES (cond_label)++;
3272 /* Verify that uid_loop is large enough and that
3273 we can invert P. */
3274 if (invert_jump (p, new_label, 1))
3276 rtx q, r;
3277 bool only_notes;
3279 /* If no suitable BARRIER was found, create a suitable
3280 one before TARGET. Since TARGET is a fall through
3281 path, we'll need to insert a jump around our block
3282 and add a BARRIER before TARGET.
3284 This creates an extra unconditional jump outside
3285 the loop. However, the benefits of removing rarely
3286 executed instructions from inside the loop usually
3287 outweigh the cost of the extra unconditional jump
3288 outside the loop. */
3289 if (loc == 0)
3291 rtx temp;
3293 temp = gen_jump (JUMP_LABEL (insn));
3294 temp = emit_jump_insn_before (temp, target);
3295 JUMP_LABEL (temp) = JUMP_LABEL (insn);
3296 LABEL_NUSES (JUMP_LABEL (insn))++;
3297 loc = emit_barrier_before (target);
3300 /* Include the BARRIER after INSN and copy the
3301 block after LOC. */
3302 only_notes = squeeze_notes (&new_label,
3303 &last_insn_to_move);
3304 gcc_assert (!only_notes);
3306 reorder_insns (new_label, last_insn_to_move, loc);
3308 /* All those insns are now in TARGET_LOOP. */
3309 for (q = new_label;
3310 q != NEXT_INSN (last_insn_to_move);
3311 q = NEXT_INSN (q))
3312 uid_loop[INSN_UID (q)] = target_loop;
3314 /* The label jumped to by INSN is no longer a loop
3315 exit. Unless INSN does not have a label (e.g.,
3316 it is a RETURN insn), search loop->exit_labels
3317 to find its label_ref, and remove it. Also turn
3318 off the LABEL_OUTSIDE_LOOP_P bit. */
3319 if (JUMP_LABEL (insn))
3321 for (q = 0, r = this_loop->exit_labels;
3322 r;
3323 q = r, r = LABEL_NEXTREF (r))
3324 if (XEXP (r, 0) == JUMP_LABEL (insn))
3326 LABEL_OUTSIDE_LOOP_P (r) = 0;
3327 if (q)
3328 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3329 else
3330 this_loop->exit_labels = LABEL_NEXTREF (r);
3331 break;
3334 for (loop = this_loop; loop && loop != target_loop;
3335 loop = loop->outer)
3336 loop->exit_count--;
3338 /* If we didn't find it, then something is
3339 wrong. */
3340 gcc_assert (r);
3343 /* P is now a jump outside the loop, so it must be put
3344 in loop->exit_labels, and marked as such.
3345 The easiest way to do this is to just call
3346 mark_loop_jump again for P. */
3347 mark_loop_jump (PATTERN (p), this_loop);
3349 /* If INSN now jumps to the insn after it,
3350 delete INSN. */
3351 if (JUMP_LABEL (insn) != 0
3352 && (next_real_insn (JUMP_LABEL (insn))
3353 == next_real_insn (insn)))
3354 delete_related_insns (insn);
3357 /* Continue the loop after where the conditional
3358 branch used to jump, since the only branch insn
3359 in the block (if it still remains) is an inter-loop
3360 branch and hence needs no processing. */
3361 insn = NEXT_INSN (cond_label);
3363 if (--LABEL_NUSES (cond_label) == 0)
3364 delete_related_insns (cond_label);
3366 /* This loop will be continued with NEXT_INSN (insn). */
3367 insn = PREV_INSN (insn);
3374 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
3375 loops it is contained in, mark the target loop invalid.
3377 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3379 static void
3380 mark_loop_jump (rtx x, struct loop *loop)
3382 struct loop *dest_loop;
3383 struct loop *outer_loop;
3384 int i;
3386 switch (GET_CODE (x))
3388 case PC:
3389 case USE:
3390 case CLOBBER:
3391 case REG:
3392 case MEM:
3393 case CONST_INT:
3394 case CONST_DOUBLE:
3395 case RETURN:
3396 return;
3398 case CONST:
3399 /* There could be a label reference in here. */
3400 mark_loop_jump (XEXP (x, 0), loop);
3401 return;
3403 case PLUS:
3404 case MINUS:
3405 case MULT:
3406 mark_loop_jump (XEXP (x, 0), loop);
3407 mark_loop_jump (XEXP (x, 1), loop);
3408 return;
3410 case LO_SUM:
3411 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3412 mark_loop_jump (XEXP (x, 1), loop);
3413 return;
3415 case SIGN_EXTEND:
3416 case ZERO_EXTEND:
3417 mark_loop_jump (XEXP (x, 0), loop);
3418 return;
3420 case LABEL_REF:
3421 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3423 /* Link together all labels that branch outside the loop. This
3424 is used by final_[bg]iv_value and the loop unrolling code. Also
3425 mark this LABEL_REF so we know that this branch should predict
3426 false. */
3428 /* A check to make sure the label is not in an inner nested loop,
3429 since this does not count as a loop exit. */
3430 if (dest_loop)
3432 for (outer_loop = dest_loop; outer_loop;
3433 outer_loop = outer_loop->outer)
3434 if (outer_loop == loop)
3435 break;
3437 else
3438 outer_loop = NULL;
3440 if (loop && ! outer_loop)
3442 LABEL_OUTSIDE_LOOP_P (x) = 1;
3443 LABEL_NEXTREF (x) = loop->exit_labels;
3444 loop->exit_labels = x;
3446 for (outer_loop = loop;
3447 outer_loop && outer_loop != dest_loop;
3448 outer_loop = outer_loop->outer)
3449 outer_loop->exit_count++;
3452 /* If this is inside a loop, but not in the current loop or one enclosed
3453 by it, it invalidates at least one loop. */
3455 if (! dest_loop)
3456 return;
3458 /* We must invalidate every nested loop containing the target of this
3459 label, except those that also contain the jump insn. */
3461 for (; dest_loop; dest_loop = dest_loop->outer)
3463 /* Stop when we reach a loop that also contains the jump insn. */
3464 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3465 if (dest_loop == outer_loop)
3466 return;
3468 /* If we get here, we know we need to invalidate a loop. */
3469 if (loop_dump_stream && ! dest_loop->invalid)
3470 fprintf (loop_dump_stream,
3471 "\nLoop at %d ignored due to multiple entry points.\n",
3472 INSN_UID (dest_loop->start));
3474 dest_loop->invalid = 1;
3476 return;
3478 case SET:
3479 /* If this is not setting pc, ignore. */
3480 if (SET_DEST (x) == pc_rtx)
3481 mark_loop_jump (SET_SRC (x), loop);
3482 return;
3484 case IF_THEN_ELSE:
3485 mark_loop_jump (XEXP (x, 1), loop);
3486 mark_loop_jump (XEXP (x, 2), loop);
3487 return;
3489 case PARALLEL:
3490 case ADDR_VEC:
3491 for (i = 0; i < XVECLEN (x, 0); i++)
3492 mark_loop_jump (XVECEXP (x, 0, i), loop);
3493 return;
3495 case ADDR_DIFF_VEC:
3496 for (i = 0; i < XVECLEN (x, 1); i++)
3497 mark_loop_jump (XVECEXP (x, 1, i), loop);
3498 return;
3500 default:
3501 /* Strictly speaking this is not a jump into the loop, only a possible
3502 jump out of the loop. However, we have no way to link the destination
3503 of this jump onto the list of exit labels. To be safe we mark this
3504 loop and any containing loops as invalid. */
3505 if (loop)
3507 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3509 if (loop_dump_stream && ! outer_loop->invalid)
3510 fprintf (loop_dump_stream,
3511 "\nLoop at %d ignored due to unknown exit jump.\n",
3512 INSN_UID (outer_loop->start));
3513 outer_loop->invalid = 1;
3516 return;
3520 /* Return nonzero if there is a label in the range from
3521 insn INSN to and including the insn whose luid is END.
3522 INSN must have an assigned luid (i.e., it must not have
3523 been previously created by loop.c). */
3525 static int
3526 labels_in_range_p (rtx insn, int end)
3528 while (insn && INSN_LUID (insn) <= end)
3530 if (LABEL_P (insn))
3531 return 1;
3532 insn = NEXT_INSN (insn);
3535 return 0;
3538 /* Record that a memory reference X is being set. */
3540 static void
3541 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3542 void *data ATTRIBUTE_UNUSED)
3544 struct loop_info *loop_info = data;
3546 if (x == 0 || !MEM_P (x))
3547 return;
3549 /* Count number of memory writes.
3550 This affects heuristics in strength_reduce. */
3551 loop_info->num_mem_sets++;
3553 /* BLKmode MEM means all memory is clobbered. */
3554 if (GET_MODE (x) == BLKmode)
3556 if (MEM_READONLY_P (x))
3557 loop_info->unknown_constant_address_altered = 1;
3558 else
3559 loop_info->unknown_address_altered = 1;
3561 return;
3564 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3565 loop_info->store_mems);
3568 /* X is a value modified by an INSN that references a biv inside a loop
3569 exit test (i.e., X is somehow related to the value of the biv). If X
3570 is a pseudo that is used more than once, then the biv is (effectively)
3571 used more than once. DATA is a pointer to a loop_regs structure. */
3573 static void
3574 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3576 struct loop_regs *regs = (struct loop_regs *) data;
3578 if (x == 0)
3579 return;
3581 while (GET_CODE (x) == STRICT_LOW_PART
3582 || GET_CODE (x) == SIGN_EXTRACT
3583 || GET_CODE (x) == ZERO_EXTRACT
3584 || GET_CODE (x) == SUBREG)
3585 x = XEXP (x, 0);
3587 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3588 return;
3590 /* If we do not have usage information, or if we know the register
3591 is used more than once, note that fact for check_dbra_loop. */
3592 if (REGNO (x) >= max_reg_before_loop
3593 || ! regs->array[REGNO (x)].single_usage
3594 || regs->array[REGNO (x)].single_usage == const0_rtx)
3595 regs->multiple_uses = 1;
3598 /* Return nonzero if the rtx X is invariant over the current loop.
3600 The value is 2 if we refer to something only conditionally invariant.
3602 A memory ref is invariant if it is not volatile and does not conflict
3603 with anything stored in `loop_info->store_mems'. */
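/* For example (a simplified sketch), given a loop like

   for (i = 0; i < n; i++)
   a[i] = c + 1;

   the expression C + 1 is invariant as long as C is never set inside the
   loop body, so the computation can be hoisted in front of the loop. A
   load from A[J] would additionally have to avoid conflicting with every
   MEM recorded in `loop_info->store_mems'. */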
3605 static int
3606 loop_invariant_p (const struct loop *loop, rtx x)
3608 struct loop_info *loop_info = LOOP_INFO (loop);
3609 struct loop_regs *regs = LOOP_REGS (loop);
3610 int i;
3611 enum rtx_code code;
3612 const char *fmt;
3613 int conditional = 0;
3614 rtx mem_list_entry;
3616 if (x == 0)
3617 return 1;
3618 code = GET_CODE (x);
3619 switch (code)
3621 case CONST_INT:
3622 case CONST_DOUBLE:
3623 case SYMBOL_REF:
3624 case CONST:
3625 return 1;
3627 case LABEL_REF:
3628 return 1;
3630 case PC:
3631 case CC0:
3632 case UNSPEC_VOLATILE:
3633 return 0;
3635 case REG:
3636 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3637 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3638 && ! current_function_has_nonlocal_goto)
3639 return 1;
3641 if (LOOP_INFO (loop)->has_call
3642 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3643 return 0;
3645 /* Out-of-range regs can occur when we are called from unrolling.
3646 These registers created by the unroller are set in the loop,
3647 hence are never invariant.
3648 Other out-of-range regs can be generated by load_mems; those that
3649 are written to in the loop are not invariant, while those that are
3650 not written to are invariant. It would be easy for load_mems
3651 to set n_times_set correctly for these registers; however, there
3652 is no easy way to distinguish them from registers created by the
3653 unroller. */
3655 if (REGNO (x) >= (unsigned) regs->num)
3656 return 0;
3658 if (regs->array[REGNO (x)].set_in_loop < 0)
3659 return 2;
3661 return regs->array[REGNO (x)].set_in_loop == 0;
3663 case MEM:
3664 /* Volatile memory references must be rejected. Do this before
3665 checking for read-only items, so that volatile read-only items
3666 will be rejected also. */
3667 if (MEM_VOLATILE_P (x))
3668 return 0;
3670 /* See if there is any dependence between a store and this load. */
3671 mem_list_entry = loop_info->store_mems;
3672 while (mem_list_entry)
3674 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3675 x, rtx_varies_p))
3676 return 0;
3678 mem_list_entry = XEXP (mem_list_entry, 1);
3681 /* It's not invalidated by a store in memory
3682 but we must still verify the address is invariant. */
3683 break;
3685 case ASM_OPERANDS:
3686 /* Don't mess with insns declared volatile. */
3687 if (MEM_VOLATILE_P (x))
3688 return 0;
3689 break;
3691 default:
3692 break;
3695 fmt = GET_RTX_FORMAT (code);
3696 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3698 if (fmt[i] == 'e')
3700 int tem = loop_invariant_p (loop, XEXP (x, i));
3701 if (tem == 0)
3702 return 0;
3703 if (tem == 2)
3704 conditional = 1;
3706 else if (fmt[i] == 'E')
3708 int j;
3709 for (j = 0; j < XVECLEN (x, i); j++)
3711 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3712 if (tem == 0)
3713 return 0;
3714 if (tem == 2)
3715 conditional = 1;
3721 return 1 + conditional;
3724 /* Return nonzero if all the insns in the loop that set REG
3725 are INSN and the immediately following insns,
3726 and if each of those insns sets REG in an invariant way
3727 (not counting uses of REG in them).
3729 The value is 2 if some of these insns are only conditionally invariant.
3731 We assume that INSN itself is the first set of REG
3732 and that its source is invariant. */
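/* This catches, e.g., a constant built by a pair of consecutive insns
   (register R here is purely illustrative):

   (set (reg R) (const_int 0))
   (set (reg R) (ior (reg R) (const_int 65536)))

   Although R is set twice in the loop, both sources are invariant once
   uses of R itself are discounted (set_in_loop is temporarily zeroed
   below), so R may still be treated as invariant. */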
3734 static int
3735 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3736 rtx insn)
3738 struct loop_regs *regs = LOOP_REGS (loop);
3739 rtx p = insn;
3740 unsigned int regno = REGNO (reg);
3741 rtx temp;
3742 /* Number of sets we have to insist on finding after INSN. */
3743 int count = n_sets - 1;
3744 int old = regs->array[regno].set_in_loop;
3745 int value = 0;
3746 int this;
3748 /* If N_SETS hit the limit, we can't rely on its value. */
3749 if (n_sets == 127)
3750 return 0;
3752 regs->array[regno].set_in_loop = 0;
3754 while (count > 0)
3756 enum rtx_code code;
3757 rtx set;
3759 p = NEXT_INSN (p);
3760 code = GET_CODE (p);
3762 /* If library call, skip to end of it. */
3763 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3764 p = XEXP (temp, 0);
3766 this = 0;
3767 if (code == INSN
3768 && (set = single_set (p))
3769 && REG_P (SET_DEST (set))
3770 && REGNO (SET_DEST (set)) == regno)
3772 this = loop_invariant_p (loop, SET_SRC (set));
3773 if (this != 0)
3774 value |= this;
3775 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3777 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3778 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3779 notes are OK. */
3780 this = (CONSTANT_P (XEXP (temp, 0))
3781 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3782 && loop_invariant_p (loop, XEXP (temp, 0))));
3783 if (this != 0)
3784 value |= this;
3787 if (this != 0)
3788 count--;
3789 else if (code != NOTE)
3791 regs->array[regno].set_in_loop = old;
3792 return 0;
3796 regs->array[regno].set_in_loop = old;
3797 /* If loop_invariant_p ever returned 2, return a value greater than 1. */
3798 return 1 + (value & 2);
3801 /* Look at all uses (not sets) of registers in X. For each, if it is
3802 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3803 a different insn, set USAGE[REGNO] to const0_rtx. */
3805 static void
3806 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3808 enum rtx_code code = GET_CODE (x);
3809 const char *fmt = GET_RTX_FORMAT (code);
3810 int i, j;
3812 if (code == REG)
3813 regs->array[REGNO (x)].single_usage
3814 = (regs->array[REGNO (x)].single_usage != 0
3815 && regs->array[REGNO (x)].single_usage != insn)
3816 ? const0_rtx : insn;
3818 else if (code == SET)
3820 /* Don't count SET_DEST if it is a REG; otherwise count things
3821 in SET_DEST because if a register is partially modified, it won't
3822 show up as a potential movable so we don't care how USAGE is set
3823 for it. */
3824 if (!REG_P (SET_DEST (x)))
3825 find_single_use_in_loop (regs, insn, SET_DEST (x));
3826 find_single_use_in_loop (regs, insn, SET_SRC (x));
3828 else
3829 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3831 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3832 find_single_use_in_loop (regs, insn, XEXP (x, i));
3833 else if (fmt[i] == 'E')
3834 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3835 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3839 /* Count and record any set in X which is contained in INSN. Update
3840 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3841 in X. */
3843 static void
3844 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3846 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3847 /* Don't move a reg that has an explicit clobber.
3848 It's not worth the pain to try to do it correctly. */
3849 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3851 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3853 rtx dest = SET_DEST (x);
3854 while (GET_CODE (dest) == SUBREG
3855 || GET_CODE (dest) == ZERO_EXTRACT
3856 || GET_CODE (dest) == STRICT_LOW_PART)
3857 dest = XEXP (dest, 0);
3858 if (REG_P (dest))
3860 int i;
3861 int regno = REGNO (dest);
3862 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3864 /* If this is the first setting of this reg
3865 in current basic block, and it was set before,
3866 it must be set in two basic blocks, so it cannot
3867 be moved out of the loop. */
3868 if (regs->array[regno].set_in_loop > 0
3869 && last_set[regno] == 0)
3870 regs->array[regno+i].may_not_optimize = 1;
3871 /* If this is not first setting in current basic block,
3872 see if reg was used in between previous one and this.
3873 If so, neither one can be moved. */
3874 if (last_set[regno] != 0
3875 && reg_used_between_p (dest, last_set[regno], insn))
3876 regs->array[regno+i].may_not_optimize = 1;
3877 if (regs->array[regno+i].set_in_loop < 127)
3878 ++regs->array[regno+i].set_in_loop;
3879 last_set[regno+i] = insn;
3885 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3886 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3887 contained in insn INSN is used by any insn that precedes INSN in
3888 cyclic order starting from the loop entry point.
3890 We don't want to use INSN_LUID here because if we restrict INSN to those
3891 that have a valid INSN_LUID, it means we cannot move an invariant out
3892 from an inner loop past two loops. */
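/* E.g. when the loop is entered at SCAN_START in the middle of its body,
   the walk below proceeds from SCAN_START towards LOOP->END, wraps around
   to LOOP->START, and stops at INSN, so a use that precedes INSN in
   execution order is found even if it follows INSN in the insn stream. */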
3894 static int
3895 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3897 rtx reg = SET_DEST (set);
3898 rtx p;
3900 /* Scan forward checking for register usage. If we hit INSN, we
3901 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3902 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3904 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3905 return 1;
3907 if (p == loop->end)
3908 p = loop->start;
3911 return 0;
3915 /* Information we collect about arrays that we might want to prefetch. */
3916 struct prefetch_info
3918 struct iv_class *class; /* Class this prefetch is based on. */
3919 struct induction *giv; /* GIV this prefetch is based on. */
3920 rtx base_address; /* Start prefetching from this address plus
3921 index. */
3922 HOST_WIDE_INT index;
3923 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3924 iteration. */
3925 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3926 prefetch area in one iteration. */
3927 unsigned int total_bytes; /* Total bytes loop will access in this block.
3928 This is set only for loops with known
3929 iteration counts and is 0xffffffff
3930 otherwise. */
3931 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3932 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3933 unsigned int write : 1; /* 1 for read/write prefetches. */
3936 /* Data used by check_store function. */
3937 struct check_store_data
3939 rtx mem_address;
3940 int mem_write;
3943 static void check_store (rtx, rtx, void *);
3944 static void emit_prefetch_instructions (struct loop *);
3945 static int rtx_equal_for_prefetch_p (rtx, rtx);
3947 /* Set mem_write when mem_address is found. Used as callback to
3948 note_stores. */
3949 static void
3950 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3952 struct check_store_data *d = (struct check_store_data *) data;
3954 if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3955 d->mem_write = 1;
3958 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3959 important to get some addresses combined. Later more sophisticated
3960 transformations can be added when necessary.
3962 ??? The same trick of swapping operands is done at several other places.
3963 It would be nice to develop some common way to handle this. */
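/* For instance, (plus (reg 100) (reg 101)) and (plus (reg 101) (reg 100))
   compare equal here although rtx_equal_p would treat them as different,
   which lets two GIVs walking the same array be merged into one prefetch.
   (The register numbers are illustrative.) */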
3965 static int
3966 rtx_equal_for_prefetch_p (rtx x, rtx y)
3968 int i;
3969 int j;
3970 enum rtx_code code = GET_CODE (x);
3971 const char *fmt;
3973 if (x == y)
3974 return 1;
3975 if (code != GET_CODE (y))
3976 return 0;
3978 if (GET_MODE (x) != GET_MODE (y))
3979 return 0;
3981 switch (code)
3983 case PC:
3984 case CC0:
3985 case CONST_INT:
3986 case CONST_DOUBLE:
3987 return 0;
3989 case LABEL_REF:
3990 return XEXP (x, 0) == XEXP (y, 0);
3992 default:
3993 break;
3996 if (COMMUTATIVE_ARITH_P (x))
3998 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3999 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
4000 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
4001 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
4004 /* Compare the elements. If any pair of corresponding elements fails to
4005 match, return 0 for the whole thing. */
4007 fmt = GET_RTX_FORMAT (code);
4008 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4010 switch (fmt[i])
4012 case 'w':
4013 if (XWINT (x, i) != XWINT (y, i))
4014 return 0;
4015 break;
4017 case 'i':
4018 if (XINT (x, i) != XINT (y, i))
4019 return 0;
4020 break;
4022 case 'E':
4023 /* Two vectors must have the same length. */
4024 if (XVECLEN (x, i) != XVECLEN (y, i))
4025 return 0;
4027 /* And the corresponding elements must match. */
4028 for (j = 0; j < XVECLEN (x, i); j++)
4029 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
4030 XVECEXP (y, i, j)) == 0)
4031 return 0;
4032 break;
4034 case 'e':
4035 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
4036 return 0;
4037 break;
4039 case 's':
4040 if (strcmp (XSTR (x, i), XSTR (y, i)))
4041 return 0;
4042 break;
4044 case 'u':
4045 /* These are just backpointers, so they don't matter. */
4046 break;
4048 case '0':
4049 break;
4051 /* It is believed that rtx's at this level will never
4052 contain anything but integers and other rtx's,
4053 except for within LABEL_REFs and SYMBOL_REFs. */
4054 default:
4055 gcc_unreachable ();
4058 return 1;
4061 /* Remove constant addition value from the expression X (when present)
4062 and return it. */
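/* E.g. applied to (plus (reg 100) (const_int 16)) this returns 16 and
   leaves (reg 100) behind in *X; applied to a plain CONST_INT it returns
   its value and leaves const0_rtx. */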
4064 static HOST_WIDE_INT
4065 remove_constant_addition (rtx *x)
4067 HOST_WIDE_INT addval = 0;
4068 rtx exp = *x;
4070 /* Avoid clobbering a shared CONST expression. */
4071 if (GET_CODE (exp) == CONST)
4073 if (GET_CODE (XEXP (exp, 0)) == PLUS
4074 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
4075 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
4077 *x = XEXP (XEXP (exp, 0), 0);
4078 return INTVAL (XEXP (XEXP (exp, 0), 1));
4080 return 0;
4083 if (GET_CODE (exp) == CONST_INT)
4085 addval = INTVAL (exp);
4086 *x = const0_rtx;
4089 /* For plus expression recurse on ourself. */
4090 else if (GET_CODE (exp) == PLUS)
4092 addval += remove_constant_addition (&XEXP (exp, 0));
4093 addval += remove_constant_addition (&XEXP (exp, 1));
4095 /* If either operand was entirely constant, it is now const0_rtx;
4096 remove the redundant zero from the expression. */
4097 if (XEXP (exp, 0) == const0_rtx)
4098 *x = XEXP (exp, 1);
4099 else if (XEXP (exp, 1) == const0_rtx)
4100 *x = XEXP (exp, 0);
4103 return addval;
4106 /* Attempt to identify accesses to arrays that are most likely to cause cache
4107 misses, and emit prefetch instructions a few prefetch blocks forward.
4109 To detect the arrays we use the GIV information that was collected by the
4110 strength reduction pass.
4112 The prefetch instructions are generated after the GIV information is done
4113 and before the strength reduction process. The new GIVs are injected into
4114 the strength reduction tables, so the prefetch addresses are optimized as
4115 well.
4117 GIVs are split into base address, stride, and constant addition values.
4118 GIVs with the same address, stride and close addition values are combined
4119 into a single prefetch. Also writes to GIVs are detected, so that prefetch
4120 for write instructions can be used for the block we write to, on machines
4121 that support write prefetches.
4123 Several heuristics are used to determine when to prefetch. They are
4124 controlled by defined symbols that can be overridden for each target. */
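/* As a simplified sketch, for a loop such as

   for (i = 0; i < n; i++)
   sum += a[i];

   the DEST_ADDR giv for A[I] advances by a constant sizeof (*a) bytes per
   iteration, so the body can be augmented with roughly

   prefetch (&a[i] + AHEAD * PREFETCH_BLOCK);

   where AHEAD is derived below from SIMULTANEOUS_PREFETCHES and the number
   of distinct prefetches the loop needs. */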
4126 static void
4127 emit_prefetch_instructions (struct loop *loop)
4129 int num_prefetches = 0;
4130 int num_real_prefetches = 0;
4131 int num_real_write_prefetches = 0;
4132 int num_prefetches_before = 0;
4133 int num_write_prefetches_before = 0;
4134 int ahead = 0;
4135 int i;
4136 struct iv_class *bl;
4137 struct induction *iv;
4138 struct prefetch_info info[MAX_PREFETCHES];
4139 struct loop_ivs *ivs = LOOP_IVS (loop);
4141 if (!HAVE_prefetch || PREFETCH_BLOCK == 0)
4142 return;
4144 /* Consider only loops without calls. When a call is present, the loop
4145 is probably slow enough already to hide the memory latency. */
4146 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
4148 if (loop_dump_stream)
4149 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
4151 return;
4154 /* Don't prefetch in loops known to have few iterations. */
4155 if (PREFETCH_NO_LOW_LOOPCNT
4156 && LOOP_INFO (loop)->n_iterations
4157 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
4159 if (loop_dump_stream)
4160 fprintf (loop_dump_stream,
4161 "Prefetch: ignoring loop: not enough iterations.\n");
4162 return;
4165 /* Search all induction variables and pick those interesting for the prefetch
4166 machinery. */
4167 for (bl = ivs->list; bl; bl = bl->next)
4169 struct induction *biv = bl->biv, *biv1;
4170 int basestride = 0;
4172 biv1 = biv;
4174 /* Expect all BIVs to be executed in each iteration. This makes our
4175 analysis more conservative. */
4176 while (biv1)
4178 /* Discard non-constant additions that we can't handle well yet, and
4179 BIVs that are executed multiple times; such BIVs ought to be
4180 handled in the nested loop. We accept not_every_iteration BIVs,
4181 since these only result in larger strides and make our
4182 heuristics more conservative. */
4183 if (GET_CODE (biv->add_val) != CONST_INT)
4185 if (loop_dump_stream)
4187 fprintf (loop_dump_stream,
4188 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
4189 REGNO (biv->src_reg), INSN_UID (biv->insn));
4190 print_rtl (loop_dump_stream, biv->add_val);
4191 fprintf (loop_dump_stream, "\n");
4193 break;
4196 if (biv->maybe_multiple)
4198 if (loop_dump_stream)
4200 fprintf (loop_dump_stream,
4201 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
4202 REGNO (biv->src_reg), INSN_UID (biv->insn));
4203 print_rtl (loop_dump_stream, biv->add_val);
4204 fprintf (loop_dump_stream, "\n");
4206 break;
4209 basestride += INTVAL (biv1->add_val);
4210 biv1 = biv1->next_iv;
4213 if (biv1 || !basestride)
4214 continue;
4216 for (iv = bl->giv; iv; iv = iv->next_iv)
4218 rtx address;
4219 rtx temp;
4220 HOST_WIDE_INT index = 0;
4221 int add = 1;
4222 HOST_WIDE_INT stride = 0;
4223 int stride_sign = 1;
4224 struct check_store_data d;
4225 const char *ignore_reason = NULL;
4226 int size = GET_MODE_SIZE (GET_MODE (iv));
4228 /* See whether an induction variable is interesting to us and if
4229 not, report the reason. */
4230 if (iv->giv_type != DEST_ADDR)
4231 ignore_reason = "giv is not a destination address";
4233 /* We are interested only in constant stride memory references
4234 in order to be able to compute density easily. */
4235 else if (GET_CODE (iv->mult_val) != CONST_INT)
4236 ignore_reason = "stride is not constant";
4238 else
4240 stride = INTVAL (iv->mult_val) * basestride;
4241 if (stride < 0)
4243 stride = -stride;
4244 stride_sign = -1;
4247 /* On some targets, reversed order prefetches are not
4248 worthwhile. */
4249 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
4250 ignore_reason = "reversed order stride";
4252 /* Prefetch of accesses with an extreme stride might not be
4253 worthwhile, either. */
4254 else if (PREFETCH_NO_EXTREME_STRIDE
4255 && stride > PREFETCH_EXTREME_STRIDE)
4256 ignore_reason = "extreme stride";
4258 /* Ignore GIVs with varying add values; we can't predict the
4259 value for the next iteration. */
4260 else if (!loop_invariant_p (loop, iv->add_val))
4261 ignore_reason = "giv has varying add value";
4263 /* Ignore GIVs in the nested loops; they ought to have been
4264 handled already. */
4265 else if (iv->maybe_multiple)
4266 ignore_reason = "giv is in nested loop";
4269 if (ignore_reason != NULL)
4271 if (loop_dump_stream)
4272 fprintf (loop_dump_stream,
4273 "Prefetch: ignoring giv at %d: %s.\n",
4274 INSN_UID (iv->insn), ignore_reason);
4275 continue;
4278 /* Determine the pointer to the basic array we are examining. It is
4279 the sum of the BIV's initial value and the GIV's add_val. */
4280 address = copy_rtx (iv->add_val);
4281 temp = copy_rtx (bl->initial_value);
4283 address = simplify_gen_binary (PLUS, Pmode, temp, address);
4284 index = remove_constant_addition (&address);
4286 d.mem_write = 0;
4287 d.mem_address = *iv->location;
4289 /* When the GIV is not always executed, we might be better off by
4290 not dirtying the cache pages. */
4291 if (PREFETCH_CONDITIONAL || iv->always_executed)
4292 note_stores (PATTERN (iv->insn), check_store, &d);
4293 else
4295 if (loop_dump_stream)
4296 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
4297 INSN_UID (iv->insn), "in conditional code.");
4298 continue;
4301 /* Attempt to find another prefetch to the same array and see if we
4302 can merge this one. */
4303 for (i = 0; i < num_prefetches; i++)
4304 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
4305 && stride == info[i].stride)
4307 /* If both access the same array (the same location,
4308 differing only by a small constant index), merge the
4309 prefetches. Just issue the later one; the earlier one
4310 will be fetched by a previous iteration.
4311 The artificial threshold should not be too small,
4312 but also not bigger than the small portion of memory
4313 usually traversed by a single loop. */
4314 if (index >= info[i].index
4315 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4317 info[i].write |= d.mem_write;
4318 info[i].bytes_accessed += size;
4319 info[i].index = index;
4320 info[i].giv = iv;
4321 info[i].class = bl;
4322 info[num_prefetches].base_address = address;
4323 add = 0;
4324 break;
4327 if (index < info[i].index
4328 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4330 info[i].write |= d.mem_write;
4331 info[i].bytes_accessed += size;
4332 add = 0;
4333 break;
4337 /* Merging failed. */
4338 if (add)
4340 info[num_prefetches].giv = iv;
4341 info[num_prefetches].class = bl;
4342 info[num_prefetches].index = index;
4343 info[num_prefetches].stride = stride;
4344 info[num_prefetches].base_address = address;
4345 info[num_prefetches].write = d.mem_write;
4346 info[num_prefetches].bytes_accessed = size;
4347 num_prefetches++;
4348 if (num_prefetches >= MAX_PREFETCHES)
4350 if (loop_dump_stream)
4351 fprintf (loop_dump_stream,
4352 "Maximal number of prefetches exceeded.\n");
4353 return;
4359 for (i = 0; i < num_prefetches; i++)
4361 int density;
4363 /* Attempt to calculate the total number of bytes fetched by all
4364 iterations of the loop. Avoid overflow. */
4365 if (LOOP_INFO (loop)->n_iterations
4366 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4367 >= LOOP_INFO (loop)->n_iterations))
4368 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4369 else
4370 info[i].total_bytes = 0xffffffff;
4372 density = info[i].bytes_accessed * 100 / info[i].stride;
4374 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4375 if (PREFETCH_ONLY_DENSE_MEM)
4376 if (density * 256 > PREFETCH_DENSE_MEM * 100
4377 && (info[i].total_bytes / PREFETCH_BLOCK
4378 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4380 info[i].prefetch_before_loop = 1;
4381 info[i].prefetch_in_loop
4382 = (info[i].total_bytes / PREFETCH_BLOCK
4383 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4385 else
4387 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4388 if (loop_dump_stream)
4389 fprintf (loop_dump_stream,
4390 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4391 INSN_UID (info[i].giv->insn), density);
4393 else
4394 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4396 /* Find how many prefetch instructions we'll use within the loop. */
4397 if (info[i].prefetch_in_loop != 0)
4399 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4400 / PREFETCH_BLOCK);
4401 num_real_prefetches += info[i].prefetch_in_loop;
4402 if (info[i].write)
4403 num_real_write_prefetches += info[i].prefetch_in_loop;
4407 /* Determine how many iterations ahead to prefetch within the loop, based
4408 on how many prefetches we currently expect to do within the loop. */
4409 if (num_real_prefetches != 0)
4411 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4413 if (loop_dump_stream)
4414 fprintf (loop_dump_stream,
4415 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4416 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4417 num_real_prefetches = 0, num_real_write_prefetches = 0;
4420 /* We'll also use AHEAD to determine how many prefetch instructions to
4421 emit before a loop, so don't leave it zero. */
4422 if (ahead == 0)
4423 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4425 for (i = 0; i < num_prefetches; i++)
4427 /* Update if we've decided not to prefetch anything within the loop. */
4428 if (num_real_prefetches == 0)
4429 info[i].prefetch_in_loop = 0;
4431 /* Find how many prefetch instructions we'll use before the loop. */
4432 if (info[i].prefetch_before_loop != 0)
4434 int n = info[i].total_bytes / PREFETCH_BLOCK;
4435 if (n > ahead)
4436 n = ahead;
4437 info[i].prefetch_before_loop = n;
4438 num_prefetches_before += n;
4439 if (info[i].write)
4440 num_write_prefetches_before += n;
4443 if (loop_dump_stream)
4445 if (info[i].prefetch_in_loop == 0
4446 && info[i].prefetch_before_loop == 0)
4447 continue;
4448 fprintf (loop_dump_stream, "Prefetch insn: %d",
4449 INSN_UID (info[i].giv->insn));
4450 fprintf (loop_dump_stream,
4451 "; in loop: %d; before: %d; %s\n",
4452 info[i].prefetch_in_loop,
4453 info[i].prefetch_before_loop,
4454 info[i].write ? "read/write" : "read only");
4455 fprintf (loop_dump_stream,
4456 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4457 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4458 info[i].bytes_accessed, info[i].total_bytes);
4459 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4460 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4461 info[i].index, info[i].stride);
4462 print_rtl (loop_dump_stream, info[i].base_address);
4463 fprintf (loop_dump_stream, "\n");
4467 if (num_real_prefetches + num_prefetches_before > 0)
4469 /* Record that this loop uses prefetch instructions. */
4470 LOOP_INFO (loop)->has_prefetch = 1;
4472 if (loop_dump_stream)
4474 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4475 num_real_prefetches, num_real_write_prefetches);
4476 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4477 num_prefetches_before, num_write_prefetches_before);
4481 for (i = 0; i < num_prefetches; i++)
4483 int y;
4485 for (y = 0; y < info[i].prefetch_in_loop; y++)
4487 rtx loc = copy_rtx (*info[i].giv->location);
4488 rtx insn;
4489 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4490 rtx before_insn = info[i].giv->insn;
4491 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4492 rtx seq;
4494 /* We can save some effort by offsetting the address on
4495 architectures with offsettable memory references. */
4496 if (offsettable_address_p (0, VOIDmode, loc))
4497 loc = plus_constant (loc, bytes_ahead);
4498 else
4500 rtx reg = gen_reg_rtx (Pmode);
4501 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4502 GEN_INT (bytes_ahead), reg,
4503 0, before_insn);
4504 loc = reg;
4507 start_sequence ();
4508 /* Make sure the address operand is valid for prefetch. */
4509 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4510 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4511 loc = force_reg (Pmode, loc);
4512 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4513 GEN_INT (3)));
4514 seq = get_insns ();
4515 end_sequence ();
4516 emit_insn_before (seq, before_insn);
4518 /* Check all insns emitted and record the new GIV
4519 information. */
4520 insn = NEXT_INSN (prev_insn);
4521 while (insn != before_insn)
4523 insn = check_insn_for_givs (loop, insn,
4524 info[i].giv->always_executed,
4525 info[i].giv->maybe_multiple);
4526 insn = NEXT_INSN (insn);
4530 if (PREFETCH_BEFORE_LOOP)
4532 /* Emit insns before the loop to fetch the first cache lines or,
4533 if we're not prefetching within the loop, everything we expect
4534 to need. */
4535 for (y = 0; y < info[i].prefetch_before_loop; y++)
4537 rtx reg = gen_reg_rtx (Pmode);
4538 rtx loop_start = loop->start;
4539 rtx init_val = info[i].class->initial_value;
4540 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4541 info[i].giv->add_val,
4542 GEN_INT (y * PREFETCH_BLOCK));
4544 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4545 non-constant INIT_VAL to have the same mode as REG, which
4546 in this case we know to be Pmode. */
4547 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4549 rtx seq;
4551 start_sequence ();
4552 init_val = convert_to_mode (Pmode, init_val, 0);
4553 seq = get_insns ();
4554 end_sequence ();
4555 loop_insn_emit_before (loop, 0, loop_start, seq);
4557 loop_iv_add_mult_emit_before (loop, init_val,
4558 info[i].giv->mult_val,
4559 add_val, reg, 0, loop_start);
4560 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4561 GEN_INT (3)),
4562 loop_start);
4567 return;
4570 /* Communication with routines called via `note_stores'. */
4572 static rtx note_insn;
4574 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4576 static rtx addr_placeholder;
4578 /* ??? Unfinished optimizations, and possible future optimizations,
4579 for the strength reduction code. */
4581 /* ??? The interaction of biv elimination, and recognition of 'constant'
4582 bivs, may cause problems. */
4584 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4585 performance problems.
4587 Perhaps don't eliminate things that can be combined with an addressing
4588 mode. Find all givs that have the same biv, mult_val, and add_val;
4589 then for each giv, check to see if its only use dies in a following
4590 memory address. If so, generate a new memory address and check to see
4591 if it is valid. If it is valid, then store the modified memory address,
4592 otherwise, mark the giv as not done so that it will get its own iv. */
4594 /* ??? Could try to optimize branches when it is known that a biv is always
4595 positive. */
4597 /* ??? When replacing a biv in a compare insn, we should replace it with the
4598 closest giv so that an optimized branch can still be recognized by the combiner,
4599 e.g. the VAX acb insn. */
4601 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4602 was rerun in loop_optimize whenever a register was added or moved.
4603 Also, some of the optimizations could be a little less conservative. */
4605 /* Searches the insns between INSN and LOOP->END. Returns 1 if there
4606 is a backward branch in that range that branches to somewhere between
4607 LOOP->START and INSN. Returns 0 otherwise. */
4609 /* ??? This is a quadratic algorithm. It could be rewritten to be linear.
4610 In practice, this is not a problem, because this function is seldom called,
4611 and uses a negligible amount of CPU time on average. */
4613 static int
4614 back_branch_in_range_p (const struct loop *loop, rtx insn)
4616 rtx p, q, target_insn;
4617 rtx loop_start = loop->start;
4618 rtx loop_end = loop->end;
4619 rtx orig_loop_end = loop->end;
4621 /* Stop before we get to the backward branch at the end of the loop. */
4622 loop_end = prev_nonnote_insn (loop_end);
4623 if (BARRIER_P (loop_end))
4624 loop_end = PREV_INSN (loop_end);
4626 /* In case INSN has been deleted, search forward for the first
4627 non-deleted insn following it. */
4628 while (INSN_DELETED_P (insn))
4629 insn = NEXT_INSN (insn);
4631 /* Check for the case where insn is the last insn in the loop. Deal
4632 with the case where INSN was a deleted loop test insn, in which case
4633 it will now be the NOTE_LOOP_END. */
4634 if (insn == loop_end || insn == orig_loop_end)
4635 return 0;
4637 for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
4639 if (JUMP_P (p))
4641 target_insn = JUMP_LABEL (p);
4643 /* Search from loop_start to insn, to see if one of them is
4644 the target_insn. We can't use INSN_LUID comparisons here,
4645 since insn may not have an LUID entry. */
4646 for (q = loop_start; q != insn; q = NEXT_INSN (q))
4647 if (q == target_insn)
4648 return 1;
4652 return 0;
4655 /* Scan the loop body and call FNCALL for each insn. In addition to the
4656 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4657 the callback.
4659 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed at
4660 least once for every loop iteration except for the last one.
4662 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4663 every loop iteration. */
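/* For example (labels L1 and L2 are illustrative; assume the loop is
   entered at its top), in a body laid out as

   l1: insn B
   l2: insn C
   if (cond) goto l1;
   insn D

   insn C is scanned with MAYBE_MULTIPLE set, since the branch back to L1
   can execute it several times within a single iteration, while after the
   conditional branch the pass can no longer assume the following insns
   (insn D) run on every iteration, so they are scanned with
   NOT_EVERY_ITERATION set. */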
4665 typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int);
4666 static void
4667 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4669 int not_every_iteration = 0;
4670 int maybe_multiple = 0;
4671 int past_loop_latch = 0;
4672 bool exit_test_is_entry = false;
4673 rtx p;
4675 /* If loop_scan_start points to the loop exit test, the loop body
4676 cannot be counted on running on every iteration, and we have to
4677 be wary of subversive use of gotos inside expression
4678 statements. */
4679 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4681 exit_test_is_entry = true;
4682 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4685 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4686 for (p = next_insn_in_loop (loop, loop->scan_start);
4687 p != NULL_RTX;
4688 p = next_insn_in_loop (loop, p))
4690 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4692 /* Past CODE_LABEL, we get to insns that may be executed multiple
4693 times. The only way we can be sure that they can't is if every
4694 jump insn between here and the end of the loop either
4695 returns, exits the loop, is a jump to a location that is still
4696 behind the label, or is a jump to the loop start. */
4698 if (LABEL_P (p))
4700 rtx insn = p;
4702 maybe_multiple = 0;
4704 while (1)
4706 insn = NEXT_INSN (insn);
4707 if (insn == loop->scan_start)
4708 break;
4709 if (insn == loop->end)
4711 if (loop->top != 0)
4712 insn = loop->top;
4713 else
4714 break;
4715 if (insn == loop->scan_start)
4716 break;
4719 if (JUMP_P (insn)
4720 && GET_CODE (PATTERN (insn)) != RETURN
4721 && (!any_condjump_p (insn)
4722 || (JUMP_LABEL (insn) != 0
4723 && JUMP_LABEL (insn) != loop->scan_start
4724 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4726 maybe_multiple = 1;
4727 break;
4732 /* Past a jump, we get to insns for which we can't count
4733 on whether they will be executed during each iteration. */
4734 /* This code appears twice in strength_reduce. There is also similar
4735 code in scan_loop. */
4736 if (JUMP_P (p)
4737 /* If we enter the loop in the middle, and scan around to the
4738 beginning, don't set not_every_iteration for that.
4739 This can be any kind of jump, since we want to know if insns
4740 will be executed if the loop is executed. */
4741 && (exit_test_is_entry
4742 || !(JUMP_LABEL (p) == loop->top
4743 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4744 && any_uncondjump_p (p))
4745 || (NEXT_INSN (p) == loop->end
4746 && any_condjump_p (p))))))
4748 rtx label = 0;
4750 /* If this is a jump outside the loop, then it also doesn't
4751 matter. Check to see if the target of this branch is on the
4752 loop->exits_labels list. */
4754 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4755 if (XEXP (label, 0) == JUMP_LABEL (p))
4756 break;
4758 if (!label)
4759 not_every_iteration = 1;
4762 /* Note if we pass a loop latch. If we do, then we cannot clear
4763 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4764 a loop since a jump before the last CODE_LABEL may have started
4765 a new loop iteration.
4767 Note that LOOP_TOP is only set for rotated loops and we need
4768 this check for all loops, so compare against the CODE_LABEL
4769 which immediately follows LOOP_START. */
4770 if (JUMP_P (p)
4771 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4772 past_loop_latch = 1;
4774 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4775 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4776 or not an insn is known to be executed each iteration of the
4777 loop, whether or not any iterations are known to occur.
4779 Therefore, if we have just passed a label and have no more labels
4780 between here and the test insn of the loop, and we have not passed
4781 a jump to the top of the loop, then we know these insns will be
4782 executed each iteration. */
4784 if (not_every_iteration
4785 && !past_loop_latch
4786 && LABEL_P (p)
4787 && no_labels_between_p (p, loop->end))
4788 not_every_iteration = 0;
4792 static void
4793 loop_bivs_find (struct loop *loop)
4795 struct loop_regs *regs = LOOP_REGS (loop);
4796 struct loop_ivs *ivs = LOOP_IVS (loop);
4797 /* Temporary list pointers for traversing ivs->list. */
4798 struct iv_class *bl, **backbl;
4800 ivs->list = 0;
4802 for_each_insn_in_loop (loop, check_insn_for_bivs);
4804 /* Scan ivs->list to remove all regs that proved not to be bivs.
4805 Make a sanity check against regs->n_times_set. */
4806 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4808 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4809 /* The above happens if the register was modified by a subreg, etc. */
4810 /* Make sure it is not recognized as a basic induction var: */
4811 || regs->array[bl->regno].n_times_set != bl->biv_count
4812 /* If never incremented, it is invariant that we decided not to
4813 move. So leave it alone. */
4814 || ! bl->incremented)
4816 if (loop_dump_stream)
4817 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4818 bl->regno,
4819 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4820 ? "not induction variable"
4821 : (! bl->incremented ? "never incremented"
4822 : "count error")));
4824 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4825 *backbl = bl->next;
4827 else
4829 backbl = &bl->next;
4831 if (loop_dump_stream)
4832 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4838 /* Determine how BIVs are initialized by looking through the pre-header
4839 extended basic block. */
4840 static void
4841 loop_bivs_init_find (struct loop *loop)
4843 struct loop_ivs *ivs = LOOP_IVS (loop);
4844 /* Temporary list pointers for traversing ivs->list. */
4845 struct iv_class *bl;
4846 int call_seen;
4847 rtx p;
4849 /* Find initial value for each biv by searching backwards from loop_start,
4850 halting at first label. Also record any test condition. */
4852 call_seen = 0;
4853 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4855 rtx test;
4857 note_insn = p;
4859 if (CALL_P (p))
4860 call_seen = 1;
4862 if (INSN_P (p))
4863 note_stores (PATTERN (p), record_initial, ivs);
4865 /* Record any test of a biv that branches around the loop if there is no
4866 store between it and the start of the loop. We only care about tests with
4867 constants and registers and only certain of those. */
4868 if (JUMP_P (p)
4869 && JUMP_LABEL (p) != 0
4870 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4871 && (test = get_condition_for_loop (loop, p)) != 0
4872 && REG_P (XEXP (test, 0))
4873 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4874 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4875 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4876 && bl->init_insn == 0)
4878 /* If an NE test, we have an initial value! */
4879 if (GET_CODE (test) == NE)
4881 bl->init_insn = p;
4882 bl->init_set = gen_rtx_SET (VOIDmode,
4883 XEXP (test, 0), XEXP (test, 1));
4885 else
4886 bl->initial_test = test;
4892 /* Look at each biv and see if we can say anything better about its
4893 initial value from any initializing insns set up above. (This is done
4894 in two passes to avoid missing SETs in a PARALLEL.) */
4895 static void
4896 loop_bivs_check (struct loop *loop)
4898 struct loop_ivs *ivs = LOOP_IVS (loop);
4899 /* Temporary list pointers for traversing ivs->list. */
4900 struct iv_class *bl;
4901 struct iv_class **backbl;
4903 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4905 rtx src;
4906 rtx note;
4908 if (! bl->init_insn)
4909 continue;
4911 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4912 is a constant, use the value of that. */
4913 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4914 && CONSTANT_P (XEXP (note, 0)))
4915 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4916 && CONSTANT_P (XEXP (note, 0))))
4917 src = XEXP (note, 0);
4918 else
4919 src = SET_SRC (bl->init_set);
4921 if (loop_dump_stream)
4922 fprintf (loop_dump_stream,
4923 "Biv %d: initialized at insn %d: initial value ",
4924 bl->regno, INSN_UID (bl->init_insn));
4926 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4927 || GET_MODE (src) == VOIDmode)
4928 && valid_initial_value_p (src, bl->init_insn,
4929 LOOP_INFO (loop)->pre_header_has_call,
4930 loop->start))
4932 bl->initial_value = src;
4934 if (loop_dump_stream)
4936 print_simple_rtl (loop_dump_stream, src);
4937 fputc ('\n', loop_dump_stream);
4940 /* If we can't make it a giv,
4941 let biv keep initial value of "itself". */
4942 else if (loop_dump_stream)
4943 fprintf (loop_dump_stream, "is complex\n");
4948 /* Search the loop for general induction variables. */
4950 static void
4951 loop_givs_find (struct loop* loop)
4953 for_each_insn_in_loop (loop, check_insn_for_givs);
4957 /* For each giv for which we still don't know whether or not it is
4958 replaceable, check to see if it is replaceable because its final value
4959 can be calculated. */
4961 static void
4962 loop_givs_check (struct loop *loop)
4964 struct loop_ivs *ivs = LOOP_IVS (loop);
4965 struct iv_class *bl;
4967 for (bl = ivs->list; bl; bl = bl->next)
4969 struct induction *v;
4971 for (v = bl->giv; v; v = v->next_iv)
4972 if (! v->replaceable && ! v->not_replaceable)
4973 check_final_value (loop, v);
4977 /* Try to generate the simplest rtx for the expression
4978 (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
4979 value of giv's. */
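/* E.g. fold_rtx_mult_add (const2_rtx, GEN_INT (3), GEN_INT (4), SImode)
   folds all the way down to (const_int 10); with non-constant operands
   it produces the corresponding MULT and PLUS rtx instead. */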
4981 static rtx
4982 fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode)
4984 rtx temp, mult_res;
4985 rtx result;
4987 /* The modes must all be the same. This should always be true. For now,
4988 check to make sure. */
4989 gcc_assert (GET_MODE (mult1) == mode || GET_MODE (mult1) == VOIDmode);
4990 gcc_assert (GET_MODE (mult2) == mode || GET_MODE (mult2) == VOIDmode);
4991 gcc_assert (GET_MODE (add1) == mode || GET_MODE (add1) == VOIDmode);
4993 /* Ensure that if at least one of mult1/mult2 are constant, then mult2
4994 will be a constant. */
4995 if (GET_CODE (mult1) == CONST_INT)
4997 temp = mult2;
4998 mult2 = mult1;
4999 mult1 = temp;
5002 mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
5003 if (! mult_res)
5004 mult_res = gen_rtx_MULT (mode, mult1, mult2);
5006 /* Again, put the constant second. */
5007 if (GET_CODE (add1) == CONST_INT)
5009 temp = add1;
5010 add1 = mult_res;
5011 mult_res = temp;
5014 result = simplify_binary_operation (PLUS, mode, add1, mult_res);
5015 if (! result)
5016 result = gen_rtx_PLUS (mode, add1, mult_res);
5018 return result;
5021 /* Searches the list of induction struct's for the biv BL, to try to calculate
5022 the total increment value for one iteration of the loop as a constant.
5024 Returns the increment value as an rtx, simplified as much as possible,
5025 if it can be calculated. Otherwise, returns 0. */
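/* E.g. for a biv stepped twice in the body,

   i += 2; ... i += 3;

   with both increments always executed exactly once per iteration, the
   result is (const_int 5); if an increment may be skipped or executed
   more than once per iteration, 0 is returned instead. */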
5027 static rtx
5028 biv_total_increment (const struct iv_class *bl)
5030 struct induction *v;
5031 rtx result;
5033 /* For increment, must check every instruction that sets it. Each
5034 instruction must be executed only once each time through the loop.
5035 To verify this, we check that the insn is always executed, and that
5036 there are no backward branches after the insn that branch to before it.
5037 Also, the insn must have a mult_val of one (to make sure it really is
5038 an increment). */
5040 result = const0_rtx;
5041 for (v = bl->biv; v; v = v->next_iv)
5043 if (v->always_computable && v->mult_val == const1_rtx
5044 && ! v->maybe_multiple
5045 && SCALAR_INT_MODE_P (v->mode))
5047 /* If we have already counted it, skip it. */
5048 if (v->same)
5049 continue;
5051 result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
5053 else
5054 return 0;
5057 return result;
5060 /* Try to prove that the register is dead after the loop exits. Trace every
5061 loop exit looking for an insn that will always be executed, which sets
5062 the register to some value, and appears before the first use of the register
5063 is found. If successful, then return 1, otherwise return 0. */
5065 /* ?? Could be made more intelligent in the handling of jumps, so that
5066 it can search past if statements and other similar structures. */
5068 static int
5069 reg_dead_after_loop (const struct loop *loop, rtx reg)
5071 rtx insn, label;
5072 int jump_count = 0;
5073 int label_count = 0;
5075 /* In addition to checking all exits of this loop, we must also check
5076 all exits of inner nested loops that would exit this loop. We don't
5077 have any way to identify those, so we just give up if there are any
5078 such inner loop exits. */
5080 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
5081 label_count++;
5083 if (label_count != loop->exit_count)
5084 return 0;
5086 /* HACK: Must also search the loop fall through exit, create a label_ref
5087 here which points to the loop->end, and append the loop_number_exit_labels
5088 list to it. */
5089 label = gen_rtx_LABEL_REF (Pmode, loop->end);
5090 LABEL_NEXTREF (label) = loop->exit_labels;
5092 for (; label; label = LABEL_NEXTREF (label))
5094 /* Succeed if we find an insn which sets the biv or if we reach the end
5095 of the function. Fail if we find an insn that uses the biv, or if we
5096 come to a conditional jump. */
5098 insn = NEXT_INSN (XEXP (label, 0));
5099 while (insn)
5101 if (INSN_P (insn))
5103 rtx set, note;
5105 if (reg_referenced_p (reg, PATTERN (insn)))
5106 return 0;
5108 note = find_reg_equal_equiv_note (insn);
5109 if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0)))
5110 return 0;
5112 set = single_set (insn);
5113 if (set && rtx_equal_p (SET_DEST (set), reg))
5114 break;
5116 if (JUMP_P (insn))
5118 if (GET_CODE (PATTERN (insn)) == RETURN)
5119 break;
5120 else if (!any_uncondjump_p (insn)
5121 /* Prevent infinite loop following infinite loops. */
5122 || jump_count++ > 20)
5123 return 0;
5124 else
5125 insn = JUMP_LABEL (insn);
5129 insn = NEXT_INSN (insn);
5133 /* Success, the register is dead on all loop exits. */
5134 return 1;
5137 /* Try to calculate the final value of the biv, the value it will have at
5138 the end of the loop. If we can do it, return that value. */
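/* E.g. a biv starting at 0 and incremented by 4 in a loop known to run
   exactly 10 times, with no other exits, has the final value 40; insns
   computing 0 + 10 * 4 into a fresh pseudo are emitted after the loop. */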
5140 static rtx
5141 final_biv_value (const struct loop *loop, struct iv_class *bl)
5143 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
5144 rtx increment, tem;
5146 /* ??? This only works for MODE_INT biv's. Reject all others for now. */
5148 if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
5149 return 0;
5151 /* The final value for reversed bivs must be calculated differently than
5152 for ordinary bivs. In this case, there is already an insn after the
5153 loop which sets this biv's final value (if necessary), and there are
5154 no other loop exits, so we can return any value. */
5155 if (bl->reversed)
5157 if (loop_dump_stream)
5158 fprintf (loop_dump_stream,
5159 "Final biv value for %d, reversed biv.\n", bl->regno);
5161 return const0_rtx;
5164 /* Try to calculate the final value as initial value + (number of iterations
5165 * increment). For this to work, increment must be invariant, the only
5166 exit from the loop must be the fall through at the bottom (otherwise
5167 it may not have its final value when the loop exits), and the initial
5168 value of the biv must be invariant. */
5170 if (n_iterations != 0
5171 && ! loop->exit_count
5172 && loop_invariant_p (loop, bl->initial_value))
5174 increment = biv_total_increment (bl);
5176 if (increment && loop_invariant_p (loop, increment))
5178 /* Can calculate the loop exit value, emit insns after loop
5179 end to calculate this value into a temporary register in
5180 case it is needed later. */
5182 tem = gen_reg_rtx (bl->biv->mode);
5183 record_base_value (REGNO (tem), bl->biv->add_val, 0);
5184 loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations),
5185 bl->initial_value, tem);
5187 if (loop_dump_stream)
5188 fprintf (loop_dump_stream,
5189 "Final biv value for %d, calculated.\n", bl->regno);
5191 return tem;
5195 /* Check to see if the biv is dead at all loop exits. */
5196 if (reg_dead_after_loop (loop, bl->biv->src_reg))
5198 if (loop_dump_stream)
5199 fprintf (loop_dump_stream,
5200 "Final biv value for %d, biv dead after loop exit.\n",
5201 bl->regno);
5203 return const0_rtx;
5206 return 0;
5209 /* Return nonzero if it is possible to eliminate the biv BL provided
5210 all givs are reduced. This is possible if either the reg is not
5211 used outside the loop, or we can compute what its final value will
5212 be. */
5214 static int
5215 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
5216 int threshold, int insn_count)
5218 /* For architectures with a decrement_and_branch_until_zero insn,
5219 don't do this if we put a REG_NONNEG note on the endtest for this
5220 biv. */
5222 #ifdef HAVE_decrement_and_branch_until_zero
5223 if (bl->nonneg)
5225 if (loop_dump_stream)
5226 fprintf (loop_dump_stream,
5227 "Cannot eliminate nonneg biv %d.\n", bl->regno);
5228 return 0;
5230 #endif
5232 /* Check that biv is used outside loop or if it has a final value.
5233 Compare against bl->init_insn rather than loop->start. We aren't
5234 concerned with any uses of the biv between init_insn and
5235 loop->start since these won't be affected by the value of the biv
5236 elsewhere in the function, so long as init_insn doesn't use the
5237 biv itself. */
5239 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
5240 && bl->init_insn
5241 && INSN_UID (bl->init_insn) < max_uid_for_loop
5242 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
5243 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
5244 || (bl->final_value = final_biv_value (loop, bl)))
5245 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
5247 if (loop_dump_stream)
5249 fprintf (loop_dump_stream,
5250 "Cannot eliminate biv %d.\n",
5251 bl->regno);
5252 fprintf (loop_dump_stream,
5253 "First use: insn %d, last use: insn %d.\n",
5254 REGNO_FIRST_UID (bl->regno),
5255 REGNO_LAST_UID (bl->regno));
5257 return 0;
5261 /* Reduce each giv of BL that we have decided to reduce. */
5263 static void
5264 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
5266 struct induction *v;
5268 for (v = bl->giv; v; v = v->next_iv)
5270 struct induction *tv;
5271 if (! v->ignore && v->same == 0)
5273 int auto_inc_opt = 0;
5275 /* If the code for derived givs immediately below has already
5276 allocated a new_reg, we must keep it. */
5277 if (! v->new_reg)
5278 v->new_reg = gen_reg_rtx (v->mode);
5280 #ifdef AUTO_INC_DEC
5281 /* If the target has auto-increment addressing modes, and
5282 this is an address giv, then try to put the increment
5283 immediately after its use, so that flow can create an
5284 auto-increment addressing mode. */
5285 /* Don't do this for loops entered at the bottom, to avoid
5286 this invalid transformation:
5287 jmp L; -> jmp L;
5288 TOP: TOP:
5289 use giv use giv
5290 L: inc giv
5291 inc biv L:
5292 test biv test giv
5293 cbr TOP cbr TOP
5295 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
5296 && bl->biv->always_executed && ! bl->biv->maybe_multiple
5297 /* We don't handle reversed biv's because bl->biv->insn
5298 does not have a valid INSN_LUID. */
5299 && ! bl->reversed
5300 && v->always_executed && ! v->maybe_multiple
5301 && INSN_UID (v->insn) < max_uid_for_loop
5302 && !loop->top)
5304 /* If other giv's have been combined with this one, then
5305 this will work only if all uses of the other giv's occur
5306 before this giv's insn. This is difficult to check.
5308 We simplify this by looking for the common case where
5309 there is one DEST_REG giv, and this giv's insn is the
5310 last use of the dest_reg of that DEST_REG giv. If the
5311 increment occurs after the address giv, then we can
5312 perform the optimization. (Otherwise, the increment
5313 would have to go before other_giv, and we would not be
5314 able to combine it with the address giv to get an
5315 auto-inc address.) */
5316 if (v->combined_with)
5318 struct induction *other_giv = 0;
5320 for (tv = bl->giv; tv; tv = tv->next_iv)
5321 if (tv->same == v)
5323 if (other_giv)
5324 break;
5325 else
5326 other_giv = tv;
5328 if (! tv && other_giv
5329 && REGNO (other_giv->dest_reg) < max_reg_before_loop
5330 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
5331 == INSN_UID (v->insn))
5332 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
5333 auto_inc_opt = 1;
5335 /* Check for case where increment is before the address
5336 giv. Do this test in "loop order". */
5337 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
5338 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5339 || (INSN_LUID (bl->biv->insn)
5340 > INSN_LUID (loop->scan_start))))
5341 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5342 && (INSN_LUID (loop->scan_start)
5343 < INSN_LUID (bl->biv->insn))))
5344 auto_inc_opt = -1;
5345 else
5346 auto_inc_opt = 1;
5348 #ifdef HAVE_cc0
5350 rtx prev;
5352 /* We can't put an insn immediately after one setting
5353 cc0, or immediately before one using cc0. */
5354 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
5355 || (auto_inc_opt == -1
5356 && (prev = prev_nonnote_insn (v->insn)) != 0
5357 && INSN_P (prev)
5358 && sets_cc0_p (PATTERN (prev))))
5359 auto_inc_opt = 0;
5361 #endif
5363 if (auto_inc_opt)
5364 v->auto_inc_opt = 1;
5366 #endif
5368 /* For each place where the biv is incremented, add an insn
5369 to increment the new, reduced reg for the giv. */
5370 for (tv = bl->biv; tv; tv = tv->next_iv)
5372 rtx insert_before;
5374 /* Skip if location is the same as a previous one. */
5375 if (tv->same)
5376 continue;
5377 if (! auto_inc_opt)
5378 insert_before = NEXT_INSN (tv->insn);
5379 else if (auto_inc_opt == 1)
5380 insert_before = NEXT_INSN (v->insn);
5381 else
5382 insert_before = v->insn;
5384 if (tv->mult_val == const1_rtx)
5385 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5386 v->new_reg, v->new_reg,
5387 0, insert_before);
5388 else /* tv->mult_val == const0_rtx */
5389 /* A multiply is acceptable here
5390 since this is presumed to be seldom executed. */
5391 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5392 v->add_val, v->new_reg,
5393 0, insert_before);
5396 /* Add code at loop start to initialize giv's reduced reg. */
5398 loop_iv_add_mult_hoist (loop,
5399 extend_value_for_giv (v, bl->initial_value),
5400 v->mult_val, v->add_val, v->new_reg);
5406 /* Check for givs whose first use is their definition and whose
5407 last use is the definition of another giv. If so, it is likely
5408 dead and should not be used to derive another giv nor to
5409 eliminate a biv. */
5411 static void
5412 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
5414 struct induction *v;
5416 for (v = bl->giv; v; v = v->next_iv)
5418 if (v->ignore
5419 || (v->same && v->same->ignore))
5420 continue;
5422 if (v->giv_type == DEST_REG
5423 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
5425 struct induction *v1;
5427 for (v1 = bl->giv; v1; v1 = v1->next_iv)
5428 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
5429 v->maybe_dead = 1;
5435 static void
5436 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
5438 struct induction *v;
5440 for (v = bl->giv; v; v = v->next_iv)
5442 if (v->same && v->same->ignore)
5443 v->ignore = 1;
5445 if (v->ignore)
5446 continue;
5448 /* Update expression if this was combined, in case other giv was
5449 replaced. */
5450 if (v->same)
5451 v->new_reg = replace_rtx (v->new_reg,
5452 v->same->dest_reg, v->same->new_reg);
5454 /* See if this register is known to be a pointer to something. If
5455 so, see if we can find the alignment. First see if there is a
5456 destination register that is a pointer. If so, this shares the
5457 alignment too. Next see if we can deduce anything from the
5458 computational information. If not, and this is a DEST_ADDR
5459 giv, at least we know that it's a pointer, though we don't know
5460 the alignment. */
5461 if (REG_P (v->new_reg)
5462 && v->giv_type == DEST_REG
5463 && REG_POINTER (v->dest_reg))
5464 mark_reg_pointer (v->new_reg,
5465 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
5466 else if (REG_P (v->new_reg)
5467 && REG_POINTER (v->src_reg))
5469 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
5471 if (align == 0
5472 || GET_CODE (v->add_val) != CONST_INT
5473 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
5474 align = 0;
5476 mark_reg_pointer (v->new_reg, align);
5478 else if (REG_P (v->new_reg)
5479 && REG_P (v->add_val)
5480 && REG_POINTER (v->add_val))
5482 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
5484 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
5485 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
5486 align = 0;
5488 mark_reg_pointer (v->new_reg, align);
5490 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
5491 mark_reg_pointer (v->new_reg, 0);
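/* E.g. (values illustrative) stepping a pointer known to be 8-byte
   aligned by a constant 16 preserves that alignment, while stepping
   it by 4 drops to alignment 0: still a pointer, alignment unknown. */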
5493 if (v->giv_type == DEST_ADDR)
5495 /* Store reduced reg as the address in the memref where we found
5496 this giv. */
5497 if (validate_change_maybe_volatile (v->insn, v->location,
5498 v->new_reg))
5499 /* Yay, it worked! */;
5500 /* Not replaceable; emit an insn to set the original
5501 giv reg from the reduced giv. */
5502 else if (REG_P (*v->location))
5504 rtx tem;
5505 start_sequence ();
5506 tem = force_operand (v->new_reg, *v->location);
5507 if (tem != *v->location)
5508 emit_move_insn (*v->location, tem);
5509 tem = get_insns ();
5510 end_sequence ();
5511 loop_insn_emit_before (loop, 0, v->insn, tem);
5513 else if (GET_CODE (*v->location) == PLUS
5514 && REG_P (XEXP (*v->location, 0))
5515 && CONSTANT_P (XEXP (*v->location, 1)))
5517 rtx tem;
5518 start_sequence ();
5519 tem = expand_simple_binop (GET_MODE (*v->location), MINUS,
5520 v->new_reg, XEXP (*v->location, 1),
5521 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5522 emit_move_insn (XEXP (*v->location, 0), tem);
5523 tem = get_insns ();
5524 end_sequence ();
5525 loop_insn_emit_before (loop, 0, v->insn, tem);
5527 else
5529 /* If it wasn't a reg, create a pseudo and use that. */
5530 rtx reg, seq;
5531 start_sequence ();
5532 reg = force_reg (v->mode, *v->location);
5533 if (validate_change_maybe_volatile (v->insn, v->location, reg))
5535 seq = get_insns ();
5536 end_sequence ();
5537 loop_insn_emit_before (loop, 0, v->insn, seq);
5539 else
5541 end_sequence ();
5542 if (loop_dump_stream)
5543 fprintf (loop_dump_stream,
5544 "unable to reduce iv in insn %d\n",
5545 INSN_UID (v->insn));
5546 bl->all_reduced = 0;
5547 v->ignore = 1;
5548 continue;
5552 else if (v->replaceable)
5554 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5556 else
5558 rtx original_insn = v->insn;
5559 rtx note;
5561 /* Not replaceable; emit an insn to set the original giv reg from
5562 the reduced giv, same as above. */
5563 v->insn = loop_insn_emit_after (loop, 0, original_insn,
5564 gen_move_insn (v->dest_reg,
5565 v->new_reg));
5567 /* The original insn may have a REG_EQUAL note. This note is
5568 now incorrect and may result in invalid substitutions later.
5569 The original insn is dead, but may be part of a libcall
5570 sequence, which doesn't seem worth the bother of handling. */
5571 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
5572 if (note)
5573 remove_note (original_insn, note);
5576 /* When a loop is reversed, givs which depend on the reversed
5577 biv, and which are live outside the loop, must be set to their
5578 correct final value. This insn is only needed if the giv is
5579 not replaceable. The correct final value is the same as the
5580 value that the giv starts the reversed loop with. */
5581 if (bl->reversed && ! v->replaceable)
5582 loop_iv_add_mult_sink (loop,
5583 extend_value_for_giv (v, bl->initial_value),
5584 v->mult_val, v->add_val, v->dest_reg);
5585 else if (v->final_value)
5586 loop_insn_sink_or_swim (loop,
5587 gen_load_of_final_value (v->dest_reg,
5588 v->final_value));
5590 if (loop_dump_stream)
5592 fprintf (loop_dump_stream, "giv at %d reduced to ",
5593 INSN_UID (v->insn));
5594 print_simple_rtl (loop_dump_stream, v->new_reg);
5595 fprintf (loop_dump_stream, "\n");
5601 static int
5602 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
5603 struct iv_class *bl, struct induction *v,
5604 rtx test_reg)
5606 struct induction *biv;
5607 int add_cost = 0;
5608 int benefit;
5610 benefit = v->benefit;
5611 PUT_MODE (test_reg, v->mode);
5612 for (biv = bl->biv; biv; biv = biv->next_iv)
5614 int cost = iv_add_mult_cost (biv->add_val, v->mult_val,
5615 test_reg, test_reg);
5616 if (cost > add_cost)
5617 add_cost = cost;
5620 /* Reduce benefit if not replaceable, since we will insert a
5621 move-insn to replace the insn that calculates this giv. Don't do
5622 this unless the giv is a user variable, since it will often be
5623 marked non-replaceable because of the duplication of the exit
5624 code outside the loop. In such a case, the copies we insert are
5625 dead and will be deleted. So they don't have a cost. Similar
5626 situations exist. */
5627 /* ??? The new final_[bg]iv_value code does a much better job of
5628 finding replaceable giv's, and hence this code may no longer be
5629 necessary. */
5630 if (! v->replaceable && ! bl->eliminable
5631 && REG_USERVAR_P (v->dest_reg))
5632 benefit -= copy_cost;
5634 /* Decrease the benefit to count the add-insns that we will insert
5635 to increment the reduced reg for the giv. ??? This can
5636 overestimate the run-time cost of the additional insns, e.g. if
5637 there are multiple basic blocks that increment the biv, but only
5638 one of these blocks is executed during each iteration. There is
5639 no good way to detect cases like this with the current structure
5640 of the loop optimizer. This code is more accurate for
5641 determining code size than run-time benefits. */
5642 benefit -= add_cost * bl->biv_count;
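/* With hypothetical figures: a giv of benefit 6 over a biv that is
   incremented twice per iteration (biv_count == 2), each add costing
   2, nets 6 - 2 * 2 = 2 at this point. */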
5644 /* Decide whether to strength-reduce this giv or to leave the code
5645 unchanged (recompute it from the biv each time it is used). This
5646 decision can be made independently for each giv. */
5648 #ifdef AUTO_INC_DEC
5649 /* Attempt to guess whether autoincrement will handle some of the
5650 new add insns; if so, increase BENEFIT (undo the subtraction of
5651 add_cost that was done above). */
5652 if (v->giv_type == DEST_ADDR
5653 /* Increasing the benefit is risky, since this is only a guess.
5654 Avoid increasing register pressure in cases where there would
5655 be no other benefit from reducing this giv. */
5656 && benefit > 0
5657 && GET_CODE (v->mult_val) == CONST_INT)
5659 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5661 if (HAVE_POST_INCREMENT
5662 && INTVAL (v->mult_val) == size)
5663 benefit += add_cost * bl->biv_count;
5664 else if (HAVE_PRE_INCREMENT
5665 && INTVAL (v->mult_val) == size)
5666 benefit += add_cost * bl->biv_count;
5667 else if (HAVE_POST_DECREMENT
5668 && -INTVAL (v->mult_val) == size)
5669 benefit += add_cost * bl->biv_count;
5670 else if (HAVE_PRE_DECREMENT
5671 && -INTVAL (v->mult_val) == size)
5672 benefit += add_cost * bl->biv_count;
5674 #endif
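/* E.g. a DEST_ADDR giv striding 4 bytes through an array of 4-byte
   elements on a target with post-increment addressing can fold each
   add into the memory reference itself, so the subtraction above is
   undone. */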
5676 return benefit;
5680 /* Free IV structures for LOOP. */
5682 static void
5683 loop_ivs_free (struct loop *loop)
5685 struct loop_ivs *ivs = LOOP_IVS (loop);
5686 struct iv_class *iv = ivs->list;
5688 free (ivs->regs);
5690 while (iv)
5692 struct iv_class *next = iv->next;
5693 struct induction *induction;
5694 struct induction *next_induction;
5696 for (induction = iv->biv; induction; induction = next_induction)
5698 next_induction = induction->next_iv;
5699 free (induction);
5701 for (induction = iv->giv; induction; induction = next_induction)
5703 next_induction = induction->next_iv;
5704 free (induction);
5707 free (iv);
5708 iv = next;
5712 /* Look back before LOOP->START for the insn that sets REG. Return
5713 the equivalent constant if there is a REG_EQUAL note, otherwise just
5714 the SET_SRC of REG. */
5716 static rtx
5717 loop_find_equiv_value (const struct loop *loop, rtx reg)
5719 rtx loop_start = loop->start;
5720 rtx insn, set;
5721 rtx ret;
5723 ret = reg;
5724 for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn))
5726 if (LABEL_P (insn))
5727 break;
5729 else if (INSN_P (insn) && reg_set_p (reg, insn))
5731 /* We found the last insn before the loop that sets the register.
5732 If it sets the entire register, and has a REG_EQUAL note,
5733 then use the value of the REG_EQUAL note. */
5734 if ((set = single_set (insn))
5735 && (SET_DEST (set) == reg))
5737 rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5739 /* Only use the REG_EQUAL note if it is a constant.
5740 Other things, divide in particular, will cause
5741 problems later if we use them. */
5742 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
5743 && CONSTANT_P (XEXP (note, 0)))
5744 ret = XEXP (note, 0);
5745 else
5746 ret = SET_SRC (set);
5748 /* We cannot do this if it changes between the
5749 assignment and loop start though. */
5750 if (modified_between_p (ret, insn, loop_start))
5751 ret = reg;
5753 break;
5756 return ret;
5759 /* Find and return register term common to both expressions OP0 and
5760 OP1 or NULL_RTX if no such term exists. Each expression must be a
5761 REG or a PLUS of a REG. */
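/* E.g. given (plus (reg 100) (const_int 4)) and (reg 100), the common
   term is (reg 100); given (plus (reg 100) (reg 101)) and
   (plus (reg 101) (const_int -8)), it is (reg 101). Register numbers
   here are illustrative. */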
5763 static rtx
5764 find_common_reg_term (rtx op0, rtx op1)
5766 if ((REG_P (op0) || GET_CODE (op0) == PLUS)
5767 && (REG_P (op1) || GET_CODE (op1) == PLUS))
5769 rtx op00;
5770 rtx op01;
5771 rtx op10;
5772 rtx op11;
5774 if (GET_CODE (op0) == PLUS)
5775 op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
5776 else
5777 op01 = const0_rtx, op00 = op0;
5779 if (GET_CODE (op1) == PLUS)
5780 op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
5781 else
5782 op11 = const0_rtx, op10 = op1;
5784 /* Find and return common register term if present. */
5785 if (REG_P (op00) && (op00 == op10 || op00 == op11))
5786 return op00;
5787 else if (REG_P (op01) && (op01 == op10 || op01 == op11))
5788 return op01;
5791 /* No common register term found. */
5792 return NULL_RTX;
5795 /* Determine the loop iterator and calculate the number of loop
5796 iterations. Returns the exact number of loop iterations if it can
5797 be calculated, otherwise returns zero. */
5799 static unsigned HOST_WIDE_INT
5800 loop_iterations (struct loop *loop)
5802 struct loop_info *loop_info = LOOP_INFO (loop);
5803 struct loop_ivs *ivs = LOOP_IVS (loop);
5804 rtx comparison, comparison_value;
5805 rtx iteration_var, initial_value, increment, final_value;
5806 enum rtx_code comparison_code;
5807 HOST_WIDE_INT inc;
5808 unsigned HOST_WIDE_INT abs_inc;
5809 unsigned HOST_WIDE_INT abs_diff;
5810 int off_by_one;
5811 int increment_dir;
5812 int unsigned_p, compare_dir, final_larger;
5813 rtx last_loop_insn;
5814 struct iv_class *bl;
5816 loop_info->n_iterations = 0;
5817 loop_info->initial_value = 0;
5818 loop_info->initial_equiv_value = 0;
5819 loop_info->comparison_value = 0;
5820 loop_info->final_value = 0;
5821 loop_info->final_equiv_value = 0;
5822 loop_info->increment = 0;
5823 loop_info->iteration_var = 0;
5824 loop_info->iv = 0;
5826 /* We used to use prev_nonnote_insn here, but that fails because it might
5827 accidentally get the branch for a contained loop if the branch for this
5828 loop was deleted. We can only trust branches immediately before the
5829 loop_end. */
5830 last_loop_insn = PREV_INSN (loop->end);
5832 /* ??? We should probably try harder to find the jump insn
5833 at the end of the loop. The following code assumes that
5834 the last loop insn is a jump to the top of the loop. */
5835 if (!JUMP_P (last_loop_insn))
5837 if (loop_dump_stream)
5838 fprintf (loop_dump_stream,
5839 "Loop iterations: No final conditional branch found.\n");
5840 return 0;
5843 /* If there is more than a single jump to the top of the loop,
5844 we cannot (easily) determine the iteration count. */
5845 if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1)
5847 if (loop_dump_stream)
5848 fprintf (loop_dump_stream,
5849 "Loop iterations: Loop has multiple back edges.\n");
5850 return 0;
5853 /* Find the iteration variable. If the last insn is a conditional
5854 branch, and the insn before tests a register value, make that the
5855 iteration variable. */
5857 comparison = get_condition_for_loop (loop, last_loop_insn);
5858 if (comparison == 0)
5860 if (loop_dump_stream)
5861 fprintf (loop_dump_stream,
5862 "Loop iterations: No final comparison found.\n");
5863 return 0;
5866 /* ??? Get_condition may switch position of induction variable and
5867 invariant register when it canonicalizes the comparison. */
5869 comparison_code = GET_CODE (comparison);
5870 iteration_var = XEXP (comparison, 0);
5871 comparison_value = XEXP (comparison, 1);
5873 if (!REG_P (iteration_var))
5875 if (loop_dump_stream)
5876 fprintf (loop_dump_stream,
5877 "Loop iterations: Comparison not against register.\n");
5878 return 0;
5881 /* The only new registers that are created before loop iterations
5882 are givs made from biv increments or registers created by
5883 load_mems. In the latter case, it is possible that try_copy_prop
5884 will propagate a new pseudo into the old iteration register but
5885 this will be marked by having the REG_USERVAR_P bit set. */
5887 gcc_assert ((unsigned) REGNO (iteration_var) < ivs->n_regs
5888 || REG_USERVAR_P (iteration_var));
5890 /* Determine the initial value of the iteration variable, and the amount
5891 that it is incremented each loop. Use the tables constructed by
5892 the strength reduction pass to calculate these values. */
5894 /* Clear the result values, in case no answer can be found. */
5895 initial_value = 0;
5896 increment = 0;
5898 /* The iteration variable can be either a giv or a biv. Check to see
5899 which it is, and compute the variable's initial value, and increment
5900 value if possible. */
5902 /* If this is a new register, we can't handle it since we don't have any
5903 reg_iv_type entry for it. */
5904 if ((unsigned) REGNO (iteration_var) >= ivs->n_regs)
5906 if (loop_dump_stream)
5907 fprintf (loop_dump_stream,
5908 "Loop iterations: No reg_iv_type entry for iteration var.\n");
5909 return 0;
5912 /* Reject iteration variables larger than the host wide int size, since they
5913 could result in a number of iterations greater than the range of our
5914 `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
5915 else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
5916 > HOST_BITS_PER_WIDE_INT))
5918 if (loop_dump_stream)
5919 fprintf (loop_dump_stream,
5920 "Loop iterations: Iteration var rejected because mode too large.\n");
5921 return 0;
5923 else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
5925 if (loop_dump_stream)
5926 fprintf (loop_dump_stream,
5927 "Loop iterations: Iteration var not an integer.\n");
5928 return 0;
5931 /* Try swapping the comparison to identify a suitable iv. */
5932 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT
5933 && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT
5934 && REG_P (comparison_value)
5935 && REGNO (comparison_value) < ivs->n_regs)
5937 rtx temp = comparison_value;
5938 comparison_code = swap_condition (comparison_code);
5939 comparison_value = iteration_var;
5940 iteration_var = temp;
5943 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT)
5945 gcc_assert (REGNO (iteration_var) < ivs->n_regs);
5947 /* Grab initial value, only useful if it is a constant. */
5948 bl = REG_IV_CLASS (ivs, REGNO (iteration_var));
5949 initial_value = bl->initial_value;
5950 if (!bl->biv->always_executed || bl->biv->maybe_multiple)
5952 if (loop_dump_stream)
5953 fprintf (loop_dump_stream,
5954 "Loop iterations: Basic induction var not set once in each iteration.\n");
5955 return 0;
5958 increment = biv_total_increment (bl);
5960 else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT)
5962 HOST_WIDE_INT offset = 0;
5963 struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var));
5964 rtx biv_initial_value;
5966 gcc_assert (REGNO (v->src_reg) < ivs->n_regs);
5968 if (!v->always_executed || v->maybe_multiple)
5970 if (loop_dump_stream)
5971 fprintf (loop_dump_stream,
5972 "Loop iterations: General induction var not set once in each iteration.\n");
5973 return 0;
5976 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5978 /* Increment value is mult_val times the increment value of the biv. */
5980 increment = biv_total_increment (bl);
5981 if (increment)
5983 struct induction *biv_inc;
5985 increment = fold_rtx_mult_add (v->mult_val,
5986 extend_value_for_giv (v, increment),
5987 const0_rtx, v->mode);
5988 /* The caller assumes that one full increment has occurred at the
5989 first loop test. But that's not true when the biv is incremented
5990 after the giv is set (which is the usual case), e.g.:
5991 i = 6; do {;} while (i++ < 9) .
5992 Therefore, we bias the initial value by subtracting the amount of
5993 the increment that occurs between the giv set and the giv test. */
5994 for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv)
5996 if (loop_insn_first_p (v->insn, biv_inc->insn))
5998 if (REG_P (biv_inc->add_val))
6000 if (loop_dump_stream)
6001 fprintf (loop_dump_stream,
6002 "Loop iterations: Basic induction var add_val is REG %d.\n",
6003 REGNO (biv_inc->add_val));
6004 return 0;
6007 /* If we have already counted it, skip it. */
6008 if (biv_inc->same)
6009 continue;
6011 offset -= INTVAL (biv_inc->add_val);
6015 if (loop_dump_stream)
6016 fprintf (loop_dump_stream,
6017 "Loop iterations: Giv iterator, initial value bias %ld.\n",
6018 (long) offset);
6020 /* Initial value is mult_val times the biv's initial value plus
6021 add_val. Only useful if it is a constant. */
6022 biv_initial_value = extend_value_for_giv (v, bl->initial_value);
6023 initial_value
6024 = fold_rtx_mult_add (v->mult_val,
6025 plus_constant (biv_initial_value, offset),
6026 v->add_val, v->mode);
6028 else
6030 if (loop_dump_stream)
6031 fprintf (loop_dump_stream,
6032 "Loop iterations: Not basic or general induction var.\n");
6033 return 0;
6036 if (initial_value == 0)
6037 return 0;
6039 unsigned_p = 0;
6040 off_by_one = 0;
6041 switch (comparison_code)
6043 case LEU:
6044 unsigned_p = 1;
6045 case LE:
6046 compare_dir = 1;
6047 off_by_one = 1;
6048 break;
6049 case GEU:
6050 unsigned_p = 1;
6051 case GE:
6052 compare_dir = -1;
6053 off_by_one = -1;
6054 break;
6055 case EQ:
6056 /* Cannot determine loop iterations with this case. */
6057 compare_dir = 0;
6058 break;
6059 case LTU:
6060 unsigned_p = 1;
6061 case LT:
6062 compare_dir = 1;
6063 break;
6064 case GTU:
6065 unsigned_p = 1;
6066 case GT:
6067 compare_dir = -1;
6068 break;
6069 case NE:
6070 compare_dir = 0;
6071 break;
6072 default:
6073 gcc_unreachable ();
6076 /* If the comparison value is an invariant register, then try to find
6077 its value from the insns before the start of the loop. */
6079 final_value = comparison_value;
6080 if (REG_P (comparison_value)
6081 && loop_invariant_p (loop, comparison_value))
6083 final_value = loop_find_equiv_value (loop, comparison_value);
6085 /* If we don't get an invariant final value, we are better
6086 off with the original register. */
6087 if (! loop_invariant_p (loop, final_value))
6088 final_value = comparison_value;
6091 /* Calculate the approximate final value of the induction variable
6092 (on the last successful iteration). The exact final value
6093 depends on the branch operator and the increment sign. It will be
6094 wrong if the iteration variable is not incremented by one each
6095 time through the loop and (comparison_value + off_by_one -
6096 initial_value) % increment != 0.
6097 ??? Note that the final_value may overflow and thus final_larger
6098 will be bogus. A potentially infinite loop will be classified
6099 as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */
6100 if (off_by_one)
6101 final_value = plus_constant (final_value, off_by_one);
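/* E.g. for (i = 0; i <= 9; i++) compares against 9 with LE, so
   off_by_one is 1 and the approximate final value becomes 10, the
   first value that fails the test. */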
6103 /* Save the calculated values describing this loop's bounds, in case
6104 precondition_loop_p will need them later. These values can not be
6105 recalculated inside precondition_loop_p because strength reduction
6106 optimizations may obscure the loop's structure.
6108 These values are only required by precondition_loop_p and insert_bct
6109 whenever the number of iterations cannot be computed at compile time.
6110 Only the difference between final_value and initial_value is
6111 important. Note that final_value is only approximate. */
6112 loop_info->initial_value = initial_value;
6113 loop_info->comparison_value = comparison_value;
6114 loop_info->final_value = plus_constant (comparison_value, off_by_one);
6115 loop_info->increment = increment;
6116 loop_info->iteration_var = iteration_var;
6117 loop_info->comparison_code = comparison_code;
6118 loop_info->iv = bl;
6120 /* Try to determine the iteration count for loops such
6121 as for (i = init; i < init + const; i++). When running the
6122 loop optimization twice, the first pass often converts simple
6123 loops into this form. */
6125 if (REG_P (initial_value))
6127 rtx reg1;
6128 rtx reg2;
6129 rtx const2;
6131 reg1 = initial_value;
6132 if (GET_CODE (final_value) == PLUS)
6133 reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
6134 else
6135 reg2 = final_value, const2 = const0_rtx;
6137 /* Check for initial_value = reg1, final_value = reg2 + const2,
6138 where reg1 != reg2. */
6139 if (REG_P (reg2) && reg2 != reg1)
6141 rtx temp;
6143 /* Find what reg1 is equivalent to. Hopefully it will
6144 either be reg2 or reg2 plus a constant. */
6145 temp = loop_find_equiv_value (loop, reg1);
6147 if (find_common_reg_term (temp, reg2))
6148 initial_value = temp;
6149 else if (loop_invariant_p (loop, reg2))
6151 /* Find what reg2 is equivalent to. Hopefully it will
6152 either be reg1 or reg1 plus a constant. Let's ignore
6153 the latter case for now since it is not so common. */
6154 temp = loop_find_equiv_value (loop, reg2);
6156 if (temp == loop_info->iteration_var)
6157 temp = initial_value;
6158 if (temp == reg1)
6159 final_value = (const2 == const0_rtx)
6160 ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
6165 loop_info->initial_equiv_value = initial_value;
6166 loop_info->final_equiv_value = final_value;
6168 /* For EQ comparison loops, we don't have a valid final value.
6169 Check this now so that we won't leave an invalid value if we
6170 return early for any other reason. */
6171 if (comparison_code == EQ)
6172 loop_info->final_equiv_value = loop_info->final_value = 0;
6174 if (increment == 0)
6176 if (loop_dump_stream)
6177 fprintf (loop_dump_stream,
6178 "Loop iterations: Increment value can't be calculated.\n");
6179 return 0;
6182 if (GET_CODE (increment) != CONST_INT)
6184 /* If we have a REG, check to see if REG holds a constant value. */
6185 /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't
6186 clear if it is worthwhile to try to handle such RTL. */
6187 if (REG_P (increment) || GET_CODE (increment) == SUBREG)
6188 increment = loop_find_equiv_value (loop, increment);
6190 if (GET_CODE (increment) != CONST_INT)
6192 if (loop_dump_stream)
6194 fprintf (loop_dump_stream,
6195 "Loop iterations: Increment value not constant ");
6196 print_simple_rtl (loop_dump_stream, increment);
6197 fprintf (loop_dump_stream, ".\n");
6199 return 0;
6201 loop_info->increment = increment;
6204 if (GET_CODE (initial_value) != CONST_INT)
6206 if (loop_dump_stream)
6208 fprintf (loop_dump_stream,
6209 "Loop iterations: Initial value not constant ");
6210 print_simple_rtl (loop_dump_stream, initial_value);
6211 fprintf (loop_dump_stream, ".\n");
6213 return 0;
6215 else if (GET_CODE (final_value) != CONST_INT)
6217 if (loop_dump_stream)
6219 fprintf (loop_dump_stream,
6220 "Loop iterations: Final value not constant ");
6221 print_simple_rtl (loop_dump_stream, final_value);
6222 fprintf (loop_dump_stream, ".\n");
6224 return 0;
6226 else if (comparison_code == EQ)
6228 rtx inc_once;
6230 if (loop_dump_stream)
6231 fprintf (loop_dump_stream, "Loop iterations: EQ comparison loop.\n");
6233 inc_once = gen_int_mode (INTVAL (initial_value) + INTVAL (increment),
6234 GET_MODE (iteration_var));
6236 if (inc_once == final_value)
6238 /* The iterator value once through the loop is equal to the
6239 comparison value. Either we have an infinite loop, or
6240 we'll loop twice. */
6241 if (increment == const0_rtx)
6242 return 0;
6243 loop_info->n_iterations = 2;
6245 else
6246 loop_info->n_iterations = 1;
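/* E.g. with initial value 5 and increment 1, an EQ loop testing
   i == 6 matches inc_once and runs twice (5 -> 6 -> 7), while one
   testing i == 9 fails its first test and runs once. */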
6248 if (GET_CODE (loop_info->initial_value) == CONST_INT)
6249 loop_info->final_value
6250 = gen_int_mode ((INTVAL (loop_info->initial_value)
6251 + loop_info->n_iterations * INTVAL (increment)),
6252 GET_MODE (iteration_var));
6253 else
6254 loop_info->final_value
6255 = plus_constant (loop_info->initial_value,
6256 loop_info->n_iterations * INTVAL (increment));
6257 loop_info->final_equiv_value
6258 = gen_int_mode ((INTVAL (initial_value)
6259 + loop_info->n_iterations * INTVAL (increment)),
6260 GET_MODE (iteration_var));
6261 return loop_info->n_iterations;
6264 /* final_larger is 1 if the final value is larger, 0 if they are equal, otherwise -1. */
6265 if (unsigned_p)
6266 final_larger
6267 = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6268 > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
6269 - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6270 < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
6271 else
6272 final_larger = (INTVAL (final_value) > INTVAL (initial_value))
6273 - (INTVAL (final_value) < INTVAL (initial_value));
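/* The difference of the two comparisons is the usual sign idiom,
   yielding 1, 0 or -1; e.g. a final value of 10 against an initial
   value of 0 gives 1 - 0 = 1. */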
6275 if (INTVAL (increment) > 0)
6276 increment_dir = 1;
6277 else if (INTVAL (increment) == 0)
6278 increment_dir = 0;
6279 else
6280 increment_dir = -1;
6282 /* There are 27 different cases: compare_dir = -1, 0, 1;
6283 final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
6284 There are 4 normal cases, 4 reverse cases (where the iteration variable
6285 will overflow before the loop exits), 4 infinite loop cases, and 15
6286 immediate exit (0 or 1 iteration depending on loop type) cases.
6287 Only try to optimize the normal cases. */
6289 /* (compare_dir/final_larger/increment_dir)
6290 Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
6291 Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
6292 Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
6293 Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
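/* E.g. for (i = 0; i < 10; i++) is (1/1/1), a normal case;
   for (i = 10; i > 0; i++) is (-1/-1/1), a reverse case; and
   for (i = 0; i != 10; i += 3) is (0/1/1), normal here but rejected
   by the NE divisibility test below, since 10 % 3 != 0. */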
6295 /* ?? If the meaning of reverse loops (where the iteration variable
6296 will overflow before the loop exits) is undefined, then we could
6297 eliminate all of these special checks, and just always assume
6298 the loops are normal/immediate/infinite. Note that this means
6299 the sign of increment_dir does not have to be known. Also,
6300 since it does not really hurt if immediate exit loops or infinite loops
6301 are optimized, those cases could be ignored as well, and hence all
6302 loops could be optimized.
6304 According to the ANSI spec, the reverse loop case result is undefined,
6305 because the action on overflow is undefined.
6307 See also the special test for NE loops below. */
6309 if (final_larger == increment_dir && final_larger != 0
6310 && (final_larger == compare_dir || compare_dir == 0))
6311 /* Normal case. */
6313 else
6315 if (loop_dump_stream)
6316 fprintf (loop_dump_stream, "Loop iterations: Not normal loop.\n");
6317 return 0;
6320 /* Calculate the number of iterations; final_value is only an approximation,
6321 so correct for that. Note that abs_diff and n_iterations are
6322 unsigned, because they can be as large as 2^n - 1. */
6324 inc = INTVAL (increment);
6325 gcc_assert (inc);
6326 if (inc > 0)
6328 abs_diff = INTVAL (final_value) - INTVAL (initial_value);
6329 abs_inc = inc;
6331 else
6333 abs_diff = INTVAL (initial_value) - INTVAL (final_value);
6334 abs_inc = -inc;
6337 /* Given that iteration_var is going to iterate over its own mode,
6338 not HOST_WIDE_INT, disregard higher bits that might have come
6339 into the picture due to sign extension of initial and final
6340 values. */
6341 abs_diff &= ((unsigned HOST_WIDE_INT) 1
6342 << (GET_MODE_BITSIZE (GET_MODE (iteration_var)) - 1)
6343 << 1) - 1;
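/* The double shift computes the mode mask (2 ** bitsize) - 1 without
   shifting by the full width of HOST_WIDE_INT, which would be
   undefined when the mode is that wide; e.g. an 8-bit mode yields
   0xff. */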
6345 /* For NE tests, make sure that the iteration variable won't miss
6346 the final value. If abs_diff mod abs_incr is not zero, then the
6347 iteration variable will overflow before the loop exits, and we
6348 can not calculate the number of iterations. */
6349 if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
6350 return 0;
6352 /* Note that the number of iterations could be calculated using
6353 (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
6354 handle potential overflow of the summation. */
6355 loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
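/* E.g. initial value 0, final value 10, increment 3 under LT:
   abs_diff = 10, abs_inc = 3, so n_iterations = 3 + 1 = 4,
   covering i = 0, 3, 6 and 9. */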
6356 return loop_info->n_iterations;
6359 /* Perform strength reduction and induction variable elimination.
6361 Pseudo registers created during this function will be beyond the
6362 last valid index in several tables including
6363 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
6364 problem here, because the added registers cannot be givs outside of
6365 their loop, and hence will never be reconsidered. But scan_loop
6366 must check regnos to make sure they are in bounds. */
6368 static void
6369 strength_reduce (struct loop *loop, int flags)
6371 struct loop_info *loop_info = LOOP_INFO (loop);
6372 struct loop_regs *regs = LOOP_REGS (loop);
6373 struct loop_ivs *ivs = LOOP_IVS (loop);
6374 rtx p;
6375 /* Temporary list pointer for traversing ivs->list. */
6376 struct iv_class *bl;
6377 /* Ratio of extra register life span we can justify
6378 for saving an instruction. More if the loop doesn't call subroutines,
6379 since in that case saving an insn makes more difference
6380 and more registers are available. */
6381 /* ??? could set this to last value of threshold in move_movables */
6382 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
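/* E.g. (hypothetical target) with 29 non-fixed registers and no call
   in the loop, threshold = 2 * (3 + 29) = 64. */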
6383 /* Map of pseudo-register replacements. */
6384 rtx *reg_map = NULL;
6385 int reg_map_size;
6386 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
6387 int insn_count = count_insns_in_loop (loop);
6389 addr_placeholder = gen_reg_rtx (Pmode);
6391 ivs->n_regs = max_reg_before_loop;
6392 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
6394 /* Find all BIVs in loop. */
6395 loop_bivs_find (loop);
6397 /* Exit if there are no bivs. */
6398 if (! ivs->list)
6400 loop_ivs_free (loop);
6401 return;
6404 /* Determine how BIVs are initialized by looking through the pre-header
6405 extended basic block. */
6406 loop_bivs_init_find (loop);
6408 /* Look at each biv and see if we can say anything better about its
6409 initial value from any initializing insns set up above. */
6410 loop_bivs_check (loop);
6412 /* Search the loop for general induction variables. */
6413 loop_givs_find (loop);
6415 /* Try to calculate and save the number of loop iterations. This is
6416 set to zero if the actual number can not be calculated. This must
6417 be called after all giv's have been identified, since otherwise it may
6418 fail if the iteration variable is a giv. */
6419 loop_iterations (loop);
6421 #ifdef HAVE_prefetch
6422 if (flags & LOOP_PREFETCH)
6423 emit_prefetch_instructions (loop);
6424 #endif
6426 /* Now for each giv for which we still don't know whether or not it is
6427 replaceable, check to see if it is replaceable because its final value
6428 can be calculated. This must be done after loop_iterations is called,
6429 so that final_giv_value will work correctly. */
6430 loop_givs_check (loop);
6432 /* Try to prove that the loop counter variable (if any) is always
6433 nonnegative; if so, record that fact with a REG_NONNEG note
6434 so that "decrement and branch until zero" insn can be used. */
6435 check_dbra_loop (loop, insn_count);
6437 /* Create reg_map to hold substitutions for replaceable giv regs.
6438 Some givs might have been made from biv increments, so look at
6439 ivs->reg_iv_type for a suitable size. */
6440 reg_map_size = ivs->n_regs;
6441 reg_map = xcalloc (reg_map_size, sizeof (rtx));
6443 /* Examine each iv class for feasibility of strength reduction/induction
6444 variable elimination. */
6446 for (bl = ivs->list; bl; bl = bl->next)
6448 struct induction *v;
6449 int benefit;
6451 /* Test whether it will be possible to eliminate this biv
6452 provided all givs are reduced. */
6453 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
6455 /* This will be true at the end, if all givs which depend on this
6456 biv have been strength reduced.
6457 We can't (currently) eliminate the biv unless this is so. */
6458 bl->all_reduced = 1;
6460 /* Check each extension dependent giv in this class to see if its
6461 root biv is safe from wrapping in the interior mode. */
6462 check_ext_dependent_givs (loop, bl);
6464 /* Combine all giv's for this iv_class. */
6465 combine_givs (regs, bl);
6467 for (v = bl->giv; v; v = v->next_iv)
6469 struct induction *tv;
6471 if (v->ignore || v->same)
6472 continue;
6474 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
6476 /* If an insn is not to be strength reduced, then set its ignore
6477 flag, and clear bl->all_reduced. */
6479 /* A giv that depends on a reversed biv must be reduced if it is
6480 used after the loop exit, otherwise, it would have the wrong
6481 value after the loop exit. To make it simple, just reduce all
6482 of such giv's whether or not we know they are used after the loop
6483 exit. */
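/* The test below keeps the giv only when the saving outweighs the
   loop size; with hypothetical figures, a giv with lifetime 8,
   benefit 2 and threshold 64 scores 8 * 64 * 2 = 1024, easily
   beating an insn_count of 40. */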
6485 if (v->lifetime * threshold * benefit < insn_count
6486 && ! bl->reversed)
6488 if (loop_dump_stream)
6489 fprintf (loop_dump_stream,
6490 "giv of insn %d not worth while, %d vs %d.\n",
6491 INSN_UID (v->insn),
6492 v->lifetime * threshold * benefit, insn_count);
6493 v->ignore = 1;
6494 bl->all_reduced = 0;
6496 else if (!v->always_computable
6497 && (may_trap_or_fault_p (v->add_val)
6498 || may_trap_or_fault_p (v->mult_val)))
6500 if (loop_dump_stream)
6501 fprintf (loop_dump_stream,
6502 "giv of insn %d: not always computable.\n",
6503 INSN_UID (v->insn));
6504 v->ignore = 1;
6505 bl->all_reduced = 0;
6507 else
6509 /* Check that we can increment the reduced giv without a
6510 multiply insn. If not, reject it. */
6512 for (tv = bl->biv; tv; tv = tv->next_iv)
6513 if (tv->mult_val == const1_rtx
6514 && ! product_cheap_p (tv->add_val, v->mult_val))
6516 if (loop_dump_stream)
6517 fprintf (loop_dump_stream,
6518 "giv of insn %d: would need a multiply.\n",
6519 INSN_UID (v->insn));
6520 v->ignore = 1;
6521 bl->all_reduced = 0;
6522 break;
6527 /* Check for givs whose first use is their definition and whose
6528 last use is the definition of another giv. Such a giv is likely
6529 dead and should not be used to derive another giv nor to
6530 eliminate a biv. */
6531 loop_givs_dead_check (loop, bl);
6533 /* Reduce each giv that we decided to reduce. */
6534 loop_givs_reduce (loop, bl);
6536 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
6537 as not reduced.
6539 For each giv register that can be reduced now: if replaceable,
6540 substitute reduced reg wherever the old giv occurs;
6541 else add new move insn "giv_reg = reduced_reg". */
6542 loop_givs_rescan (loop, bl, reg_map);
6544 /* All the givs based on the biv bl have been reduced if they
6545 merit it. */
6547 /* For each giv not marked as maybe dead that has been combined with a
6548 second giv, clear any "maybe dead" mark on that second giv.
6549 v->new_reg will either be or refer to the register of the giv it
6550 combined with.
6552 Doing this clearing avoids problems in biv elimination where
6553 a giv's new_reg is a complex value that can't be put in the
6554 insn but the giv combined with (with a reg as new_reg) is
6555 marked maybe_dead. Since the register will be used in either
6556 case, we'd prefer it be used from the simpler giv. */
6558 for (v = bl->giv; v; v = v->next_iv)
6559 if (! v->maybe_dead && v->same)
6560 v->same->maybe_dead = 0;
6562 /* Try to eliminate the biv, if it is a candidate.
6563 This won't work if ! bl->all_reduced,
6564 since the givs we planned to use might not have been reduced.
6566 We have to be careful that we didn't initially think we could
6567 eliminate this biv because of a giv that we now think may be
6568 dead and shouldn't be used as a biv replacement.
6570 Also, there is the possibility that we may have a giv that looks
6571 like it can be used to eliminate a biv, but the resulting insn
6572 isn't valid. This can happen, for example, on the 88k, where a
6573 JUMP_INSN can compare a register only with zero. Attempts to
6574 replace it with a compare with a constant will fail.
6576 Note that in cases where this call fails, we may have replaced some
6577 of the occurrences of the biv with a giv, but no harm was done in
6578 doing so in the rare cases where it can occur. */
6580 if (bl->all_reduced == 1 && bl->eliminable
6581 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
6583 /* ?? If we created a new test to bypass the loop entirely,
6584 or otherwise drop straight in, based on this test, then
6585 we might want to rewrite it also. This way some later
6586 pass has more hope of removing the initialization of this
6587 biv entirely. */
6589 /* If final_value != 0, then the biv may be used after loop end
6590 and we must emit an insn to set it just in case.
6592 Reversed bivs already have an insn after the loop setting their
6593 value, so we don't need another one. We can't calculate the
6594 proper final value for such a biv here anyway. */
6595 if (bl->final_value && ! bl->reversed)
6596 loop_insn_sink_or_swim (loop,
6597 gen_load_of_final_value (bl->biv->dest_reg,
6598 bl->final_value));
6600 if (loop_dump_stream)
6601 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
6602 bl->regno);
6604 /* See above note wrt final_value. But since we couldn't eliminate
6605 the biv, we must set the value after the loop instead of before. */
6606 else if (bl->final_value && ! bl->reversed)
6607 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
6608 bl->final_value));
6611 /* Go through all the instructions in the loop, making all the
6612 register substitutions scheduled in REG_MAP. */
6614 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
6615 if (INSN_P (p))
6617 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
6618 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
6619 INSN_CODE (p) = -1;
6622 if (loop_dump_stream)
6623 fprintf (loop_dump_stream, "\n");
6625 loop_ivs_free (loop);
6626 if (reg_map)
6627 free (reg_map);
6630 /* Record all basic induction variables calculated in the insn. */
6631 static rtx
6632 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
6633 int maybe_multiple)
6635 struct loop_ivs *ivs = LOOP_IVS (loop);
6636 rtx set;
6637 rtx dest_reg;
6638 rtx inc_val;
6639 rtx mult_val;
6640 rtx *location;
6642 if (NONJUMP_INSN_P (p)
6643 && (set = single_set (p))
6644 && REG_P (SET_DEST (set)))
6646 dest_reg = SET_DEST (set);
6647 if (REGNO (dest_reg) < max_reg_before_loop
6648 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
6649 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
6651 if (basic_induction_var (loop, SET_SRC (set),
6652 GET_MODE (SET_SRC (set)),
6653 dest_reg, p, &inc_val, &mult_val,
6654 &location, VOIDmode))
6656 /* It is a possible basic induction variable.
6657 Create and initialize an induction structure for it. */
6659 struct induction *v = xmalloc (sizeof (struct induction));
6661 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
6662 not_every_iteration, maybe_multiple);
6663 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
6665 else if (REGNO (dest_reg) < ivs->n_regs)
6666 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
6669 return p;
6672 /* Record all givs calculated in the insn.
6673 A register is a giv if: it is only set once, it is a function of a
6674 biv and a constant (or invariant), and it is not a biv. */
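/* E.g. given a biv i, an insn computing x = i * 4 + 8 makes x a giv
   with src_reg i, mult_val 4 and add_val 8. */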
6675 static rtx
6676 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
6677 int maybe_multiple)
6679 struct loop_regs *regs = LOOP_REGS (loop);
6681 rtx set;
6682 /* Look for a general induction variable in a register. */
6683 if (NONJUMP_INSN_P (p)
6684 && (set = single_set (p))
6685 && REG_P (SET_DEST (set))
6686 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
6688 rtx src_reg;
6689 rtx dest_reg;
6690 rtx add_val;
6691 rtx mult_val;
6692 rtx ext_val;
6693 int benefit;
6694 rtx regnote = 0;
6695 rtx last_consec_insn;
6697 dest_reg = SET_DEST (set);
6698 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
6699 return p;
6701 if (/* SET_SRC is a giv. */
6702 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
6703 &mult_val, &ext_val, 0, &benefit, VOIDmode)
6704 /* Equivalent expression is a giv. */
6705 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
6706 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
6707 &add_val, &mult_val, &ext_val, 0,
6708 &benefit, VOIDmode)))
6709 /* Don't try to handle any regs made by loop optimization.
6710 We have nothing on them in regno_first_uid, etc. */
6711 && REGNO (dest_reg) < max_reg_before_loop
6712 /* Don't recognize a BASIC_INDUCT_VAR here. */
6713 && dest_reg != src_reg
6714 /* This must be the only place where the register is set. */
6715 && (regs->array[REGNO (dest_reg)].n_times_set == 1
6716 /* or all sets must be consecutive and make a giv. */
6717 || (benefit = consec_sets_giv (loop, benefit, p,
6718 src_reg, dest_reg,
6719 &add_val, &mult_val, &ext_val,
6720 &last_consec_insn))))
6722 struct induction *v = xmalloc (sizeof (struct induction));
6724 /* If this is a library call, increase benefit. */
6725 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6726 benefit += libcall_benefit (p);
6728 /* Skip the consecutive insns, if there are any. */
6729 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
6730 p = last_consec_insn;
6732 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
6733 ext_val, benefit, DEST_REG, not_every_iteration,
6734 maybe_multiple, (rtx*) 0);
6739 /* Look for givs which are memory addresses. */
6740 if (NONJUMP_INSN_P (p))
6741 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
6742 maybe_multiple);
6744 /* Update the status of whether giv can derive other givs. This can
6745 change when we pass a label or an insn that updates a biv. */
6746 if (INSN_P (p) || LABEL_P (p))
6747 update_giv_derive (loop, p);
6748 return p;
6751 /* Return 1 if X is a valid source for an initial value (or as value being
6752 compared against in an initial test).
6754 X must be either a register or constant and must not be clobbered between
6755 the current insn and the start of the loop.
6757 INSN is the insn containing X. */
6759 static int
6760 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
6762 if (CONSTANT_P (x))
6763 return 1;
6765 /* Only consider pseudos we know about initialized in insns whose luids
6766 we know. */
6767 if (!REG_P (x)
6768 || REGNO (x) >= max_reg_before_loop)
6769 return 0;
6771 /* Don't use call-clobbered registers across a call which clobbers it. On
6772 some machines, don't use any hard registers at all. */
6773 if (REGNO (x) < FIRST_PSEUDO_REGISTER
6774 && (SMALL_REGISTER_CLASSES
6775 || (call_seen && call_used_regs[REGNO (x)])))
6776 return 0;
6778 /* Don't use registers that have been clobbered before the start of the
6779 loop. */
6780 if (reg_set_between_p (x, insn, loop_start))
6781 return 0;
6783 return 1;
6786 /* Scan X for memory refs and check each memory address
6787 as a possible giv. INSN is the insn whose pattern X comes from.
6788 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
6789 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
6790 more than once in each loop iteration. */
6792 static void
6793 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
6794 int not_every_iteration, int maybe_multiple)
6796 int i, j;
6797 enum rtx_code code;
6798 const char *fmt;
6800 if (x == 0)
6801 return;
6803 code = GET_CODE (x);
6804 switch (code)
6806 case REG:
6807 case CONST_INT:
6808 case CONST:
6809 case CONST_DOUBLE:
6810 case SYMBOL_REF:
6811 case LABEL_REF:
6812 case PC:
6813 case CC0:
6814 case ADDR_VEC:
6815 case ADDR_DIFF_VEC:
6816 case USE:
6817 case CLOBBER:
6818 return;
6820 case MEM:
6822 rtx src_reg;
6823 rtx add_val;
6824 rtx mult_val;
6825 rtx ext_val;
6826 int benefit;
6828 /* This code used to disable creating GIVs with mult_val == 1 and
6829 add_val == 0. However, this leads to lost optimizations when
6830 it comes time to combine a set of related DEST_ADDR GIVs, since
6831 this one would not be seen. */
6833 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
6834 &mult_val, &ext_val, 1, &benefit,
6835 GET_MODE (x)))
6837 /* Found one; record it. */
6838 struct induction *v = xmalloc (sizeof (struct induction));
6840 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
6841 add_val, ext_val, benefit, DEST_ADDR,
6842 not_every_iteration, maybe_multiple, &XEXP (x, 0));
6844 v->mem = x;
6847 return;
6849 default:
6850 break;
6853 /* Recursively scan the subexpressions for other mem refs. */
6855 fmt = GET_RTX_FORMAT (code);
6856 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6857 if (fmt[i] == 'e')
6858 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
6859 maybe_multiple);
6860 else if (fmt[i] == 'E')
6861 for (j = 0; j < XVECLEN (x, i); j++)
6862 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
6863 maybe_multiple);
6866 /* Fill in the data about one biv update.
6867 V is the `struct induction' in which we record the biv. (It is
6868 allocated by the caller, with xmalloc.)
6869 INSN is the insn that sets it.
6870 DEST_REG is the biv's reg.
6872 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
6873 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
6874 being set to INC_VAL.
6876 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
6877 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
6878 can be executed more than once per iteration. If MAYBE_MULTIPLE
6879 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
6880 executed exactly once per iteration. */
6882 static void
6883 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
6884 rtx inc_val, rtx mult_val, rtx *location,
6885 int not_every_iteration, int maybe_multiple)
6887 struct loop_ivs *ivs = LOOP_IVS (loop);
6888 struct iv_class *bl;
6890 v->insn = insn;
6891 v->src_reg = dest_reg;
6892 v->dest_reg = dest_reg;
6893 v->mult_val = mult_val;
6894 v->add_val = inc_val;
6895 v->ext_dependent = NULL_RTX;
6896 v->location = location;
6897 v->mode = GET_MODE (dest_reg);
6898 v->always_computable = ! not_every_iteration;
6899 v->always_executed = ! not_every_iteration;
6900 v->maybe_multiple = maybe_multiple;
6901 v->same = 0;
6903 /* Add this to the reg's iv_class, creating a class
6904 if this is the first incrementation of the reg. */
6906 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
6907 if (bl == 0)
6909 /* Create and initialize new iv_class. */
6911 bl = xmalloc (sizeof (struct iv_class));
6913 bl->regno = REGNO (dest_reg);
6914 bl->biv = 0;
6915 bl->giv = 0;
6916 bl->biv_count = 0;
6917 bl->giv_count = 0;
6919 /* Set initial value to the reg itself. */
6920 bl->initial_value = dest_reg;
6921 bl->final_value = 0;
6922 /* We haven't seen the initializing insn yet. */
6923 bl->init_insn = 0;
6924 bl->init_set = 0;
6925 bl->initial_test = 0;
6926 bl->incremented = 0;
6927 bl->eliminable = 0;
6928 bl->nonneg = 0;
6929 bl->reversed = 0;
6930 bl->total_benefit = 0;
6932 /* Add this class to ivs->list. */
6933 bl->next = ivs->list;
6934 ivs->list = bl;
6936 /* Put it in the array of biv register classes. */
6937 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
6939 else
6941 /* Check if location is the same as a previous one. */
6942 struct induction *induction;
6943 for (induction = bl->biv; induction; induction = induction->next_iv)
6944 if (location == induction->location)
6946 v->same = induction;
6947 break;
6951 /* Update IV_CLASS entry for this biv. */
6952 v->next_iv = bl->biv;
6953 bl->biv = v;
6954 bl->biv_count++;
6955 if (mult_val == const1_rtx)
6956 bl->incremented = 1;
6958 if (loop_dump_stream)
6959 loop_biv_dump (v, loop_dump_stream, 0);
6962 /* Fill in the data about one giv.
6963 V is the `struct induction' in which we record the giv. (It is
6964 allocated by the caller, with xmalloc.)
6965 INSN is the insn that sets it.
6966 BENEFIT estimates the savings from deleting this insn.
6967 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
6968 into a register or is used as a memory address.
6970 SRC_REG is the biv reg which the giv is computed from.
6971 DEST_REG is the giv's reg (if the giv is stored in a reg).
6972 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
6973 LOCATION points to the place where this giv's value appears in INSN. */
6975 static void
6976 record_giv (const struct loop *loop, struct induction *v, rtx insn,
6977 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
6978 rtx ext_val, int benefit, enum g_types type,
6979 int not_every_iteration, int maybe_multiple, rtx *location)
6981 struct loop_ivs *ivs = LOOP_IVS (loop);
6982 struct induction *b;
6983 struct iv_class *bl;
6984 rtx set = single_set (insn);
6985 rtx temp;
6987 /* Attempt to prove constantness of the values. Don't let simplify_rtx
6988 undo the MULT canonicalization that we performed earlier. */
6989 temp = simplify_rtx (add_val);
6990 if (temp
6991 && ! (GET_CODE (add_val) == MULT
6992 && GET_CODE (temp) == ASHIFT))
6993 add_val = temp;
6995 v->insn = insn;
6996 v->src_reg = src_reg;
6997 v->giv_type = type;
6998 v->dest_reg = dest_reg;
6999 v->mult_val = mult_val;
7000 v->add_val = add_val;
7001 v->ext_dependent = ext_val;
7002 v->benefit = benefit;
7003 v->location = location;
7004 v->cant_derive = 0;
7005 v->combined_with = 0;
7006 v->maybe_multiple = maybe_multiple;
7007 v->maybe_dead = 0;
7008 v->derive_adjustment = 0;
7009 v->same = 0;
7010 v->ignore = 0;
7011 v->new_reg = 0;
7012 v->final_value = 0;
7013 v->same_insn = 0;
7014 v->auto_inc_opt = 0;
7015 v->shared = 0;
7017 /* The v->always_computable field is used in update_giv_derive, to
7018 determine whether a giv can be used to derive another giv. For a
7019 DEST_REG giv, INSN computes a new value for the giv, so its value
7020 isn't computable if INSN isn't executed every iteration.
7021 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
7022 it does not compute a new value. Hence the value is always computable
7023 regardless of whether INSN is executed each iteration. */
7025 if (type == DEST_ADDR)
7026 v->always_computable = 1;
7027 else
7028 v->always_computable = ! not_every_iteration;
7030 v->always_executed = ! not_every_iteration;
7032 if (type == DEST_ADDR)
7034 v->mode = GET_MODE (*location);
7035 v->lifetime = 1;
7037 else /* type == DEST_REG */
7039 v->mode = GET_MODE (SET_DEST (set));
7041 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
7043 /* If the lifetime is zero, it means that this register is
7044 really a dead store. So mark this as a giv that can be
7045 ignored. This will not prevent the biv from being eliminated. */
7046 if (v->lifetime == 0)
7047 v->ignore = 1;
7049 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7050 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7053 /* Add the giv to the class of givs computed from one biv. */
7055 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
7056 gcc_assert (bl);
7057 v->next_iv = bl->giv;
7058 bl->giv = v;
7060 /* Don't count DEST_ADDR. This is supposed to count the number of
7061 insns that calculate givs. */
7062 if (type == DEST_REG)
7063 bl->giv_count++;
7064 bl->total_benefit += benefit;
7066 if (type == DEST_ADDR)
7068 v->replaceable = 1;
7069 v->not_replaceable = 0;
7071 else
7073 /* The giv can be replaced outright by the reduced register only if all
7074 of the following conditions are true:
7075 - the insn that sets the giv is always executed on any iteration
7076 on which the giv is used at all
7077 (there are two ways to deduce this:
7078 either the insn is executed on every iteration,
7079 or all uses follow that insn in the same basic block),
7080 - the giv is not used outside the loop
7081 - no assignments to the biv occur during the giv's lifetime. */
7083 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
7084 /* Previous line always fails if INSN was moved by loop opt. */
7085 && REGNO_LAST_LUID (REGNO (dest_reg))
7086 < INSN_LUID (loop->end)
7087 && (! not_every_iteration
7088 || last_use_this_basic_block (dest_reg, insn)))
7090 /* Now check that there are no assignments to the biv within the
7091 giv's lifetime. This requires two separate checks. */
7093 /* Check each biv update, and fail if any are between the first
7094 and last use of the giv.
7096 If this loop contains an inner loop that was unrolled, then
7097 the insn modifying the biv may have been emitted by the loop
7098 unrolling code, and hence does not have a valid luid. Just
7099 mark the biv as not replaceable in this case. It is not very
7100 useful as a biv, because it is used in two different loops.
7101 It is very unlikely that we would be able to optimize the giv
7102 using this biv anyway. */
7104 v->replaceable = 1;
7105 v->not_replaceable = 0;
7106 for (b = bl->biv; b; b = b->next_iv)
7108 if (INSN_UID (b->insn) >= max_uid_for_loop
7109 || ((INSN_LUID (b->insn)
7110 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
7111 && (INSN_LUID (b->insn)
7112 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
7114 v->replaceable = 0;
7115 v->not_replaceable = 1;
7116 break;
7120 /* If there are any backwards branches that go from after the
7121 biv update to before it, then this giv is not replaceable. */
7122 if (v->replaceable)
7123 for (b = bl->biv; b; b = b->next_iv)
7124 if (back_branch_in_range_p (loop, b->insn))
7126 v->replaceable = 0;
7127 v->not_replaceable = 1;
7128 break;
7131 else
7133 /* May still be replaceable; we don't have enough info here to
7134 decide. */
7135 v->replaceable = 0;
7136 v->not_replaceable = 0;
7140 /* Record whether the add_val contains a const_int, for later use by
7141 combine_givs. */
7143 rtx tem = add_val;
7145 v->no_const_addval = 1;
7146 if (tem == const0_rtx)
7148 else if (CONSTANT_P (add_val))
7149 v->no_const_addval = 0;
7150 if (GET_CODE (tem) == PLUS)
7152 while (1)
7154 if (GET_CODE (XEXP (tem, 0)) == PLUS)
7155 tem = XEXP (tem, 0);
7156 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
7157 tem = XEXP (tem, 1);
7158 else
7159 break;
7161 if (CONSTANT_P (XEXP (tem, 1)))
7162 v->no_const_addval = 0;
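/* E.g. an add_val of (plus (reg 100) (const_int 8)) falls out of the
   walk at once, and the (const_int 8) operand clears no_const_addval;
   the register number is illustrative. */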
7166 if (loop_dump_stream)
7167 loop_giv_dump (v, loop_dump_stream, 0);
7170 /* Try to calculate the final value of the giv, the value it will have at
7171 the end of the loop. If we can do it, return that value. */
7173 static rtx
7174 final_giv_value (const struct loop *loop, struct induction *v)
7176 struct loop_ivs *ivs = LOOP_IVS (loop);
7177 struct iv_class *bl;
7178 rtx insn;
7179 rtx increment, tem;
7180 rtx seq;
7181 rtx loop_end = loop->end;
7182 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
7184 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
7186 /* The final value for givs which depend on reversed bivs must be calculated
7187 differently than for ordinary givs. In this case, there is already an
7188 insn after the loop which sets this giv's final value (if necessary),
7189 and there are no other loop exits, so we can return any value. */
7190 if (bl->reversed)
7192 if (loop_dump_stream)
7193 fprintf (loop_dump_stream,
7194 "Final giv value for %d, depends on reversed biv\n",
7195 REGNO (v->dest_reg));
7196 return const0_rtx;
7199 /* Try to calculate the final value as a function of the biv it depends
7200 upon. The only exit from the loop must be the fall through at the bottom
7201 and the insn that sets the giv must be executed on every iteration
7202 (otherwise the giv may not have its final value when the loop exits). */
7204 /* ??? Can calculate the final giv value by subtracting off the
7205 extra biv increments times the giv's mult_val. The loop must have
7206 only one exit for this to work, but the number of loop iterations
7207 does not need to be known. */
7209 if (n_iterations != 0
7210 && ! loop->exit_count
7211 && v->always_executed)
7213 /* ?? It is tempting to use the biv's value here since these insns will
7214 be put after the loop, and hence the biv will have its final value
7215 then. However, this fails if the biv is subsequently eliminated.
7216 Perhaps determine whether biv's are eliminable before trying to
7217 determine whether giv's are replaceable so that we can use the
7218 biv value here if it is not eliminable. */
7220 /* We are emitting code after the end of the loop, so we must make
7221 sure that bl->initial_value is still valid then. It will still
7222 be valid if it is invariant. */
7224 increment = biv_total_increment (bl);
7226 if (increment && loop_invariant_p (loop, increment)
7227 && loop_invariant_p (loop, bl->initial_value))
7229 /* Can calculate the loop exit value of its biv as
7230 (n_iterations * increment) + initial_value */
7232 /* The loop exit value of the giv is then
7233 (final_biv_value - extra increments) * mult_val + add_val.
7234 The extra increments are any increments to the biv which
7235 occur in the loop after the giv's value is calculated.
7236 We must search from the insn that sets the giv to the end
7237 of the loop to calculate this value. */
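/* An editor's illustration with hypothetical values: for a biv with
   initial value 0 and a single increment of 1 in a loop of N iterations,
   a giv G = biv * 4 + BASE set before that increment has final biv value
   N, one extra increment of 1 occurring after G is set, and hence final
   giv value (N - 1) * 4 + BASE.  */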
7239 /* Put the final biv value in tem. */
7240 tem = gen_reg_rtx (v->mode);
7241 record_base_value (REGNO (tem), bl->biv->add_val, 0);
7242 loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment),
7243 GEN_INT (n_iterations),
7244 extend_value_for_giv (v, bl->initial_value),
7245 tem);
7247 /* Subtract off extra increments as we find them. */
7248 for (insn = NEXT_INSN (v->insn); insn != loop_end;
7249 insn = NEXT_INSN (insn))
7251 struct induction *biv;
7253 for (biv = bl->biv; biv; biv = biv->next_iv)
7254 if (biv->insn == insn)
7256 start_sequence ();
7257 tem = expand_simple_binop (GET_MODE (tem), MINUS, tem,
7258 biv->add_val, NULL_RTX, 0,
7259 OPTAB_LIB_WIDEN);
7260 seq = get_insns ();
7261 end_sequence ();
7262 loop_insn_sink (loop, seq);
7266 /* Now calculate the giv's final value. */
7267 loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem);
7269 if (loop_dump_stream)
7270 fprintf (loop_dump_stream,
7271 "Final giv value for %d, calc from biv's value.\n",
7272 REGNO (v->dest_reg));
7274 return tem;
7278 /* Replaceable giv's should never reach here. */
7279 gcc_assert (!v->replaceable);
7281 /* Check to see if the biv is dead at all loop exits. */
7282 if (reg_dead_after_loop (loop, v->dest_reg))
7284 if (loop_dump_stream)
7285 fprintf (loop_dump_stream,
7286 "Final giv value for %d, giv dead after loop exit.\n",
7287 REGNO (v->dest_reg));
7289 return const0_rtx;
7292 return 0;
7295 /* All this does is determine whether a giv can be made replaceable because
7296 its final value can be calculated. This code cannot be part of record_giv
7297 above, because final_giv_value requires that the number of loop iterations
7298 be known, and that cannot be accurately calculated until after all givs
7299 have been identified. */
7301 static void
7302 check_final_value (const struct loop *loop, struct induction *v)
7304 rtx final_value = 0;
7306 /* DEST_ADDR givs will never reach here, because they are always marked
7307 replaceable above in record_giv. */
7309 /* The giv can be replaced outright by the reduced register only if all
7310 of the following conditions are true:
7311 - the insn that sets the giv is always executed on any iteration
7312 on which the giv is used at all
7313 (there are two ways to deduce this:
7314 either the insn is executed on every iteration,
7315 or all uses follow that insn in the same basic block),
7316 - its final value can be calculated (this condition is different
7317 from the one above in record_giv),
7318 - it's not used before it's set,
7319 - no assignments to the biv occur during the giv's lifetime. */
7321 #if 0
7322 /* This is only called now when replaceable is known to be false. */
7323 /* Clear replaceable, so that it won't confuse final_giv_value. */
7324 v->replaceable = 0;
7325 #endif
7327 if ((final_value = final_giv_value (loop, v))
7328 && (v->always_executed
7329 || last_use_this_basic_block (v->dest_reg, v->insn)))
7331 int biv_increment_seen = 0, before_giv_insn = 0;
7332 rtx p = v->insn;
7333 rtx last_giv_use;
7335 v->replaceable = 1;
7336 v->not_replaceable = 0;
7338 /* When trying to determine whether or not a biv increment occurs
7339 during the lifetime of the giv, we can ignore uses of the variable
7340 outside the loop because final_value is nonzero. Hence we cannot
7341 use regno_last_uid and regno_first_uid as above in record_giv. */
7343 /* Search the loop to determine whether any assignments to the
7344 biv occur during the giv's lifetime. Start with the insn
7345 that sets the giv, and search around the loop until we come
7346 back to that insn again.
7348 Also fail if there is a jump within the giv's lifetime that jumps
7349 to somewhere outside the lifetime but still within the loop. This
7350 catches spaghetti code where the execution order is not linear, and
7351 hence the above test fails. Here we assume that the giv lifetime
7352 does not extend from one iteration of the loop to the next, so as
7353 to make the test easier. Since the lifetime isn't known yet,
7354 this requires two loops. See also record_giv above. */
7356 last_giv_use = v->insn;
7358 while (1)
7360 p = NEXT_INSN (p);
7361 if (p == loop->end)
7363 before_giv_insn = 1;
7364 p = NEXT_INSN (loop->start);
7366 if (p == v->insn)
7367 break;
7369 if (INSN_P (p))
7371 /* It is possible for the BIV increment to use the GIV if we
7372 have a cycle. Thus we must be sure to check each insn for
7373 both BIV and GIV uses, and we must check for BIV uses
7374 first. */
7376 if (! biv_increment_seen
7377 && reg_set_p (v->src_reg, PATTERN (p)))
7378 biv_increment_seen = 1;
7380 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
7382 if (biv_increment_seen || before_giv_insn)
7384 v->replaceable = 0;
7385 v->not_replaceable = 1;
7386 break;
7388 last_giv_use = p;
7393 /* Now that the lifetime of the giv is known, check for branches
7394 from within the lifetime to outside the lifetime if it is still
7395 replaceable. */
7397 if (v->replaceable)
7399 p = v->insn;
7400 while (1)
7402 p = NEXT_INSN (p);
7403 if (p == loop->end)
7404 p = NEXT_INSN (loop->start);
7405 if (p == last_giv_use)
7406 break;
7408 if (JUMP_P (p) && JUMP_LABEL (p)
7409 && LABEL_NAME (JUMP_LABEL (p))
7410 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
7411 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
7412 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
7413 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
7415 v->replaceable = 0;
7416 v->not_replaceable = 1;
7418 if (loop_dump_stream)
7419 fprintf (loop_dump_stream,
7420 "Found branch outside giv lifetime.\n");
7422 break;
7427 /* If it is replaceable, then save the final value. */
7428 if (v->replaceable)
7429 v->final_value = final_value;
7432 if (loop_dump_stream && v->replaceable)
7433 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
7434 INSN_UID (v->insn), REGNO (v->dest_reg));
7437 /* Update the status of whether a giv can derive other givs.
7439 We need to do something special if there is or may be an update to the biv
7440 between the time the giv is defined and the time it is used to derive
7441 another giv.
7443 In addition, a giv that is only conditionally set is not allowed to
7444 derive another giv once a label has been passed.
7446 The cases we look at are when a label or an update to a biv is passed. */
7448 static void
7449 update_giv_derive (const struct loop *loop, rtx p)
7451 struct loop_ivs *ivs = LOOP_IVS (loop);
7452 struct iv_class *bl;
7453 struct induction *biv, *giv;
7454 rtx tem;
7455 int dummy;
7457 /* Search all IV classes, then all bivs, and finally all givs.
7459 There are three cases we are concerned with. First we have the situation
7460 of a giv that is only updated conditionally. In that case, it may not
7461 derive any givs after a label is passed.
7463 The second case is when a biv update occurs, or may occur, after the
7464 definition of a giv. For certain biv updates (see below) that are
7465 known to occur between the giv definition and use, we can adjust the
7466 giv definition. For others, or when the biv update is conditional,
7467 we must prevent the giv from deriving any other givs. There are two
7468 sub-cases within this case.
7470 If this is a label, we are concerned with any biv update that is done
7471 conditionally, since it may be done after the giv is defined followed by
7472 a branch here (actually, we need to pass both a jump and a label, but
7473 this extra tracking doesn't seem worth it).
7475 If this is a jump, we are concerned about any biv update that may be
7476 executed multiple times. We are actually only concerned about
7477 backward jumps, but it is probably not worth performing the test
7478 on the jump again here.
7480 If this is a biv update, we must adjust the giv status to show that a
7481 subsequent biv update was performed. If this adjustment cannot be done,
7482 the giv cannot derive further givs. */
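/* A worked example with hypothetical values: if G = B * 4 + 10 is
   computed and the biv B is then incremented by 2, a later use sees
   B' = B + 2, so G = B' * 4 + 10 - 8.  The product
   biv->add_val * giv->mult_val = 2 * 4 = 8 is what is accumulated in
   derive_adjustment below and later subtracted by simplify_giv_expr.  */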
7484 for (bl = ivs->list; bl; bl = bl->next)
7485 for (biv = bl->biv; biv; biv = biv->next_iv)
7486 if (LABEL_P (p) || JUMP_P (p)
7487 || biv->insn == p)
7489 /* Skip if location is the same as a previous one. */
7490 if (biv->same)
7491 continue;
7493 for (giv = bl->giv; giv; giv = giv->next_iv)
7495 /* If cant_derive is already true, there is no point in
7496 checking all of these conditions again. */
7497 if (giv->cant_derive)
7498 continue;
7500 /* If this giv is conditionally set and we have passed a label,
7501 it cannot derive anything. */
7502 if (LABEL_P (p) && ! giv->always_computable)
7503 giv->cant_derive = 1;
7505 /* Skip givs that have mult_val == 0, since
7506 they are really invariants. Also skip those that are
7507 replaceable, since we know their lifetime doesn't contain
7508 any biv update. */
7509 else if (giv->mult_val == const0_rtx || giv->replaceable)
7510 continue;
7512 /* The only way we can allow this giv to derive another
7513 is if this is a biv increment and we can form the product
7514 of biv->add_val and giv->mult_val. In this case, we will
7515 be able to compute a compensation. */
7516 else if (biv->insn == p)
7518 rtx ext_val_dummy;
7520 tem = 0;
7521 if (biv->mult_val == const1_rtx)
7522 tem = simplify_giv_expr (loop,
7523 gen_rtx_MULT (giv->mode,
7524 biv->add_val,
7525 giv->mult_val),
7526 &ext_val_dummy, &dummy);
7528 if (tem && giv->derive_adjustment)
7529 tem = simplify_giv_expr
7530 (loop,
7531 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
7532 &ext_val_dummy, &dummy);
7534 if (tem)
7535 giv->derive_adjustment = tem;
7536 else
7537 giv->cant_derive = 1;
7539 else if ((LABEL_P (p) && ! biv->always_computable)
7540 || (JUMP_P (p) && biv->maybe_multiple))
7541 giv->cant_derive = 1;
7546 /* Check whether an insn is an increment legitimate for a basic induction var.
7547 X is the source of insn P, or a part of it.
7548 MODE is the mode in which X should be interpreted.
7550 DEST_REG is the putative biv, also the destination of the insn.
7551 We accept patterns of these forms:
7552 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
7553 REG = INVARIANT + REG
7555 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
7556 store the additive term into *INC_VAL, and store the place where
7557 we found the additive term into *LOCATION.
7559 If X is an assignment of an invariant into DEST_REG, we set
7560 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
7562 We also want to detect a BIV when it corresponds to a variable whose
7563 mode was promoted. In that case, an increment of the variable may be
7564 a PLUS that adds a SUBREG of that variable to an invariant and then
7565 sign- or zero-extends the result of the PLUS into the variable. Or
7566 it may be a PLUS that adds the variable to an invariant, takes SUBREG
7567 of the result and then sign- or zero-extends it into the variable.
7569 Most GIVs in such cases will be in the promoted mode, since that is
7570 probably the natural computation mode (and almost certainly the mode
7571 used for addresses) on the machine. So we view the pseudo-reg containing
7572 the variable as the BIV, as if it were simply incremented.
7574 Note that treating the entire pseudo as a BIV will result in making
7575 simple increments to any GIVs based on it. However, if the variable
7576 overflows in its declared mode but not its promoted mode, the result will
7577 be incorrect. This is acceptable if the variable is signed, since
7578 overflows in such cases are undefined, but not if it is unsigned, since
7579 those overflows are defined. So we only check for SIGN_EXTEND and
7580 not ZERO_EXTEND.
7582 If we happen to detect such a promoted BIV, we set inner_mode to the
7583 mode in which the BIV is incremented.
7585 If we cannot find a biv, we return 0. */
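/* E.g. an insn whose single_set source is "r3 + r7", with DEST_REG r3
   and r7 loop-invariant, is accepted with *MULT_VAL == const1_rtx,
   *INC_VAL == r7 and *LOCATION pointing at the r7 operand.  */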
7587 static int
7588 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
7589 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
7590 rtx **location, enum machine_mode inner_mode)
7592 enum rtx_code code;
7593 rtx *argp, arg;
7594 rtx insn, last, inc;
7596 code = GET_CODE (x);
7597 *location = NULL;
7598 switch (code)
7600 case PLUS:
7601 if (rtx_equal_p (XEXP (x, 0), dest_reg)
7602 || (GET_CODE (XEXP (x, 0)) == SUBREG
7603 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
7604 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
7606 argp = &XEXP (x, 1);
7608 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
7609 || (GET_CODE (XEXP (x, 1)) == SUBREG
7610 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
7611 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
7613 argp = &XEXP (x, 0);
7615 else
7616 return 0;
7618 arg = *argp;
7619 if (loop_invariant_p (loop, arg) != 1)
7620 return 0;
7622 /* convert_modes can emit new instructions, e.g. when arg is a loop
7623 invariant MEM and dest_reg has a different mode.
7624 These instructions would be emitted after the end of the function
7625 and then *inc_val would be an uninitialized pseudo.
7626 Detect this and bail in this case.
7627 Other ways to solve this would be to introduce a convert_modes
7628 variant that is allowed to fail but not allowed to emit new
7629 instructions, to emit these instructions before the loop start and
7630 let them be garbage collected if *inc_val is never used, or to save
7631 the *inc_val initialization sequence generated here and emit it at
7632 some suitable place when *inc_val is actually used.
7633 last = get_last_insn ();
7634 if (inner_mode != VOIDmode)
7636 arg = convert_modes (inner_mode, GET_MODE (x), arg, 0);
7637 inc = convert_modes (GET_MODE (dest_reg), inner_mode, arg, 0);
7639 else
7640 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
7641 if (get_last_insn () != last)
7643 delete_insns_since (last);
7644 return 0;
7647 *inc_val = inc;
7648 *mult_val = const1_rtx;
7649 *location = argp;
7650 return 1;
7652 case SUBREG:
7653 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This
7654 will handle addition of promoted variables. */
7655 return basic_induction_var (loop, SUBREG_REG (x),
7656 GET_MODE (SUBREG_REG (x)),
7657 dest_reg, p, inc_val, mult_val,
7658 location, GET_MODE (x));
7660 case REG:
7661 /* If this register is assigned in a previous insn, look at its
7662 source, but don't go outside the loop or past a label. */
7664 /* If this sets a register to itself, we would repeat any previous
7665 biv increment if we applied this strategy blindly. */
7666 if (rtx_equal_p (dest_reg, x))
7667 return 0;
7669 insn = p;
7670 while (1)
7672 rtx set, dest;
7674 do
7675 insn = PREV_INSN (insn);
7677 while (insn && NOTE_P (insn)
7678 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7680 if (!insn)
7681 break;
7682 set = single_set (insn);
7683 if (set == 0)
7684 break;
7685 dest = SET_DEST (set);
7686 if (dest == x
7687 || (GET_CODE (dest) == SUBREG
7688 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
7689 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
7690 && SUBREG_REG (dest) == x))
7691 return basic_induction_var (loop, SET_SRC (set),
7692 (GET_MODE (SET_SRC (set)) == VOIDmode
7693 ? GET_MODE (x)
7694 : GET_MODE (SET_SRC (set))),
7695 dest_reg, insn,
7696 inc_val, mult_val,
7697 location, inner_mode);
7699 while (GET_CODE (dest) == SUBREG
7700 || GET_CODE (dest) == ZERO_EXTRACT
7701 || GET_CODE (dest) == STRICT_LOW_PART)
7702 dest = XEXP (dest, 0);
7703 if (dest == x)
7704 break;
7706 /* Fall through. */
7708 /* Can accept constant setting of biv only when inside the innermost loop.
7709 Otherwise, a biv of an inner loop may be incorrectly recognized
7710 as a biv of the outer loop,
7711 causing code to be moved INTO the inner loop. */
7712 case MEM:
7713 if (loop_invariant_p (loop, x) != 1)
7714 return 0;
7715 case CONST_INT:
7716 case SYMBOL_REF:
7717 case CONST:
7718 /* convert_modes dies if we try to convert to or from CCmode, so just
7719 exclude that case. It is very unlikely that a condition code value
7720 would be a useful iterator anyway. convert_modes dies if we try to
7721 convert a float mode to non-float or vice versa too. */
7722 if (loop->level == 1
7723 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
7724 && GET_MODE_CLASS (mode) != MODE_CC)
7726 /* Possible bug here? Perhaps we don't know the mode of X. */
7727 last = get_last_insn ();
7728 if (inner_mode != VOIDmode)
7730 x = convert_modes (inner_mode, mode, x, 0);
7731 inc = convert_modes (GET_MODE (dest_reg), inner_mode, x, 0);
7733 else
7734 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
7735 if (get_last_insn () != last)
7737 delete_insns_since (last);
7738 return 0;
7741 *inc_val = inc;
7742 *mult_val = const0_rtx;
7743 return 1;
7745 else
7746 return 0;
7748 case SIGN_EXTEND:
7749 /* Ignore this BIV if signed arithmetic overflow is defined. */
7750 if (flag_wrapv)
7751 return 0;
7752 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
7753 dest_reg, p, inc_val, mult_val,
7754 location, inner_mode);
7756 case ASHIFTRT:
7757 /* Similar, since this can be a sign extension. */
7758 if (flag_wrapv)
7759 return 0;
7760 if (rtx_equal_p (dest_reg, XEXP (x, 0)))
7761 return 0;
7763 for (insn = PREV_INSN (p);
7764 (insn && NOTE_P (insn)
7765 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7766 insn = PREV_INSN (insn))
7767 ;
7769 if (insn)
7771 rtx op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7772 rtx set = single_set (insn);
7773 enum machine_mode inner_mode;
7775 /* We're looking for sign-extension by double shift. */
7776 if (!(set
7777 && SET_DEST (set) == op0
7778 && GET_CODE (SET_SRC (set)) == ASHIFT
7779 && GET_CODE (op1) == CONST_INT
7780 && INTVAL (op1) >= 0
7781 && XEXP (SET_SRC (set), 1) == op1))
7782 return 0;
7784 mode = GET_MODE (op0);
7785 inner_mode = mode_for_size (GET_MODE_BITSIZE (mode) - INTVAL (op1),
7786 MODE_INT, 1);
7787 if (inner_mode != BLKmode)
7788 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
7789 mode, dest_reg, insn,
7790 inc_val, mult_val,
7791 location, inner_mode);
7793 return 0;
7795 default:
7796 return 0;
7800 /* A general induction variable (giv) is any quantity that is a linear
7801 function of a basic induction variable,
7802 i.e. giv = biv * mult_val + add_val.
7803 The coefficients can be any loop invariant quantity.
7804 A giv need not be computed directly from the biv;
7805 it can be computed by way of other givs. */
7807 /* Determine whether X computes a giv.
7808 If it does, return a nonzero value
7809 which is the benefit from eliminating the computation of X;
7810 set *SRC_REG to the register of the biv that it is computed from;
7811 set *ADD_VAL and *MULT_VAL to the coefficients,
7812 such that the value of X is biv * mult + add; */
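/* For example, if X simplifies to (plus (mult b 4) (const_int 8)) for
   some biv b, then *SRC_REG is set to b, *MULT_VAL to (const_int 4),
   *ADD_VAL to (const_int 8), and a nonzero benefit is returned.  */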
7814 static int
7815 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
7816 rtx *add_val, rtx *mult_val, rtx *ext_val,
7817 int is_addr, int *pbenefit,
7818 enum machine_mode addr_mode)
7820 struct loop_ivs *ivs = LOOP_IVS (loop);
7821 rtx orig_x = x;
7823 /* If this is an invariant, forget it, it isn't a giv. */
7824 if (loop_invariant_p (loop, x) == 1)
7825 return 0;
7827 *pbenefit = 0;
7828 *ext_val = NULL_RTX;
7829 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
7830 if (x == 0)
7831 return 0;
7833 switch (GET_CODE (x))
7835 case USE:
7836 case CONST_INT:
7837 /* Since this is now an invariant and wasn't before, it must be a giv
7838 with MULT_VAL == 0. It doesn't matter which BIV we associate this
7839 with. */
7840 *src_reg = ivs->list->biv->dest_reg;
7841 *mult_val = const0_rtx;
7842 *add_val = x;
7843 break;
7845 case REG:
7846 /* This is equivalent to a BIV. */
7847 *src_reg = x;
7848 *mult_val = const1_rtx;
7849 *add_val = const0_rtx;
7850 break;
7852 case PLUS:
7853 /* Either (plus (biv) (invar)) or
7854 (plus (mult (biv) (invar_1)) (invar_2)). */
7855 if (GET_CODE (XEXP (x, 0)) == MULT)
7857 *src_reg = XEXP (XEXP (x, 0), 0);
7858 *mult_val = XEXP (XEXP (x, 0), 1);
7860 else
7862 *src_reg = XEXP (x, 0);
7863 *mult_val = const1_rtx;
7865 *add_val = XEXP (x, 1);
7866 break;
7868 case MULT:
7869 /* ADD_VAL is zero. */
7870 *src_reg = XEXP (x, 0);
7871 *mult_val = XEXP (x, 1);
7872 *add_val = const0_rtx;
7873 break;
7875 default:
7876 gcc_unreachable ();
7879 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
7880 one unless they are CONST_INTs). */
7881 if (GET_CODE (*add_val) == USE)
7882 *add_val = XEXP (*add_val, 0);
7883 if (GET_CODE (*mult_val) == USE)
7884 *mult_val = XEXP (*mult_val, 0);
7886 if (is_addr)
7887 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
7888 else
7889 *pbenefit += rtx_cost (orig_x, SET);
7891 /* Always return true if this is a giv so it will be detected as such,
7892 even if the benefit is zero or negative. This allows elimination
7893 of bivs that might otherwise not be eliminated. */
7894 return 1;
7897 /* Given an expression, X, try to form it as a linear function of a biv.
7898 We will canonicalize it to be of the form
7899 (plus (mult (BIV) (invar_1))
7900 (invar_2))
7901 with possible degeneracies.
7903 The invariant expressions must each be of a form that can be used as a
7904 machine operand. We surround them with a USE rtx (a hack, but localized
7905 and certainly unambiguous!) if not a CONST_INT for simplicity in this
7906 routine; it is the caller's responsibility to strip them.
7908 If no such canonicalization is possible (i.e., two biv's are used or an
7909 expression that is neither invariant nor a biv or giv), this routine
7910 returns 0.
7912 For a nonzero return, the result will have a code of CONST_INT, USE,
7913 REG (for a BIV), PLUS, or MULT. No other codes will occur.
7915 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
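/* As an illustration, "(b + 1) * 4 - 3" with biv b canonicalizes to
   (plus (mult b 4) (const_int 1)): the MINUS becomes a PLUS of a MULT
   by -1, the multiplication distributes over the inner PLUS, and the
   constants 4 and -3 fold into a single addend.  */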
7917 static rtx sge_plus (enum machine_mode, rtx, rtx);
7918 static rtx sge_plus_constant (rtx, rtx);
7920 static rtx
7921 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
7923 struct loop_ivs *ivs = LOOP_IVS (loop);
7924 struct loop_regs *regs = LOOP_REGS (loop);
7925 enum machine_mode mode = GET_MODE (x);
7926 rtx arg0, arg1;
7927 rtx tem;
7929 /* If this is not an integer mode, or if we cannot do arithmetic in this
7930 mode, this can't be a giv. */
7931 if (mode != VOIDmode
7932 && (GET_MODE_CLASS (mode) != MODE_INT
7933 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
7934 return NULL_RTX;
7936 switch (GET_CODE (x))
7938 case PLUS:
7939 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7940 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7941 if (arg0 == 0 || arg1 == 0)
7942 return NULL_RTX;
7944 /* Put constant last, CONST_INT last if both constant. */
7945 if ((GET_CODE (arg0) == USE
7946 || GET_CODE (arg0) == CONST_INT)
7947 && ! ((GET_CODE (arg0) == USE
7948 && GET_CODE (arg1) == USE)
7949 || GET_CODE (arg1) == CONST_INT))
7950 tem = arg0, arg0 = arg1, arg1 = tem;
7952 /* Handle addition of zero, then addition of an invariant. */
7953 if (arg1 == const0_rtx)
7954 return arg0;
7955 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
7956 switch (GET_CODE (arg0))
7958 case CONST_INT:
7959 case USE:
7960 /* Adding two invariants must result in an invariant, so enclose
7961 addition operation inside a USE and return it. */
7962 if (GET_CODE (arg0) == USE)
7963 arg0 = XEXP (arg0, 0);
7964 if (GET_CODE (arg1) == USE)
7965 arg1 = XEXP (arg1, 0);
7967 if (GET_CODE (arg0) == CONST_INT)
7968 tem = arg0, arg0 = arg1, arg1 = tem;
7969 if (GET_CODE (arg1) == CONST_INT)
7970 tem = sge_plus_constant (arg0, arg1);
7971 else
7972 tem = sge_plus (mode, arg0, arg1);
7974 if (GET_CODE (tem) != CONST_INT)
7975 tem = gen_rtx_USE (mode, tem);
7976 return tem;
7978 case REG:
7979 case MULT:
7980 /* biv + invar or mult + invar. Return sum. */
7981 return gen_rtx_PLUS (mode, arg0, arg1);
7983 case PLUS:
7984 /* (a + invar_1) + invar_2. Associate. */
7985 return
7986 simplify_giv_expr (loop,
7987 gen_rtx_PLUS (mode,
7988 XEXP (arg0, 0),
7989 gen_rtx_PLUS (mode,
7990 XEXP (arg0, 1),
7991 arg1)),
7992 ext_val, benefit);
7994 default:
7995 gcc_unreachable ();
7998 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
7999 MULT to reduce cases. */
8000 if (REG_P (arg0))
8001 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
8002 if (REG_P (arg1))
8003 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
8005 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
8006 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
8007 Recurse to associate the second PLUS. */
8008 if (GET_CODE (arg1) == MULT)
8009 tem = arg0, arg0 = arg1, arg1 = tem;
8011 if (GET_CODE (arg1) == PLUS)
8012 return
8013 simplify_giv_expr (loop,
8014 gen_rtx_PLUS (mode,
8015 gen_rtx_PLUS (mode, arg0,
8016 XEXP (arg1, 0)),
8017 XEXP (arg1, 1)),
8018 ext_val, benefit);
8020 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
8021 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
8022 return NULL_RTX;
8024 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
8025 return NULL_RTX;
8027 return simplify_giv_expr (loop,
8028 gen_rtx_MULT (mode,
8029 XEXP (arg0, 0),
8030 gen_rtx_PLUS (mode,
8031 XEXP (arg0, 1),
8032 XEXP (arg1, 1))),
8033 ext_val, benefit);
8035 case MINUS:
8036 /* Handle "a - b" as "a + b * (-1)". */
8037 return simplify_giv_expr (loop,
8038 gen_rtx_PLUS (mode,
8039 XEXP (x, 0),
8040 gen_rtx_MULT (mode,
8041 XEXP (x, 1),
8042 constm1_rtx)),
8043 ext_val, benefit);
8045 case MULT:
8046 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
8047 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
8048 if (arg0 == 0 || arg1 == 0)
8049 return NULL_RTX;
8051 /* Put constant last, CONST_INT last if both constant. */
8052 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
8053 && GET_CODE (arg1) != CONST_INT)
8054 tem = arg0, arg0 = arg1, arg1 = tem;
8056 /* If second argument is not now constant, not giv. */
8057 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
8058 return NULL_RTX;
8060 /* Handle multiply by 0 or 1. */
8061 if (arg1 == const0_rtx)
8062 return const0_rtx;
8064 else if (arg1 == const1_rtx)
8065 return arg0;
8067 switch (GET_CODE (arg0))
8069 case REG:
8070 /* biv * invar. Done. */
8071 return gen_rtx_MULT (mode, arg0, arg1);
8073 case CONST_INT:
8074 /* Product of two constants. */
8075 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
8077 case USE:
8078 /* invar * invar is a giv, but attempt to simplify it somehow. */
8079 if (GET_CODE (arg1) != CONST_INT)
8080 return NULL_RTX;
8082 arg0 = XEXP (arg0, 0);
8083 if (GET_CODE (arg0) == MULT)
8085 /* (invar_0 * invar_1) * invar_2. Associate. */
8086 return simplify_giv_expr (loop,
8087 gen_rtx_MULT (mode,
8088 XEXP (arg0, 0),
8089 gen_rtx_MULT (mode,
8090 XEXP (arg0,
8091 1),
8092 arg1)),
8093 ext_val, benefit);
8095 /* Propagate the MULT expressions to the innermost nodes. */
8096 else if (GET_CODE (arg0) == PLUS)
8098 /* (invar_0 + invar_1) * invar_2. Distribute. */
8099 return simplify_giv_expr (loop,
8100 gen_rtx_PLUS (mode,
8101 gen_rtx_MULT (mode,
8102 XEXP (arg0,
8103 0),
8104 arg1),
8105 gen_rtx_MULT (mode,
8106 XEXP (arg0,
8107 1),
8108 arg1)),
8109 ext_val, benefit);
8111 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
8113 case MULT:
8114 /* (a * invar_1) * invar_2. Associate. */
8115 return simplify_giv_expr (loop,
8116 gen_rtx_MULT (mode,
8117 XEXP (arg0, 0),
8118 gen_rtx_MULT (mode,
8119 XEXP (arg0, 1),
8120 arg1)),
8121 ext_val, benefit);
8123 case PLUS:
8124 /* (a + invar_1) * invar_2. Distribute. */
8125 return simplify_giv_expr (loop,
8126 gen_rtx_PLUS (mode,
8127 gen_rtx_MULT (mode,
8128 XEXP (arg0, 0),
8129 arg1),
8130 gen_rtx_MULT (mode,
8131 XEXP (arg0, 1),
8132 arg1)),
8133 ext_val, benefit);
8135 default:
8136 gcc_unreachable ();
8139 case ASHIFT:
8140 /* Shift by constant is multiply by power of two. */
8141 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8142 return 0;
8144 return
8145 simplify_giv_expr (loop,
8146 gen_rtx_MULT (mode,
8147 XEXP (x, 0),
8148 GEN_INT ((HOST_WIDE_INT) 1
8149 << INTVAL (XEXP (x, 1)))),
8150 ext_val, benefit);
8152 case NEG:
8153 /* "-a" is "a * (-1)" */
8154 return simplify_giv_expr (loop,
8155 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
8156 ext_val, benefit);
8158 case NOT:
8159 /* "~a" is "-a - 1". Silly, but easy. */
8160 return simplify_giv_expr (loop,
8161 gen_rtx_MINUS (mode,
8162 gen_rtx_NEG (mode, XEXP (x, 0)),
8163 const1_rtx),
8164 ext_val, benefit);
8166 case USE:
8167 /* Already in proper form for invariant. */
8168 return x;
8170 case SIGN_EXTEND:
8171 case ZERO_EXTEND:
8172 case TRUNCATE:
8173 /* Conditionally recognize extensions of simple IVs. After we've
8174 computed loop traversal counts and verified the range of the
8175 source IV, we'll reevaluate this as a GIV. */
8176 if (*ext_val == NULL_RTX)
8178 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
8179 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
8181 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
8182 return arg0;
8185 goto do_default;
8187 case REG:
8188 /* If this is a new register, we can't deal with it. */
8189 if (REGNO (x) >= max_reg_before_loop)
8190 return 0;
8192 /* Check for biv or giv. */
8193 switch (REG_IV_TYPE (ivs, REGNO (x)))
8195 case BASIC_INDUCT:
8196 return x;
8197 case GENERAL_INDUCT:
8199 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
8201 /* Form expression from giv and add benefit. Ensure this giv
8202 can derive another and subtract any needed adjustment if so. */
8204 /* Increasing the benefit here is risky. The only case in which it
8205 is arguably correct is if this is the only use of V. In other
8206 cases, this will artificially inflate the benefit of the current
8207 giv, and lead to suboptimal code. Thus, it is disabled, since
8208 potentially not reducing an only marginally beneficial giv is
8209 less harmful than reducing many givs that are not really
8210 beneficial. */
8212 rtx single_use = regs->array[REGNO (x)].single_usage;
8213 if (single_use && single_use != const0_rtx)
8214 *benefit += v->benefit;
8217 if (v->cant_derive)
8218 return 0;
8220 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
8221 v->src_reg, v->mult_val),
8222 v->add_val);
8224 if (v->derive_adjustment)
8225 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
8226 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
8227 if (*ext_val)
8229 if (!v->ext_dependent)
8230 return arg0;
8232 else
8234 *ext_val = v->ext_dependent;
8235 return arg0;
8237 return 0;
8240 default:
8241 do_default:
8242 /* If it isn't an induction variable, and it is invariant, we
8243 may be able to simplify things further by looking through
8244 the bits we just moved outside the loop. */
8245 if (loop_invariant_p (loop, x) == 1)
8247 struct movable *m;
8248 struct loop_movables *movables = LOOP_MOVABLES (loop);
8250 for (m = movables->head; m; m = m->next)
8251 if (rtx_equal_p (x, m->set_dest))
8253 /* Ok, we found a match. Substitute and simplify. */
8255 /* If we match another movable, we must use that, as
8256 this one is going away. */
8257 if (m->match)
8258 return simplify_giv_expr (loop, m->match->set_dest,
8259 ext_val, benefit);
8261 /* If consec is nonzero, this is a member of a group of
8262 instructions that were moved together. We handle this
8263 case only to the point of seeking to the last insn and
8264 looking for a REG_EQUAL. Fail if we don't find one. */
8265 if (m->consec != 0)
8267 int i = m->consec;
8268 tem = m->insn;
8269 do
8271 tem = NEXT_INSN (tem);
8273 while (--i > 0);
8275 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
8276 if (tem)
8277 tem = XEXP (tem, 0);
8279 else
8281 tem = single_set (m->insn);
8282 if (tem)
8283 tem = SET_SRC (tem);
8286 if (tem)
8288 /* What we are most interested in is pointer
8289 arithmetic on invariants -- only take
8290 patterns we may be able to do something with. */
8291 if (GET_CODE (tem) == PLUS
8292 || GET_CODE (tem) == MULT
8293 || GET_CODE (tem) == ASHIFT
8294 || GET_CODE (tem) == CONST_INT
8295 || GET_CODE (tem) == SYMBOL_REF)
8297 tem = simplify_giv_expr (loop, tem, ext_val,
8298 benefit);
8299 if (tem)
8300 return tem;
8302 else if (GET_CODE (tem) == CONST
8303 && GET_CODE (XEXP (tem, 0)) == PLUS
8304 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
8305 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
8307 tem = simplify_giv_expr (loop, XEXP (tem, 0),
8308 ext_val, benefit);
8309 if (tem)
8310 return tem;
8313 break;
8316 break;
8319 /* Fall through to general case. */
8320 default:
8321 /* If invariant, return as USE (unless CONST_INT).
8322 Otherwise, not giv. */
8323 if (GET_CODE (x) == USE)
8324 x = XEXP (x, 0);
8326 if (loop_invariant_p (loop, x) == 1)
8328 if (GET_CODE (x) == CONST_INT)
8329 return x;
8330 if (GET_CODE (x) == CONST
8331 && GET_CODE (XEXP (x, 0)) == PLUS
8332 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8333 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
8334 x = XEXP (x, 0);
8335 return gen_rtx_USE (mode, x);
8337 else
8338 return 0;
8342 /* This routine folds invariants such that there is only ever one
8343 CONST_INT in the summation. It is only used by simplify_giv_expr. */
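/* E.g. folding the constant 3 into (plus (plus R S) (const_int 5))
   yields (plus (plus R S) (const_int 8)) rather than a chain
   containing two CONST_INTs.  */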
8345 static rtx
8346 sge_plus_constant (rtx x, rtx c)
8348 if (GET_CODE (x) == CONST_INT)
8349 return GEN_INT (INTVAL (x) + INTVAL (c));
8350 else if (GET_CODE (x) != PLUS)
8351 return gen_rtx_PLUS (GET_MODE (x), x, c);
8352 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8354 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8355 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
8357 else if (GET_CODE (XEXP (x, 0)) == PLUS
8358 || GET_CODE (XEXP (x, 1)) != PLUS)
8360 return gen_rtx_PLUS (GET_MODE (x),
8361 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
8363 else
8365 return gen_rtx_PLUS (GET_MODE (x),
8366 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
8370 static rtx
8371 sge_plus (enum machine_mode mode, rtx x, rtx y)
8373 while (GET_CODE (y) == PLUS)
8375 rtx a = XEXP (y, 0);
8376 if (GET_CODE (a) == CONST_INT)
8377 x = sge_plus_constant (x, a);
8378 else
8379 x = gen_rtx_PLUS (mode, x, a);
8380 y = XEXP (y, 1);
8382 if (GET_CODE (y) == CONST_INT)
8383 x = sge_plus_constant (x, y);
8384 else
8385 x = gen_rtx_PLUS (mode, x, y);
8386 return x;
8389 /* Help detect a giv that is calculated by several consecutive insns;
8390 for example,
8391 giv = biv * M
8392 giv = giv + A
8393 The caller has already identified the first insn P as having a giv as dest;
8394 we check that all other insns that set the same register follow
8395 immediately after P, that they alter nothing else,
8396 and that the result of the last is still a giv.
8398 The value is 0 if the reg set in P is not really a giv.
8399 Otherwise, the value is the amount gained by eliminating
8400 all the consecutive insns that compute the value.
8402 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
8403 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
8405 The coefficients of the ultimate giv value are stored in
8406 *MULT_VAL and *ADD_VAL. */
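/* E.g. the two-insn sequence "g = b * 4" followed by "g = g + r7",
   with r7 loop-invariant, is recognized as the single giv
   g = b * 4 + r7, and the returned benefit covers both insns.  */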
8408 static int
8409 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
8410 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
8411 rtx *ext_val, rtx *last_consec_insn)
8413 struct loop_ivs *ivs = LOOP_IVS (loop);
8414 struct loop_regs *regs = LOOP_REGS (loop);
8415 int count;
8416 enum rtx_code code;
8417 int benefit;
8418 rtx temp;
8419 rtx set;
8421 /* Indicate that this is a giv so that we can update the value produced in
8422 each insn of the multi-insn sequence.
8424 This induction structure will be used only by the call to
8425 general_induction_var below, so we can allocate it on our stack.
8426 If this is a giv, our caller will replace the induct var entry with
8427 a new induction structure. */
8428 struct induction *v;
8430 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
8431 return 0;
8433 v = alloca (sizeof (struct induction));
8434 v->src_reg = src_reg;
8435 v->mult_val = *mult_val;
8436 v->add_val = *add_val;
8437 v->benefit = first_benefit;
8438 v->cant_derive = 0;
8439 v->derive_adjustment = 0;
8440 v->ext_dependent = NULL_RTX;
8442 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
8443 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
8445 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
8447 while (count > 0)
8449 p = NEXT_INSN (p);
8450 code = GET_CODE (p);
8452 /* If libcall, skip to end of call sequence. */
8453 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
8454 p = XEXP (temp, 0);
8456 if (code == INSN
8457 && (set = single_set (p))
8458 && REG_P (SET_DEST (set))
8459 && SET_DEST (set) == dest_reg
8460 && (general_induction_var (loop, SET_SRC (set), &src_reg,
8461 add_val, mult_val, ext_val, 0,
8462 &benefit, VOIDmode)
8463 /* Giv created by equivalent expression. */
8464 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
8465 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
8466 add_val, mult_val, ext_val, 0,
8467 &benefit, VOIDmode)))
8468 && src_reg == v->src_reg)
8470 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
8471 benefit += libcall_benefit (p);
8473 count--;
8474 v->mult_val = *mult_val;
8475 v->add_val = *add_val;
8476 v->benefit += benefit;
8478 else if (code != NOTE)
8480 /* Allow insns that set something other than this giv to a
8481 constant. Such insns are needed on machines which cannot
8482 include long constants and should not disqualify a giv. */
8483 if (code == INSN
8484 && (set = single_set (p))
8485 && SET_DEST (set) != dest_reg
8486 && CONSTANT_P (SET_SRC (set)))
8487 continue;
8489 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8490 return 0;
8494 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8495 *last_consec_insn = p;
8496 return v->benefit;
8499 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8500 represented by G1. If no such expression can be found, or it is clear that
8501 it cannot possibly be a valid address, 0 is returned.
8503 To perform the computation, we note that
8504 G1 = x * v + a and
8505 G2 = y * v + b
8506 where `v' is the biv.
8508 So G2 = (y/x) * G1 + (b - a*y/x).
8510 Note that MULT = y/x.
8512 Update: A and B are now allowed to be additive expressions such that
8513 B contains all variables in A. That is, computing B-A will not require
8514 subtracting variables. */
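/* Worked example (hypothetical values): with G1 = 4*v + 4 and
   G2 = 8*v + 12, MULT is 8/4 = 2 and the result expresses G2 as
   (plus (mult G1 2) (const_int 4)), since 12 - 4*2 = 4.  */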
8516 static rtx
8517 express_from_1 (rtx a, rtx b, rtx mult)
8519 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
8521 if (mult == const0_rtx)
8522 return b;
8524 /* If MULT is not 1, we cannot handle A with non-constants, since we
8525 would then be required to subtract multiples of the registers in A.
8526 This is theoretically possible, and may even apply to some Fortran
8527 constructs, but it is a lot of work and we do not attempt it here. */
8529 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
8530 return NULL_RTX;
8532 /* In general these structures are sorted top to bottom (down the PLUS
8533 chain), but not left to right across the PLUS. If B is a higher
8534 order giv than A, we can strip one level and recurse. If A is higher
8535 order, we'll eventually bail out, but won't know that until the end.
8536 If they are the same, we'll strip one level around this loop. */
8538 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
8540 rtx ra, rb, oa, ob, tmp;
8542 ra = XEXP (a, 0), oa = XEXP (a, 1);
8543 if (GET_CODE (ra) == PLUS)
8544 tmp = ra, ra = oa, oa = tmp;
8546 rb = XEXP (b, 0), ob = XEXP (b, 1);
8547 if (GET_CODE (rb) == PLUS)
8548 tmp = rb, rb = ob, ob = tmp;
8550 if (rtx_equal_p (ra, rb))
8551 /* We matched: remove one reg completely. */
8552 a = oa, b = ob;
8553 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
8554 /* An alternate match. */
8555 a = oa, b = rb;
8556 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
8557 /* An alternate match. */
8558 a = ra, b = ob;
8559 else
8561 /* Indicates an extra register in B. Strip one level from B and
8562 recurse, hoping B was the higher order expression. */
8563 ob = express_from_1 (a, ob, mult);
8564 if (ob == NULL_RTX)
8565 return NULL_RTX;
8566 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
8570 /* Here we are at the last level of A, go through the cases hoping to
8571 get rid of everything but a constant. */
8573 if (GET_CODE (a) == PLUS)
8575 rtx ra, oa;
8577 ra = XEXP (a, 0), oa = XEXP (a, 1);
8578 if (rtx_equal_p (oa, b))
8579 oa = ra;
8580 else if (!rtx_equal_p (ra, b))
8581 return NULL_RTX;
8583 if (GET_CODE (oa) != CONST_INT)
8584 return NULL_RTX;
8586 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
8588 else if (GET_CODE (a) == CONST_INT)
8590 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
8592 else if (CONSTANT_P (a))
8594 enum machine_mode mode_a = GET_MODE (a);
8595 enum machine_mode mode_b = GET_MODE (b);
8596 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
8597 return simplify_gen_binary (MINUS, mode, b, a);
8599 else if (GET_CODE (b) == PLUS)
8601 if (rtx_equal_p (a, XEXP (b, 0)))
8602 return XEXP (b, 1);
8603 else if (rtx_equal_p (a, XEXP (b, 1)))
8604 return XEXP (b, 0);
8605 else
8606 return NULL_RTX;
8608 else if (rtx_equal_p (a, b))
8609 return const0_rtx;
8611 return NULL_RTX;
8614 static rtx
8615 express_from (struct induction *g1, struct induction *g2)
8617 rtx mult, add;
8619 /* The value that G1 will be multiplied by must be a constant integer. Also,
8620 the only chance we have of getting a valid address is if y/x (see above
8621 for notation) is also an integer. */
8622 if (GET_CODE (g1->mult_val) == CONST_INT
8623 && GET_CODE (g2->mult_val) == CONST_INT)
8625 if (g1->mult_val == const0_rtx
8626 || (g1->mult_val == constm1_rtx
8627 && INTVAL (g2->mult_val)
8628 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
8629 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
8630 return NULL_RTX;
8631 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
8633 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
8634 mult = const1_rtx;
8635 else
8637 /* ??? Find out if the one is a multiple of the other? */
8638 return NULL_RTX;
8641 add = express_from_1 (g1->add_val, g2->add_val, mult);
8642 if (add == NULL_RTX)
8644 /* Failed. If we've got a multiplication factor between G1 and G2,
8645 scale G1's addend and try again. */
8646 if (INTVAL (mult) > 1)
8648 rtx g1_add_val = g1->add_val;
8649 if (GET_CODE (g1_add_val) == MULT
8650 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
8652 HOST_WIDE_INT m;
8653 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
8654 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
8655 XEXP (g1_add_val, 0), GEN_INT (m));
8657 else
8659 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
8660 mult);
8663 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
8666 if (add == NULL_RTX)
8667 return NULL_RTX;
8669 /* Form simplified final result. */
8670 if (mult == const0_rtx)
8671 return add;
8672 else if (mult == const1_rtx)
8673 mult = g1->dest_reg;
8674 else
8675 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
8677 if (add == const0_rtx)
8678 return mult;
8679 else
8681 if (GET_CODE (add) == PLUS
8682 && CONSTANT_P (XEXP (add, 1)))
8684 rtx tem = XEXP (add, 1);
8685 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
8686 add = tem;
8689 return gen_rtx_PLUS (g2->mode, mult, add);
8693 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8694 represented by G1. This indicates that G2 should be combined with G1 and
8695 that G2 can use (either directly or via an address expression) a register
8696 used to represent G1. */
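/* E.g. if express_from shows that G2 == G1 + 4 and G2 is a DEST_ADDR
   giv, (plus G1_reg (const_int 4)) is returned provided it is a valid
   memory address for the mode of G2's mem.  */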
8698 static rtx
8699 combine_givs_p (struct induction *g1, struct induction *g2)
8701 rtx comb, ret;
8703 /* With the introduction of ext dependent givs, we must be careful about modes.
8704 G2 must not use a wider mode than G1. */
8705 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
8706 return NULL_RTX;
8708 ret = comb = express_from (g1, g2);
8709 if (comb == NULL_RTX)
8710 return NULL_RTX;
8711 if (g1->mode != g2->mode)
8712 ret = gen_lowpart (g2->mode, comb);
8714 /* If these givs are identical, they can be combined. We use the results
8715 of express_from because the addends are not in a canonical form, so
8716 rtx_equal_p is a weaker test. */
8717 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
8718 combination to be the other way round. */
8719 if (comb == g1->dest_reg
8720 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
8722 return ret;
8725 /* If G2 can be expressed as a function of G1 and that function is valid
8726 as an address and no more expensive than using a register for G2,
8727 the expression of G2 in terms of G1 can be used. */
8728 if (ret != NULL_RTX
8729 && g2->giv_type == DEST_ADDR
8730 && memory_address_p (GET_MODE (g2->mem), ret))
8731 return ret;
8733 return NULL_RTX;
8736 /* See if BL is monotonic and has a constant per-iteration increment.
8737 Return the increment if so, otherwise return 0. */
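/* E.g. a biv updated by +4 and then +2 within the body has a constant
   total increment of 6 and is monotonic; one updated by +4 and -2 is
   not, and 0 is returned for it.  */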
8739 static HOST_WIDE_INT
8740 get_monotonic_increment (struct iv_class *bl)
8742 struct induction *v;
8743 rtx incr;
8745 /* Get the total increment and check that it is constant. */
8746 incr = biv_total_increment (bl);
8747 if (incr == 0 || GET_CODE (incr) != CONST_INT)
8748 return 0;
8750 for (v = bl->biv; v != 0; v = v->next_iv)
8752 if (GET_CODE (v->add_val) != CONST_INT)
8753 return 0;
8755 if (INTVAL (v->add_val) < 0 && INTVAL (incr) >= 0)
8756 return 0;
8758 if (INTVAL (v->add_val) > 0 && INTVAL (incr) <= 0)
8759 return 0;
8761 return INTVAL (incr);
8765 /* Subroutine of biv_fits_mode_p. Return true if biv BL, when biased by
8766 BIAS, will never exceed the unsigned range of MODE. LOOP is the loop
8767 to which the biv belongs and INCR is its per-iteration increment. */
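/* A numeric example (hypothetical): with BIAS 0, a QImode biv starting
   at 250 with increment 1 has span 255 + 1 - 250 = 6, so it fits only
   if the loop runs at most 6 iterations.  */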
8769 static bool
8770 biased_biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8771 HOST_WIDE_INT incr, enum machine_mode mode,
8772 unsigned HOST_WIDE_INT bias)
8774 unsigned HOST_WIDE_INT initial, maximum, span, delta;
8776 /* We need to be able to manipulate MODE-size constants. */
8777 if (HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode))
8778 return false;
8780 /* The number of loop iterations must be constant. */
8781 if (LOOP_INFO (loop)->n_iterations == 0)
8782 return false;
8784 /* So must the biv's initial value. */
8785 if (bl->initial_value == 0 || GET_CODE (bl->initial_value) != CONST_INT)
8786 return false;
8788 initial = bias + INTVAL (bl->initial_value);
8789 maximum = GET_MODE_MASK (mode);
8791 /* Make sure that the initial value is within range. */
8792 if (initial > maximum)
8793 return false;
8795 /* Set up DELTA and SPAN such that the number of iterations * DELTA
8796 (calculated to arbitrary precision) must be <= SPAN. */
8797 if (incr < 0)
8799 delta = -incr;
8800 span = initial;
8802 else
8804 delta = incr;
8805 /* Handle the special case in which MAXIMUM is the largest
8806 unsigned HOST_WIDE_INT and INITIAL is 0. */
8807 if (maximum + 1 == initial)
8808 span = LOOP_INFO (loop)->n_iterations * delta;
8809 else
8810 span = maximum + 1 - initial;
8812 return (span / LOOP_INFO (loop)->n_iterations >= delta);
8816 /* Return true if biv BL will never exceed the bounds of MODE. LOOP is
8817 the loop to which BL belongs and INCR is its per-iteration increment.
8818 UNSIGNEDP is true if the biv should be treated as unsigned. */
8820 static bool
8821 biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
8822 HOST_WIDE_INT incr, enum machine_mode mode, bool unsignedp)
8824 struct loop_info *loop_info;
8825 unsigned HOST_WIDE_INT bias;
8827 /* A biv's value will always be limited to its natural mode.
8828 Larger modes will observe the same wrap-around. */
8829 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (bl->biv->src_reg)))
8830 mode = GET_MODE (bl->biv->src_reg);
8832 loop_info = LOOP_INFO (loop);
8834 bias = (unsignedp ? 0 : (GET_MODE_MASK (mode) >> 1) + 1);
8835 if (biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8836 return true;
8838 if (mode == GET_MODE (bl->biv->src_reg)
8839 && bl->biv->src_reg == loop_info->iteration_var
8840 && loop_info->comparison_value
8841 && loop_invariant_p (loop, loop_info->comparison_value))
8843 /* If the increment is +1, and the exit test is a <, the BIV
8844 cannot overflow. (For <=, we have the problematic case that
8845 the comparison value might be the maximum value of the range.) */
8846 if (incr == 1)
8848 if (loop_info->comparison_code == LT)
8849 return true;
8850 if (loop_info->comparison_code == LTU && unsignedp)
8851 return true;
8854 /* Likewise for increment -1 and exit test >. */
8855 if (incr == -1)
8857 if (loop_info->comparison_code == GT)
8858 return true;
8859 if (loop_info->comparison_code == GTU && unsignedp)
8860 return true;
8863 return false;
8867 /* Return false iff it is provable that biv BL will not wrap at any point
8868 in its update sequence. Note that at the RTL level we may not have
8869 information about the signedness of BL; in that case, check for both
8870 signed and unsigned overflow. */
8872 static bool
8873 biv_may_wrap_p (const struct loop *loop, struct iv_class *bl)
8875 HOST_WIDE_INT incr;
8876 bool check_signed, check_unsigned;
8877 enum machine_mode mode;
8879 /* If the increment is not monotonic, we'd have to check separately
8880 at each increment step. Not Worth It. */
8881 incr = get_monotonic_increment (bl);
8882 if (incr == 0)
8883 return true;
8885 /* If this biv is the loop iteration variable, then we may be able to
8886 deduce a sign based on the loop condition. */
8887 /* ??? This is not 100% reliable; consider an unsigned biv that is cast
8888 to signed for the comparison. However, this same bug appears all
8889 through loop.c. */
8890 check_signed = check_unsigned = true;
8891 if (bl->biv->src_reg == LOOP_INFO (loop)->iteration_var)
8893 switch (LOOP_INFO (loop)->comparison_code)
8895 case GTU: case GEU: case LTU: case LEU:
8896 check_signed = false;
8897 break;
8898 case GT: case GE: case LT: case LE:
8899 check_unsigned = false;
8900 break;
8901 default:
8902 break;
8906 mode = GET_MODE (bl->biv->src_reg);
8908 if (check_unsigned
8909 && !biased_biv_fits_mode_p (loop, bl, incr, mode, 0))
8910 return true;
8912 if (check_signed)
8914 unsigned HOST_WIDE_INT bias = (GET_MODE_MASK (mode) >> 1) + 1;
8915 if (!biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
8916 return true;
8919 return false;
8923 /* Given that X is an extension or truncation of BL, return true
8924 if it is unaffected by overflow. LOOP is the loop to which
8925 BL belongs and INCR is its per-iteration increment. */
8927 static bool
8928 extension_within_bounds_p (const struct loop *loop, struct iv_class *bl,
8929 HOST_WIDE_INT incr, rtx x)
8931 enum machine_mode mode;
8932 bool signedp, unsignedp;
8934 switch (GET_CODE (x))
8936 case SIGN_EXTEND:
8937 case ZERO_EXTEND:
8938 mode = GET_MODE (XEXP (x, 0));
8939 signedp = (GET_CODE (x) == SIGN_EXTEND);
8940 unsignedp = (GET_CODE (x) == ZERO_EXTEND);
8941 break;
8943 case TRUNCATE:
8944 /* We don't know whether this value is being used as signed
8945 or unsigned, so check the conditions for both. */
8946 mode = GET_MODE (x);
8947 signedp = unsignedp = true;
8948 break;
8950 default:
8951 gcc_unreachable ();
8954 return ((!signedp || biv_fits_mode_p (loop, bl, incr, mode, false))
8955 && (!unsignedp || biv_fits_mode_p (loop, bl, incr, mode, true)));
8959 /* Check each extension dependent giv in this class to see if its
8960 root biv is safe from wrapping in the interior mode, which would
8961 make the giv illegal. */
8963 static void
8964 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
8966 struct induction *v;
8967 HOST_WIDE_INT incr;
8969 incr = get_monotonic_increment (bl);
8971 /* Invalidate givs that fail the tests. */
8972 for (v = bl->giv; v; v = v->next_iv)
8973 if (v->ext_dependent)
8975 if (incr != 0
8976 && extension_within_bounds_p (loop, bl, incr, v->ext_dependent))
8978 if (loop_dump_stream)
8979 fprintf (loop_dump_stream,
8980 "Verified ext dependent giv at %d of reg %d\n",
8981 INSN_UID (v->insn), bl->regno);
8983 else
8985 if (loop_dump_stream)
8986 fprintf (loop_dump_stream,
8987 "Failed ext dependent giv at %d\n",
8988 INSN_UID (v->insn));
8990 v->ignore = 1;
8991 bl->all_reduced = 0;
8996 /* Generate a version of VALUE in a mode appropriate for initializing V. */
8998 static rtx
8999 extend_value_for_giv (struct induction *v, rtx value)
9001 rtx ext_dep = v->ext_dependent;
9003 if (! ext_dep)
9004 return value;
9006 /* Recall that check_ext_dependent_givs verified that the known bounds
9007 of a biv did not overflow or wrap with respect to the extension for
9008 the giv. Therefore, constants need no additional adjustment. */
9009 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
9010 return value;
9012 /* Otherwise, we must adjust the value to compensate for the
9013 differing modes of the biv and the giv. */
9014 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
9017 struct combine_givs_stats
9019 int giv_number;
9020 int total_benefit;
9023 static int
9024 cmp_combine_givs_stats (const void *xp, const void *yp)
9026 const struct combine_givs_stats * const x =
9027 (const struct combine_givs_stats *) xp;
9028 const struct combine_givs_stats * const y =
9029 (const struct combine_givs_stats *) yp;
9030 int d;
9031 d = y->total_benefit - x->total_benefit;
9032 /* Stabilize the sort. */
9033 if (!d)
9034 d = x->giv_number - y->giv_number;
9035 return d;
9038 /* Check all pairs of givs for iv_class BL and see if any can be combined with
9039 any other. If so, point SAME to the giv combined with and set NEW_REG to
9040 be an expression (in terms of the other giv's DEST_REG) equivalent to the
9041 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
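/* can_combine below is a giv_count x giv_count matrix: entry
   [i * giv_count + j] holds the rtx expressing giv j in terms of giv
   i's dest_reg, or 0 if that pair cannot be combined.  */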
9043 static void
9044 combine_givs (struct loop_regs *regs, struct iv_class *bl)
9046 /* Additional benefit to add for being combined multiple times. */
9047 const int extra_benefit = 3;
9049 struct induction *g1, *g2, **giv_array;
9050 int i, j, k, giv_count;
9051 struct combine_givs_stats *stats;
9052 rtx *can_combine;
9054 /* Count givs, because bl->giv_count is incorrect here. */
9055 giv_count = 0;
9056 for (g1 = bl->giv; g1; g1 = g1->next_iv)
9057 if (!g1->ignore)
9058 giv_count++;
9060 giv_array = alloca (giv_count * sizeof (struct induction *));
9061 i = 0;
9062 for (g1 = bl->giv; g1; g1 = g1->next_iv)
9063 if (!g1->ignore)
9064 giv_array[i++] = g1;
9066 stats = xcalloc (giv_count, sizeof (*stats));
9067 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
9069 for (i = 0; i < giv_count; i++)
9071 int this_benefit;
9072 rtx single_use;
9074 g1 = giv_array[i];
9075 stats[i].giv_number = i;
9077 /* If a DEST_REG GIV is used only once, do not allow it to combine
9078 with anything, for in doing so we will gain nothing that cannot
9079 be had by simply letting the GIV with which we would have combined
9080 be reduced on its own. The lossage shows up in particular with
9081 DEST_ADDR targets on hosts with reg+reg addressing, though it can
9082 be seen elsewhere as well. */
9083 if (g1->giv_type == DEST_REG
9084 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
9085 && single_use != const0_rtx)
9086 continue;
9088 this_benefit = g1->benefit;
9089 /* Add an additional weight for zero addends. */
9090 if (g1->no_const_addval)
9091 this_benefit += 1;
9093 for (j = 0; j < giv_count; j++)
9095 rtx this_combine;
9097 g2 = giv_array[j];
9098 if (g1 != g2
9099 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
9101 can_combine[i * giv_count + j] = this_combine;
9102 this_benefit += g2->benefit + extra_benefit;
9105 stats[i].total_benefit = this_benefit;
9108 /* Iterate, combining until we can't. */
9109 restart:
9110 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
9112 if (loop_dump_stream)
9114 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
9115 for (k = 0; k < giv_count; k++)
9117 g1 = giv_array[stats[k].giv_number];
9118 if (!g1->combined_with && !g1->same)
9119 fprintf (loop_dump_stream, " {%d, %d}",
9120 INSN_UID (giv_array[stats[k].giv_number]->insn),
9121 stats[k].total_benefit);
9123 putc ('\n', loop_dump_stream);
9126 for (k = 0; k < giv_count; k++)
9128 int g1_add_benefit = 0;
9130 i = stats[k].giv_number;
9131 g1 = giv_array[i];
9133 /* If it has already been combined, skip. */
9134 if (g1->combined_with || g1->same)
9135 continue;
9137 for (j = 0; j < giv_count; j++)
9139 g2 = giv_array[j];
9140 if (g1 != g2 && can_combine[i * giv_count + j]
9141 /* If it has already been combined, skip. */
9142 && ! g2->same && ! g2->combined_with)
9144 int l;
9146 g2->new_reg = can_combine[i * giv_count + j];
9147 g2->same = g1;
9148 /* For destination, we now may replace by mem expression instead
9149 of register. This changes the costs considerably, so add the
9150 compensation. */
9151 if (g2->giv_type == DEST_ADDR)
9152 g2->benefit = (g2->benefit + reg_address_cost
9153 - address_cost (g2->new_reg,
9154 GET_MODE (g2->mem)));
9155 g1->combined_with++;
9156 g1->lifetime += g2->lifetime;
9158 g1_add_benefit += g2->benefit;
9160 /* ??? The new final_[bg]iv_value code does a much better job
9161 of finding replaceable giv's, and hence this code may no
9162 longer be necessary. */
9163 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
9164 g1_add_benefit -= copy_cost;
9166 /* To help optimize the next set of combinations, remove
9167 this giv from the benefits of other potential mates. */
9168 for (l = 0; l < giv_count; ++l)
9170 int m = stats[l].giv_number;
9171 if (can_combine[m * giv_count + j])
9172 stats[l].total_benefit -= g2->benefit + extra_benefit;
9175 if (loop_dump_stream)
9176 fprintf (loop_dump_stream,
9177 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
9178 INSN_UID (g2->insn), INSN_UID (g1->insn),
9179 g1->benefit, g1_add_benefit, g1->lifetime);
9183 /* To help optimize the next set of combinations, remove
9184 this giv from the benefits of other potential mates. */
9185 if (g1->combined_with)
9187 for (j = 0; j < giv_count; ++j)
9189 int m = stats[j].giv_number;
9190 if (can_combine[m * giv_count + i])
9191 stats[j].total_benefit -= g1->benefit + extra_benefit;
9194 g1->benefit += g1_add_benefit;
9196 /* We've finished with this giv, and everything it touched.
9197 Restart the combination so that proper weights for the
9198 rest of the givs are properly taken into account. */
9199 /* ??? Ideally we would compact the arrays at this point, so
9200 as to not cover old ground. But sanely compacting
9201 can_combine is tricky. */
9202 goto restart;
9206 /* Clean up. */
9207 free (stats);
9208 free (can_combine);
9211 /* Generate sequence for REG = B * M + A. B is the initial value of
9212 the basic induction variable, M a multiplicative constant, A an
9213 additive constant and REG the destination register. */
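/* Illustrative sketch (not from the original source): for B = i0,
   M = 4 and A = 8 the generated sequence computes the equivalent of

       reg = i0 * 4 + 8;

   where expand_mult_add may use shifts and adds rather than an
   actual multiply insn.  */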
9215 static rtx
9216 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
9218 rtx seq;
9219 rtx result;
9221 start_sequence ();
9222 /* Use unsigned arithmetic. */
9223 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9224 if (reg != result)
9225 emit_move_insn (reg, result);
9226 seq = get_insns ();
9227 end_sequence ();
9229 return seq;
9233 /* Update registers created in insn sequence SEQ. */
9235 static void
9236 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
9238 rtx insn;
9240 /* Update register info for alias analysis. */
9242 insn = seq;
9243 while (insn != NULL_RTX)
9245 rtx set = single_set (insn);
9247 if (set && REG_P (SET_DEST (set)))
9248 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
9250 insn = NEXT_INSN (insn);
9255 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
9256 is the initial value of the basic induction variable, M a
9257 multiplicative constant, A an additive constant and REG the
9258 destination register. */
9260 static void
9261 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
9262 rtx reg, basic_block before_bb, rtx before_insn)
9264 rtx seq;
9266 if (! before_insn)
9268 loop_iv_add_mult_hoist (loop, b, m, a, reg);
9269 return;
9272 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9273 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9275 /* Increase the lifetime of any invariants moved further in code. */
9276 update_reg_last_use (a, before_insn);
9277 update_reg_last_use (b, before_insn);
9278 update_reg_last_use (m, before_insn);
9280 /* It is possible that the expansion created lots of new registers.
9281 Iterate over the sequence we just created and record them all. We
9282 must do this before inserting the sequence. */
9283 loop_regs_update (loop, seq);
9285 loop_insn_emit_before (loop, before_bb, before_insn, seq);
9289 /* Emit insns in loop pre-header to set REG = B * M + A. B is the
9290 initial value of the basic induction variable, M a multiplicative
9291 constant, A an additive constant and REG the destination
9292 register. */
9294 static void
9295 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9297 rtx seq;
9299 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9300 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9302 /* Increase the lifetime of any invariants moved further in code.
9303 ???? Is this really necessary? */
9304 update_reg_last_use (a, loop->sink);
9305 update_reg_last_use (b, loop->sink);
9306 update_reg_last_use (m, loop->sink);
9308 /* It is possible that the expansion created lots of new registers.
9309 Iterate over the sequence we just created and record them all. We
9310 must do this before inserting the sequence. */
9311 loop_regs_update (loop, seq);
9313 loop_insn_sink (loop, seq);
9317 /* Emit insns after loop to set REG = B * M + A. B is the initial
9318 value of the basic induction variable, M a multiplicative constant,
9319 A an additive constant and REG the destination register. */
9321 static void
9322 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9324 rtx seq;
9326 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9327 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9329 /* It is possible that the expansion created lots of new registers.
9330 Iterate over the sequence we just created and record them all. We
9331 must do this before inserting the sequence. */
9332 loop_regs_update (loop, seq);
9334 loop_insn_hoist (loop, seq);
9339 /* Similar to gen_add_mult, but compute cost rather than generating
9340 sequence. */
9342 static int
9343 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
9345 int cost = 0;
9346 rtx last, result;
9348 start_sequence ();
9349 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9350 if (reg != result)
9351 emit_move_insn (reg, result);
9352 last = get_last_insn ();
9353 while (last)
9355 rtx t = single_set (last);
9356 if (t)
9357 cost += rtx_cost (SET_SRC (t), SET);
9358 last = PREV_INSN (last);
9360 end_sequence ();
9361 return cost;
9364 /* Test whether A * B can be computed without
9365 an actual multiply insn. Value is 1 if so.
9367 ??? This function stinks because it generates a ton of wasted RTL
9368 ??? and as a result fragments GC memory to no end. There are other
9369 ??? places in the compiler which are invoked a lot and do the same
9370 ??? thing, generate wasted RTL just to see if something is possible. */
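/* Illustrative sketch (not from the original source): on a typical
   target a * 5 expands to something like

       t = a << 2;  t = t + a;

   two cheap non-multiply insns, so the product is "cheap"; a * 291
   typically needs a long shift/add chain or a real mult insn, so the
   function returns 0.  */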
9372 static int
9373 product_cheap_p (rtx a, rtx b)
9375 rtx tmp;
9376 int win, n_insns;
9378 /* If only one is constant, make it B. */
9379 if (GET_CODE (a) == CONST_INT)
9380 tmp = a, a = b, b = tmp;
9382 /* If the first is constant, both are constant, so no multiply is needed. */
9383 if (GET_CODE (a) == CONST_INT)
9384 return 1;
9386 /* If the second is not constant, neither is, so a multiply would be needed. */
9387 if (GET_CODE (b) != CONST_INT)
9388 return 0;
9390 /* One operand is constant, so might not need multiply insn. Generate the
9391 code for the multiply and see if a call or multiply, or long sequence
9392 of insns is generated. */
9394 start_sequence ();
9395 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
9396 tmp = get_insns ();
9397 end_sequence ();
9399 win = 1;
9400 if (tmp == NULL_RTX)
9402 else if (INSN_P (tmp))
9404 n_insns = 0;
9405 while (tmp != NULL_RTX)
9407 rtx next = NEXT_INSN (tmp);
9409 if (++n_insns > 3
9410 || !NONJUMP_INSN_P (tmp)
9411 || (GET_CODE (PATTERN (tmp)) == SET
9412 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
9413 || (GET_CODE (PATTERN (tmp)) == PARALLEL
9414 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
9415 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
9417 win = 0;
9418 break;
9421 tmp = next;
9424 else if (GET_CODE (tmp) == SET
9425 && GET_CODE (SET_SRC (tmp)) == MULT)
9426 win = 0;
9427 else if (GET_CODE (tmp) == PARALLEL
9428 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
9429 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
9430 win = 0;
9432 return win;
9435 /* Check to see if loop can be terminated by a "decrement and branch until
9436 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
9437 Also try reversing an increment loop to a decrement loop
9438 to see if the optimization can be performed.
9439 Value is nonzero if optimization was performed. */
9441 /* This is useful even if the architecture doesn't have such an insn,
9442 because it might change a loop which increments from 0 to n to a loop
9443 which decrements from n to 0. A loop that decrements to zero is usually
9444 faster than one that increments from zero. */
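/* Illustrative sketch of the transformation (not from the original
   source):

       for (i = 0; i < n; i++)          for (i = n; i > 0; i--)
         body-not-using-i;       ==>      body-not-using-i;

   after which the exit test is a compare against zero, or a single
   decrement-and-branch insn on targets that have one.  */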
9446 /* ??? This could be rewritten to use some of the loop unrolling procedures,
9447 such as approx_final_value, biv_total_increment, loop_iterations, and
9448 final_[bg]iv_value. */
9450 static int
9451 check_dbra_loop (struct loop *loop, int insn_count)
9453 struct loop_info *loop_info = LOOP_INFO (loop);
9454 struct loop_regs *regs = LOOP_REGS (loop);
9455 struct loop_ivs *ivs = LOOP_IVS (loop);
9456 struct iv_class *bl;
9457 rtx reg;
9458 enum machine_mode mode;
9459 rtx jump_label;
9460 rtx final_value;
9461 rtx start_value;
9462 rtx new_add_val;
9463 rtx comparison;
9464 rtx before_comparison;
9465 rtx p;
9466 rtx jump;
9467 rtx first_compare;
9468 int compare_and_branch;
9469 rtx loop_start = loop->start;
9470 rtx loop_end = loop->end;
9472 /* If last insn is a conditional branch, and the insn before tests a
9473 register value, try to optimize it. Otherwise, we can't do anything. */
9475 jump = PREV_INSN (loop_end);
9476 comparison = get_condition_for_loop (loop, jump);
9477 if (comparison == 0)
9478 return 0;
9479 if (!onlyjump_p (jump))
9480 return 0;
9482 /* Try to compute whether the compare/branch at the loop end is one or
9483 two instructions. */
9484 get_condition (jump, &first_compare, false, true);
9485 if (first_compare == jump)
9486 compare_and_branch = 1;
9487 else if (first_compare == prev_nonnote_insn (jump))
9488 compare_and_branch = 2;
9489 else
9490 return 0;
9493 /* If more than one condition is present to control the loop, then
9494 do not proceed, as this function does not know how to rewrite
9495 loop tests with more than one condition.
9497 Look backwards from the first insn in the last comparison
9498 sequence and see if we've got another comparison sequence. */
9500 rtx jump1;
9501 if ((jump1 = prev_nonnote_insn (first_compare))
9502 && JUMP_P (jump1))
9503 return 0;
9506 /* Check all of the bivs to see if the compare uses one of them.
9507 Skip biv's set more than once because we can't guarantee that
9508 it will be zero on the last iteration. Also skip if the biv is
9509 used between its update and the test insn. */
9511 for (bl = ivs->list; bl; bl = bl->next)
9513 if (bl->biv_count == 1
9514 && ! bl->biv->maybe_multiple
9515 && bl->biv->dest_reg == XEXP (comparison, 0)
9516 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9517 first_compare))
9518 break;
9521 /* Try swapping the comparison to identify a suitable biv. */
9522 if (!bl)
9523 for (bl = ivs->list; bl; bl = bl->next)
9524 if (bl->biv_count == 1
9525 && ! bl->biv->maybe_multiple
9526 && bl->biv->dest_reg == XEXP (comparison, 1)
9527 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9528 first_compare))
9530 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
9531 VOIDmode,
9532 XEXP (comparison, 1),
9533 XEXP (comparison, 0));
9534 break;
9537 if (! bl)
9538 return 0;
9540 /* Look for the case where the basic induction variable is always
9541 nonnegative, and equals zero on the last iteration.
9542 In this case, add a reg_note REG_NONNEG, which allows the
9543 m68k DBRA instruction to be used. */
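/* Background note (recalled m68k semantics, not from this file):
   "dbra dN,label" decrements dN and branches back unless the result
   is -1, which matches exactly the "nonnegative, equals zero on the
   last iteration" pattern recognized here.  */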
9545 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
9546 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
9547 && GET_CODE (bl->biv->add_val) == CONST_INT
9548 && INTVAL (bl->biv->add_val) < 0)
9550 /* The initial value must be greater than 0, and
9551 init_val % -dec_value == 0, to ensure that the biv equals zero on
9552 the last iteration. */
9554 if (GET_CODE (bl->initial_value) == CONST_INT
9555 && INTVAL (bl->initial_value) > 0
9556 && (INTVAL (bl->initial_value)
9557 % (-INTVAL (bl->biv->add_val))) == 0)
9559 /* Register always nonnegative, add REG_NOTE to branch. */
9560 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9561 REG_NOTES (jump)
9562 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9563 REG_NOTES (jump));
9564 bl->nonneg = 1;
9566 return 1;
9569 /* If the decrement is 1 and the value was tested as >= 0 before
9570 the loop, then we can safely optimize. */
9571 for (p = loop_start; p; p = PREV_INSN (p))
9573 if (LABEL_P (p))
9574 break;
9575 if (!JUMP_P (p))
9576 continue;
9578 before_comparison = get_condition_for_loop (loop, p);
9579 if (before_comparison
9580 && XEXP (before_comparison, 0) == bl->biv->dest_reg
9581 && (GET_CODE (before_comparison) == LT
9582 || GET_CODE (before_comparison) == LTU)
9583 && XEXP (before_comparison, 1) == const0_rtx
9584 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
9585 && INTVAL (bl->biv->add_val) == -1)
9587 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9588 REG_NOTES (jump)
9589 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9590 REG_NOTES (jump));
9591 bl->nonneg = 1;
9593 return 1;
9597 else if (GET_CODE (bl->biv->add_val) == CONST_INT
9598 && INTVAL (bl->biv->add_val) > 0)
9600 /* Try to change inc to dec, so can apply above optimization. */
9601 /* Can do this if:
9602 all registers modified are induction variables or invariant,
9603 all memory references have non-overlapping addresses
9604 (obviously true if only one write)
9605 allow 2 insns for the compare/jump at the end of the loop. */
9606 /* Also, we must avoid any instructions which use both the reversed
9607 biv and another biv. Such instructions will fail if the loop is
9608 reversed. We meet this condition by requiring that either
9609 no_use_except_counting is true, or else that there is only
9610 one biv. */
9611 int num_nonfixed_reads = 0;
9612 /* 1 if the iteration var is used only to count iterations. */
9613 int no_use_except_counting = 0;
9614 /* 1 if the loop has no memory store, or it has a single memory store
9615 which is reversible. */
9616 int reversible_mem_store = 1;
9618 if (bl->giv_count == 0
9619 && !loop->exit_count
9620 && !loop_info->has_multiple_exit_targets)
9622 rtx bivreg = regno_reg_rtx[bl->regno];
9623 struct iv_class *blt;
9625 /* If there are no givs for this biv, and the only exit is the
9626 fall through at the end of the loop, then
9627 see if perhaps there are no uses except to count. */
9628 no_use_except_counting = 1;
9629 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9630 if (INSN_P (p))
9632 rtx set = single_set (p);
9634 if (set && REG_P (SET_DEST (set))
9635 && REGNO (SET_DEST (set)) == bl->regno)
9636 /* An insn that sets the biv is okay. */
9638 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
9639 /* An insn that doesn't mention the biv is okay. */
9641 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
9642 || p == prev_nonnote_insn (loop_end))
9644 /* If either of these insns uses the biv and sets a pseudo
9645 that has more than one usage, then the biv has uses
9646 other than counting since it's used to derive a value
9647 that is used more than one time. */
9648 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
9649 regs);
9650 if (regs->multiple_uses)
9652 no_use_except_counting = 0;
9653 break;
9656 else
9658 no_use_except_counting = 0;
9659 break;
9663 /* A biv has uses besides counting if it is used to set
9664 another biv. */
9665 for (blt = ivs->list; blt; blt = blt->next)
9666 if (blt->init_set
9667 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
9669 no_use_except_counting = 0;
9670 break;
9674 if (no_use_except_counting)
9675 /* No need to worry about MEMs. */
9677 else if (loop_info->num_mem_sets <= 1)
9679 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9680 if (INSN_P (p))
9681 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
9683 /* If the loop has a single store, and the destination address is
9684 invariant, then we can't reverse the loop, because this address
9685 might then have the wrong value at loop exit.
9686 This would work if the source was invariant also, however, in that
9687 case, the insn should have been moved out of the loop. */
9689 if (loop_info->num_mem_sets == 1)
9691 struct induction *v;
9693 /* If we could prove that each of the memory locations
9694 written to was different, then we could reverse the
9695 store -- but we don't presently have any way of
9696 knowing that. */
9697 reversible_mem_store = 0;
9699 /* If the store depends on a register that is set after the
9700 store, it depends on the initial value, and is thus not
9701 reversible. */
9702 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
9704 if (v->giv_type == DEST_REG
9705 && reg_mentioned_p (v->dest_reg,
9706 PATTERN (loop_info->first_loop_store_insn))
9707 && loop_insn_first_p (loop_info->first_loop_store_insn,
9708 v->insn))
9709 reversible_mem_store = 0;
9713 else
9714 return 0;
9716 /* This code only acts for innermost loops. Also it simplifies
9717 the memory address check by only reversing loops with
9718 zero or one memory access.
9719 Two memory accesses could involve parts of the same array,
9720 and that can't be reversed.
9721 If the biv is used only for counting, then we don't need to worry
9722 about all these things. */
9724 if ((num_nonfixed_reads <= 1
9725 && ! loop_info->has_nonconst_call
9726 && ! loop_info->has_prefetch
9727 && ! loop_info->has_volatile
9728 && reversible_mem_store
9729 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
9730 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
9731 && (bl == ivs->list && bl->next == 0))
9732 || (no_use_except_counting && ! loop_info->has_prefetch))
9734 rtx tem;
9736 /* Loop can be reversed. */
9737 if (loop_dump_stream)
9738 fprintf (loop_dump_stream, "Can reverse loop\n");
9740 /* Now check other conditions:
9742 The increment must be a constant, as must the initial value,
9743 and the comparison code must be LT.
9745 This test can probably be improved since +/- 1 in the constant
9746 can be obtained by changing LT to LE and vice versa; this is
9747 confusing. */
9749 if (comparison
9750 /* for constants, LE gets turned into LT */
9751 && (GET_CODE (comparison) == LT
9752 || (GET_CODE (comparison) == LE
9753 && no_use_except_counting)
9754 || GET_CODE (comparison) == LTU))
9756 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
9757 rtx initial_value, comparison_value;
9758 int nonneg = 0;
9759 enum rtx_code cmp_code;
9760 int comparison_const_width;
9761 unsigned HOST_WIDE_INT comparison_sign_mask;
9762 bool keep_first_compare;
9764 add_val = INTVAL (bl->biv->add_val);
9765 comparison_value = XEXP (comparison, 1);
9766 if (GET_MODE (comparison_value) == VOIDmode)
9767 comparison_const_width
9768 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
9769 else
9770 comparison_const_width
9771 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
9772 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
9773 comparison_const_width = HOST_BITS_PER_WIDE_INT;
9774 comparison_sign_mask
9775 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
9777 /* If the comparison value is not a loop invariant, then we
9778 can not reverse this loop.
9780 ??? If the insns which initialize the comparison value as
9781 a whole compute an invariant result, then we could move
9782 them out of the loop and proceed with loop reversal. */
9783 if (! loop_invariant_p (loop, comparison_value))
9784 return 0;
9786 if (GET_CODE (comparison_value) == CONST_INT)
9787 comparison_val = INTVAL (comparison_value);
9788 initial_value = bl->initial_value;
9790 /* Normalize the initial value if it is an integer and
9791 has no other use except as a counter. This will allow
9792 a few more loops to be reversed. */
9793 if (no_use_except_counting
9794 && GET_CODE (comparison_value) == CONST_INT
9795 && GET_CODE (initial_value) == CONST_INT)
9797 comparison_val = comparison_val - INTVAL (bl->initial_value);
9798 /* The code below requires comparison_val to be a multiple
9799 of add_val in order to do the loop reversal, so
9800 round up comparison_val to a multiple of add_val.
9801 Since comparison_value is constant, we know that the
9802 current comparison code is LT. */
9803 comparison_val = comparison_val + add_val - 1;
9804 comparison_val
9805 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
9806 /* We postpone overflow checks for COMPARISON_VAL here;
9807 even if there is an overflow, we might still be able to
9808 reverse the loop, if converting the loop exit test to
9809 NE is possible. */
9810 initial_value = const0_rtx;
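/* Worked example of the normalization above (illustrative, not from
   the original source): with initial_value 4, comparison_value 17
   and add_val 4, comparison_val becomes 17 - 4 = 13, rounded up to
   16; the loop is then treated as running from 0 up to 16 in steps
   of 4 -- the same four iterations as the original 4..17 loop.  */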
9813 /* First check if we can do a vanilla loop reversal. */
9814 if (initial_value == const0_rtx
9815 && GET_CODE (comparison_value) == CONST_INT
9816 /* Now do postponed overflow checks on COMPARISON_VAL. */
9817 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
9818 & comparison_sign_mask))
9820 /* Register will always be nonnegative, with value
9821 0 on last iteration */
9822 add_adjust = add_val;
9823 nonneg = 1;
9824 cmp_code = GE;
9826 else
9827 return 0;
9829 if (GET_CODE (comparison) == LE)
9830 add_adjust -= add_val;
9832 /* If the initial value is not zero, or if the comparison
9833 value is not an exact multiple of the increment, then we
9834 can not reverse this loop. */
9835 if (initial_value == const0_rtx
9836 && GET_CODE (comparison_value) == CONST_INT)
9838 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
9839 return 0;
9841 else
9843 if (! no_use_except_counting || add_val != 1)
9844 return 0;
9847 final_value = comparison_value;
9849 /* Reset these in case we normalized the initial value
9850 and comparison value above. */
9851 if (GET_CODE (comparison_value) == CONST_INT
9852 && GET_CODE (initial_value) == CONST_INT)
9854 comparison_value = GEN_INT (comparison_val);
9855 final_value
9856 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
9858 bl->initial_value = initial_value;
9860 /* Save some info needed to produce the new insns. */
9861 reg = bl->biv->dest_reg;
9862 mode = GET_MODE (reg);
9863 jump_label = condjump_label (PREV_INSN (loop_end));
9864 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
9866 /* Set start_value; if this is not a CONST_INT, we need
9867 to generate a SUB.
9868 Initialize biv to start_value before loop start.
9869 The old initializing insn will be deleted as a
9870 dead store by flow.c. */
9871 if (initial_value == const0_rtx
9872 && GET_CODE (comparison_value) == CONST_INT)
9874 start_value
9875 = gen_int_mode (comparison_val - add_adjust, mode);
9876 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
9878 else if (GET_CODE (initial_value) == CONST_INT)
9880 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
9881 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
9883 if (add_insn == 0)
9884 return 0;
9886 start_value
9887 = gen_rtx_PLUS (mode, comparison_value, offset);
9888 loop_insn_hoist (loop, add_insn);
9889 if (GET_CODE (comparison) == LE)
9890 final_value = gen_rtx_PLUS (mode, comparison_value,
9891 GEN_INT (add_val));
9893 else if (! add_adjust)
9895 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
9896 initial_value);
9898 if (sub_insn == 0)
9899 return 0;
9900 start_value
9901 = gen_rtx_MINUS (mode, comparison_value, initial_value);
9902 loop_insn_hoist (loop, sub_insn);
9904 else
9905 /* We could handle the other cases too, but it'll be
9906 better to have a testcase first. */
9907 return 0;
9909 /* We may not have a single insn which can increment a reg, so
9910 create a sequence to hold all the insns from expand_inc. */
9911 start_sequence ();
9912 expand_inc (reg, new_add_val);
9913 tem = get_insns ();
9914 end_sequence ();
9916 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
9917 delete_insn (bl->biv->insn);
9919 /* Update biv info to reflect its new status. */
9920 bl->biv->insn = p;
9921 bl->initial_value = start_value;
9922 bl->biv->add_val = new_add_val;
9924 /* Update loop info. */
9925 loop_info->initial_value = reg;
9926 loop_info->initial_equiv_value = reg;
9927 loop_info->final_value = const0_rtx;
9928 loop_info->final_equiv_value = const0_rtx;
9929 loop_info->comparison_value = const0_rtx;
9930 loop_info->comparison_code = cmp_code;
9931 loop_info->increment = new_add_val;
9933 /* Inc LABEL_NUSES so that delete_insn will
9934 not delete the label. */
9935 LABEL_NUSES (XEXP (jump_label, 0))++;
9937 /* If we have a separate comparison insn that does more
9938 than just set cc0, the result of the comparison might
9939 be used outside the loop. */
9940 keep_first_compare = (compare_and_branch == 2
9941 #ifdef HAVE_cc0
9942 && sets_cc0_p (first_compare) <= 0
9943 #endif
9946 /* Emit an insn after the end of the loop to set the biv's
9947 proper exit value if it is used anywhere outside the loop. */
9948 if (keep_first_compare
9949 || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
9950 || ! bl->init_insn
9951 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
9952 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
9954 if (keep_first_compare)
9955 loop_insn_sink (loop, PATTERN (first_compare));
9957 /* Delete compare/branch at end of loop. */
9958 delete_related_insns (PREV_INSN (loop_end));
9959 if (compare_and_branch == 2)
9960 delete_related_insns (first_compare);
9962 /* Add new compare/branch insn at end of loop. */
9963 start_sequence ();
9964 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
9965 mode, 0,
9966 XEXP (jump_label, 0));
9967 tem = get_insns ();
9968 end_sequence ();
9969 emit_jump_insn_before (tem, loop_end);
9971 for (tem = PREV_INSN (loop_end);
9972 tem && !JUMP_P (tem);
9973 tem = PREV_INSN (tem))
9976 if (tem)
9977 JUMP_LABEL (tem) = XEXP (jump_label, 0);
9979 if (nonneg)
9981 if (tem)
9983 /* Increment of LABEL_NUSES done above. */
9984 /* Register is now always nonnegative,
9985 so add REG_NONNEG note to the branch. */
9986 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
9987 REG_NOTES (tem));
9989 bl->nonneg = 1;
9992 /* No insn may reference both the reversed and another biv or it
9993 will fail (see comment near the top of the loop reversal
9994 code).
9995 Earlier on, we have verified that the biv has no use except
9996 counting, or it is the only biv in this function.
9997 However, the code that computes no_use_except_counting does
9998 not verify reg notes. It's possible to have an insn that
9999 references another biv, and has a REG_EQUAL note with an
10000 expression based on the reversed biv. To avoid this case,
10001 remove all REG_EQUAL notes based on the reversed biv
10002 here. */
10003 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
10004 if (INSN_P (p))
10006 rtx *pnote;
10007 rtx set = single_set (p);
10008 /* If this is a set of a GIV based on the reversed biv, any
10009 REG_EQUAL notes should still be correct. */
10010 if (! set
10011 || !REG_P (SET_DEST (set))
10012 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
10013 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
10014 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
10015 for (pnote = &REG_NOTES (p); *pnote;)
10017 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
10018 && reg_mentioned_p (regno_reg_rtx[bl->regno],
10019 XEXP (*pnote, 0)))
10020 *pnote = XEXP (*pnote, 1);
10021 else
10022 pnote = &XEXP (*pnote, 1);
10026 /* Mark that this biv has been reversed. Each giv which depends
10027 on this biv, and which is also live past the end of the loop
10028 will have to be fixed up. */
10030 bl->reversed = 1;
10032 if (loop_dump_stream)
10034 fprintf (loop_dump_stream, "Reversed loop");
10035 if (bl->nonneg)
10036 fprintf (loop_dump_stream, " and added reg_nonneg\n");
10037 else
10038 fprintf (loop_dump_stream, "\n");
10041 return 1;
10046 return 0;
10049 /* Verify whether the biv BL appears to be eliminable,
10050 based on the insns in the loop that refer to it.
10052 If ELIMINATE_P is nonzero, actually do the elimination.
10054 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
10055 determine whether invariant insns should be placed inside or at the
10056 start of the loop. */
10058 static int
10059 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
10060 int eliminate_p, int threshold, int insn_count)
10062 struct loop_ivs *ivs = LOOP_IVS (loop);
10063 rtx reg = bl->biv->dest_reg;
10064 rtx p;
10066 /* Scan all insns in the loop, stopping if we find one that uses the
10067 biv in a way that we cannot eliminate. */
10069 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10071 enum rtx_code code = GET_CODE (p);
10072 basic_block where_bb = 0;
10073 rtx where_insn = threshold >= insn_count ? 0 : p;
10074 rtx note;
10076 /* If this is a libcall that sets a giv, skip ahead to its end. */
10077 if (INSN_P (p))
10079 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
10081 if (note)
10083 rtx last = XEXP (note, 0);
10084 rtx set = single_set (last);
10086 if (set && REG_P (SET_DEST (set)))
10088 unsigned int regno = REGNO (SET_DEST (set));
10090 if (regno < ivs->n_regs
10091 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
10092 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
10093 p = last;
10098 /* Closely examine the insn if the biv is mentioned. */
10099 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
10100 && reg_mentioned_p (reg, PATTERN (p))
10101 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
10102 eliminate_p, where_bb, where_insn))
10104 if (loop_dump_stream)
10105 fprintf (loop_dump_stream,
10106 "Cannot eliminate biv %d: biv used in insn %d.\n",
10107 bl->regno, INSN_UID (p));
10108 break;
10111 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
10112 if (eliminate_p
10113 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
10114 && reg_mentioned_p (reg, XEXP (note, 0)))
10115 remove_note (p, note);
10118 if (p == loop->end)
10120 if (loop_dump_stream)
10121 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
10122 bl->regno, eliminate_p ? "was" : "can be");
10123 return 1;
10126 return 0;
10129 /* INSN and REFERENCE are instructions in the same insn chain.
10130 Return nonzero if INSN is first. */
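/* Implementation note (a summary of the loop below, not text from
   the original source): insns added during the loop pass have UIDs
   >= max_uid_for_loop and therefore no LUIDs, so the walk advances
   such insns (and notes) with NEXT_INSN until both P and Q have
   valid, comparable LUIDs.  */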
10132 static int
10133 loop_insn_first_p (rtx insn, rtx reference)
10135 rtx p, q;
10137 for (p = insn, q = reference;;)
10139 /* Test for "not first" before testing for "first", so that
10140 INSN == REFERENCE yields "not first". */
10141 if (q == insn || ! p)
10142 return 0;
10143 if (p == reference || ! q)
10144 return 1;
10146 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
10147 previous insn, hence the <= comparison below does not work if
10148 P is a note. */
10149 if (INSN_UID (p) < max_uid_for_loop
10150 && INSN_UID (q) < max_uid_for_loop
10151 && !NOTE_P (p))
10152 return INSN_LUID (p) <= INSN_LUID (q);
10154 if (INSN_UID (p) >= max_uid_for_loop
10155 || NOTE_P (p))
10156 p = NEXT_INSN (p);
10157 if (INSN_UID (q) >= max_uid_for_loop)
10158 q = NEXT_INSN (q);
10162 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
10163 the offset that we have to take into account due to auto-increment /
10164 giv derivation is zero. */
10165 static int
10166 biv_elimination_giv_has_0_offset (struct induction *biv,
10167 struct induction *giv, rtx insn)
10169 /* If the giv V had the auto-inc address optimization applied
10170 to it, and INSN occurs between the giv insn and the biv
10171 insn, then we'd have to adjust the value used here.
10172 This is rare, so we don't bother to make this possible. */
10173 if (giv->auto_inc_opt
10174 && ((loop_insn_first_p (giv->insn, insn)
10175 && loop_insn_first_p (insn, biv->insn))
10176 || (loop_insn_first_p (biv->insn, insn)
10177 && loop_insn_first_p (insn, giv->insn))))
10178 return 0;
10180 return 1;
10183 /* If BL appears in X (part of the pattern of INSN), see if we can
10184 eliminate its use. If so, return 1. If not, return 0.
10186 If BIV does not appear in X, return 1.
10188 If ELIMINATE_P is nonzero, actually do the elimination.
10189 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
10190 Depending on how many items have been moved out of the loop, it
10191 will either be before INSN (when WHERE_INSN is nonzero) or at the
10192 start of the loop (when WHERE_INSN is zero). */
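/* Illustrative sketch (not from the original source): if the giv
   g = biv * 4 + base has been reduced to a register G_REG, a test
   such as "biv < 100" can be rewritten as "G_REG < tem", where tem
   is a new register loaded with 100 * 4 + base ahead of the insn
   (cf. the COMPARE case below).  */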
10194 static int
10195 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
10196 struct iv_class *bl, int eliminate_p,
10197 basic_block where_bb, rtx where_insn)
10199 enum rtx_code code = GET_CODE (x);
10200 rtx reg = bl->biv->dest_reg;
10201 enum machine_mode mode = GET_MODE (reg);
10202 struct induction *v;
10203 rtx arg, tem;
10204 #ifdef HAVE_cc0
10205 rtx new;
10206 #endif
10207 int arg_operand;
10208 const char *fmt;
10209 int i, j;
10211 switch (code)
10213 case REG:
10214 /* If we haven't already been able to do something with this BIV,
10215 we can't eliminate it. */
10216 if (x == reg)
10217 return 0;
10218 return 1;
10220 case SET:
10221 /* If this sets the BIV, it is not a problem. */
10222 if (SET_DEST (x) == reg)
10223 return 1;
10225 /* If this is an insn that defines a giv, it is also ok because
10226 it will go away when the giv is reduced. */
10227 for (v = bl->giv; v; v = v->next_iv)
10228 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
10229 return 1;
10231 #ifdef HAVE_cc0
10232 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
10234 /* Can replace with any giv that was reduced and
10235 that has (MULT_VAL != 0) and (ADD_VAL == 0).
10236 Require a constant for MULT_VAL, so we know it's nonzero.
10237 ??? We disable this optimization to avoid potential
10238 overflows. */
10240 for (v = bl->giv; v; v = v->next_iv)
10241 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
10242 && v->add_val == const0_rtx
10243 && ! v->ignore && ! v->maybe_dead && v->always_computable
10244 && v->mode == mode
10245 && 0)
10247 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10248 continue;
10250 if (! eliminate_p)
10251 return 1;
10253 /* If the giv has the opposite direction of change,
10254 then reverse the comparison. */
10255 if (INTVAL (v->mult_val) < 0)
10256 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
10257 const0_rtx, v->new_reg);
10258 else
10259 new = v->new_reg;
10261 /* We can probably test that giv's reduced reg. */
10262 if (validate_change (insn, &SET_SRC (x), new, 0))
10263 return 1;
10266 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
10267 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
10268 Require a constant for MULT_VAL, so we know it's nonzero.
10269 ??? Do this only if ADD_VAL is a pointer to avoid a potential
10270 overflow problem. */
10272 for (v = bl->giv; v; v = v->next_iv)
10273 if (GET_CODE (v->mult_val) == CONST_INT
10274 && v->mult_val != const0_rtx
10275 && ! v->ignore && ! v->maybe_dead && v->always_computable
10276 && v->mode == mode
10277 && (GET_CODE (v->add_val) == SYMBOL_REF
10278 || GET_CODE (v->add_val) == LABEL_REF
10279 || GET_CODE (v->add_val) == CONST
10280 || (REG_P (v->add_val)
10281 && REG_POINTER (v->add_val))))
10283 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10284 continue;
10286 if (! eliminate_p)
10287 return 1;
10289 /* If the giv has the opposite direction of change,
10290 then reverse the comparison. */
10291 if (INTVAL (v->mult_val) < 0)
10292 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
10293 v->new_reg);
10294 else
10295 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
10296 copy_rtx (v->add_val));
10298 /* Replace biv with the giv's reduced register. */
10299 update_reg_last_use (v->add_val, insn);
10300 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10301 return 1;
10303 /* Insn doesn't support that constant or invariant. Copy it
10304 into a register (it will be a loop invariant). */
10305 tem = gen_reg_rtx (GET_MODE (v->new_reg));
10307 loop_insn_emit_before (loop, 0, where_insn,
10308 gen_move_insn (tem,
10309 copy_rtx (v->add_val)));
10311 /* Substitute the new register for its invariant value in
10312 the compare expression. */
10313 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
10314 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10315 return 1;
10318 #endif
10319 break;
10321 case COMPARE:
10322 case EQ: case NE:
10323 case GT: case GE: case GTU: case GEU:
10324 case LT: case LE: case LTU: case LEU:
10325 /* See if either argument is the biv. */
10326 if (XEXP (x, 0) == reg)
10327 arg = XEXP (x, 1), arg_operand = 1;
10328 else if (XEXP (x, 1) == reg)
10329 arg = XEXP (x, 0), arg_operand = 0;
10330 else
10331 break;
10333 if (GET_CODE (arg) != CONST_INT)
10334 return 0;
10336 /* Unless we're dealing with an equality comparison, if we can't
10337 determine that the original biv doesn't wrap, then we must not
10338 apply the transformation. */
10339 /* ??? Actually, what we must do is verify that the transformed
10340 giv doesn't wrap. But the general case of this transformation
10341 was disabled long ago due to wrapping problems, and there's no
10342 point reviving it this close to end-of-life for loop.c. The
10343 only case still enabled is known (via the check on add_val) to
10344 be pointer arithmetic, which in theory never overflows for
10345 valid programs. */
10346 /* Without lifetime analysis, we don't know how COMPARE will be
10347 used, so we must assume the worst. */
10348 if (code != EQ && code != NE && biv_may_wrap_p (loop, bl))
10349 return 0;
10351 /* Try to replace with any giv that has constant positive mult_val
10352 and a pointer add_val. */
10353 for (v = bl->giv; v; v = v->next_iv)
10354 if (GET_CODE (v->mult_val) == CONST_INT
10355 && INTVAL (v->mult_val) > 0
10356 && (GET_CODE (v->add_val) == SYMBOL_REF
10357 || GET_CODE (v->add_val) == LABEL_REF
10358 || GET_CODE (v->add_val) == CONST
10359 || (REG_P (v->add_val) && REG_POINTER (v->add_val)))
10360 && ! v->ignore && ! v->maybe_dead && v->always_computable
10361 && v->mode == mode)
10363 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10364 continue;
10366 if (! eliminate_p)
10367 return 1;
10369 /* Replace biv with the giv's reduced reg. */
10370 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
10372 /* Load the value into a register. */
10373 tem = gen_reg_rtx (mode);
10374 loop_iv_add_mult_emit_before (loop, arg, v->mult_val, v->add_val,
10375 tem, where_bb, where_insn);
10377 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10379 if (apply_change_group ())
10380 return 1;
10383 /* If we get here, the biv can't be eliminated. */
10384 return 0;
10386 case MEM:
10387 /* If this address is a DEST_ADDR giv, it doesn't matter if the
10388 biv is used in it, since it will be replaced. */
10389 for (v = bl->giv; v; v = v->next_iv)
10390 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
10391 return 1;
10392 break;
10394 default:
10395 break;
10398 /* See if any subexpression fails elimination. */
10399 fmt = GET_RTX_FORMAT (code);
10400 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
10402 switch (fmt[i])
10404 case 'e':
10405 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
10406 eliminate_p, where_bb, where_insn))
10407 return 0;
10408 break;
10410 case 'E':
10411 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10412 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
10413 eliminate_p, where_bb, where_insn))
10414 return 0;
10415 break;
10419 return 1;
10422 /* Return nonzero if the last use of REG
10423 is in an insn following INSN in the same basic block. */
10425 static int
10426 last_use_this_basic_block (rtx reg, rtx insn)
10428 rtx n;
10429 for (n = insn;
10430 n && !LABEL_P (n) && !JUMP_P (n);
10431 n = NEXT_INSN (n))
10433 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
10434 return 1;
10436 return 0;
10439 /* Called via `note_stores' to record the initial value of a biv. Here we
10440 just record the location of the set and process it later. */
10442 static void
10443 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
10445 struct loop_ivs *ivs = (struct loop_ivs *) data;
10446 struct iv_class *bl;
10448 if (!REG_P (dest)
10449 || REGNO (dest) >= ivs->n_regs
10450 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
10451 return;
10453 bl = REG_IV_CLASS (ivs, REGNO (dest));
10455 /* If this is the first set found, record it. */
10456 if (bl->init_insn == 0)
10458 bl->init_insn = note_insn;
10459 bl->init_set = set;
10463 /* If any of the registers in X are "old" and currently have a last use earlier
10464 than INSN, update them to have a last use of INSN. Their actual last use
10465 will be the previous insn but it will not have a valid uid_luid so we can't
10466 use it. X must be a source expression only. */
10468 static void
10469 update_reg_last_use (rtx x, rtx insn)
10471 /* Check for the case where INSN does not have a valid luid. In this case,
10472 there is no need to modify the regno_last_uid, as this can only happen
10473 when code is inserted after the loop_end to set a pseudo's final value,
10474 and hence this insn will never be the last use of x.
10475 ???? This comment is not correct. See for example loop_givs_reduce.
10476 This may insert an insn before another new insn. */
10477 if (REG_P (x) && REGNO (x) < max_reg_before_loop
10478 && INSN_UID (insn) < max_uid_for_loop
10479 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
10481 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
10483 else
10485 int i, j;
10486 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
10487 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
10489 if (fmt[i] == 'e')
10490 update_reg_last_use (XEXP (x, i), insn);
10491 else if (fmt[i] == 'E')
10492 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10493 update_reg_last_use (XVECEXP (x, i, j), insn);
10498 /* Similar to rtlanal.c:get_condition, except that we also put an
10499 invariant last unless both operands are invariants. */
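/* Illustrative example (not from the original source): a condition
   such as (lt (mem invariant) (reg i)) is returned as
   (gt (reg i) (mem invariant)), so callers may assume that when
   exactly one operand is invariant it is XEXP (cond, 1).  */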
10501 static rtx
10502 get_condition_for_loop (const struct loop *loop, rtx x)
10504 rtx comparison = get_condition (x, (rtx*) 0, false, true);
10506 if (comparison == 0
10507 || ! loop_invariant_p (loop, XEXP (comparison, 0))
10508 || loop_invariant_p (loop, XEXP (comparison, 1)))
10509 return comparison;
10511 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
10512 XEXP (comparison, 1), XEXP (comparison, 0));
10515 /* Scan the function and determine whether it has indirect (computed) jumps.
10517 This is taken mostly from flow.c; similar code exists elsewhere
10518 in the compiler. It may be useful to put this into rtlanal.c. */
10519 static int
10520 indirect_jump_in_function_p (rtx start)
10522 rtx insn;
10524 for (insn = start; insn; insn = NEXT_INSN (insn))
10525 if (computed_jump_p (insn))
10526 return 1;
10528 return 0;
10531 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
10532 documentation for LOOP_MEMS for the definition of `appropriate'.
10533 This function is called from prescan_loop via for_each_rtx. */
10535 static int
10536 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
10538 struct loop_info *loop_info = data;
10539 int i;
10540 rtx m = *mem;
10542 if (m == NULL_RTX)
10543 return 0;
10545 switch (GET_CODE (m))
10547 case MEM:
10548 break;
10550 case CLOBBER:
10551 /* We're not interested in MEMs that are only clobbered. */
10552 return -1;
10554 case CONST_DOUBLE:
10555 /* We're not interested in the MEM associated with a
10556 CONST_DOUBLE, so there's no need to traverse into this. */
10557 return -1;
10559 case EXPR_LIST:
10560 /* We're not interested in any MEMs that only appear in notes. */
10561 return -1;
10563 default:
10564 /* This is not a MEM. */
10565 return 0;
10568 /* See if we've already seen this MEM. */
10569 for (i = 0; i < loop_info->mems_idx; ++i)
10570 if (rtx_equal_p (m, loop_info->mems[i].mem))
10572 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
10573 loop_info->mems[i].mem = m;
10574 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
10575 /* The modes of the two memory accesses are different. If
10576 this happens, something tricky is going on, and we just
10577 don't optimize accesses to this MEM. */
10578 loop_info->mems[i].optimize = 0;
10580 return 0;
10583 /* Resize the array, if necessary. */
10584 if (loop_info->mems_idx == loop_info->mems_allocated)
10586 if (loop_info->mems_allocated != 0)
10587 loop_info->mems_allocated *= 2;
10588 else
10589 loop_info->mems_allocated = 32;
10591 loop_info->mems = xrealloc (loop_info->mems,
10592 loop_info->mems_allocated * sizeof (loop_mem_info));
10595 /* Actually insert the MEM. */
10596 loop_info->mems[loop_info->mems_idx].mem = m;
10597 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
10598 because we can't put it in a register. We still store it in the
10599 table, though, so that if we see the same address later, but in a
10600 non-BLK mode, we'll not think we can optimize it at that point. */
10601 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
10602 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
10603 ++loop_info->mems_idx;
10605 return 0;
10609 /* Allocate REGS->ARRAY or reallocate it if it is too small.
10611 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
10612 register that is modified by an insn between FROM and TO. If the
10613 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
10614 more, stop incrementing it, to avoid overflow.
10616 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
10617 register I is used, if it is only used once. Otherwise, it is set
10618 to 0 (for no uses) or const0_rtx for more than one use. This
10619 parameter may be zero, in which case this processing is not done.
10621 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
10622 optimize register I. */
10624 static void
10625 loop_regs_scan (const struct loop *loop, int extra_size)
10627 struct loop_regs *regs = LOOP_REGS (loop);
10628 int old_nregs;
10629 /* last_set[n] is nonzero iff reg n has been set in the current
10630 basic block. In that case, it is the insn that last set reg n. */
10631 rtx *last_set;
10632 rtx insn;
10633 int i;
10635 old_nregs = regs->num;
10636 regs->num = max_reg_num ();
10638 /* Grow the regs array if not allocated or too small. */
10639 if (regs->num >= regs->size)
10641 regs->size = regs->num + extra_size;
10643 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
10645 /* Zero the new elements. */
10646 memset (regs->array + old_nregs, 0,
10647 (regs->size - old_nregs) * sizeof (*regs->array));
10650 /* Clear previously scanned fields but do not clear n_times_set. */
10651 for (i = 0; i < old_nregs; i++)
10653 regs->array[i].set_in_loop = 0;
10654 regs->array[i].may_not_optimize = 0;
10655 regs->array[i].single_usage = NULL_RTX;
10658 last_set = xcalloc (regs->num, sizeof (rtx));
10660 /* Scan the loop, recording register usage. */
10661 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10662 insn = NEXT_INSN (insn))
10664 if (INSN_P (insn))
10666 /* Record registers that have exactly one use. */
10667 find_single_use_in_loop (regs, insn, PATTERN (insn));
10669 /* Include uses in REG_EQUAL notes. */
10670 if (REG_NOTES (insn))
10671 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
10673 if (GET_CODE (PATTERN (insn)) == SET
10674 || GET_CODE (PATTERN (insn)) == CLOBBER)
10675 count_one_set (regs, insn, PATTERN (insn), last_set);
10676 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
10678 int i;
10679 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
10680 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
10681 last_set);
10685 if (LABEL_P (insn) || JUMP_P (insn))
10686 memset (last_set, 0, regs->num * sizeof (rtx));
10688 /* Invalidate all registers used for function argument passing.
10689 We check rtx_varies_p for the same reason as below, to allow
10690 optimizing PIC calculations. */
10691 if (CALL_P (insn))
10693 rtx link;
10694 for (link = CALL_INSN_FUNCTION_USAGE (insn);
10695 link;
10696 link = XEXP (link, 1))
10698 rtx op, reg;
10700 if (GET_CODE (op = XEXP (link, 0)) == USE
10701 && REG_P (reg = XEXP (op, 0))
10702 && rtx_varies_p (reg, 1))
10703 regs->array[REGNO (reg)].may_not_optimize = 1;
10708 /* Invalidate all hard registers clobbered by calls. With one exception:
10709 a call-clobbered PIC register is still function-invariant for our
10710 purposes, since we can hoist any PIC calculations out of the loop.
10711 Thus the call to rtx_varies_p. */
10712 if (LOOP_INFO (loop)->has_call)
10713 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
10714 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
10715 && rtx_varies_p (regno_reg_rtx[i], 1))
10717 regs->array[i].may_not_optimize = 1;
10718 regs->array[i].set_in_loop = 1;
10721 #ifdef AVOID_CCMODE_COPIES
10722 /* Don't try to move insns which set CC registers if we should not
10723 create CCmode register copies. */
10724 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
10725 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
10726 regs->array[i].may_not_optimize = 1;
10727 #endif
10729 /* Set regs->array[I].n_times_set for the new registers. */
10730 for (i = old_nregs; i < regs->num; i++)
10731 regs->array[i].n_times_set = regs->array[i].set_in_loop;
10733 free (last_set);
10736 /* Returns the number of real INSNs in the LOOP. */
10738 static int
10739 count_insns_in_loop (const struct loop *loop)
10741 int count = 0;
10742 rtx insn;
10744 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10745 insn = NEXT_INSN (insn))
10746 if (INSN_P (insn))
10747 ++count;
10749 return count;
10752 /* Move MEMs into registers for the duration of the loop. */
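/* Illustrative sketch of the overall effect (not from the original
   source): when *P has a loop-invariant address and is safe to
   hoist,

       for (;;) { x += *p; *p = x; }

   becomes

       r = *p;  for (;;) { x += r; r = x; }  *p = r;

   with the trailing store emitted only if the MEM is written in the
   loop.  */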
10754 static void
10755 load_mems (const struct loop *loop)
10757 struct loop_info *loop_info = LOOP_INFO (loop);
10758 struct loop_regs *regs = LOOP_REGS (loop);
10759 int maybe_never = 0;
10760 int i;
10761 rtx p, prev_ebb_head;
10762 rtx label = NULL_RTX;
10763 rtx end_label;
10764 /* Nonzero if the next instruction may never be executed. */
10765 int next_maybe_never = 0;
10766 unsigned int last_max_reg = max_reg_num ();
10768 if (loop_info->mems_idx == 0)
10769 return;
10771 /* We cannot use next_label here because it skips over normal insns. */
10772 end_label = next_nonnote_insn (loop->end);
10773 if (end_label && !LABEL_P (end_label))
10774 end_label = NULL_RTX;
10776 /* Check to see if it's possible that some instructions in the loop are
10777 never executed. Also check if there is a goto out of the loop other
10778 than right after the end of the loop. */
10779 for (p = next_insn_in_loop (loop, loop->scan_start);
10780 p != NULL_RTX;
10781 p = next_insn_in_loop (loop, p))
10783 if (LABEL_P (p))
10784 maybe_never = 1;
10785 else if (JUMP_P (p)
10786 /* If we enter the loop in the middle, and scan
10787 around to the beginning, don't set maybe_never
10788 for that. This must be an unconditional jump,
10789 otherwise the code at the top of the loop might
10790 never be executed. Unconditional jumps are
10791 followed by a barrier and then the loop end. */
10792 && ! (JUMP_P (p)
10793 && JUMP_LABEL (p) == loop->top
10794 && NEXT_INSN (NEXT_INSN (p)) == loop->end
10795 && any_uncondjump_p (p)))
10797 /* If this is a jump outside of the loop but not right
10798 after the end of the loop, we would have to emit new fixup
10799 sequences for each such label. */
10800 if (/* If we can't tell where control might go when this
10801 JUMP_INSN is executed, we must be conservative. */
10802 !JUMP_LABEL (p)
10803 || (JUMP_LABEL (p) != end_label
10804 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
10805 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
10806 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
10807 return;
10809 if (!any_condjump_p (p))
10810 /* Something complicated. */
10811 maybe_never = 1;
10812 else
10813 /* If there are any more instructions in the loop, they
10814 might not be reached. */
10815 next_maybe_never = 1;
10817 else if (next_maybe_never)
10818 maybe_never = 1;
10821 /* Find start of the extended basic block that enters the loop. */
10822 for (p = loop->start;
10823 PREV_INSN (p) && !LABEL_P (p);
10824 p = PREV_INSN (p))
10826 prev_ebb_head = p;
10828 cselib_init (true);
10830 /* Build table of mems that get set to constant values before the
10831 loop. */
10832 for (; p != loop->start; p = NEXT_INSN (p))
10833 cselib_process_insn (p);
10835 /* Actually move the MEMs. */
10836 for (i = 0; i < loop_info->mems_idx; ++i)
10838 regset_head load_copies;
10839 regset_head store_copies;
10840 int written = 0;
10841 rtx reg;
10842 rtx mem = loop_info->mems[i].mem;
10843 rtx mem_list_entry;
10845 if (MEM_VOLATILE_P (mem)
10846 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
10847 /* There's no telling whether or not MEM is modified. */
10848 loop_info->mems[i].optimize = 0;
10850 /* Go through the MEMs written to in the loop to see if this
10851 one is aliased by one of them. */
10852 mem_list_entry = loop_info->store_mems;
10853 while (mem_list_entry)
10855 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
10856 written = 1;
10857 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
10858 mem, rtx_varies_p))
10860 /* MEM is indeed aliased by this store. */
10861 loop_info->mems[i].optimize = 0;
10862 break;
10864 mem_list_entry = XEXP (mem_list_entry, 1);
10867 if (flag_float_store && written
10868 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
10869 loop_info->mems[i].optimize = 0;
10871 /* If this MEM is written to, we must be sure that there
10872 are no reads from another MEM that aliases this one. */
10873 if (loop_info->mems[i].optimize && written)
10875 int j;
10877 for (j = 0; j < loop_info->mems_idx; ++j)
10879 if (j == i)
10880 continue;
10881 else if (true_dependence (mem,
10882 VOIDmode,
10883 loop_info->mems[j].mem,
10884 rtx_varies_p))
10886 /* It's not safe to hoist loop_info->mems[i] out of
10887 the loop because writes to it might not be
10888 seen by reads from loop_info->mems[j]. */
10889 loop_info->mems[i].optimize = 0;
10890 break;
10895 if (maybe_never && may_trap_p (mem))
10896 /* We can't access the MEM outside the loop; it might
10897 cause a trap that wouldn't have happened otherwise. */
10898 loop_info->mems[i].optimize = 0;
10900 if (!loop_info->mems[i].optimize)
10901 /* We thought we were going to lift this MEM out of the
10902 loop, but later discovered that we could not. */
10903 continue;
10905 INIT_REG_SET (&load_copies);
10906 INIT_REG_SET (&store_copies);
10908 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
10909 order to keep scan_loop from moving stores to this MEM
10910 out of the loop just because this REG is neither a
10911 user-variable nor used in the loop test. */
10912 reg = gen_reg_rtx (GET_MODE (mem));
10913 REG_USERVAR_P (reg) = 1;
10914 loop_info->mems[i].reg = reg;
10916 /* Now, replace all references to the MEM with the
10917 corresponding pseudos. */
10918 maybe_never = 0;
10919 for (p = next_insn_in_loop (loop, loop->scan_start);
10920 p != NULL_RTX;
10921 p = next_insn_in_loop (loop, p))
10923 if (INSN_P (p))
10925 rtx set;
10927 set = single_set (p);
10929 /* See if this copies the mem into a register that isn't
10930 modified afterwards. We'll try to do copy propagation
10931 a little further on. */
10932 if (set
10933 /* @@@ This test is _way_ too conservative. */
10934 && ! maybe_never
10935 && REG_P (SET_DEST (set))
10936 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
10937 && REGNO (SET_DEST (set)) < last_max_reg
10938 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
10939 && rtx_equal_p (SET_SRC (set), mem))
10940 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
10942 /* See if this copies the mem from a register that isn't
10943 modified afterwards. We'll try to remove the
10944 redundant copy later on by doing a little register
10945 renaming and copy propagation. This will help
10946 to untangle things for the BIV detection code. */
10947 if (set
10948 && ! maybe_never
10949 && REG_P (SET_SRC (set))
10950 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
10951 && REGNO (SET_SRC (set)) < last_max_reg
10952 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
10953 && rtx_equal_p (SET_DEST (set), mem))
10954 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
10956 /* If this is a call which uses / clobbers this memory
10957 location, we must not change the interface here. */
10958 if (CALL_P (p)
10959 && reg_mentioned_p (loop_info->mems[i].mem,
10960 CALL_INSN_FUNCTION_USAGE (p)))
10961 {
10962 cancel_changes (0);
10963 loop_info->mems[i].optimize = 0;
10964 break;
10965 }
10966 else
10967 /* Replace the memory reference with the shadow register. */
10968 replace_loop_mems (p, loop_info->mems[i].mem,
10969 loop_info->mems[i].reg, written);
10970 }
10972 if (LABEL_P (p)
10973 || JUMP_P (p))
10974 maybe_never = 1;
10975 }
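/* Editorial note: MAYBE_NEVER is cleared before the scan above and set
   once a label or jump is seen, i.e. once the remaining insns might not
   execute on every iteration; the copy-propagation candidates recorded
   above are only trusted while it is still zero.  */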
10977 if (! loop_info->mems[i].optimize)
10978 ; /* We found we couldn't do the replacement, so do nothing. */
10979 else if (! apply_change_group ())
10980 /* We couldn't replace all occurrences of the MEM. */
10981 loop_info->mems[i].optimize = 0;
10982 else
10983 {
10984 /* Load the memory immediately before LOOP->START, which is
10985 the NOTE_LOOP_BEG. */
10986 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
10987 rtx set;
10988 rtx best = mem;
10989 unsigned j;
10990 struct elt_loc_list *const_equiv = 0;
10991 reg_set_iterator rsi;
10993 if (e)
10994 {
10995 struct elt_loc_list *equiv;
10996 struct elt_loc_list *best_equiv = 0;
10997 for (equiv = e->locs; equiv; equiv = equiv->next)
10998 {
10999 if (CONSTANT_P (equiv->loc))
11000 const_equiv = equiv;
11001 else if (REG_P (equiv->loc)
11002 /* Extending hard register lifetimes causes a crash
11003 on SRC targets.  Doing so on non-SRC targets is
11004 probably also not a good idea, since we most
11005 probably have a pseudo-register equivalence as
11006 well. */
11007 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
11008 best_equiv = equiv;
11009 }
11010 /* Use the constant equivalence if that is cheap enough. */
11011 if (! best_equiv)
11012 best_equiv = const_equiv;
11013 else if (const_equiv
11014 && (rtx_cost (const_equiv->loc, SET)
11015 <= rtx_cost (best_equiv->loc, SET)))
11016 {
11017 best_equiv = const_equiv;
11018 const_equiv = 0;
11019 }
11021 /* If best_equiv is nonzero, we know that MEM is set to a
11022 constant or register before the loop. We will use this
11023 knowledge to initialize the shadow register with that
11024 constant or reg rather than by loading from MEM. */
11025 if (best_equiv)
11026 best = copy_rtx (best_equiv->loc);
11027 }
11029 set = gen_move_insn (reg, best);
11030 set = loop_insn_hoist (loop, set);
11031 if (REG_P (best))
11032 {
11033 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
11034 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
11035 {
11036 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
11037 break;
11038 }
11039 }
11041 if (const_equiv)
11042 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
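/* Editorial gloss: whichever equivalence is cheaper initializes the
   shadow register; when a register equivalence wins while a constant
   one also exists, e.g. "(set (reg 200) (reg 150))" with a
   (REG_EQUAL (const_int 42)) note, later passes can still exploit the
   known constant value.  */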
11044 if (written)
11045 {
11046 if (label == NULL_RTX)
11047 {
11048 label = gen_label_rtx ();
11049 emit_label_after (label, loop->end);
11050 }
11052 /* Store the memory immediately after END, which is
11053 the NOTE_LOOP_END. */
11054 set = gen_move_insn (copy_rtx (mem), reg);
11055 loop_insn_emit_after (loop, 0, label, set);
11056 }
11058 if (loop_dump_stream)
11059 {
11060 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
11061 REGNO (reg), (written ? "r/w" : "r/o"));
11062 print_rtl (loop_dump_stream, mem);
11063 fputc ('\n', loop_dump_stream);
11064 }
11066 /* Attempt a bit of copy propagation. This helps untangle the
11067 data flow, and enables {basic,general}_induction_var to find
11068 more bivs/givs. */
11069 EXECUTE_IF_SET_IN_REG_SET
11070 (&load_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11071 {
11072 try_copy_prop (loop, reg, j);
11073 }
11074 CLEAR_REG_SET (&load_copies);
11076 EXECUTE_IF_SET_IN_REG_SET
11077 (&store_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11078 {
11079 try_swap_copy_prop (loop, reg, j);
11080 }
11081 CLEAR_REG_SET (&store_copies);
11082 }
11083 }
11085 /* Now, we need to replace all references to the previous exit
11086 label with the new one. */
11087 if (label != NULL_RTX && end_label != NULL_RTX)
11088 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
11089 if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
11090 redirect_jump (p, label, false);
11092 cselib_finish ();
11093 }
11095 /* For communication between note_reg_stored and its caller. */
11096 struct note_reg_stored_arg
11097 {
11098 int set_seen;
11099 rtx reg;
11100 };
11102 /* Called via note_stores.  If X, which is being written to, equals
11103 the register in ARG, record that in ARG->set_seen. */
11104 static void
11105 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
11106 {
11107 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
11108 if (t->reg == x)
11109 t->set_seen = 1;
11110 }
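/* Editorial note: note_stores invokes the callback above once for each
   destination stored to by a single pattern; try_copy_prop uses it below
   to detect the point where REPLACEMENT itself is overwritten, after
   which substituting it for REGNO would no longer be sound.  */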
11112 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
11113 There must be exactly one insn that sets this pseudo; it will be
11114 deleted if all replacements succeed and we can prove that the register
11115 is not used after the loop. */
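/* Illustrative before/after (editorial; register numbers are made up):

       (set (reg 150) (reg 200))   ;; init_insn, the only set of reg 150
       ... uses of (reg 150) ...   ;; each rewritten to use (reg 200)

   If the substitution succeeds everywhere, the first use of the reg was
   the store, and its last use was replaced, then init_insn (or the whole
   libcall sequence ending in it) is deleted below.  */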
11117 static void
11118 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
11119 {
11120 /* This is the reg that we are copying from. */
11121 rtx reg_rtx = regno_reg_rtx[regno];
11122 rtx init_insn = 0;
11123 rtx insn;
11124 /* These help keep track of whether we replaced all uses of the reg. */
11125 int replaced_last = 0;
11126 int store_is_first = 0;
11128 for (insn = next_insn_in_loop (loop, loop->scan_start);
11129 insn != NULL_RTX;
11130 insn = next_insn_in_loop (loop, insn))
11131 {
11132 rtx set;
11134 /* Only substitute within one extended basic block from the initializing
11135 insn. */
11136 if (LABEL_P (insn) && init_insn)
11137 break;
11139 if (! INSN_P (insn))
11140 continue;
11142 /* Is this the initializing insn? */
11143 set = single_set (insn);
11144 if (set
11145 && REG_P (SET_DEST (set))
11146 && REGNO (SET_DEST (set)) == regno)
11147 {
11148 gcc_assert (!init_insn);
11150 init_insn = insn;
11151 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
11152 store_is_first = 1;
11153 }
11155 /* Only substitute after seeing the initializing insn. */
11156 if (init_insn && insn != init_insn)
11157 {
11158 struct note_reg_stored_arg arg;
11160 replace_loop_regs (insn, reg_rtx, replacement);
11161 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
11162 replaced_last = 1;
11164 /* Stop replacing when REPLACEMENT is modified. */
11165 arg.reg = replacement;
11166 arg.set_seen = 0;
11167 note_stores (PATTERN (insn), note_reg_stored, &arg);
11168 if (arg.set_seen)
11169 {
11170 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
11172 /* It is possible that we've turned a previously valid REG_EQUAL note
11173 into an invalid one: REGNO has been changed to REPLACEMENT, and since
11174 REPLACEMENT is modified here, the note would now mean something else. */
11175 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
11176 remove_note (insn, note);
11177 break;
11178 }
11179 }
11180 }
11181 gcc_assert (init_insn);
11182 if (apply_change_group ())
11183 {
11184 if (loop_dump_stream)
11185 fprintf (loop_dump_stream, " Replaced reg %d", regno);
11186 if (store_is_first && replaced_last)
11187 {
11188 rtx first;
11189 rtx retval_note;
11191 /* Assume we're just deleting INIT_INSN. */
11192 first = init_insn;
11193 /* Look for REG_RETVAL note. If we're deleting the end of
11194 the libcall sequence, the whole sequence can go. */
11195 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
11196 /* If we found a REG_RETVAL note, find the first instruction
11197 in the sequence. */
11198 if (retval_note)
11199 first = XEXP (retval_note, 0);
11201 /* Delete the instructions. */
11202 loop_delete_insns (first, init_insn);
11203 }
11204 if (loop_dump_stream)
11205 fprintf (loop_dump_stream, ".\n");
11206 }
11207 }
11209 /* Replace all the instructions from FIRST up to and including LAST
11210 with NOTE_INSN_DELETED notes. */
11212 static void
11213 loop_delete_insns (rtx first, rtx last)
11214 {
11215 while (1)
11216 {
11217 if (loop_dump_stream)
11218 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
11219 INSN_UID (first));
11220 delete_insn (first);
11222 /* If this was the LAST instruction we're supposed to delete,
11223 we're done. */
11224 if (first == last)
11225 break;
11227 first = NEXT_INSN (first);
11228 }
11229 }
11231 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
11232 loop LOOP if the order of the sets of these registers can be
11233 swapped. There must be exactly one insn within the loop that sets
11234 this pseudo followed immediately by a move insn that sets
11235 REPLACEMENT with REGNO. */
11236 static void
11237 try_swap_copy_prop (const struct loop *loop, rtx replacement,
11238 unsigned int regno)
11239 {
11240 rtx insn;
11241 rtx set = NULL_RTX;
11242 unsigned int new_regno;
11244 new_regno = REGNO (replacement);
11246 for (insn = next_insn_in_loop (loop, loop->scan_start);
11247 insn != NULL_RTX;
11248 insn = next_insn_in_loop (loop, insn))
11249 {
11250 /* Search for the insn that copies REGNO to NEW_REGNO. */
11251 if (INSN_P (insn)
11252 && (set = single_set (insn))
11253 && REG_P (SET_DEST (set))
11254 && REGNO (SET_DEST (set)) == new_regno
11255 && REG_P (SET_SRC (set))
11256 && REGNO (SET_SRC (set)) == regno)
11257 break;
11258 }
11260 if (insn != NULL_RTX)
11261 {
11262 rtx prev_insn;
11263 rtx prev_set;
11265 /* Some DEF-USE info would come in handy here to make this
11266 function more general. For now, just check the previous insn
11267 which is the most likely candidate for setting REGNO. */
11269 prev_insn = PREV_INSN (insn);
11271 if (INSN_P (prev_insn)
11272 && (prev_set = single_set (prev_insn))
11273 && REG_P (SET_DEST (prev_set))
11274 && REGNO (SET_DEST (prev_set)) == regno)
11275 {
11276 /* We have:
11277 (set (reg regno) (expr))
11278 (set (reg new_regno) (reg regno))
11280 so try converting this to:
11281 (set (reg new_regno) (expr))
11282 (set (reg regno) (reg new_regno))
11284 The former construct is often generated when a global
11285 variable used for an induction variable is shadowed by a
11286 register (NEW_REGNO). The latter construct improves the
11287 chances of GIV replacement and BIV elimination. */
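/* Editorial gloss: this situation typically arises from source like
   "global = global + 1" inside the loop once GLOBAL's memory has been
   shadowed by NEW_REGNO; after the swap, the increment sets NEW_REGNO
   directly, a form that basic_induction_var can recognize as a biv.  */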
11289 validate_change (prev_insn, &SET_DEST (prev_set),
11290 replacement, 1);
11291 validate_change (insn, &SET_DEST (set),
11292 SET_SRC (set), 1);
11293 validate_change (insn, &SET_SRC (set),
11294 replacement, 1);
11296 if (apply_change_group ())
11297 {
11298 if (loop_dump_stream)
11299 fprintf (loop_dump_stream,
11300 " Swapped set of reg %d at %d with reg %d at %d.\n",
11301 regno, INSN_UID (insn),
11302 new_regno, INSN_UID (prev_insn));
11304 /* Update first use of REGNO. */
11305 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
11306 REGNO_FIRST_UID (regno) = INSN_UID (insn);
11308 /* Now perform copy propagation to hopefully
11309 remove all uses of REGNO within the loop. */
11310 try_copy_prop (loop, replacement, regno);
11311 }
11312 }
11313 }
11314 }
11316 /* Worker function for find_mem_in_note, called via for_each_rtx. */
11318 static int
11319 find_mem_in_note_1 (rtx *x, void *data)
11320 {
11321 if (*x != NULL_RTX && MEM_P (*x))
11322 {
11323 rtx *res = (rtx *) data;
11324 *res = *x;
11325 return 1;
11326 }
11327 return 0;
11328 }
11330 /* Returns the first MEM found in NOTE by depth-first search. */
11332 static rtx
11333 find_mem_in_note (rtx note)
11334 {
11335 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
11336 return note;
11337 return NULL_RTX;
11338 }
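/* Editorial note on the for_each_rtx protocol: a positive return value
   from the callback stops the walk and is propagated (find_mem_in_note_1
   returns 1 after stashing the MEM), whereas -1 merely tells for_each_rtx
   to skip the sub-expressions of the current rtx and continue, as
   replace_loop_mem does for CONST_DOUBLE below.  */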
11340 /* Replace MEM with its associated pseudo register. This function is
11341 called from load_mems via for_each_rtx. DATA is actually a pointer
11342 to a structure describing the instruction currently being scanned
11343 and the MEM we are currently replacing. */
11345 static int
11346 replace_loop_mem (rtx *mem, void *data)
11347 {
11348 loop_replace_args *args = (loop_replace_args *) data;
11349 rtx m = *mem;
11351 if (m == NULL_RTX)
11352 return 0;
11354 switch (GET_CODE (m))
11355 {
11356 case MEM:
11357 break;
11359 case CONST_DOUBLE:
11360 /* We're not interested in the MEM associated with a
11361 CONST_DOUBLE, so there's no need to traverse into one. */
11362 return -1;
11364 default:
11365 /* This is not a MEM. */
11366 return 0;
11367 }
11369 if (!rtx_equal_p (args->match, m))
11370 /* This is not the MEM we are currently replacing. */
11371 return 0;
11373 /* Actually replace the MEM. */
11374 validate_change (args->insn, mem, args->replacement, 1);
11376 return 0;
11377 }
11379 static void
11380 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
11381 {
11382 loop_replace_args args;
11384 args.insn = insn;
11385 args.match = mem;
11386 args.replacement = reg;
11388 for_each_rtx (&insn, replace_loop_mem, &args);
11390 /* If we hoist a mem write out of the loop, then REG_EQUAL
11391 notes referring to the mem are no longer valid. */
11392 if (written)
11393 {
11394 rtx note, sub;
11395 rtx *link;
11397 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
11398 {
11399 if (REG_NOTE_KIND (note) == REG_EQUAL
11400 && (sub = find_mem_in_note (note))
11401 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
11402 {
11403 /* Remove the note. */
11404 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
11405 break;
11406 }
11407 }
11408 }
11409 }
11411 /* Replace one register with another. Called through for_each_rtx; PX points
11412 to the rtx being scanned. DATA is actually a pointer to
11413 a structure of arguments. */
11415 static int
11416 replace_loop_reg (rtx *px, void *data)
11417 {
11418 rtx x = *px;
11419 loop_replace_args *args = (loop_replace_args *) data;
11421 if (x == NULL_RTX)
11422 return 0;
11424 if (x == args->match)
11425 validate_change (args->insn, px, args->replacement, 1);
11427 return 0;
11428 }
11430 static void
11431 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
11432 {
11433 loop_replace_args args;
11435 args.insn = insn;
11436 args.match = reg;
11437 args.replacement = replacement;
11439 for_each_rtx (&insn, replace_loop_reg, &args);
11440 }
11442 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
11443 (ignored in the interim). */
11445 static rtx
11446 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
11447 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
11448 rtx pattern)
11449 {
11450 return emit_insn_after (pattern, where_insn);
11451 }
11454 /* If WHERE_INSN is nonzero, emit insn for PATTERN before WHERE_INSN
11455 in basic block WHERE_BB (ignored in the interim) within the loop;
11456 otherwise, hoist PATTERN into the loop pre-header. */
11458 static rtx
11459 loop_insn_emit_before (const struct loop *loop,
11460 basic_block where_bb ATTRIBUTE_UNUSED,
11461 rtx where_insn, rtx pattern)
11462 {
11463 if (! where_insn)
11464 return loop_insn_hoist (loop, pattern);
11465 return emit_insn_before (pattern, where_insn);
11466 }
11469 /* Emit call insn for PATTERN before WHERE_INSN in basic block
11470 WHERE_BB (ignored in the interim) within the loop. */
11472 static rtx
11473 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
11474 basic_block where_bb ATTRIBUTE_UNUSED,
11475 rtx where_insn, rtx pattern)
11476 {
11477 return emit_call_insn_before (pattern, where_insn);
11478 }
11481 /* Hoist insn for PATTERN into the loop pre-header. */
11483 static rtx
11484 loop_insn_hoist (const struct loop *loop, rtx pattern)
11485 {
11486 return loop_insn_emit_before (loop, 0, loop->start, pattern);
11487 }
11490 /* Hoist call insn for PATTERN into the loop pre-header. */
11492 static rtx
11493 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
11494 {
11495 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
11496 }
11499 /* Sink insn for PATTERN after the loop end. */
11501 static rtx
11502 loop_insn_sink (const struct loop *loop, rtx pattern)
11503 {
11504 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
11505 }
11507 /* bl->final_value can be either a general_operand or a PLUS of a
11508 general_operand and a constant.  Emit a sequence of insns to load it into REG. */
11509 static rtx
11510 gen_load_of_final_value (rtx reg, rtx final_value)
11511 {
11512 rtx seq;
11513 start_sequence ();
11514 final_value = force_operand (final_value, reg);
11515 if (final_value != reg)
11516 emit_move_insn (reg, final_value);
11517 seq = get_insns ();
11518 end_sequence ();
11519 return seq;
11520 }
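/* Usage sketch (editorial; assumes the callers found elsewhere in this
   file when fixing up biv final values): the returned insn sequence is
   typically handed to one of the emit helpers below, e.g.

       seq = gen_load_of_final_value (reg, final_value);
       loop_insn_sink (loop, seq);

   so that the final value lands after the loop end.  */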
11522 /* If the loop has multiple exits, emit insn for PATTERN before the
11523 loop to ensure that it will always be executed no matter how the
11524 loop exits. Otherwise, emit the insn for PATTERN after the loop,
11525 since this is slightly more efficient. */
11527 static rtx
11528 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
11529 {
11530 if (loop->exit_count)
11531 return loop_insn_hoist (loop, pattern);
11532 else
11533 return loop_insn_sink (loop, pattern);
11534 }
11536 static void
11537 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
11538 {
11539 struct iv_class *bl;
11540 int iv_num = 0;
11542 if (! loop || ! file)
11543 return;
11545 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11546 iv_num++;
11548 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
11550 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11551 {
11552 loop_iv_class_dump (bl, file, verbose);
11553 fputc ('\n', file);
11554 }
11555 }
11558 static void
11559 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
11560 int verbose ATTRIBUTE_UNUSED)
11561 {
11562 struct induction *v;
11563 rtx incr;
11564 int i;
11566 if (! bl || ! file)
11567 return;
11569 fprintf (file, "IV class for reg %d, benefit %d\n",
11570 bl->regno, bl->total_benefit);
11572 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
11573 if (bl->initial_value)
11574 {
11575 fprintf (file, ", init val: ");
11576 print_simple_rtl (file, bl->initial_value);
11577 }
11578 if (bl->initial_test)
11579 {
11580 fprintf (file, ", init test: ");
11581 print_simple_rtl (file, bl->initial_test);
11582 }
11583 fputc ('\n', file);
11585 if (bl->final_value)
11586 {
11587 fprintf (file, " Final val: ");
11588 print_simple_rtl (file, bl->final_value);
11589 fputc ('\n', file);
11590 }
11592 if ((incr = biv_total_increment (bl)))
11593 {
11594 fprintf (file, " Total increment: ");
11595 print_simple_rtl (file, incr);
11596 fputc ('\n', file);
11597 }
11599 /* List the increments. */
11600 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
11601 {
11602 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
11603 print_simple_rtl (file, v->add_val);
11604 fputc ('\n', file);
11605 }
11607 /* List the givs. */
11608 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
11609 {
11610 fprintf (file, " Giv%d: insn %d, benefit %d, ",
11611 i, INSN_UID (v->insn), v->benefit);
11612 if (v->giv_type == DEST_ADDR)
11613 print_simple_rtl (file, v->mem);
11614 else
11615 print_simple_rtl (file, single_set (v->insn));
11616 fputc ('\n', file);
11617 }
11618 }
11621 static void
11622 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
11623 {
11624 if (! v || ! file)
11625 return;
11627 fprintf (file,
11628 "Biv %d: insn %d",
11629 REGNO (v->dest_reg), INSN_UID (v->insn));
11630 fprintf (file, " const ");
11631 print_simple_rtl (file, v->add_val);
11633 if (verbose && v->final_value)
11634 {
11635 fputc ('\n', file);
11636 fprintf (file, " final ");
11637 print_simple_rtl (file, v->final_value);
11638 }
11640 fputc ('\n', file);
11641 }
11644 static void
11645 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
11646 {
11647 if (! v || ! file)
11648 return;
11650 if (v->giv_type == DEST_REG)
11651 fprintf (file, "Giv %d: insn %d",
11652 REGNO (v->dest_reg), INSN_UID (v->insn));
11653 else
11654 fprintf (file, "Dest address: insn %d",
11655 INSN_UID (v->insn));
11657 fprintf (file, " src reg %d benefit %d",
11658 REGNO (v->src_reg), v->benefit);
11659 fprintf (file, " lifetime %d",
11660 v->lifetime);
11662 if (v->replaceable)
11663 fprintf (file, " replaceable");
11665 if (v->no_const_addval)
11666 fprintf (file, " ncav");
11668 if (v->ext_dependent)
11669 {
11670 switch (GET_CODE (v->ext_dependent))
11671 {
11672 case SIGN_EXTEND:
11673 fprintf (file, " ext se");
11674 break;
11675 case ZERO_EXTEND:
11676 fprintf (file, " ext ze");
11677 break;
11678 case TRUNCATE:
11679 fprintf (file, " ext tr");
11680 break;
11681 default:
11682 gcc_unreachable ();
11683 }
11684 }
11686 fputc ('\n', file);
11687 fprintf (file, " mult ");
11688 print_simple_rtl (file, v->mult_val);
11690 fputc ('\n', file);
11691 fprintf (file, " add ");
11692 print_simple_rtl (file, v->add_val);
11694 if (verbose && v->final_value)
11695 {
11696 fputc ('\n', file);
11697 fprintf (file, " final ");
11698 print_simple_rtl (file, v->final_value);
11699 }
11701 fputc ('\n', file);
11702 }
11705 void
11706 debug_ivs (const struct loop *loop)
11707 {
11708 loop_ivs_dump (loop, stderr, 1);
11709 }
11712 void
11713 debug_iv_class (const struct iv_class *bl)
11714 {
11715 loop_iv_class_dump (bl, stderr, 1);
11716 }
11719 void
11720 debug_biv (const struct induction *v)
11721 {
11722 loop_biv_dump (v, stderr, 1);
11723 }
11726 void
11727 debug_giv (const struct induction *v)
11728 {
11729 loop_giv_dump (v, stderr, 1);
11730 }
11733 #define LOOP_BLOCK_NUM_1(INSN) \
11734 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)
11736 /* The notes do not have an assigned block, so look at the next insn. */
11737 #define LOOP_BLOCK_NUM(INSN) \
11738 ((INSN) ? (NOTE_P (INSN) \
11739 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
11740 : LOOP_BLOCK_NUM_1 (INSN)) \
11741 : -1)
11743 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
11745 static void
11746 loop_dump_aux (const struct loop *loop, FILE *file,
11747 int verbose ATTRIBUTE_UNUSED)
11748 {
11749 rtx label;
11751 if (! loop || ! file || !BB_HEAD (loop->first))
11752 return;
11754 /* Print diagnostics to compare our concept of a loop with
11755 what the loop notes say. */
11756 if (! PREV_INSN (BB_HEAD (loop->first))
11757 || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
11758 || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
11759 != NOTE_INSN_LOOP_BEG)
11760 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
11761 INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
11762 if (! NEXT_INSN (BB_END (loop->last))
11763 || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
11764 || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
11765 != NOTE_INSN_LOOP_END)
11766 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
11767 INSN_UID (NEXT_INSN (BB_END (loop->last))));
11769 if (loop->start)
11770 {
11771 fprintf (file,
11772 ";; start %d (%d), end %d (%d)\n",
11773 LOOP_BLOCK_NUM (loop->start),
11774 LOOP_INSN_UID (loop->start),
11775 LOOP_BLOCK_NUM (loop->end),
11776 LOOP_INSN_UID (loop->end));
11777 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
11778 LOOP_BLOCK_NUM (loop->top),
11779 LOOP_INSN_UID (loop->top),
11780 LOOP_BLOCK_NUM (loop->scan_start),
11781 LOOP_INSN_UID (loop->scan_start));
11782 fprintf (file, ";; exit_count %d", loop->exit_count);
11783 if (loop->exit_count)
11784 {
11785 fputs (", labels:", file);
11786 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
11787 {
11788 fprintf (file, " %d ",
11789 LOOP_INSN_UID (XEXP (label, 0)));
11790 }
11791 }
11792 fputs ("\n", file);
11793 }
11794 }
11796 /* Call this function from the debugger to dump LOOP. */
11798 void
11799 debug_loop (const struct loop *loop)
11800 {
11801 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
11802 }
11804 /* Call this function from the debugger to dump LOOPS. */
11806 void
11807 debug_loops (const struct loops *loops)
11808 {
11809 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
11810 }
11812 static bool
11813 gate_handle_loop_optimize (void)
11814 {
11815 return (optimize > 0 && flag_loop_optimize);
11816 }
11818 /* Move constant computations out of loops. */
11819 static void
11820 rest_of_handle_loop_optimize (void)
11821 {
11822 int do_prefetch;
11824 /* CFG is no longer maintained up-to-date. */
11825 free_bb_for_insn ();
11826 profile_status = PROFILE_ABSENT;
11828 do_prefetch = flag_prefetch_loop_arrays ? LOOP_PREFETCH : 0;
11830 if (flag_rerun_loop_opt)
11831 {
11832 cleanup_barriers ();
11834 /* We only want to perform unrolling once. */
11835 loop_optimize (get_insns (), dump_file, 0);
11837 /* The first call to loop_optimize makes some instructions
11838 trivially dead. We delete those instructions now in the
11839 hope that doing so will make the heuristics in loop work
11840 better and possibly speed up compilation. */
11841 delete_trivially_dead_insns (get_insns (), max_reg_num ());
11843 /* The regscan pass is currently necessary as the alias
11844 analysis code depends on this information. */
11845 reg_scan (get_insns (), max_reg_num ());
11846 }
11847 cleanup_barriers ();
11848 loop_optimize (get_insns (), dump_file, do_prefetch);
11850 /* Loop can create trivially dead instructions. */
11851 delete_trivially_dead_insns (get_insns (), max_reg_num ());
11852 find_basic_blocks (get_insns ());
11853 }
11855 struct tree_opt_pass pass_loop_optimize =
11856 {
11857 "old-loop", /* name */
11858 gate_handle_loop_optimize, /* gate */
11859 rest_of_handle_loop_optimize, /* execute */
11860 NULL, /* sub */
11861 NULL, /* next */
11862 0, /* static_pass_number */
11863 TV_LOOP, /* tv_id */
11864 0, /* properties_required */
11865 0, /* properties_provided */
11866 0, /* properties_destroyed */
11867 0, /* todo_flags_start */
11868 TODO_dump_func |
11869 TODO_ggc_collect, /* todo_flags_finish */
11870 'L' /* letter */
11871 };