/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.

   Basic induction variables (BIVs) are pseudo registers that are set within
   a loop only by incrementing or decrementing their value.  General induction
   variables (GIVs) are pseudo registers whose value is a linear function
   of a basic induction variable.  BIVs are recognized by `basic_induction_var';
   GIVs by `general_induction_var'.

   Once induction variables are identified, strength reduction is applied to
   the general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */
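/* As a hypothetical illustration of those terms (not drawn from any
   particular test case), consider:

       for (i = 0; i < n; i++)
         a[i] = x * y;

   Here `x * y' is a loop-invariant computation that can be hoisted before
   the loop; `i' is a biv, since the only set of `i' within the loop is the
   increment; and the address of `a[i]', which equals `&a[0] + i * 4' for a
   4-byte element type, is a giv, being a linear function of the biv.
   Strength reduction replaces the multiplication implicit in that address
   with a pointer that is advanced by 4 on each iteration.  */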
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"
#include "ggc.h"
/* Get the loop info pointer of a loop.  */
#define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux)

/* Get a pointer to the loop movables structure.  */
#define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables)

/* Get a pointer to the loop registers structure.  */
#define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs)

/* Get a pointer to the loop induction variables structure.  */
#define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs)

/* Get the luid of an insn.  Catch the error of trying to reference the LUID
   of an insn added during loop, since these don't have LUIDs.  */

#define INSN_LUID(INSN)						\
  (INSN_UID (INSN) < max_uid_for_loop ? uid_luid[INSN_UID (INSN)] \
   : (abort (), -1))

#define REGNO_FIRST_LUID(REGNO)				\
  (REGNO_FIRST_UID (REGNO) < max_uid_for_loop		\
	? uid_luid[REGNO_FIRST_UID (REGNO)]		\
	: 0)
#define REGNO_LAST_LUID(REGNO)				\
  (REGNO_LAST_UID (REGNO) < max_uid_for_loop		\
	? uid_luid[REGNO_LAST_UID (REGNO)]		\
	: INT_MAX)
97 /* A "basic induction variable" or biv is a pseudo reg that is set
98 (within this loop) only by incrementing or decrementing it. */
99 /* A "general induction variable" or giv is a pseudo reg whose
100 value is a linear function of a biv. */
102 /* Bivs are recognized by `basic_induction_var';
103 Givs by `general_induction_var'. */
105 /* An enum for the two different types of givs, those that are used
106 as memory addresses and those that are calculated into registers. */
107 enum g_types
109 DEST_ADDR,
110 DEST_REG
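/* A hypothetical sketch of the distinction: in `a[i] = 0', the giv
   `&a[0] + i * 4' occurs only inside a memory address, so it is a
   DEST_ADDR giv; in `j = i * 4 + 7' followed by a use of `j', the giv
   is computed into the register `j', so it is a DEST_REG giv.  */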
/* A `struct induction' is created for every instruction that sets
   an induction variable (either a biv or a giv).  */

struct induction
{
  rtx insn;			/* The insn that sets a biv or giv */
  rtx new_reg;			/* New register, containing strength reduced
				   version of this giv.  */
  rtx src_reg;			/* Biv from which this giv is computed.
				   (If this is a biv, then this is the biv.)  */
  enum g_types giv_type;	/* Indicate whether DEST_ADDR or DEST_REG */
  rtx dest_reg;			/* Destination register for insn: this is the
				   register which was the biv or giv.
				   For a biv, this equals src_reg.
				   For a DEST_ADDR type giv, this is 0.  */
  rtx *location;		/* Place in the insn where this giv occurs.
				   If GIV_TYPE is DEST_REG, this is 0.  */
				/* For a biv, this is the place where add_val
				   was found.  */
  enum machine_mode mode;	/* The mode of this biv or giv */
  rtx mem;			/* For DEST_ADDR, the memory object.  */
  rtx mult_val;			/* Multiplicative factor for src_reg.  */
  rtx add_val;			/* Additive constant for that product.  */
  int benefit;			/* Gain from eliminating this insn.  */
  rtx final_value;		/* If the giv is used outside the loop, and its
				   final value could be calculated, it is put
				   here, and the giv is made replaceable.  Set
				   the giv to this value before the loop.  */
  unsigned combined_with;	/* The number of givs this giv has been
				   combined with.  If nonzero, this giv
				   cannot combine with any other giv.  */
  unsigned replaceable : 1;	/* 1 if we can substitute the strength-reduced
				   variable for the original variable.
				   0 means they must be kept separate and the
				   new one must be copied into the old pseudo
				   reg each time the old one is set.  */
  unsigned not_replaceable : 1;	/* Used to prevent duplicating work.  This is
				   1 if we know that the giv definitely can
				   not be made replaceable, in which case we
				   don't bother checking the variable again
				   even if further info is available.
				   Both this and the above can be zero.  */
  unsigned ignore : 1;		/* 1 prohibits further processing of giv */
  unsigned always_computable : 1;/* 1 if this value is computable every
				    iteration.  */
  unsigned always_executed : 1; /* 1 if this set occurs each iteration.  */
  unsigned maybe_multiple : 1;	/* Only used for a biv and 1 if this biv
				   update may be done multiple times per
				   iteration.  */
  unsigned cant_derive : 1;	/* For giv's, 1 if this giv cannot derive
				   another giv.  This occurs in many cases
				   where a giv's lifetime spans an update to
				   a biv.  */
  unsigned maybe_dead : 1;	/* 1 if this giv might be dead.  In that case,
				   we won't use it to eliminate a biv, it
				   would probably lose.  */
  unsigned auto_inc_opt : 1;	/* 1 if this giv had its increment output next
				   to it to try to form an auto-inc address.  */
  unsigned shared : 1;
  unsigned no_const_addval : 1; /* 1 if add_val does not contain a const.  */
  int lifetime;			/* Length of life of this giv */
  rtx derive_adjustment;	/* If nonzero, is an adjustment to be
				   subtracted from add_val when this giv
				   derives another.  This occurs when the
				   giv spans a biv update by incrementation.  */
  rtx ext_dependent;		/* If nonzero, is a sign or zero extension
				   of a biv on which this giv is dependent.  */
  struct induction *next_iv;	/* For givs, links together all givs that are
				   based on the same biv.  For bivs, links
				   together all biv entries that refer to the
				   same biv register.  */
  struct induction *same;	/* For givs, if the giv has been combined with
				   another giv, this points to the base giv.
				   The base giv will have COMBINED_WITH nonzero.
				   For bivs, if the biv has the same LOCATION
				   as another biv, this points to the base
				   biv.  */
  struct induction *same_insn;	/* If there are multiple identical givs in
				   the same insn, then all but one have this
				   field set, and they all point to the giv
				   that doesn't have this field set.  */
  rtx last_use;			/* For a giv made from a biv increment, this is
				   a substitute for the lifetime information.  */
};
/* A `struct iv_class' is created for each biv.  */

struct iv_class
{
  unsigned int regno;		/* Pseudo reg which is the biv.  */
  int biv_count;		/* Number of insns setting this reg.  */
  struct induction *biv;	/* List of all insns that set this reg.  */
  int giv_count;		/* Number of DEST_REG givs computed from this
				   biv.  The resulting count is only used in
				   check_dbra_loop.  */
  struct induction *giv;	/* List of all insns that compute a giv
				   from this reg.  */
  int total_benefit;		/* Sum of BENEFITs of all those givs.  */
  rtx initial_value;		/* Value of reg at loop start.  */
  rtx initial_test;		/* Test performed on BIV before loop.  */
  rtx final_value;		/* Value of reg at loop end, if known.  */
  struct iv_class *next;	/* Links all class structures together.  */
  rtx init_insn;		/* insn which initializes biv, 0 if none.  */
  rtx init_set;			/* SET of INIT_INSN, if any.  */
  unsigned incremented : 1;	/* 1 if somewhere incremented/decremented */
  unsigned eliminable : 1;	/* 1 if plausible candidate for
				   elimination.  */
  unsigned nonneg : 1;		/* 1 if we added a REG_NONNEG note for
				   this.  */
  unsigned reversed : 1;	/* 1 if we reversed the loop that this
				   biv controls.  */
  unsigned all_reduced : 1;	/* 1 if all givs using this biv have
				   been reduced.  */
};
/* Definitions used by the basic induction variable discovery code.  */
enum iv_mode
{
  UNKNOWN_INDUCT,
  BASIC_INDUCT,
  NOT_BASIC_INDUCT,
  GENERAL_INDUCT
};
/* A `struct iv' is created for every register.  */

struct iv
{
  enum iv_mode type;
  union
  {
    struct iv_class *class;
    struct induction *info;
  } iv;
};

#define REG_IV_TYPE(ivs, n) ivs->regs[n].type
#define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info
#define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class
struct loop_ivs
{
  /* Indexed by register number, contains pointer to `struct
     iv' if register is an induction variable.  */
  struct iv *regs;

  /* Size of regs array.  */
  unsigned int n_regs;

  /* The head of a list which links together (via the next field)
     every iv class for the current loop.  */
  struct iv_class *list;
};
typedef struct loop_mem_info
{
  rtx mem;	/* The MEM itself.  */
  rtx reg;	/* Corresponding pseudo, if any.  */
  int optimize;	/* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;
struct loop_reg
{
  /* Number of times the reg is set during the loop being scanned.
     During code motion, a negative value indicates a reg that has
     been made a candidate; in particular -2 means that it is a
     candidate that we know is equal to a constant and -1 means that
     it is a candidate not known equal to a constant.  After code
     motion, regs moved have 0 (which is accurate now) while the
     failed candidates have the original number of times set.

     Therefore, at all times, == 0 indicates an invariant register;
     < 0 a conditionally invariant one.  */
  int set_in_loop;

  /* Original value of set_in_loop; same except that this value
     is not set negative for a reg whose sets have been made candidates
     and not set to 0 for a reg that is moved.  */
  int n_times_set;

  /* Contains the insn in which a register was used if it was used
     exactly once; contains const0_rtx if it was used more than once.  */
  rtx single_usage;

  /* Nonzero indicates that the register cannot be moved or strength
     reduced.  */
  char may_not_optimize;

  /* Nonzero means reg N has already been moved out of one loop.
     This reduces the desire to move it out of another.  */
  char moved_once;
};
struct loop_regs
{
  int num;			/* Number of regs used in table.  */
  int size;			/* Size of table.  */
  struct loop_reg *array;	/* Register usage info array.  */
  int multiple_uses;		/* Nonzero if a reg has multiple uses.  */
};
struct loop_movables
{
  /* Head of movable chain.  */
  struct movable *head;
  /* Last movable in chain.  */
  struct movable *last;
};
/* Information pertaining to a loop.  */

struct loop_info
{
  /* Nonzero if there is a subroutine call in the current loop.  */
  int has_call;
  /* Nonzero if there is a libcall in the current loop.  */
  int has_libcall;
  /* Nonzero if there is a non-constant call in the current loop.  */
  int has_nonconst_call;
  /* Nonzero if there is a prefetch instruction in the current loop.  */
  int has_prefetch;
  /* Nonzero if there is a volatile memory reference in the current
     loop.  */
  int has_volatile;
  /* Nonzero if there is a tablejump in the current loop.  */
  int has_tablejump;
  /* Nonzero if there are ways to leave the loop other than falling
     off the end.  */
  int has_multiple_exit_targets;
  /* Nonzero if there is an indirect jump in the current function.  */
  int has_indirect_jump;
  /* Register or constant initial loop value.  */
  rtx initial_value;
  /* Register or constant value used for comparison test.  */
  rtx comparison_value;
  /* Register or constant approximate final value.  */
  rtx final_value;
  /* Register or constant initial loop value with term common to
     final_value removed.  */
  rtx initial_equiv_value;
  /* Register or constant final loop value with term common to
     initial_value removed.  */
  rtx final_equiv_value;
  /* Register corresponding to iteration variable.  */
  rtx iteration_var;
  /* Constant loop increment.  */
  rtx increment;
  enum rtx_code comparison_code;
  /* Holds the number of loop iterations.  It is zero if the number
     could not be calculated.  Must be unsigned since the number of
     iterations can be as high as 2^wordsize - 1.  For loops with a
     wider iterator, this number will be zero if the number of loop
     iterations is too large for an unsigned integer to hold.  */
  unsigned HOST_WIDE_INT n_iterations;
  int used_count_register;
  /* The loop iterator induction variable.  */
  struct iv_class *iv;
  /* List of MEMs that are stored in this loop.  */
  rtx store_mems;
  /* Array of MEMs that are used (read or written) in this loop, but
     cannot be aliased by anything in this loop, except perhaps
     themselves.  In other words, if mems[i] is altered during
     the loop, it is altered by an expression that is rtx_equal_p to
     it.  */
  loop_mem_info *mems;
  /* The index of the next available slot in MEMS.  */
  int mems_idx;
  /* The number of elements allocated in MEMS.  */
  int mems_allocated;
  /* Nonzero if we don't know what MEMs were changed in the current
     loop.  This happens if the loop contains a call (in which case
     `has_call' will also be set) or if we store into more than
     NUM_STORES MEMs.  */
  int unknown_address_altered;
  /* The above doesn't count any readonly memory locations that are
     stored.  This does.  */
  int unknown_constant_address_altered;
  /* Count of memory write instructions discovered in the loop.  */
  int num_mem_sets;
  /* The insn where the first of these was found.  */
  rtx first_loop_store_insn;
  /* The chain of movable insns in loop.  */
  struct loop_movables movables;
  /* The registers used in the loop.  */
  struct loop_regs regs;
  /* The induction variable information in the loop.  */
  struct loop_ivs ivs;
  /* Nonzero if call is in pre_header extended basic block.  */
  int pre_header_has_call;
};
/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores: this value divided by 256
   is the minimum percentage of memory references that makes prefetching
   worthwhile (the default of 220 corresponds to roughly 86%).  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif
#define LOOP_REG_LIFETIME(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
((REGNO) < FIRST_PSEUDO_REGISTER \
 ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

static int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

static struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

static int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
static unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
547 struct movable
549 rtx insn; /* A movable insn */
550 rtx set_src; /* The expression this reg is set from. */
551 rtx set_dest; /* The destination of this SET. */
552 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
553 of any registers used within the LIBCALL. */
554 int consec; /* Number of consecutive following insns
555 that must be moved with this one. */
556 unsigned int regno; /* The register it sets */
557 short lifetime; /* lifetime of that register;
558 may be adjusted when matching movables
559 that load the same value are found. */
560 short savings; /* Number of insns we can move for this reg,
561 including other movables that force this
562 or match this one. */
563 ENUM_BITFIELD(machine_mode) savemode : 8; /* Nonzero means it is a mode for
564 a low part that we should avoid changing when
565 clearing the rest of the reg. */
566 unsigned int cond : 1; /* 1 if only conditionally movable */
567 unsigned int force : 1; /* 1 means MUST move this insn */
568 unsigned int global : 1; /* 1 means reg is live outside this loop */
569 /* If PARTIAL is 1, GLOBAL means something different:
570 that the reg is live outside the range from where it is set
571 to the following label. */
572 unsigned int done : 1; /* 1 inhibits further processing of this */
574 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
575 In particular, moving it does not make it
576 invariant. */
577 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
578 load SRC, rather than copying INSN. */
579 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
580 first insn of a consecutive sets group. */
581 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
582 unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
583 the original insn with a copy from that
584 pseudo, rather than deleting it. */
585 struct movable *match; /* First entry for same value */
586 struct movable *forces; /* An insn that must be moved if this is */
587 struct movable *next;
static FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static rtx find_regs_nested (rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static rtx libcall_other_reg (rtx, rtx);
static void record_excess_regs (rtx, rtx, rtx *);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
static int loop_invariant_p (const struct loop *, rtx);
static rtx loop_insn_hoist (const struct loop *, rtx);
static void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx,
					  rtx, basic_block, rtx);
static rtx loop_insn_emit_before (const struct loop *, basic_block,
				  rtx, rtx);
static int loop_insn_first_p (rtx, rtx);
static rtx get_condition_for_loop (const struct loop *, rtx);
static void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx);
static void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx);
static rtx extend_value_for_giv (struct induction *, rtx);
static rtx loop_insn_sink (const struct loop *, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (!NOTE_P (insn)
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (NOTE_P (insn)
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	{
	  scan_loop (loop, flags);
	  ggc_collect ();
	}
    }

  end_alias_analysis ();

  /* Clean up.  */
  for (i = 0; i < (int) loops->num; i++)
    free (loops_info[i].mems);

  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
/* Find any register references hidden inside X and add them to
   the dependency list DEPS.  This is used to look inside CLOBBER (MEM ...)
   expressions when checking whether a PARALLEL can be pulled out of a
   loop.  */

static rtx
find_regs_nested (rtx deps, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  if (code == REG)
    deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
  else
    {
      const char *fmt = GET_RTX_FORMAT (code);
      int i, j;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    deps = find_regs_nested (deps, XEXP (x, i));
	  else if (fmt[i] == 'E')
	    for (j = 0; j < XVECLEN (x, i); j++)
	      deps = find_regs_nested (deps, XVECEXP (x, i, j));
	}
    }
  return deps;
}
/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
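  /* As a hypothetical sketch of the insn stream for such a loop entered
     at its exit test:

	NOTE_INSN_LOOP_BEG
	(jump to `test')
     `top':
	... loop body ...
     `test':
	(conditional jump to `top')
	NOTE_INSN_LOOP_END

     Here the first real insn after the loop beginning is the unconditional
     jump handled further below, and the scan should start at its target
     (`test') rather than at `top'.  */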
  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && !LABEL_P (p) && ! INSN_P (p)
	 && (!NOTE_P (p)
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (JUMP_P (p)
      /* Loop entry must be unconditional jump (and not a RETURN)  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
	 jumps out of the loop (meaning it's no loop).
	 This case can happen for things like
	 do {..} while (0).  If this label was generated previously
	 by loop, we can't tell anything about it and have to reject
	 the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || !LABEL_P (loop->scan_start))
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	     INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
1114 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
1115 p != NULL_RTX;
1116 p = next_insn_in_loop (loop, p))
1118 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
1119 in_libcall--;
1120 if (NONJUMP_INSN_P (p))
1122 /* Do not scan past an optimization barrier. */
1123 if (GET_CODE (PATTERN (p)) == ASM_INPUT)
1124 break;
1125 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
1126 if (temp)
1127 in_libcall++;
1128 if (! in_libcall
1129 && (set = single_set (p))
1130 && REG_P (SET_DEST (set))
1131 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
1132 && SET_DEST (set) != pic_offset_table_rtx
1133 #endif
1134 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
1136 int tem1 = 0;
1137 int tem2 = 0;
1138 int move_insn = 0;
1139 int insert_temp = 0;
1140 rtx src = SET_SRC (set);
1141 rtx dependencies = 0;
1143 /* Figure out what to use as a source of this insn. If a
1144 REG_EQUIV note is given or if a REG_EQUAL note with a
1145 constant operand is specified, use it as the source and
1146 mark that we should move this insn by calling
1147 emit_move_insn rather that duplicating the insn.
1149 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
1150 note is present. */
1151 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1152 if (temp)
1153 src = XEXP (temp, 0), move_insn = 1;
1154 else
1156 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1157 if (temp && CONSTANT_P (XEXP (temp, 0)))
1158 src = XEXP (temp, 0), move_insn = 1;
1159 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
1161 src = XEXP (temp, 0);
1162 /* A libcall block can use regs that don't appear in
1163 the equivalent expression. To move the libcall,
1164 we must move those regs too. */
1165 dependencies = libcall_other_reg (p, src);
1169 /* For parallels, add any possible uses to the dependencies, as
1170 we can't move the insn without resolving them first.
1171 MEMs inside CLOBBERs may also reference registers; these
1172 count as implicit uses. */
1173 if (GET_CODE (PATTERN (p)) == PARALLEL)
1175 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
1177 rtx x = XVECEXP (PATTERN (p), 0, i);
1178 if (GET_CODE (x) == USE)
1179 dependencies
1180 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
1181 dependencies);
1182 else if (GET_CODE (x) == CLOBBER
1183 && MEM_P (XEXP (x, 0)))
1184 dependencies = find_regs_nested (dependencies,
1185 XEXP (XEXP (x, 0), 0));
1189 if (/* The register is used in basic blocks other
1190 than the one where it is set (meaning that
1191 something after this point in the loop might
1192 depend on its value before the set). */
1193 ! reg_in_basic_block_p (p, SET_DEST (set))
1194 /* And the set is not guaranteed to be executed once
1195 the loop starts, or the value before the set is
1196 needed before the set occurs...
1198 ??? Note we have quadratic behavior here, mitigated
1199 by the fact that the previous test will often fail for
1200 large loops. Rather than re-scanning the entire loop
1201 each time for register usage, we should build tables
1202 of the register usage and use them here instead. */
1203 && (maybe_never
1204 || loop_reg_used_before_p (loop, set, p)))
1205 /* It is unsafe to move the set. However, it may be OK to
1206 move the source into a new pseudo, and substitute a
1207 reg-to-reg copy for the original insn.
1209 This code used to consider it OK to move a set of a variable
1210 which was not created by the user and not used in an exit
1211 test.
1212 That behavior is incorrect and was removed. */
1213 insert_temp = 1;
1215 /* Don't try to optimize a MODE_CC set with a constant
1216 source. It probably will be combined with a conditional
1217 jump. */
1218 if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
1219 && CONSTANT_P (src))
1221 /* Don't try to optimize a register that was made
1222 by loop-optimization for an inner loop.
1223 We don't know its life-span, so we can't compute
1224 the benefit. */
1225 else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
1227 /* Don't move the source and add a reg-to-reg copy:
1228 - with -Os (this certainly increases size),
1229 - if the mode doesn't support copy operations (obviously),
1230 - if the source is already a reg (the motion will gain nothing),
1231 - if the source is a legitimate constant (likewise). */
1232 else if (insert_temp
1233 && (optimize_size
1234 || ! can_copy_p (GET_MODE (SET_SRC (set)))
1235 || REG_P (SET_SRC (set))
1236 || (CONSTANT_P (SET_SRC (set))
1237 && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
1239 else if ((tem = loop_invariant_p (loop, src))
1240 && (dependencies == 0
1241 || (tem2
1242 = loop_invariant_p (loop, dependencies)) != 0)
1243 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
1244 || (tem1
1245 = consec_sets_invariant_p
1246 (loop, SET_DEST (set),
1247 regs->array[REGNO (SET_DEST (set))].set_in_loop,
1248 p)))
1249 /* If the insn can cause a trap (such as divide by zero),
1250 can't move it unless it's guaranteed to be executed
1251 once loop is entered. Even a function call might
1252 prevent the trap insn from being reached
1253 (since it might exit!) */
1254 && ! ((maybe_never || call_passed)
1255 && may_trap_p (src)))
		{
		  struct movable *m;
		  int regno = REGNO (SET_DEST (set));

		  /* A potential lossage is where we have a case where two insns
		     can be combined as long as they are both in the loop, but
		     we move one of them outside the loop.  For large loops,
		     this can lose.  The most common case of this is the address
		     of a function being called.

		     Therefore, if this register is marked as being used
		     exactly once if we are in a loop with calls
		     (a "large loop"), see if we can replace the usage of
		     this register with the source of this SET.  If we can,
		     delete this insn.

		     Don't do this if P has a REG_RETVAL note or if we have
		     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

		  if (loop_info->has_call
		      && regs->array[regno].single_usage != 0
		      && regs->array[regno].single_usage != const0_rtx
		      && REGNO_FIRST_UID (regno) == INSN_UID (p)
		      && (REGNO_LAST_UID (regno)
			  == INSN_UID (regs->array[regno].single_usage))
		      && regs->array[regno].set_in_loop == 1
		      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		      && ! side_effects_p (SET_SRC (set))
		      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		      && (! SMALL_REGISTER_CLASSES
			  || (! (REG_P (SET_SRC (set))
				 && (REGNO (SET_SRC (set))
				     < FIRST_PSEUDO_REGISTER))))
		      && regno >= FIRST_PSEUDO_REGISTER
		      /* This test is not redundant; SET_SRC (set) might be
			 a call-clobbered register and the life of REGNO
			 might span a call.  */
		      && ! modified_between_p (SET_SRC (set), p,
					       regs->array[regno].single_usage)
		      && no_labels_between_p (p,
					      regs->array[regno].single_usage)
		      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					       regs->array[regno].single_usage))
		    {
		      /* Replace any usage in a REG_EQUAL note.  Must copy
			 the new source, so that we don't get rtx sharing
			 between the SET_SOURCE and REG_NOTES of insn p.  */
		      REG_NOTES (regs->array[regno].single_usage)
			= (replace_rtx
			   (REG_NOTES (regs->array[regno].single_usage),
			    SET_DEST (set), copy_rtx (SET_SRC (set))));

		      delete_insn (p);
		      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = 0;
		      continue;
		    }

		  m = xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_src = src;
		  m->dependencies = dependencies;
		  m->set_dest = SET_DEST (set);
		  m->force = 0;
		  m->consec
		    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
		  m->done = 0;
		  m->forces = 0;
		  m->partial = 0;
		  m->move_insn = move_insn;
		  m->move_insn_first = 0;
		  m->insert_temp = insert_temp;
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		  m->savemode = VOIDmode;
		  m->regno = regno;
		  /* Set M->cond if either loop_invariant_p
		     or consec_sets_invariant_p returned 2
		     (only conditionally invariant).  */
		  m->cond = ((tem | tem1 | tem2) > 1);
		  m->global = LOOP_REG_GLOBAL_P (loop, regno);
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = regs->array[regno].n_times_set;
		  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		    m->savings += libcall_benefit (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);

		  if (m->consec > 0)
		    {
		      /* It is possible for the first instruction to have a
			 REG_EQUAL note but a non-invariant SET_SRC, so we must
			 remember the status of the first instruction in case
			 the last instruction doesn't have a REG_EQUAL note.  */
		      m->move_insn_first = m->move_insn;

		      /* Skip this insn, not checking REG_LIBCALL notes.  */
		      p = next_nonnote_insn (p);
		      /* Skip the consecutive insns, if there are any.  */
		      p = skip_consec_insns (p, m->consec);
		      /* Back up to the last insn of the consecutive group.  */
		      p = prev_nonnote_insn (p);

		      /* We must now reset m->move_insn, m->is_equiv, and
			 possibly m->set_src to correspond to the effects of
			 all the insns.  */
		      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		      if (temp)
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			{
			  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
			  if (temp && CONSTANT_P (XEXP (temp, 0)))
			    m->set_src = XEXP (temp, 0), m->move_insn = 1;
			  else
			    m->move_insn = 0;
			}

		      m->is_equiv
			= (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		    }
		}
	      /* If this register is always set within a STRICT_LOW_PART
		 or set to zero, then its high bytes are constant.
		 So clear them outside the loop and within the loop
		 just load the low bytes.
		 We must check that the machine has an instruction to do so.
		 Also, if the value loaded into the register
		 depends on the same register, this cannot be done.  */
	      else if (SET_SRC (set) == const0_rtx
		       && NONJUMP_INSN_P (NEXT_INSN (p))
		       && (set1 = single_set (NEXT_INSN (p)))
		       && GET_CODE (set1) == SET
		       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
			   == SET_DEST (set))
		       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
		{
		  int regno = REGNO (SET_DEST (set));
		  if (regs->array[regno].set_in_loop == 2)
		    {
		      struct movable *m;
		      m = xmalloc (sizeof (struct movable));
		      m->next = 0;
		      m->insn = p;
		      m->set_dest = SET_DEST (set);
		      m->dependencies = 0;
		      m->force = 0;
		      m->consec = 0;
		      m->done = 0;
		      m->forces = 0;
		      m->move_insn = 0;
		      m->move_insn_first = 0;
		      m->insert_temp = insert_temp;
		      m->partial = 1;
		      /* If the insn may not be executed on some cycles,
			 we can't clear the whole reg; clear just high part.
			 Not even if the reg is used only within this loop.
			 Consider this:
			 while (1)
			   while (s != t) {
			     if (foo ()) x = *s;
			     use (x);
			   }
			 Clearing x before the inner loop could clobber a value
			 being saved from the last time around the outer loop.
			 However, if the reg is not used outside this loop
			 and all uses of the register are in the same
			 basic block as the store, there is no problem.

			 If this insn was made by loop, we don't know its
			 INSN_LUID and hence must make a conservative
			 assumption.  */
		      m->global = (INSN_UID (p) >= max_uid_for_loop
				   || LOOP_REG_GLOBAL_P (loop, regno)
				   || (labels_in_range_p
				       (p, REGNO_FIRST_LUID (regno))));
		      if (maybe_never && m->global)
			m->savemode = GET_MODE (SET_SRC (set1));
		      else
			m->savemode = VOIDmode;
		      m->regno = regno;
		      m->cond = 0;
		      m->match = 0;
		      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		      m->savings = 1;
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = -1;
		      /* Add M to the end of the chain MOVABLES.  */
		      loop_movables_add (movables, m);
		    }
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((LABEL_P (p) || JUMP_P (p))
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop_end.  */
	       && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
    }
1480 /* If one movable subsumes another, ignore that other. */
1482 ignore_some_movables (movables);
1484 /* For each movable insn, see if the reg that it loads
1485 leads when it dies right into another conditionally movable insn.
1486 If so, record that the second insn "forces" the first one,
1487 since the second can be moved only if the first is. */
1489 force_movables (movables);
1491 /* See if there are multiple movable insns that load the same value.
1492 If there are, make all but the first point at the first one
1493 through the `match' field, and add the priorities of them
1494 all together as the priority of the first. */
1496 combine_movables (movables, regs);
1498 /* Now consider each movable insn to decide whether it is worth moving.
1499 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1501 For machines with few registers this increases code size, so do not
1502 move moveables when optimizing for code size on such machines.
1503 (The 18 below is the value for i386.) */
1505 if (!optimize_size
1506 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1508 move_movables (loop, movables, threshold, insn_count);
1510 /* Recalculate regs->array if move_movables has created new
1511 registers. */
1512 if (max_reg_num () > regs->num)
1514 loop_regs_scan (loop, 0);
1515 for (update_start = loop_start;
1516 PREV_INSN (update_start)
1517 && !LABEL_P (PREV_INSN (update_start));
1518 update_start = PREV_INSN (update_start))
1519 ;
1520 update_end = NEXT_INSN (loop_end);
1522 reg_scan_update (update_start, update_end, loop_max_reg);
1523 loop_max_reg = max_reg_num ();
1527 /* Now candidates that still are negative are those not moved.
1528 Change regs->array[I].set_in_loop to indicate that those are not actually
1529 invariant. */
1530 for (i = 0; i < regs->num; i++)
1531 if (regs->array[i].set_in_loop < 0)
1532 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1534 /* Now that we've moved some things out of the loop, we might be able to
1535 hoist even more memory references. */
1536 load_mems (loop);
1538 /* Recalculate regs->array if load_mems has created new registers. */
1539 if (max_reg_num () > regs->num)
1540 loop_regs_scan (loop, 0);
1542 for (update_start = loop_start;
1543 PREV_INSN (update_start)
1544 && !LABEL_P (PREV_INSN (update_start));
1545 update_start = PREV_INSN (update_start))
1546 ;
1547 update_end = NEXT_INSN (loop_end);
1549 reg_scan_update (update_start, update_end, loop_max_reg);
1550 loop_max_reg = max_reg_num ();
1552 if (flag_strength_reduce)
1554 if (update_end && LABEL_P (update_end))
1555 /* Ensure our label doesn't go away. */
1556 LABEL_NUSES (update_end)++;
1558 strength_reduce (loop, flags);
1560 reg_scan_update (update_start, update_end, loop_max_reg);
1561 loop_max_reg = max_reg_num ();
1563 if (update_end && LABEL_P (update_end)
1564 && --LABEL_NUSES (update_end) == 0)
1565 delete_related_insns (update_end);
1569 /* The movable information is required for strength reduction. */
1570 loop_movables_free (movables);
1572 free (regs->array);
1573 regs->array = 0;
1574 regs->num = 0;
1577 /* Add elements to *OUTPUT to record all the pseudo-regs
1578 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1580 static void
1581 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1583 enum rtx_code code;
1584 const char *fmt;
1585 int i;
1587 code = GET_CODE (in_this);
1589 switch (code)
1591 case PC:
1592 case CC0:
1593 case CONST_INT:
1594 case CONST_DOUBLE:
1595 case CONST:
1596 case SYMBOL_REF:
1597 case LABEL_REF:
1598 return;
1600 case REG:
1601 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1602 && ! reg_mentioned_p (in_this, not_in_this))
1603 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1604 return;
1606 default:
1607 break;
1610 fmt = GET_RTX_FORMAT (code);
1611 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1613 int j;
1615 switch (fmt[i])
1617 case 'E':
1618 for (j = 0; j < XVECLEN (in_this, i); j++)
1619 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1620 break;
1622 case 'e':
1623 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1624 break;
1629 /* Check what regs are referred to in the libcall block ending with INSN,
1630 aside from those mentioned in the equivalent value.
1631 If there are none, return 0.
1632 If there are one or more, return an EXPR_LIST containing all of them. */
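/* Illustrative sketch (editorial, hypothetical insn numbers): a libcall
   block is bracketed by a pair of notes, roughly

     insn 10: (set (reg 100) ...)   ;; REG_LIBCALL -> insn 12
     insn 11: (call_insn ...)
     insn 12: (set (reg 101) ...)   ;; REG_RETVAL -> insn 10,
                                    ;; REG_EQUAL = equivalent value

   so, given INSN = insn 12 and EQUIV = the equivalent value, the walk
   below visits insns 10..11 and collects any pseudos (perhaps reg 100
   here) that EQUIV does not mention.  */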
1634 static rtx
1635 libcall_other_reg (rtx insn, rtx equiv)
1637 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1638 rtx p = XEXP (note, 0);
1639 rtx output = 0;
1641 /* First, find all the regs used in the libcall block
1642 that are not mentioned as inputs to the result. */
1644 while (p != insn)
1646 if (INSN_P (p))
1647 record_excess_regs (PATTERN (p), equiv, &output);
1648 p = NEXT_INSN (p);
1651 return output;
1654 /* Return 1 if all uses of REG
1655 are between INSN and the end of the basic block. */
1657 static int
1658 reg_in_basic_block_p (rtx insn, rtx reg)
1660 int regno = REGNO (reg);
1661 rtx p;
1663 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1664 return 0;
1666 /* Search this basic block for the already recorded last use of the reg. */
1667 for (p = insn; p; p = NEXT_INSN (p))
1669 switch (GET_CODE (p))
1671 case NOTE:
1672 break;
1674 case INSN:
1675 case CALL_INSN:
1676 /* Ordinary insn: if this is the last use, we win. */
1677 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1678 return 1;
1679 break;
1681 case JUMP_INSN:
1682 /* Jump insn: if this is the last use, we win. */
1683 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1684 return 1;
1685 /* Otherwise, it's the end of the basic block, so we lose. */
1686 return 0;
1688 case CODE_LABEL:
1689 case BARRIER:
1690 /* It's the end of the basic block, so we lose. */
1691 return 0;
1693 default:
1694 break;
1698 /* The "last use" that was recorded can't be found after the first
1699 use. This can happen when the last use was deleted while
1700 processing an inner loop, this inner loop was then completely
1701 unrolled, and the outer loop is always exited after the inner loop,
1702 so that everything after the first use becomes a single basic block. */
1703 return 1;
1706 /* Compute the benefit of eliminating the insns in the block whose
1707 last insn is LAST. This may be a group of insns used to compute a
1708 value directly or can contain a library call. */
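/* Worked example (editorial): for a block of two set-up insns, one
   CALL_INSN and one USE, the loop below scores 1 + 1 + 10 + 0 = 12,
   since a call counts as at least 10 insns and USE/CLOBBER patterns
   count as none.  */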
1710 static int
1711 libcall_benefit (rtx last)
1713 rtx insn;
1714 int benefit = 0;
1716 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1717 insn != last; insn = NEXT_INSN (insn))
1719 if (CALL_P (insn))
1720 benefit += 10; /* Assume at least this many insns in a library
1721 routine. */
1722 else if (NONJUMP_INSN_P (insn)
1723 && GET_CODE (PATTERN (insn)) != USE
1724 && GET_CODE (PATTERN (insn)) != CLOBBER)
1725 benefit++;
1728 return benefit;
1731 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1733 static rtx
1734 skip_consec_insns (rtx insn, int count)
1736 for (; count > 0; count--)
1738 rtx temp;
1740 /* If first insn of libcall sequence, skip to end. */
1741 /* Do this at start of loop, since INSN is guaranteed to
1742 be an insn here. */
1743 if (!NOTE_P (insn)
1744 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1745 insn = XEXP (temp, 0);
1747 do
1748 insn = NEXT_INSN (insn);
1749 while (NOTE_P (insn));
1752 return insn;
1755 /* Ignore any movable whose insn falls within a libcall
1756 which is part of another movable.
1757 We make use of the fact that the movable for the libcall value
1758 was made later and so appears later on the chain. */
1760 static void
1761 ignore_some_movables (struct loop_movables *movables)
1763 struct movable *m, *m1;
1765 for (m = movables->head; m; m = m->next)
1767 /* Is this a movable for the value of a libcall? */
1768 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1769 if (note)
1771 rtx insn;
1772 /* Check for earlier movables inside that range,
1773 and mark them invalid. We cannot use LUIDs here because
1774 insns created by loop.c for prior loops don't have LUIDs.
1775 Rather than reject all such insns from movables, we just
1776 explicitly check each insn in the libcall (since invariant
1777 libcalls aren't that common). */
1778 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1779 for (m1 = movables->head; m1 != m; m1 = m1->next)
1780 if (m1->insn == insn)
1781 m1->done = 1;
1786 /* For each movable insn, see if the reg that it loads
1787 feeds, at the insn where it dies, right into another conditionally movable insn.
1788 If so, record that the second insn "forces" the first one,
1789 since the second can be moved only if the first is. */
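/* Illustrative sketch (editorial, hypothetical registers):

     r70 = 99;                    ;; movable M1
     if (cond) r71 = r70 * 2;     ;; conditional movable M; r70 dies here

   M can be hoisted only if M1 is hoisted first, so the loop below sets
   M->forces = M1 and folds M's lifetime and savings into M1 (and into
   the chain of movables that M1 itself forces).  */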
1791 static void
1792 force_movables (struct loop_movables *movables)
1794 struct movable *m, *m1;
1796 for (m1 = movables->head; m1; m1 = m1->next)
1797 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1798 if (!m1->partial && !m1->done)
1800 int regno = m1->regno;
1801 for (m = m1->next; m; m = m->next)
1802 /* ??? Could this be a bug? What if CSE caused the
1803 register of M1 to be used after this insn?
1804 Since CSE does not update regno_last_uid,
1805 this insn M->insn might not be where it dies.
1806 But very likely this doesn't matter; what matters is
1807 that M's reg is computed from M1's reg. */
1808 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1809 && !m->done)
1810 break;
1811 if (m != 0 && m->set_src == m1->set_dest
1812 /* If m->consec, m->set_src isn't valid. */
1813 && m->consec == 0)
1814 m = 0;
1816 /* Increase the priority of moving the first insn
1817 since it permits the second to be moved as well.
1818 Likewise for insns already forced by the first insn. */
1819 if (m != 0)
1821 struct movable *m2;
1823 m->forces = m1;
1824 for (m2 = m1; m2; m2 = m2->forces)
1826 m2->lifetime += m->lifetime;
1827 m2->savings += m->savings;
1833 /* Find invariant expressions that are equal and can be combined into
1834 one register. */
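/* Illustrative sketch (editorial): given two movables in one loop that
   load equal invariant values, e.g.

     r80 = x + y;   ...   r90 = x + y;

   the second gets ->match pointing at the first, is marked done, and
   its lifetime/savings are folded into the first, so only one hoisted
   copy (r80) survives; r90's uses are rewritten to r80 later, in
   move_movables.  */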
1836 static void
1837 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1839 struct movable *m;
1840 char *matched_regs = xmalloc (regs->num);
1841 enum machine_mode mode;
1843 /* Regs that are set more than once are not allowed to match
1844 or be matched. I'm no longer sure why not. */
1845 /* Only pseudo registers are allowed to match or be matched,
1846 since move_movables does not validate the change. */
1847 /* Perhaps testing m->consec_sets would be more appropriate here? */
1849 for (m = movables->head; m; m = m->next)
1850 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1851 && m->regno >= FIRST_PSEUDO_REGISTER
1852 && !m->insert_temp
1853 && !m->partial)
1855 struct movable *m1;
1856 int regno = m->regno;
1858 memset (matched_regs, 0, regs->num);
1859 matched_regs[regno] = 1;
1861 /* We want later insns to match the first one. Don't make the first
1862 one match any later ones. So start this loop at m->next. */
1863 for (m1 = m->next; m1; m1 = m1->next)
1864 if (m != m1 && m1->match == 0
1865 && !m1->insert_temp
1866 && regs->array[m1->regno].n_times_set == 1
1867 && m1->regno >= FIRST_PSEUDO_REGISTER
1868 /* A reg used outside the loop mustn't be eliminated. */
1869 && !m1->global
1870 /* A reg used for zero-extending mustn't be eliminated. */
1871 && !m1->partial
1872 && (matched_regs[m1->regno]
1875 /* Can combine regs with different modes loaded from the
1876 same constant only if the modes are the same or
1877 if both are integer modes with M wider or the same
1878 width as M1. The check for integer is redundant, but
1879 safe, since the only case of differing destination
1880 modes with equal sources is when both sources are
1881 VOIDmode, i.e., CONST_INT. */
1882 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1883 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1884 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1885 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1886 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1887 /* See if the source of M1 says it matches M. */
1888 && ((REG_P (m1->set_src)
1889 && matched_regs[REGNO (m1->set_src)])
1890 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1891 movables, regs))))
1892 && ((m->dependencies == m1->dependencies)
1893 || rtx_equal_p (m->dependencies, m1->dependencies)))
1895 m->lifetime += m1->lifetime;
1896 m->savings += m1->savings;
1897 m1->done = 1;
1898 m1->match = m;
1899 matched_regs[m1->regno] = 1;
1903 /* Now combine the regs used for zero-extension.
1904 This can be done for those not marked `global'
1905 provided their lives don't overlap. */
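/* Illustrative sketch (editorial; the exact RTL shape is an
   assumption): a "partial" movable is the clearing half of a
   zero-extension split as

     (set (reg:SI 85) (const_int 0))
     (set (strict_low_part (subreg:HI (reg:SI 85) 0)) (mem:HI ...))

   Several such clears whose registers' lifetimes do not overlap can be
   merged so that a single register is zeroed once before the loop.  */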
1907 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1908 mode = GET_MODE_WIDER_MODE (mode))
1910 struct movable *m0 = 0;
1912 /* Combine all the registers for extension from mode MODE.
1913 Don't combine any that are used outside this loop. */
1914 for (m = movables->head; m; m = m->next)
1915 if (m->partial && ! m->global
1916 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1918 struct movable *m1;
1920 int first = REGNO_FIRST_LUID (m->regno);
1921 int last = REGNO_LAST_LUID (m->regno);
1923 if (m0 == 0)
1925 /* First one: don't check for overlap, just record it. */
1926 m0 = m;
1927 continue;
1930 /* Make sure they extend to the same mode.
1931 (Almost always true.) */
1932 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1933 continue;
1935 /* We already have one: check for overlap with those
1936 already combined together. */
1937 for (m1 = movables->head; m1 != m; m1 = m1->next)
1938 if (m1 == m0 || (m1->partial && m1->match == m0))
1939 if (! (REGNO_FIRST_LUID (m1->regno) > last
1940 || REGNO_LAST_LUID (m1->regno) < first))
1941 goto overlap;
1943 /* No overlap: we can combine this with the others. */
1944 m0->lifetime += m->lifetime;
1945 m0->savings += m->savings;
1946 m->done = 1;
1947 m->match = m0;
1949 overlap:
1950 ;
1954 /* Clean up. */
1955 free (matched_regs);
1958 /* Returns the number of movable instructions in LOOP that were not
1959 moved outside the loop. */
1961 static int
1962 num_unmoved_movables (const struct loop *loop)
1964 int num = 0;
1965 struct movable *m;
1967 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1968 if (!m->done)
1969 ++num;
1971 return num;
1975 /* Return 1 if regs X and Y will become the same if moved. */
1977 static int
1978 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1980 unsigned int xn = REGNO (x);
1981 unsigned int yn = REGNO (y);
1982 struct movable *mx, *my;
1984 for (mx = movables->head; mx; mx = mx->next)
1985 if (mx->regno == xn)
1986 break;
1988 for (my = movables->head; my; my = my->next)
1989 if (my->regno == yn)
1990 break;
1992 return (mx && my
1993 && ((mx->match == my->match && mx->match != 0)
1994 || mx->match == my
1995 || mx == my->match));
1998 /* Return 1 if X and Y are identical-looking rtx's.
1999 This is the Lisp function EQUAL for rtx arguments.
2001 If two registers are matching movables or a movable register and an
2002 equivalent constant, consider them equal. */
2004 static int
2005 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
2006 struct loop_regs *regs)
2008 int i;
2009 int j;
2010 struct movable *m;
2011 enum rtx_code code;
2012 const char *fmt;
2014 if (x == y)
2015 return 1;
2016 if (x == 0 || y == 0)
2017 return 0;
2019 code = GET_CODE (x);
2021 /* If we have a register and a constant, they may sometimes be
2022 equal. */
2023 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
2024 && CONSTANT_P (y))
2026 for (m = movables->head; m; m = m->next)
2027 if (m->move_insn && m->regno == REGNO (x)
2028 && rtx_equal_p (m->set_src, y))
2029 return 1;
2031 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
2032 && CONSTANT_P (x))
2034 for (m = movables->head; m; m = m->next)
2035 if (m->move_insn && m->regno == REGNO (y)
2036 && rtx_equal_p (m->set_src, x))
2037 return 1;
2040 /* Otherwise, rtx's of different codes cannot be equal. */
2041 if (code != GET_CODE (y))
2042 return 0;
2044 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
2045 (REG:SI x) and (REG:HI x) are NOT equivalent. */
2047 if (GET_MODE (x) != GET_MODE (y))
2048 return 0;
2050 /* These three types of rtx's can be compared nonrecursively. */
2051 if (code == REG)
2052 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
2054 if (code == LABEL_REF)
2055 return XEXP (x, 0) == XEXP (y, 0);
2056 if (code == SYMBOL_REF)
2057 return XSTR (x, 0) == XSTR (y, 0);
2059 /* Compare the elements. If any pair of corresponding elements
2060 fail to match, return 0 for the whole thing. */
2062 fmt = GET_RTX_FORMAT (code);
2063 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2065 switch (fmt[i])
2067 case 'w':
2068 if (XWINT (x, i) != XWINT (y, i))
2069 return 0;
2070 break;
2072 case 'i':
2073 if (XINT (x, i) != XINT (y, i))
2074 return 0;
2075 break;
2077 case 'E':
2078 /* Two vectors must have the same length. */
2079 if (XVECLEN (x, i) != XVECLEN (y, i))
2080 return 0;
2082 /* And the corresponding elements must match. */
2083 for (j = 0; j < XVECLEN (x, i); j++)
2084 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2085 movables, regs) == 0)
2086 return 0;
2087 break;
2089 case 'e':
2090 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
2091 == 0)
2092 return 0;
2093 break;
2095 case 's':
2096 if (strcmp (XSTR (x, i), XSTR (y, i)))
2097 return 0;
2098 break;
2100 case 'u':
2101 /* These are just backpointers, so they don't matter. */
2102 break;
2104 case '0':
2105 break;
2107 /* It is believed that rtx's at this level will never
2108 contain anything but integers and other rtx's,
2109 except for within LABEL_REFs and SYMBOL_REFs. */
2110 default:
2111 abort ();
2114 return 1;
2117 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
2118 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
2119 references is incremented once for each added note. */
2121 static void
2122 add_label_notes (rtx x, rtx insns)
2124 enum rtx_code code = GET_CODE (x);
2125 int i, j;
2126 const char *fmt;
2127 rtx insn;
2129 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2131 /* This code used to ignore labels that referred to dispatch tables to
2132 avoid flow generating (slightly) worse code.
2134 We no longer ignore such label references (see LABEL_REF handling in
2135 mark_jump_label for additional information). */
2136 for (insn = insns; insn; insn = NEXT_INSN (insn))
2137 if (reg_mentioned_p (XEXP (x, 0), insn))
2139 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
2140 REG_NOTES (insn));
2141 if (LABEL_P (XEXP (x, 0)))
2142 LABEL_NUSES (XEXP (x, 0))++;
2146 fmt = GET_RTX_FORMAT (code);
2147 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2149 if (fmt[i] == 'e')
2150 add_label_notes (XEXP (x, i), insns);
2151 else if (fmt[i] == 'E')
2152 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2153 add_label_notes (XVECEXP (x, i, j), insns);
2157 /* Scan MOVABLES, and move the insns that deserve to be moved.
2158 If two matching movables are combined, replace one reg with the
2159 other throughout. */
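/* Illustrative sketch (editorial) of the overall effect in C terms,
   assuming a and b are unchanged inside the loop:

     before:  for (i = 0; i < n; i++) { t = a + b; use (t); }
     after:   t = a + b;  for (i = 0; i < n; i++) use (t);

   The movable's insn is deleted inside the loop and re-emitted or
   hoisted just before the loop start.  */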
2161 static void
2162 move_movables (struct loop *loop, struct loop_movables *movables,
2163 int threshold, int insn_count)
2165 struct loop_regs *regs = LOOP_REGS (loop);
2166 int nregs = regs->num;
2167 rtx new_start = 0;
2168 struct movable *m;
2169 rtx p;
2170 rtx loop_start = loop->start;
2171 rtx loop_end = loop->end;
2172 /* Map of pseudo-register replacements to handle combining
2173 when we move several insns that load the same value
2174 into different pseudo-registers. */
2175 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
2176 char *already_moved = xcalloc (nregs, sizeof (char));
2178 for (m = movables->head; m; m = m->next)
2180 /* Describe this movable insn. */
2182 if (loop_dump_stream)
2184 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
2185 INSN_UID (m->insn), m->regno, m->lifetime);
2186 if (m->consec > 0)
2187 fprintf (loop_dump_stream, "consec %d, ", m->consec);
2188 if (m->cond)
2189 fprintf (loop_dump_stream, "cond ");
2190 if (m->force)
2191 fprintf (loop_dump_stream, "force ");
2192 if (m->global)
2193 fprintf (loop_dump_stream, "global ");
2194 if (m->done)
2195 fprintf (loop_dump_stream, "done ");
2196 if (m->move_insn)
2197 fprintf (loop_dump_stream, "move-insn ");
2198 if (m->match)
2199 fprintf (loop_dump_stream, "matches %d ",
2200 INSN_UID (m->match->insn));
2201 if (m->forces)
2202 fprintf (loop_dump_stream, "forces %d ",
2203 INSN_UID (m->forces->insn));
2206 /* Ignore the insn if it's already done (it matched something else).
2207 Otherwise, see if it is now safe to move. */
2209 if (!m->done
2210 && (! m->cond
2211 || (1 == loop_invariant_p (loop, m->set_src)
2212 && (m->dependencies == 0
2213 || 1 == loop_invariant_p (loop, m->dependencies))
2214 && (m->consec == 0
2215 || 1 == consec_sets_invariant_p (loop, m->set_dest,
2216 m->consec + 1,
2217 m->insn))))
2218 && (! m->forces || m->forces->done))
2220 int regno;
2221 rtx p;
2222 int savings = m->savings;
2224 /* We have an insn that is safe to move.
2225 Compute its desirability. */
2227 p = m->insn;
2228 regno = m->regno;
2230 if (loop_dump_stream)
2231 fprintf (loop_dump_stream, "savings %d ", savings);
2233 if (regs->array[regno].moved_once && loop_dump_stream)
2234 fprintf (loop_dump_stream, "halved since already moved ");
2236 /* An insn MUST be moved if we already moved something else
2237 which is safe only if this one is moved too: that is,
2238 if already_moved[REGNO] is nonzero. */
2240 /* An insn is desirable to move if the new lifetime of the
2241 register is no more than THRESHOLD times the old lifetime.
2242 If it's not desirable, it means the loop is so big
2243 that moving won't speed things up much,
2244 and it is liable to make register usage worse. */
2246 /* It is also desirable to move if it can be moved at no
2247 extra cost because something else was already moved. */
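/* Worked example (editorial, hypothetical numbers): with
   threshold = 2, savings = 3, m->lifetime = 10 and insn_count = 50,
   the test below is 2 * 3 * 10 = 60 >= 50, so the move is made; had
   this reg already been moved out of another loop, the bar doubles to
   insn_count * 2 = 100 and 60 would no longer qualify.  */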
2249 if (already_moved[regno]
2250 || (threshold * savings * m->lifetime) >=
2251 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
2252 || (m->forces && m->forces->done
2253 && regs->array[m->forces->regno].n_times_set == 1))
2255 int count;
2256 struct movable *m1;
2257 rtx first = NULL_RTX;
2258 rtx newreg = NULL_RTX;
2260 if (m->insert_temp)
2261 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
2263 /* Now move the insns that set the reg. */
2265 if (m->partial && m->match)
2267 rtx newpat, i1;
2268 rtx r1, r2;
2269 /* Find the end of this chain of matching regs.
2270 Thus, we load each reg in the chain from that one reg.
2271 And that reg is loaded with 0 directly,
2272 since it has ->match == 0. */
2273 for (m1 = m; m1->match; m1 = m1->match);
2274 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
2275 SET_DEST (PATTERN (m1->insn)));
2276 i1 = loop_insn_hoist (loop, newpat);
2278 /* Mark the moved, invariant reg as being allowed to
2279 share a hard reg with the other matching invariant. */
2280 REG_NOTES (i1) = REG_NOTES (m->insn);
2281 r1 = SET_DEST (PATTERN (m->insn));
2282 r2 = SET_DEST (PATTERN (m1->insn));
2283 regs_may_share
2284 = gen_rtx_EXPR_LIST (VOIDmode, r1,
2285 gen_rtx_EXPR_LIST (VOIDmode, r2,
2286 regs_may_share));
2287 delete_insn (m->insn);
2289 if (new_start == 0)
2290 new_start = i1;
2292 if (loop_dump_stream)
2293 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2295 /* If we are to re-generate the item being moved with a
2296 new move insn, first delete what we have and then emit
2297 the move insn before the loop. */
2298 else if (m->move_insn)
2300 rtx i1, temp, seq;
2302 for (count = m->consec; count >= 0; count--)
2304 /* If this is the first insn of a library call sequence,
2305 something is very wrong. */
2306 if (!NOTE_P (p)
2307 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2308 abort ();
2310 /* If this is the last insn of a libcall sequence, then
2311 delete every insn in the sequence except the last.
2312 The last insn is handled in the normal manner. */
2313 if (!NOTE_P (p)
2314 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2316 temp = XEXP (temp, 0);
2317 while (temp != p)
2318 temp = delete_insn (temp);
2321 temp = p;
2322 p = delete_insn (p);
2324 /* simplify_giv_expr expects that it can walk the insns
2325 at m->insn forwards and see this old sequence we are
2326 tossing here. delete_insn does preserve the next
2327 pointers, but when we skip over a NOTE we must fix
2328 it up. Otherwise that code walks into the non-deleted
2329 insn stream. */
2330 while (p && NOTE_P (p))
2331 p = NEXT_INSN (temp) = NEXT_INSN (p);
2333 if (m->insert_temp)
2335 /* Replace the original insn with a move from
2336 our newly created temp. */
2337 start_sequence ();
2338 emit_move_insn (m->set_dest, newreg);
2339 seq = get_insns ();
2340 end_sequence ();
2341 emit_insn_before (seq, p);
2345 start_sequence ();
2346 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2347 m->set_src);
2348 seq = get_insns ();
2349 end_sequence ();
2351 add_label_notes (m->set_src, seq);
2353 i1 = loop_insn_hoist (loop, seq);
2354 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2355 set_unique_reg_note (i1,
2356 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2357 m->set_src);
2359 if (loop_dump_stream)
2360 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2362 /* The more regs we move, the less we like moving them. */
2363 threshold -= 3;
2365 else
2367 for (count = m->consec; count >= 0; count--)
2369 rtx i1, temp;
2371 /* If first insn of libcall sequence, skip to end. */
2372 /* Do this at start of loop, since p is guaranteed to
2373 be an insn here. */
2374 if (!NOTE_P (p)
2375 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2376 p = XEXP (temp, 0);
2378 /* If last insn of libcall sequence, move all
2379 insns except the last before the loop. The last
2380 insn is handled in the normal manner. */
2381 if (!NOTE_P (p)
2382 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2384 rtx fn_address = 0;
2385 rtx fn_reg = 0;
2386 rtx fn_address_insn = 0;
2388 first = 0;
2389 for (temp = XEXP (temp, 0); temp != p;
2390 temp = NEXT_INSN (temp))
2392 rtx body;
2393 rtx n;
2394 rtx next;
2396 if (NOTE_P (temp))
2397 continue;
2399 body = PATTERN (temp);
2401 /* Find the next insn after TEMP,
2402 not counting USE or NOTE insns. */
2403 for (next = NEXT_INSN (temp); next != p;
2404 next = NEXT_INSN (next))
2405 if (! (NONJUMP_INSN_P (next)
2406 && GET_CODE (PATTERN (next)) == USE)
2407 && !NOTE_P (next))
2408 break;
2410 /* If that is the call, this may be the insn
2411 that loads the function address.
2413 Extract the function address from the insn
2414 that loads it into a register.
2415 If this insn was cse'd, we get incorrect code.
2417 So emit a new move insn that copies the
2418 function address into the register that the
2419 call insn will use. flow.c will delete any
2420 redundant stores that we have created. */
2421 if (CALL_P (next)
2422 && GET_CODE (body) == SET
2423 && REG_P (SET_DEST (body))
2424 && (n = find_reg_note (temp, REG_EQUAL,
2425 NULL_RTX)))
2427 fn_reg = SET_SRC (body);
2428 if (!REG_P (fn_reg))
2429 fn_reg = SET_DEST (body);
2430 fn_address = XEXP (n, 0);
2431 fn_address_insn = temp;
2433 /* We have the call insn.
2434 If it uses the register we suspect it might,
2435 load it with the correct address directly. */
2436 if (CALL_P (temp)
2437 && fn_address != 0
2438 && reg_referenced_p (fn_reg, body))
2439 loop_insn_emit_after (loop, 0, fn_address_insn,
2440 gen_move_insn
2441 (fn_reg, fn_address));
2443 if (CALL_P (temp))
2445 i1 = loop_call_insn_hoist (loop, body);
2446 /* Because the USAGE information potentially
2447 contains objects other than hard registers
2448 we need to copy it. */
2449 if (CALL_INSN_FUNCTION_USAGE (temp))
2450 CALL_INSN_FUNCTION_USAGE (i1)
2451 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2453 else
2454 i1 = loop_insn_hoist (loop, body);
2455 if (first == 0)
2456 first = i1;
2457 if (temp == fn_address_insn)
2458 fn_address_insn = i1;
2459 REG_NOTES (i1) = REG_NOTES (temp);
2460 REG_NOTES (temp) = NULL;
2461 delete_insn (temp);
2463 if (new_start == 0)
2464 new_start = first;
2466 if (m->savemode != VOIDmode)
2468 /* P sets REG to zero; but we should clear only
2469 the bits that are not covered by the mode
2470 m->savemode. */
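/* Worked example (editorial): for m->savemode == QImode (8 bits), the
   mask computed below is ((HOST_WIDE_INT) 1 << 8) - 1 == 0xff, so the
   hoisted sequence amounts to reg &= 0xff, clearing every bit except
   the low part that the loop still stores into.  */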
2471 rtx reg = m->set_dest;
2472 rtx sequence;
2473 rtx tem;
2475 start_sequence ();
2476 tem = expand_simple_binop
2477 (GET_MODE (reg), AND, reg,
2478 GEN_INT ((((HOST_WIDE_INT) 1
2479 << GET_MODE_BITSIZE (m->savemode)))
2480 - 1),
2481 reg, 1, OPTAB_LIB_WIDEN);
2482 if (tem == 0)
2483 abort ();
2484 if (tem != reg)
2485 emit_move_insn (reg, tem);
2486 sequence = get_insns ();
2487 end_sequence ();
2488 i1 = loop_insn_hoist (loop, sequence);
2490 else if (CALL_P (p))
2492 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2493 /* Because the USAGE information potentially
2494 contains objects other than hard registers
2495 we need to copy it. */
2496 if (CALL_INSN_FUNCTION_USAGE (p))
2497 CALL_INSN_FUNCTION_USAGE (i1)
2498 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2500 else if (count == m->consec && m->move_insn_first)
2502 rtx seq;
2503 /* The SET_SRC might not be invariant, so we must
2504 use the REG_EQUAL note. */
2505 start_sequence ();
2506 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2507 m->set_src);
2508 seq = get_insns ();
2509 end_sequence ();
2511 add_label_notes (m->set_src, seq);
2513 i1 = loop_insn_hoist (loop, seq);
2514 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2515 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2516 : REG_EQUAL, m->set_src);
2518 else if (m->insert_temp)
2520 rtx *reg_map2 = xcalloc (REGNO (newreg),
2521 sizeof(rtx));
2522 reg_map2 [m->regno] = newreg;
2524 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2525 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2526 free (reg_map2);
2528 else
2529 i1 = loop_insn_hoist (loop, PATTERN (p));
2531 if (REG_NOTES (i1) == 0)
2533 REG_NOTES (i1) = REG_NOTES (p);
2534 REG_NOTES (p) = NULL;
2536 /* If there is a REG_EQUAL note present whose value
2537 is not loop invariant, then delete it, since it
2538 may cause problems with later optimization passes.
2539 It is possible for cse to create such notes
2540 like this as a result of record_jump_cond. */
2542 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2543 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2544 remove_note (i1, temp);
2547 if (new_start == 0)
2548 new_start = i1;
2550 if (loop_dump_stream)
2551 fprintf (loop_dump_stream, " moved to %d",
2552 INSN_UID (i1));
2554 /* If library call, now fix the REG_NOTES that contain
2555 insn pointers, namely REG_LIBCALL on FIRST
2556 and REG_RETVAL on I1. */
2557 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2559 XEXP (temp, 0) = first;
2560 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2561 XEXP (temp, 0) = i1;
2564 temp = p;
2565 delete_insn (p);
2566 p = NEXT_INSN (p);
2568 /* simplify_giv_expr expects that it can walk the insns
2569 at m->insn forwards and see this old sequence we are
2570 tossing here. delete_insn does preserve the next
2571 pointers, but when we skip over a NOTE we must fix
2572 it up. Otherwise that code walks into the non-deleted
2573 insn stream. */
2574 while (p && NOTE_P (p))
2575 p = NEXT_INSN (temp) = NEXT_INSN (p);
2577 if (m->insert_temp)
2579 rtx seq;
2580 /* Replace the original insn with a move from
2581 our newly created temp. */
2582 start_sequence ();
2583 emit_move_insn (m->set_dest, newreg);
2584 seq = get_insns ();
2585 end_sequence ();
2586 emit_insn_before (seq, p);
2590 /* The more regs we move, the less we like moving them. */
2591 threshold -= 3;
2594 m->done = 1;
2596 if (!m->insert_temp)
2598 /* Any other movable that loads the same register
2599 MUST be moved. */
2600 already_moved[regno] = 1;
2602 /* This reg has been moved out of one loop. */
2603 regs->array[regno].moved_once = 1;
2605 /* The reg set here is now invariant. */
2606 if (! m->partial)
2608 int i;
2609 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2610 regs->array[regno+i].set_in_loop = 0;
2613 /* Change the length-of-life info for the register
2614 to say it lives at least the full length of this loop.
2615 This will help guide optimizations in outer loops. */
2617 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2618 /* This is the old insn before all the moved insns.
2619 We can't use the moved insn because it is out of range
2620 in uid_luid. Only the old insns have luids. */
2621 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2622 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2623 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2626 /* Combine with this moved insn any other matching movables. */
2628 if (! m->partial)
2629 for (m1 = movables->head; m1; m1 = m1->next)
2630 if (m1->match == m)
2632 rtx temp;
2634 /* Schedule the reg loaded by M1
2635 for replacement so that it shares the reg of M.
2636 If the modes differ (only possible in restricted
2637 circumstances), make a SUBREG.
2639 Note this assumes that the target dependent files
2640 treat REG and SUBREG equally, including within
2641 GO_IF_LEGITIMATE_ADDRESS and in all the
2642 predicates since we never verify that replacing the
2643 original register with a SUBREG results in a
2644 recognizable insn. */
2645 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2646 reg_map[m1->regno] = m->set_dest;
2647 else
2648 reg_map[m1->regno]
2649 = gen_lowpart_common (GET_MODE (m1->set_dest),
2650 m->set_dest);
2652 /* Get rid of the matching insn
2653 and prevent further processing of it. */
2654 m1->done = 1;
2656 /* If library call, delete all insns. */
2657 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2658 NULL_RTX)))
2659 delete_insn_chain (XEXP (temp, 0), m1->insn);
2660 else
2661 delete_insn (m1->insn);
2663 /* Any other movable that loads the same register
2664 MUST be moved. */
2665 already_moved[m1->regno] = 1;
2667 /* The reg merged here is now invariant,
2668 if the reg it matches is invariant. */
2669 if (! m->partial)
2671 int i;
2672 for (i = 0;
2673 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2674 i++)
2675 regs->array[m1->regno+i].set_in_loop = 0;
2679 else if (loop_dump_stream)
2680 fprintf (loop_dump_stream, "not desirable");
2682 else if (loop_dump_stream && !m->match)
2683 fprintf (loop_dump_stream, "not safe");
2685 if (loop_dump_stream)
2686 fprintf (loop_dump_stream, "\n");
2689 if (new_start == 0)
2690 new_start = loop_start;
2692 /* Go through all the instructions in the loop, making
2693 all the register substitutions scheduled in REG_MAP. */
2694 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2695 if (INSN_P (p))
2697 replace_regs (PATTERN (p), reg_map, nregs, 0);
2698 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2699 INSN_CODE (p) = -1;
2702 /* Clean up. */
2703 free (reg_map);
2704 free (already_moved);
2708 static void
2709 loop_movables_add (struct loop_movables *movables, struct movable *m)
2711 if (movables->head == 0)
2712 movables->head = m;
2713 else
2714 movables->last->next = m;
2715 movables->last = m;
2719 static void
2720 loop_movables_free (struct loop_movables *movables)
2722 struct movable *m;
2723 struct movable *m_next;
2725 for (m = movables->head; m; m = m_next)
2727 m_next = m->next;
2728 free (m);
2732 #if 0
2733 /* Scan X and replace the address of any MEM in it with ADDR.
2734 REG is the address that MEM should have before the replacement. */
2736 static void
2737 replace_call_address (rtx x, rtx reg, rtx addr)
2739 enum rtx_code code;
2740 int i;
2741 const char *fmt;
2743 if (x == 0)
2744 return;
2745 code = GET_CODE (x);
2746 switch (code)
2748 case PC:
2749 case CC0:
2750 case CONST_INT:
2751 case CONST_DOUBLE:
2752 case CONST:
2753 case SYMBOL_REF:
2754 case LABEL_REF:
2755 case REG:
2756 return;
2758 case SET:
2759 /* Short cut for very common case. */
2760 replace_call_address (XEXP (x, 1), reg, addr);
2761 return;
2763 case CALL:
2764 /* Short cut for very common case. */
2765 replace_call_address (XEXP (x, 0), reg, addr);
2766 return;
2768 case MEM:
2769 /* If this MEM uses a reg other than the one we expected,
2770 something is wrong. */
2771 if (XEXP (x, 0) != reg)
2772 abort ();
2773 XEXP (x, 0) = addr;
2774 return;
2776 default:
2777 break;
2780 fmt = GET_RTX_FORMAT (code);
2781 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2783 if (fmt[i] == 'e')
2784 replace_call_address (XEXP (x, i), reg, addr);
2785 else if (fmt[i] == 'E')
2787 int j;
2788 for (j = 0; j < XVECLEN (x, i); j++)
2789 replace_call_address (XVECEXP (x, i, j), reg, addr);
2793 #endif
2795 /* Return the number of memory refs to addresses that vary
2796 in the rtx X. */
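/* Illustrative sketch (editorial): for
   (mem:SI (plus:SI (reg:SI 100) (const_int 4))) the result is 0 when
   the address is loop-invariant and 1 when it is not; for a nested
   (mem:SI (mem:SI (reg:SI 100))) both MEMs are tested, so the count
   can reach 2.  */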
2798 static int
2799 count_nonfixed_reads (const struct loop *loop, rtx x)
2801 enum rtx_code code;
2802 int i;
2803 const char *fmt;
2804 int value;
2806 if (x == 0)
2807 return 0;
2809 code = GET_CODE (x);
2810 switch (code)
2812 case PC:
2813 case CC0:
2814 case CONST_INT:
2815 case CONST_DOUBLE:
2816 case CONST:
2817 case SYMBOL_REF:
2818 case LABEL_REF:
2819 case REG:
2820 return 0;
2822 case MEM:
2823 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2824 + count_nonfixed_reads (loop, XEXP (x, 0)));
2826 default:
2827 break;
2830 value = 0;
2831 fmt = GET_RTX_FORMAT (code);
2832 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2834 if (fmt[i] == 'e')
2835 value += count_nonfixed_reads (loop, XEXP (x, i));
2836 if (fmt[i] == 'E')
2838 int j;
2839 for (j = 0; j < XVECLEN (x, i); j++)
2840 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2843 return value;
2846 /* Scan a loop setting the elements `loops_enclosed',
2847 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2848 `unknown_address_altered', `unknown_constant_address_altered', and
2849 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2850 list `store_mems' in LOOP. */
2852 static void
2853 prescan_loop (struct loop *loop)
2855 int level = 1;
2856 rtx insn;
2857 struct loop_info *loop_info = LOOP_INFO (loop);
2858 rtx start = loop->start;
2859 rtx end = loop->end;
2860 /* The label after END. Jumping here is just like falling off the
2861 end of the loop. We use next_nonnote_insn instead of next_label
2862 as a hedge against the (pathological) case where some actual insn
2863 might end up between the two. */
2864 rtx exit_target = next_nonnote_insn (end);
2866 loop_info->has_indirect_jump = indirect_jump_in_function;
2867 loop_info->pre_header_has_call = 0;
2868 loop_info->has_call = 0;
2869 loop_info->has_nonconst_call = 0;
2870 loop_info->has_prefetch = 0;
2871 loop_info->has_volatile = 0;
2872 loop_info->has_tablejump = 0;
2873 loop_info->has_multiple_exit_targets = 0;
2874 loop->level = 1;
2876 loop_info->unknown_address_altered = 0;
2877 loop_info->unknown_constant_address_altered = 0;
2878 loop_info->store_mems = NULL_RTX;
2879 loop_info->first_loop_store_insn = NULL_RTX;
2880 loop_info->mems_idx = 0;
2881 loop_info->num_mem_sets = 0;
2883 for (insn = start; insn && !LABEL_P (insn);
2884 insn = PREV_INSN (insn))
2886 if (CALL_P (insn))
2888 loop_info->pre_header_has_call = 1;
2889 break;
2893 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2894 insn = NEXT_INSN (insn))
2896 switch (GET_CODE (insn))
2898 case NOTE:
2899 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2901 ++level;
2902 /* Count number of loops contained in this one. */
2903 loop->level++;
2905 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2906 --level;
2907 break;
2909 case CALL_INSN:
2910 if (! CONST_OR_PURE_CALL_P (insn))
2912 loop_info->unknown_address_altered = 1;
2913 loop_info->has_nonconst_call = 1;
2915 else if (pure_call_p (insn))
2916 loop_info->has_nonconst_call = 1;
2917 loop_info->has_call = 1;
2918 if (can_throw_internal (insn))
2919 loop_info->has_multiple_exit_targets = 1;
2920 break;
2922 case JUMP_INSN:
2923 if (! loop_info->has_multiple_exit_targets)
2925 rtx set = pc_set (insn);
2927 if (set)
2929 rtx src = SET_SRC (set);
2930 rtx label1, label2;
2932 if (GET_CODE (src) == IF_THEN_ELSE)
2934 label1 = XEXP (src, 1);
2935 label2 = XEXP (src, 2);
2937 else
2939 label1 = src;
2940 label2 = NULL_RTX;
2943 do
2945 if (label1 && label1 != pc_rtx)
2947 if (GET_CODE (label1) != LABEL_REF)
2949 /* Something tricky. */
2950 loop_info->has_multiple_exit_targets = 1;
2951 break;
2953 else if (XEXP (label1, 0) != exit_target
2954 && LABEL_OUTSIDE_LOOP_P (label1))
2956 /* A jump outside the current loop. */
2957 loop_info->has_multiple_exit_targets = 1;
2958 break;
2962 label1 = label2;
2963 label2 = NULL_RTX;
2965 while (label1);
2967 else
2969 /* A return, or something tricky. */
2970 loop_info->has_multiple_exit_targets = 1;
2973 /* Fall through. */
2975 case INSN:
2976 if (volatile_refs_p (PATTERN (insn)))
2977 loop_info->has_volatile = 1;
2979 if (JUMP_P (insn)
2980 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2981 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2982 loop_info->has_tablejump = 1;
2984 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2985 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2986 loop_info->first_loop_store_insn = insn;
2988 if (flag_non_call_exceptions && can_throw_internal (insn))
2989 loop_info->has_multiple_exit_targets = 1;
2990 break;
2992 default:
2993 break;
2997 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2998 if (/* An exception thrown by a called function might land us
2999 anywhere. */
3000 ! loop_info->has_nonconst_call
3001 /* We don't want loads for MEMs moved to a location before the
3002 one at which their stack memory becomes allocated. (Note
3003 that this is not a problem for malloc, etc., since those
3004 require actual function calls.) */
3005 && ! current_function_calls_alloca
3006 /* There are ways to leave the loop other than falling off the
3007 end. */
3008 && ! loop_info->has_multiple_exit_targets)
3009 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
3010 insn = NEXT_INSN (insn))
3011 for_each_rtx (&insn, insert_loop_mem, loop_info);
3013 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
3014 that loop_invariant_p and load_mems can use true_dependence
3015 to determine what is really clobbered. */
3016 if (loop_info->unknown_address_altered)
3018 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3020 loop_info->store_mems
3021 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3023 if (loop_info->unknown_constant_address_altered)
3025 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
3026 MEM_READONLY_P (mem) = 1;
3027 loop_info->store_mems
3028 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
3032 /* Invalidate all loops containing LABEL. */
3034 static void
3035 invalidate_loops_containing_label (rtx label)
3037 struct loop *loop;
3038 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
3039 loop->invalid = 1;
3042 /* Scan the function looking for loops. Record the start and end of each loop.
3043 Also mark as invalid loops any loops that contain a setjmp or are branched
3044 to from outside the loop. */
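/* Illustrative sketch (editorial): the scan below keys off the loop
   notes already in the insn stream, e.g.

     NOTE_INSN_LOOP_BEG      ;; outer loop: push a new struct loop
       NOTE_INSN_LOOP_BEG    ;; inner loop: its `outer' field -> outer
       NOTE_INSN_LOOP_END    ;; pop back to the outer loop
     NOTE_INSN_LOOP_END      ;; pop back to toplevel

   A REG_SETJMP call seen while inside this nesting invalidates the
   current loop and everything enclosing it.  */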
3046 static void
3047 find_and_verify_loops (rtx f, struct loops *loops)
3049 rtx insn;
3050 rtx label;
3051 int num_loops;
3052 struct loop *current_loop;
3053 struct loop *next_loop;
3054 struct loop *loop;
3056 num_loops = loops->num;
3058 compute_luids (f, NULL_RTX, 0);
3060 /* If there are jumps to undefined labels,
3061 treat them as jumps out of any/all loops.
3062 This also avoids writing past end of tables when there are no loops. */
3063 uid_loop[0] = NULL;
3065 /* Find boundaries of loops, mark which loops are contained within
3066 loops, and invalidate loops that have setjmp. */
3068 num_loops = 0;
3069 current_loop = NULL;
3070 for (insn = f; insn; insn = NEXT_INSN (insn))
3072 if (NOTE_P (insn))
3073 switch (NOTE_LINE_NUMBER (insn))
3075 case NOTE_INSN_LOOP_BEG:
3076 next_loop = loops->array + num_loops;
3077 next_loop->num = num_loops;
3078 num_loops++;
3079 next_loop->start = insn;
3080 next_loop->outer = current_loop;
3081 current_loop = next_loop;
3082 break;
3084 case NOTE_INSN_LOOP_END:
3085 if (! current_loop)
3086 abort ();
3088 current_loop->end = insn;
3089 current_loop = current_loop->outer;
3090 break;
3092 default:
3093 break;
3096 if (CALL_P (insn)
3097 && find_reg_note (insn, REG_SETJMP, NULL))
3099 /* In this case, we must invalidate our current loop and any
3100 enclosing loop. */
3101 for (loop = current_loop; loop; loop = loop->outer)
3103 loop->invalid = 1;
3104 if (loop_dump_stream)
3105 fprintf (loop_dump_stream,
3106 "\nLoop at %d ignored due to setjmp.\n",
3107 INSN_UID (loop->start));
3111 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
3112 enclosing loop, but this doesn't matter. */
3113 uid_loop[INSN_UID (insn)] = current_loop;
3116 /* Any loop containing a label used in an initializer must be invalidated,
3117 because it can be jumped into from anywhere. */
3118 for (label = forced_labels; label; label = XEXP (label, 1))
3119 invalidate_loops_containing_label (XEXP (label, 0));
3121 /* Any loop containing a label used for an exception handler must be
3122 invalidated, because it can be jumped into from anywhere. */
3123 for_each_eh_label (invalidate_loops_containing_label);
3125 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
3126 loop that it is not contained within, that loop is marked invalid.
3127 If any INSN or CALL_INSN uses a label's address, then the loop containing
3128 that label is marked invalid, because it could be jumped into from
3129 anywhere.
3131 Also look for blocks of code ending in an unconditional branch that
3132 exits the loop. If such a block is surrounded by a conditional
3133 branch around the block, move the block elsewhere (see below) and
3134 invert the jump to point to the code block. This may eliminate a
3135 label in our loop and will simplify processing by both us and a
3136 possible second cse pass. */
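/* Illustrative sketch (editorial) of the block-moving case:

     before:   if (cond) goto L1;      after:   if (! cond) goto L2;
               goto Lexit;                    L1: ...loop body...
             L1: ...loop body...

   where "L2: goto Lexit;" has been moved next to a BARRIER outside the
   loop.  The rarely taken exit jump no longer sits inside the loop; at
   worst one extra unconditional jump is paid outside it.  */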
3138 for (insn = f; insn; insn = NEXT_INSN (insn))
3139 if (INSN_P (insn))
3141 struct loop *this_loop = uid_loop[INSN_UID (insn)];
3143 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
3145 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
3146 if (note)
3147 invalidate_loops_containing_label (XEXP (note, 0));
3150 if (!JUMP_P (insn))
3151 continue;
3153 mark_loop_jump (PATTERN (insn), this_loop);
3155 /* See if this is an unconditional branch outside the loop. */
3156 if (this_loop
3157 && (GET_CODE (PATTERN (insn)) == RETURN
3158 || (any_uncondjump_p (insn)
3159 && onlyjump_p (insn)
3160 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
3161 != this_loop)))
3162 && get_max_uid () < max_uid_for_loop)
3164 rtx p;
3165 rtx our_next = next_real_insn (insn);
3166 rtx last_insn_to_move = NEXT_INSN (insn);
3167 struct loop *dest_loop;
3168 struct loop *outer_loop = NULL;
3170 /* Go backwards until we reach the start of the loop, a label,
3171 or a JUMP_INSN. */
3172 for (p = PREV_INSN (insn);
3173 !LABEL_P (p)
3174 && ! (NOTE_P (p)
3175 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3176 && !JUMP_P (p);
3177 p = PREV_INSN (p))
3178 ;
3180 /* Check for the case where we have a jump to an inner nested
3181 loop, and do not perform the optimization in that case. */
3183 if (JUMP_LABEL (insn))
3185 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
3186 if (dest_loop)
3188 for (outer_loop = dest_loop; outer_loop;
3189 outer_loop = outer_loop->outer)
3190 if (outer_loop == this_loop)
3191 break;
3195 /* Make sure that the target of P is within the current loop. */
3197 if (JUMP_P (p) && JUMP_LABEL (p)
3198 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
3199 outer_loop = this_loop;
3201 /* If we stopped on a JUMP_INSN to the next insn after INSN,
3202 we have a block of code to try to move.
3204 We look backward and then forward from the target of INSN
3205 to find a BARRIER at the same loop depth as the target.
3206 If we find such a BARRIER, we make a new label for the start
3207 of the block, invert the jump in P and point it to that label,
3208 and move the block of code to the spot we found. */
3210 if (! outer_loop
3211 && JUMP_P (p)
3212 && JUMP_LABEL (p) != 0
3213 /* Just ignore jumps to labels that were never emitted.
3214 These always indicate compilation errors. */
3215 && INSN_UID (JUMP_LABEL (p)) != 0
3216 && any_condjump_p (p) && onlyjump_p (p)
3217 && next_real_insn (JUMP_LABEL (p)) == our_next
3218 /* If it's not safe to move the sequence, then we
3219 mustn't try. */
3220 && insns_safe_to_move_p (p, NEXT_INSN (insn),
3221 &last_insn_to_move))
3223 rtx target
3224 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
3225 struct loop *target_loop = uid_loop[INSN_UID (target)];
3226 rtx loc, loc2;
3227 rtx tmp;
3229 /* Search for possible garbage past the conditional jumps
3230 and look for the last barrier. */
3231 for (tmp = last_insn_to_move;
3232 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
3233 if (BARRIER_P (tmp))
3234 last_insn_to_move = tmp;
3236 for (loc = target; loc; loc = PREV_INSN (loc))
3237 if (BARRIER_P (loc)
3238 /* Don't move things inside a tablejump. */
3239 && ((loc2 = next_nonnote_insn (loc)) == 0
3240 || !LABEL_P (loc2)
3241 || (loc2 = next_nonnote_insn (loc2)) == 0
3242 || !JUMP_P (loc2)
3243 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3244 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3245 && uid_loop[INSN_UID (loc)] == target_loop)
3246 break;
3248 if (loc == 0)
3249 for (loc = target; loc; loc = NEXT_INSN (loc))
3250 if (BARRIER_P (loc)
3251 /* Don't move things inside a tablejump. */
3252 && ((loc2 = next_nonnote_insn (loc)) == 0
3253 || !LABEL_P (loc2)
3254 || (loc2 = next_nonnote_insn (loc2)) == 0
3255 || !JUMP_P (loc2)
3256 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
3257 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
3258 && uid_loop[INSN_UID (loc)] == target_loop)
3259 break;
3261 if (loc)
3263 rtx cond_label = JUMP_LABEL (p);
3264 rtx new_label = get_label_after (p);
3266 /* Ensure our label doesn't go away. */
3267 LABEL_NUSES (cond_label)++;
3269 /* Verify that uid_loop is large enough and that
3270 we can invert P. */
3271 if (invert_jump (p, new_label, 1))
3273 rtx q, r;
3275 /* If no suitable BARRIER was found, create a suitable
3276 one before TARGET. Since TARGET is a fall through
3277 path, we'll need to insert a jump around our block
3278 and add a BARRIER before TARGET.
3280 This creates an extra unconditional jump outside
3281 the loop. However, the benefits of removing rarely
3282 executed instructions from inside the loop usually
3283 outweigh the cost of the extra unconditional jump
3284 outside the loop. */
3285 if (loc == 0)
3287 rtx temp;
3289 temp = gen_jump (JUMP_LABEL (insn));
3290 temp = emit_jump_insn_before (temp, target);
3291 JUMP_LABEL (temp) = JUMP_LABEL (insn);
3292 LABEL_NUSES (JUMP_LABEL (insn))++;
3293 loc = emit_barrier_before (target);
3296 /* Include the BARRIER after INSN and copy the
3297 block after LOC. */
3298 if (squeeze_notes (&new_label, &last_insn_to_move))
3299 abort ();
3300 reorder_insns (new_label, last_insn_to_move, loc);
3302 /* All those insns are now in TARGET_LOOP. */
3303 for (q = new_label;
3304 q != NEXT_INSN (last_insn_to_move);
3305 q = NEXT_INSN (q))
3306 uid_loop[INSN_UID (q)] = target_loop;
3308 /* The label jumped to by INSN is no longer a loop
3309 exit. Unless INSN does not have a label (e.g.,
3310 it is a RETURN insn), search loop->exit_labels
3311 to find its label_ref, and remove it. Also turn
3312 off LABEL_OUTSIDE_LOOP_P bit. */
3313 if (JUMP_LABEL (insn))
3315 for (q = 0, r = this_loop->exit_labels;
3316 r;
3317 q = r, r = LABEL_NEXTREF (r))
3318 if (XEXP (r, 0) == JUMP_LABEL (insn))
3320 LABEL_OUTSIDE_LOOP_P (r) = 0;
3321 if (q)
3322 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3323 else
3324 this_loop->exit_labels = LABEL_NEXTREF (r);
3325 break;
3328 for (loop = this_loop; loop && loop != target_loop;
3329 loop = loop->outer)
3330 loop->exit_count--;
3332 /* If we didn't find it, then something is
3333 wrong. */
3334 if (! r)
3335 abort ();
3338 /* P is now a jump outside the loop, so it must be put
3339 in loop->exit_labels, and marked as such.
3340 The easiest way to do this is to just call
3341 mark_loop_jump again for P. */
3342 mark_loop_jump (PATTERN (p), this_loop);
3344 /* If INSN now jumps to the insn after it,
3345 delete INSN. */
3346 if (JUMP_LABEL (insn) != 0
3347 && (next_real_insn (JUMP_LABEL (insn))
3348 == next_real_insn (insn)))
3349 delete_related_insns (insn);
3352 /* Continue the loop after where the conditional
3353 branch used to jump, since the only branch insn
3354 in the block (if it still remains) is an inter-loop
3355 branch and hence needs no processing. */
3356 insn = NEXT_INSN (cond_label);
3358 if (--LABEL_NUSES (cond_label) == 0)
3359 delete_related_insns (cond_label);
3361 /* This loop will be continued with NEXT_INSN (insn). */
3362 insn = PREV_INSN (insn);
3369 /* If any label in X jumps to a loop different from LOOP and any of the
3370 loops it is contained in, mark the target loop invalid.
3372 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3374 static void
3375 mark_loop_jump (rtx x, struct loop *loop)
3377 struct loop *dest_loop;
3378 struct loop *outer_loop;
3379 int i;
3381 switch (GET_CODE (x))
3383 case PC:
3384 case USE:
3385 case CLOBBER:
3386 case REG:
3387 case MEM:
3388 case CONST_INT:
3389 case CONST_DOUBLE:
3390 case RETURN:
3391 return;
3393 case CONST:
3394 /* There could be a label reference in here. */
3395 mark_loop_jump (XEXP (x, 0), loop);
3396 return;
3398 case PLUS:
3399 case MINUS:
3400 case MULT:
3401 mark_loop_jump (XEXP (x, 0), loop);
3402 mark_loop_jump (XEXP (x, 1), loop);
3403 return;
3405 case LO_SUM:
3406 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3407 mark_loop_jump (XEXP (x, 1), loop);
3408 return;
3410 case SIGN_EXTEND:
3411 case ZERO_EXTEND:
3412 mark_loop_jump (XEXP (x, 0), loop);
3413 return;
3415 case LABEL_REF:
3416 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3418 /* Link together all labels that branch outside the loop. This
3419 is used by final_[bg]iv_value and the loop unrolling code. Also
3420 mark this LABEL_REF so we know that this branch should predict
3421 false. */
3423 /* A check to make sure the label is not in an inner nested loop,
3424 since this does not count as a loop exit. */
3425 if (dest_loop)
3427 for (outer_loop = dest_loop; outer_loop;
3428 outer_loop = outer_loop->outer)
3429 if (outer_loop == loop)
3430 break;
3432 else
3433 outer_loop = NULL;
3435 if (loop && ! outer_loop)
3437 LABEL_OUTSIDE_LOOP_P (x) = 1;
3438 LABEL_NEXTREF (x) = loop->exit_labels;
3439 loop->exit_labels = x;
3441 for (outer_loop = loop;
3442 outer_loop && outer_loop != dest_loop;
3443 outer_loop = outer_loop->outer)
3444 outer_loop->exit_count++;
3447 /* If this is inside a loop, but not in the current loop or one enclosed
3448 by it, it invalidates at least one loop. */
3450 if (! dest_loop)
3451 return;
3453 /* We must invalidate every nested loop containing the target of this
3454 label, except those that also contain the jump insn. */
3456 for (; dest_loop; dest_loop = dest_loop->outer)
3458 /* Stop when we reach a loop that also contains the jump insn. */
3459 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3460 if (dest_loop == outer_loop)
3461 return;
3463 /* If we get here, we know we need to invalidate a loop. */
3464 if (loop_dump_stream && ! dest_loop->invalid)
3465 fprintf (loop_dump_stream,
3466 "\nLoop at %d ignored due to multiple entry points.\n",
3467 INSN_UID (dest_loop->start));
3469 dest_loop->invalid = 1;
3471 return;
3473 case SET:
3474 /* If this is not setting pc, ignore. */
3475 if (SET_DEST (x) == pc_rtx)
3476 mark_loop_jump (SET_SRC (x), loop);
3477 return;
3479 case IF_THEN_ELSE:
3480 mark_loop_jump (XEXP (x, 1), loop);
3481 mark_loop_jump (XEXP (x, 2), loop);
3482 return;
3484 case PARALLEL:
3485 case ADDR_VEC:
3486 for (i = 0; i < XVECLEN (x, 0); i++)
3487 mark_loop_jump (XVECEXP (x, 0, i), loop);
3488 return;
3490 case ADDR_DIFF_VEC:
3491 for (i = 0; i < XVECLEN (x, 1); i++)
3492 mark_loop_jump (XVECEXP (x, 1, i), loop);
3493 return;
3495 default:
3496 /* Strictly speaking this is not a jump into the loop, only a possible
3497 jump out of the loop. However, we have no way to link the destination
3498 of this jump onto the list of exit labels. To be safe we mark this
3499 loop and any containing loops as invalid. */
3500 if (loop)
3502 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3504 if (loop_dump_stream && ! outer_loop->invalid)
3505 fprintf (loop_dump_stream,
3506 "\nLoop at %d ignored due to unknown exit jump.\n",
3507 INSN_UID (outer_loop->start));
3508 outer_loop->invalid = 1;
3511 return;
3515 /* Return nonzero if there is a label in the range from
3516 insn INSN to and including the insn whose luid is END.
3517 INSN must have an assigned luid (i.e., it must not have
3518 been previously created by loop.c). */
3520 static int
3521 labels_in_range_p (rtx insn, int end)
3523 while (insn && INSN_LUID (insn) <= end)
3525 if (LABEL_P (insn))
3526 return 1;
3527 insn = NEXT_INSN (insn);
3530 return 0;
3533 /* Record that a memory reference X is being set. */
3535 static void
3536 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3537 void *data ATTRIBUTE_UNUSED)
3539 struct loop_info *loop_info = data;
3541 if (x == 0 || !MEM_P (x))
3542 return;
3544 /* Count number of memory writes.
3545 This affects heuristics in strength_reduce. */
3546 loop_info->num_mem_sets++;
3548 /* BLKmode MEM means all memory is clobbered. */
3549 if (GET_MODE (x) == BLKmode)
3551 if (MEM_READONLY_P (x))
3552 loop_info->unknown_constant_address_altered = 1;
3553 else
3554 loop_info->unknown_address_altered = 1;
3556 return;
3559 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3560 loop_info->store_mems);
3563 /* X is a value modified by an INSN that references a biv inside a loop
3564 exit test (i.e., X is somehow related to the value of the biv). If X
3565 is a pseudo that is used more than once, then the biv is (effectively)
3566 used more than once. DATA is a pointer to a loop_regs structure. */
3568 static void
3569 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3571 struct loop_regs *regs = (struct loop_regs *) data;
3573 if (x == 0)
3574 return;
3576 while (GET_CODE (x) == STRICT_LOW_PART
3577 || GET_CODE (x) == SIGN_EXTRACT
3578 || GET_CODE (x) == ZERO_EXTRACT
3579 || GET_CODE (x) == SUBREG)
3580 x = XEXP (x, 0);
3582 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3583 return;
3585 /* If we do not have usage information, or if we know the register
3586 is used more than once, note that fact for check_dbra_loop. */
3587 if (REGNO (x) >= max_reg_before_loop
3588 || ! regs->array[REGNO (x)].single_usage
3589 || regs->array[REGNO (x)].single_usage == const0_rtx)
3590 regs->multiple_uses = 1;
3593 /* Return nonzero if the rtx X is invariant over the current loop.
3595 The value is 2 if we refer to something only conditionally invariant.
3597 A memory ref is invariant if it is not volatile and does not conflict
3598 with anything stored in `loop_info->store_mems'. */
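/* As an illustration (a sketch, not from the surrounding code): in a loop like

	for (i = 0; i < n; i++)
	  x[i] = a * b;

   where neither A nor B is set inside the loop, both registers have
   set_in_loop == 0, so (mult (reg A) (reg B)) is invariant and the
   return value is 1.  The return value is 2 when the result depends on
   a register whose single set may itself be moved out of the loop
   (set_in_loop < 0), i.e. it is only conditionally invariant.  */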
3600 static int
3601 loop_invariant_p (const struct loop *loop, rtx x)
3603 struct loop_info *loop_info = LOOP_INFO (loop);
3604 struct loop_regs *regs = LOOP_REGS (loop);
3605 int i;
3606 enum rtx_code code;
3607 const char *fmt;
3608 int conditional = 0;
3609 rtx mem_list_entry;
3611 if (x == 0)
3612 return 1;
3613 code = GET_CODE (x);
3614 switch (code)
3616 case CONST_INT:
3617 case CONST_DOUBLE:
3618 case SYMBOL_REF:
3619 case CONST:
3620 return 1;
3622 case LABEL_REF:
3623 return 1;
3625 case PC:
3626 case CC0:
3627 case UNSPEC_VOLATILE:
3628 return 0;
3630 case REG:
3631 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3632 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3633 && ! current_function_has_nonlocal_goto)
3634 return 1;
3636 if (LOOP_INFO (loop)->has_call
3637 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3638 return 0;
3640 /* Out-of-range regs can occur when we are called from unrolling.
3641 Registers created by the unroller are set in the loop,
3642 hence are never invariant.
3643 Other out-of-range regs can be generated by load_mems; those that
3644 are written to in the loop are not invariant, while those that are
3645 not written to are invariant. It would be easy for load_mems
3646 to set n_times_set correctly for these registers; however, there
3647 is no easy way to distinguish them from registers created by the
3648 unroller. */
3650 if (REGNO (x) >= (unsigned) regs->num)
3651 return 0;
3653 if (regs->array[REGNO (x)].set_in_loop < 0)
3654 return 2;
3656 return regs->array[REGNO (x)].set_in_loop == 0;
3658 case MEM:
3659 /* Volatile memory references must be rejected. Do this before
3660 checking for read-only items, so that volatile read-only items
3661 will be rejected also. */
3662 if (MEM_VOLATILE_P (x))
3663 return 0;
3665 /* See if there is any dependence between a store and this load. */
3666 mem_list_entry = loop_info->store_mems;
3667 while (mem_list_entry)
3669 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3670 x, rtx_varies_p))
3671 return 0;
3673 mem_list_entry = XEXP (mem_list_entry, 1);
3676 /* It's not invalidated by a store in memory
3677 but we must still verify the address is invariant. */
3678 break;
3680 case ASM_OPERANDS:
3681 /* Don't mess with insns declared volatile. */
3682 if (MEM_VOLATILE_P (x))
3683 return 0;
3684 break;
3686 default:
3687 break;
3690 fmt = GET_RTX_FORMAT (code);
3691 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3693 if (fmt[i] == 'e')
3695 int tem = loop_invariant_p (loop, XEXP (x, i));
3696 if (tem == 0)
3697 return 0;
3698 if (tem == 2)
3699 conditional = 1;
3701 else if (fmt[i] == 'E')
3703 int j;
3704 for (j = 0; j < XVECLEN (x, i); j++)
3706 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3707 if (tem == 0)
3708 return 0;
3709 if (tem == 2)
3710 conditional = 1;
3716 return 1 + conditional;
3719 /* Return nonzero if all the insns in the loop that set REG
3720 are INSN and the immediately following insns,
3721 and if each of those insns sets REG in an invariant way
3722 (not counting uses of REG in them).
3724 The value is 2 if some of these insns are only conditionally invariant.
3726 We assume that INSN itself is the first set of REG
3727 and that its source is invariant. */
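/* For example (illustrative only), the consecutive pair

	(set (reg R) (reg A))
	(set (reg R) (plus (reg R) (reg B)))

   with A and B invariant counts as one invariant computation of R when
   N_SETS is 2; set_in_loop for R is temporarily zeroed below so that
   the use of R in the second insn does not disqualify the pair.  */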
3729 static int
3730 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3731 rtx insn)
3733 struct loop_regs *regs = LOOP_REGS (loop);
3734 rtx p = insn;
3735 unsigned int regno = REGNO (reg);
3736 rtx temp;
3737 /* Number of sets we have to insist on finding after INSN. */
3738 int count = n_sets - 1;
3739 int old = regs->array[regno].set_in_loop;
3740 int value = 0;
3741 int this;
3743 /* If N_SETS hit the limit, we can't rely on its value. */
3744 if (n_sets == 127)
3745 return 0;
3747 regs->array[regno].set_in_loop = 0;
3749 while (count > 0)
3751 enum rtx_code code;
3752 rtx set;
3754 p = NEXT_INSN (p);
3755 code = GET_CODE (p);
3757 /* If library call, skip to end of it. */
3758 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3759 p = XEXP (temp, 0);
3761 this = 0;
3762 if (code == INSN
3763 && (set = single_set (p))
3764 && REG_P (SET_DEST (set))
3765 && REGNO (SET_DEST (set)) == regno)
3767 this = loop_invariant_p (loop, SET_SRC (set));
3768 if (this != 0)
3769 value |= this;
3770 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3772 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3773 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3774 notes are OK. */
3775 this = (CONSTANT_P (XEXP (temp, 0))
3776 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3777 && loop_invariant_p (loop, XEXP (temp, 0))));
3778 if (this != 0)
3779 value |= this;
3782 if (this != 0)
3783 count--;
3784 else if (code != NOTE)
3786 regs->array[regno].set_in_loop = old;
3787 return 0;
3791 regs->array[regno].set_in_loop = old;
3792 /* If loop_invariant_p ever returned 2, we return 2. */
3793 return 1 + (value & 2);
3796 /* Look at all uses (not sets) of registers in X. For each, if it is
3797 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3798 a different insn, set USAGE[REGNO] to const0_rtx. */
3800 static void
3801 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3803 enum rtx_code code = GET_CODE (x);
3804 const char *fmt = GET_RTX_FORMAT (code);
3805 int i, j;
3807 if (code == REG)
3808 regs->array[REGNO (x)].single_usage
3809 = (regs->array[REGNO (x)].single_usage != 0
3810 && regs->array[REGNO (x)].single_usage != insn)
3811 ? const0_rtx : insn;
3813 else if (code == SET)
3815 /* Don't count SET_DEST if it is a REG; otherwise count things
3816 in SET_DEST because if a register is partially modified, it won't
3817 show up as a potential movable so we don't care how USAGE is set
3818 for it. */
3819 if (!REG_P (SET_DEST (x)))
3820 find_single_use_in_loop (regs, insn, SET_DEST (x));
3821 find_single_use_in_loop (regs, insn, SET_SRC (x));
3823 else
3824 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3826 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3827 find_single_use_in_loop (regs, insn, XEXP (x, i));
3828 else if (fmt[i] == 'E')
3829 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3830 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3834 /* Count and record any set in X which is contained in INSN. Update
3835 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3836 in X. */
3838 static void
3839 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3841 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3842 /* Don't move a reg that has an explicit clobber.
3843 It's not worth the pain to try to do it correctly. */
3844 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3846 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3848 rtx dest = SET_DEST (x);
3849 while (GET_CODE (dest) == SUBREG
3850 || GET_CODE (dest) == ZERO_EXTRACT
3851 || GET_CODE (dest) == SIGN_EXTRACT
3852 || GET_CODE (dest) == STRICT_LOW_PART)
3853 dest = XEXP (dest, 0);
3854 if (REG_P (dest))
3856 int i;
3857 int regno = REGNO (dest);
3858 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3860 /* If this is the first setting of this reg
3861 in the current basic block, and it was set before,
3862 it must be set in two basic blocks, so it cannot
3863 be moved out of the loop. */
3864 if (regs->array[regno].set_in_loop > 0
3865 && last_set[regno] == 0)
3866 regs->array[regno+i].may_not_optimize = 1;
3867 /* If this is not the first setting in the current basic block,
3868 see if the reg was used between the previous setting and this one.
3869 If so, neither one can be moved. */
3870 if (last_set[regno] != 0
3871 && reg_used_between_p (dest, last_set[regno], insn))
3872 regs->array[regno+i].may_not_optimize = 1;
3873 if (regs->array[regno+i].set_in_loop < 127)
3874 ++regs->array[regno+i].set_in_loop;
3875 last_set[regno+i] = insn;
3881 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3882 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3883 contained in insn INSN is used by any insn that precedes INSN in
3884 cyclic order starting from the loop entry point.
3886 We don't want to use INSN_LUID here because if we restrict INSN to those
3887 that have a valid INSN_LUID, it means we cannot move an invariant out
3888 from an inner loop past two loops. */
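/* In outline, the scan below visits insns in the cyclic order in which
   they execute,

	scan_start, ..., loop->end, loop->start, ..., insn

   wrapping from LOOP->END back to LOOP->START when the loop is entered
   somewhere in the middle of its body.  */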
3890 static int
3891 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3893 rtx reg = SET_DEST (set);
3894 rtx p;
3896 /* Scan forward checking for register usage. If we hit INSN, we
3897 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3898 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3900 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3901 return 1;
3903 if (p == loop->end)
3904 p = loop->start;
3907 return 0;
3911 /* Information we collect about arrays that we might want to prefetch. */
3912 struct prefetch_info
3914 struct iv_class *class; /* Class this prefetch is based on. */
3915 struct induction *giv; /* GIV this prefetch is based on. */
3916 rtx base_address; /* Start prefetching from this address plus
3917 index. */
3918 HOST_WIDE_INT index;
3919 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3920 iteration. */
3921 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3922 prefetch area in one iteration. */
3923 unsigned int total_bytes; /* Total bytes loop will access in this block.
3924 This is set only for loops with known
3925 iteration counts and is 0xffffffff
3926 otherwise. */
3927 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3928 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3929 unsigned int write : 1; /* 1 for read/write prefetches. */
3932 /* Data used by check_store function. */
3933 struct check_store_data
3935 rtx mem_address;
3936 int mem_write;
3939 static void check_store (rtx, rtx, void *);
3940 static void emit_prefetch_instructions (struct loop *);
3941 static int rtx_equal_for_prefetch_p (rtx, rtx);
3943 /* Set mem_write when mem_address is found. Used as callback to
3944 note_stores. */
3945 static void
3946 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3948 struct check_store_data *d = (struct check_store_data *) data;
3950 if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3951 d->mem_write = 1;
3954 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3955 important to get some addresses combined. Later more sophisticated
3956 transformations can be added when necessary.
3958 ??? The same trick of swapping operands is done in several other places.
3959 It would be nice to develop some common way to handle this. */
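/* For example, (plus (reg 100) (reg 101)) and (plus (reg 101) (reg 100))
   compare equal here even though rtx_equal_p would treat them as
   different, which lets prefetches to the same address be merged.  */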
3961 static int
3962 rtx_equal_for_prefetch_p (rtx x, rtx y)
3964 int i;
3965 int j;
3966 enum rtx_code code = GET_CODE (x);
3967 const char *fmt;
3969 if (x == y)
3970 return 1;
3971 if (code != GET_CODE (y))
3972 return 0;
3974 if (COMMUTATIVE_ARITH_P (x))
3976 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3977 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3978 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3979 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3982 /* Compare the elements. If any pair of corresponding elements fails to
3983 match, return 0 for the whole thing. */
3985 fmt = GET_RTX_FORMAT (code);
3986 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3988 switch (fmt[i])
3990 case 'w':
3991 if (XWINT (x, i) != XWINT (y, i))
3992 return 0;
3993 break;
3995 case 'i':
3996 if (XINT (x, i) != XINT (y, i))
3997 return 0;
3998 break;
4000 case 'E':
4001 /* Two vectors must have the same length. */
4002 if (XVECLEN (x, i) != XVECLEN (y, i))
4003 return 0;
4005 /* And the corresponding elements must match. */
4006 for (j = 0; j < XVECLEN (x, i); j++)
4007 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
4008 XVECEXP (y, i, j)) == 0)
4009 return 0;
4010 break;
4012 case 'e':
4013 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
4014 return 0;
4015 break;
4017 case 's':
4018 if (strcmp (XSTR (x, i), XSTR (y, i)))
4019 return 0;
4020 break;
4022 case 'u':
4023 /* These are just backpointers, so they don't matter. */
4024 break;
4026 case '0':
4027 break;
4029 /* It is believed that rtx's at this level will never
4030 contain anything but integers and other rtx's,
4031 except within LABEL_REFs and SYMBOL_REFs. */
4032 default:
4033 abort ();
4036 return 1;
4039 /* Remove constant addition value from the expression X (when present)
4040 and return it. */
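/* For example, given *X == (plus (reg 100) (const_int 16)), the return
   value is 16 and *X is rewritten to (reg 100); given a bare
   (const_int 8), the return value is 8 and *X becomes const0_rtx.  */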
4042 static HOST_WIDE_INT
4043 remove_constant_addition (rtx *x)
4045 HOST_WIDE_INT addval = 0;
4046 rtx exp = *x;
4048 /* Avoid clobbering a shared CONST expression. */
4049 if (GET_CODE (exp) == CONST)
4051 if (GET_CODE (XEXP (exp, 0)) == PLUS
4052 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
4053 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
4055 *x = XEXP (XEXP (exp, 0), 0);
4056 return INTVAL (XEXP (XEXP (exp, 0), 1));
4058 return 0;
4061 if (GET_CODE (exp) == CONST_INT)
4063 addval = INTVAL (exp);
4064 *x = const0_rtx;
4067 /* For a PLUS expression, recurse on both operands. */
4068 else if (GET_CODE (exp) == PLUS)
4070 addval += remove_constant_addition (&XEXP (exp, 0));
4071 addval += remove_constant_addition (&XEXP (exp, 1));
4073 /* If either operand was constant, the recursion above replaced it
4074 with const0_rtx; remove that extra zero from the expression. */
4075 if (XEXP (exp, 0) == const0_rtx)
4076 *x = XEXP (exp, 1);
4077 else if (XEXP (exp, 1) == const0_rtx)
4078 *x = XEXP (exp, 0);
4081 return addval;
4084 /* Attempt to identify accesses to arrays that are most likely to cause cache
4085 misses, and emit prefetch instructions a few prefetch blocks forward.
4087 To detect the arrays we use the GIV information that was collected by the
4088 strength reduction pass.
4090 The prefetch instructions are generated after the GIV information is collected
4091 and before the strength reduction process. The new GIVs are injected into
4092 the strength reduction tables, so the prefetch addresses are optimized as
4093 well.
4095 GIVs are split into base address, stride, and constant addition values.
4096 GIVs with the same address, stride and close addition values are combined
4097 into a single prefetch. Also writes to GIVs are detected, so that prefetch
4098 for write instructions can be used for the block we write to, on machines
4099 that support write prefetches.
4101 Several heuristics are used to determine when to prefetch. They are
4102 controlled by defined symbols that can be overridden for each target. */
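/* A worked example with made-up target numbers: in

	for (i = 0; i < n; i++)
	  sum += a[i];

   the DEST_ADDR giv for A has a constant stride of 4 bytes per
   iteration (assuming 4-byte elements) and bytes_accessed == 4, so its
   density is 4 * 100 / 4 = 100%.  With a 32-byte PREFETCH_BLOCK, a
   single prefetch insn per iteration, issued AHEAD blocks in front of
   the current access, keeps the array streaming through the cache.  */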
4104 static void
4105 emit_prefetch_instructions (struct loop *loop)
4107 int num_prefetches = 0;
4108 int num_real_prefetches = 0;
4109 int num_real_write_prefetches = 0;
4110 int num_prefetches_before = 0;
4111 int num_write_prefetches_before = 0;
4112 int ahead = 0;
4113 int i;
4114 struct iv_class *bl;
4115 struct induction *iv;
4116 struct prefetch_info info[MAX_PREFETCHES];
4117 struct loop_ivs *ivs = LOOP_IVS (loop);
4119 if (!HAVE_prefetch)
4120 return;
4122 /* Consider only loops without calls. When a call is made, the loop is
4123 probably slow enough to hide the memory read latency anyway. */
4124 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
4126 if (loop_dump_stream)
4127 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
4129 return;
4132 /* Don't prefetch in loops known to have few iterations. */
4133 if (PREFETCH_NO_LOW_LOOPCNT
4134 && LOOP_INFO (loop)->n_iterations
4135 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
4137 if (loop_dump_stream)
4138 fprintf (loop_dump_stream,
4139 "Prefetch: ignoring loop: not enough iterations.\n");
4140 return;
4143 /* Search all induction variables and pick those interesting for the prefetch
4144 machinery. */
4145 for (bl = ivs->list; bl; bl = bl->next)
4147 struct induction *biv = bl->biv, *biv1;
4148 int basestride = 0;
4150 biv1 = biv;
4152 /* Expect all BIVs to be executed in each iteration. This makes our
4153 analysis more conservative. */
4154 while (biv1)
4156 /* Discard non-constant additions that we can't handle well yet, and
4157 BIVs that are executed multiple times; such BIVs ought to be
4158 handled in the nested loop. We accept not_every_iteration BIVs,
4159 since these only result in larger strides and make our
4160 heuristics more conservative. */
4161 if (GET_CODE (biv->add_val) != CONST_INT)
4163 if (loop_dump_stream)
4165 fprintf (loop_dump_stream,
4166 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
4167 REGNO (biv->src_reg), INSN_UID (biv->insn));
4168 print_rtl (loop_dump_stream, biv->add_val);
4169 fprintf (loop_dump_stream, "\n");
4171 break;
4174 if (biv->maybe_multiple)
4176 if (loop_dump_stream)
4178 fprintf (loop_dump_stream,
4179 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
4180 REGNO (biv->src_reg), INSN_UID (biv->insn));
4181 print_rtl (loop_dump_stream, biv->add_val);
4182 fprintf (loop_dump_stream, "\n");
4184 break;
4187 basestride += INTVAL (biv1->add_val);
4188 biv1 = biv1->next_iv;
4191 if (biv1 || !basestride)
4192 continue;
4194 for (iv = bl->giv; iv; iv = iv->next_iv)
4196 rtx address;
4197 rtx temp;
4198 HOST_WIDE_INT index = 0;
4199 int add = 1;
4200 HOST_WIDE_INT stride = 0;
4201 int stride_sign = 1;
4202 struct check_store_data d;
4203 const char *ignore_reason = NULL;
4204 int size = GET_MODE_SIZE (GET_MODE (iv));
4206 /* See whether an induction variable is interesting to us and if
4207 not, report the reason. */
4208 if (iv->giv_type != DEST_ADDR)
4209 ignore_reason = "giv is not a destination address";
4211 /* We are interested only in constant stride memory references
4212 in order to be able to compute density easily. */
4213 else if (GET_CODE (iv->mult_val) != CONST_INT)
4214 ignore_reason = "stride is not constant";
4216 else
4218 stride = INTVAL (iv->mult_val) * basestride;
4219 if (stride < 0)
4221 stride = -stride;
4222 stride_sign = -1;
4225 /* On some targets, reversed order prefetches are not
4226 worthwhile. */
4227 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
4228 ignore_reason = "reversed order stride";
4230 /* Prefetch of accesses with an extreme stride might not be
4231 worthwhile, either. */
4232 else if (PREFETCH_NO_EXTREME_STRIDE
4233 && stride > PREFETCH_EXTREME_STRIDE)
4234 ignore_reason = "extreme stride";
4236 /* Ignore GIVs with varying add values; we can't predict the
4237 value for the next iteration. */
4238 else if (!loop_invariant_p (loop, iv->add_val))
4239 ignore_reason = "giv has varying add value";
4241 /* Ignore GIVs in the nested loops; they ought to have been
4242 handled already. */
4243 else if (iv->maybe_multiple)
4244 ignore_reason = "giv is in nested loop";
4247 if (ignore_reason != NULL)
4249 if (loop_dump_stream)
4250 fprintf (loop_dump_stream,
4251 "Prefetch: ignoring giv at %d: %s.\n",
4252 INSN_UID (iv->insn), ignore_reason);
4253 continue;
4256 /* Determine the pointer to the basic array we are examining. It is
4257 the sum of the BIV's initial value and the GIV's add_val. */
4258 address = copy_rtx (iv->add_val);
4259 temp = copy_rtx (bl->initial_value);
4261 address = simplify_gen_binary (PLUS, Pmode, temp, address);
4262 index = remove_constant_addition (&address);
4264 d.mem_write = 0;
4265 d.mem_address = *iv->location;
4267 /* When the GIV is not always executed, we might be better off
4268 not dirtying the cache pages. */
4269 if (PREFETCH_CONDITIONAL || iv->always_executed)
4270 note_stores (PATTERN (iv->insn), check_store, &d);
4271 else
4273 if (loop_dump_stream)
4274 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
4275 INSN_UID (iv->insn), "in conditional code.");
4276 continue;
4279 /* Attempt to find another prefetch to the same array and see if we
4280 can merge this one. */
4281 for (i = 0; i < num_prefetches; i++)
4282 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
4283 && stride == info[i].stride)
4285 /* If both access the same array (the same location, with only
4286 a small difference in constant indexes), merge the prefetches.
4287 Just do the later one; the earlier one will get prefetched
4288 from the previous iteration.
4289 The artificial threshold should not be too small, but also
4290 not bigger than the small portion of memory usually traversed
4291 by a single loop. */
4292 if (index >= info[i].index
4293 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4295 info[i].write |= d.mem_write;
4296 info[i].bytes_accessed += size;
4297 info[i].index = index;
4298 info[i].giv = iv;
4299 info[i].class = bl;
4300 info[num_prefetches].base_address = address;
4301 add = 0;
4302 break;
4305 if (index < info[i].index
4306 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4308 info[i].write |= d.mem_write;
4309 info[i].bytes_accessed += size;
4310 add = 0;
4311 break;
4315 /* Merging failed. */
4316 if (add)
4318 info[num_prefetches].giv = iv;
4319 info[num_prefetches].class = bl;
4320 info[num_prefetches].index = index;
4321 info[num_prefetches].stride = stride;
4322 info[num_prefetches].base_address = address;
4323 info[num_prefetches].write = d.mem_write;
4324 info[num_prefetches].bytes_accessed = size;
4325 num_prefetches++;
4326 if (num_prefetches >= MAX_PREFETCHES)
4328 if (loop_dump_stream)
4329 fprintf (loop_dump_stream,
4330 "Maximal number of prefetches exceeded.\n");
4331 return;
4337 for (i = 0; i < num_prefetches; i++)
4339 int density;
4341 /* Attempt to calculate the total number of bytes fetched by all
4342 iterations of the loop. Avoid overflow. */
4343 if (LOOP_INFO (loop)->n_iterations
4344 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4345 >= LOOP_INFO (loop)->n_iterations))
4346 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4347 else
4348 info[i].total_bytes = 0xffffffff;
4350 density = info[i].bytes_accessed * 100 / info[i].stride;
4352 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4353 if (PREFETCH_ONLY_DENSE_MEM)
4354 if (density * 256 > PREFETCH_DENSE_MEM * 100
4355 && (info[i].total_bytes / PREFETCH_BLOCK
4356 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4358 info[i].prefetch_before_loop = 1;
4359 info[i].prefetch_in_loop
4360 = (info[i].total_bytes / PREFETCH_BLOCK
4361 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4363 else
4365 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4366 if (loop_dump_stream)
4367 fprintf (loop_dump_stream,
4368 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4369 INSN_UID (info[i].giv->insn), density);
4371 else
4372 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4374 /* Find how many prefetch instructions we'll use within the loop. */
4375 if (info[i].prefetch_in_loop != 0)
4377 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4378 / PREFETCH_BLOCK);
4379 num_real_prefetches += info[i].prefetch_in_loop;
4380 if (info[i].write)
4381 num_real_write_prefetches += info[i].prefetch_in_loop;
4385 /* Determine how many iterations ahead to prefetch within the loop, based
4386 on how many prefetches we currently expect to do within the loop. */
4387 if (num_real_prefetches != 0)
4389 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4391 if (loop_dump_stream)
4392 fprintf (loop_dump_stream,
4393 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4394 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4395 num_real_prefetches = 0, num_real_write_prefetches = 0;
4398 /* We'll also use AHEAD to determine how many prefetch instructions to
4399 emit before a loop, so don't leave it zero. */
4400 if (ahead == 0)
4401 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4403 for (i = 0; i < num_prefetches; i++)
4405 /* Update if we've decided not to prefetch anything within the loop. */
4406 if (num_real_prefetches == 0)
4407 info[i].prefetch_in_loop = 0;
4409 /* Find how many prefetch instructions we'll use before the loop. */
4410 if (info[i].prefetch_before_loop != 0)
4412 int n = info[i].total_bytes / PREFETCH_BLOCK;
4413 if (n > ahead)
4414 n = ahead;
4415 info[i].prefetch_before_loop = n;
4416 num_prefetches_before += n;
4417 if (info[i].write)
4418 num_write_prefetches_before += n;
4421 if (loop_dump_stream)
4423 if (info[i].prefetch_in_loop == 0
4424 && info[i].prefetch_before_loop == 0)
4425 continue;
4426 fprintf (loop_dump_stream, "Prefetch insn: %d",
4427 INSN_UID (info[i].giv->insn));
4428 fprintf (loop_dump_stream,
4429 "; in loop: %d; before: %d; %s\n",
4430 info[i].prefetch_in_loop,
4431 info[i].prefetch_before_loop,
4432 info[i].write ? "read/write" : "read only");
4433 fprintf (loop_dump_stream,
4434 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4435 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4436 info[i].bytes_accessed, info[i].total_bytes);
4437 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4438 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4439 info[i].index, info[i].stride);
4440 print_rtl (loop_dump_stream, info[i].base_address);
4441 fprintf (loop_dump_stream, "\n");
4445 if (num_real_prefetches + num_prefetches_before > 0)
4447 /* Record that this loop uses prefetch instructions. */
4448 LOOP_INFO (loop)->has_prefetch = 1;
4450 if (loop_dump_stream)
4452 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4453 num_real_prefetches, num_real_write_prefetches);
4454 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4455 num_prefetches_before, num_write_prefetches_before);
4459 for (i = 0; i < num_prefetches; i++)
4461 int y;
4463 for (y = 0; y < info[i].prefetch_in_loop; y++)
4465 rtx loc = copy_rtx (*info[i].giv->location);
4466 rtx insn;
4467 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4468 rtx before_insn = info[i].giv->insn;
4469 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4470 rtx seq;
4472 /* We can save some effort by offsetting the address on
4473 architectures with offsettable memory references. */
4474 if (offsettable_address_p (0, VOIDmode, loc))
4475 loc = plus_constant (loc, bytes_ahead);
4476 else
4478 rtx reg = gen_reg_rtx (Pmode);
4479 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4480 GEN_INT (bytes_ahead), reg,
4481 0, before_insn);
4482 loc = reg;
4485 start_sequence ();
4486 /* Make sure the address operand is valid for prefetch. */
4487 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4488 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4489 loc = force_reg (Pmode, loc);
4490 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4491 GEN_INT (3)));
4492 seq = get_insns ();
4493 end_sequence ();
4494 emit_insn_before (seq, before_insn);
4496 /* Check all insns emitted and record the new GIV
4497 information. */
4498 insn = NEXT_INSN (prev_insn);
4499 while (insn != before_insn)
4501 insn = check_insn_for_givs (loop, insn,
4502 info[i].giv->always_executed,
4503 info[i].giv->maybe_multiple);
4504 insn = NEXT_INSN (insn);
4508 if (PREFETCH_BEFORE_LOOP)
4510 /* Emit insns before the loop to fetch the first cache lines or,
4511 if we're not prefetching within the loop, everything we expect
4512 to need. */
4513 for (y = 0; y < info[i].prefetch_before_loop; y++)
4515 rtx reg = gen_reg_rtx (Pmode);
4516 rtx loop_start = loop->start;
4517 rtx init_val = info[i].class->initial_value;
4518 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4519 info[i].giv->add_val,
4520 GEN_INT (y * PREFETCH_BLOCK));
4522 /* Functions called by LOOP_IV_ADD_MULT_EMIT_BEFORE expect a
4523 non-constant INIT_VAL to have the same mode as REG, which
4524 in this case we know to be Pmode. */
4525 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4527 rtx seq;
4529 start_sequence ();
4530 init_val = convert_to_mode (Pmode, init_val, 0);
4531 seq = get_insns ();
4532 end_sequence ();
4533 loop_insn_emit_before (loop, 0, loop_start, seq);
4535 loop_iv_add_mult_emit_before (loop, init_val,
4536 info[i].giv->mult_val,
4537 add_val, reg, 0, loop_start);
4538 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4539 GEN_INT (3)),
4540 loop_start);
4545 return;
4548 /* Communication with routines called via `note_stores'. */
4550 static rtx note_insn;
4552 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4554 static rtx addr_placeholder;
4556 /* ??? Unfinished optimizations, and possible future optimizations,
4557 for the strength reduction code. */
4559 /* ??? The interaction of biv elimination and recognition of 'constant'
4560 bivs may cause problems. */
4562 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4563 performance problems.
4565 Perhaps don't eliminate things that can be combined with an addressing
4566 mode. Find all givs that have the same biv, mult_val, and add_val;
4567 then for each giv, check to see if its only use dies in a following
4568 memory address. If so, generate a new memory address and check to see
4569 if it is valid. If it is valid, then store the modified memory address,
4570 otherwise, mark the giv as not done so that it will get its own iv. */
4572 /* ??? Could try to optimize branches when it is known that a biv is always
4573 positive. */
4575 /* ??? When replacing a biv in a compare insn, we should replace it with the
4576 closest giv so that an optimized branch can still be recognized by the combiner,
4577 e.g. the VAX acb insn. */
4579 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4580 was rerun in loop_optimize whenever a register was added or moved.
4581 Also, some of the optimizations could be a little less conservative. */
4583 /* Searches the insns between INSN and LOOP->END. Returns 1 if there
4584 is a backward branch in that range that branches to somewhere between
4585 LOOP->START and INSN. Returns 0 otherwise. */
4587 /* ??? This is a quadratic algorithm. It could be rewritten to be linear.
4588 In practice, this is not a problem, because this function is seldom called,
4589 and uses a negligible amount of CPU time on average. */
4591 static int
4592 back_branch_in_range_p (const struct loop *loop, rtx insn)
4594 rtx p, q, target_insn;
4595 rtx loop_start = loop->start;
4596 rtx loop_end = loop->end;
4597 rtx orig_loop_end = loop->end;
4599 /* Stop before we get to the backward branch at the end of the loop. */
4600 loop_end = prev_nonnote_insn (loop_end);
4601 if (BARRIER_P (loop_end))
4602 loop_end = PREV_INSN (loop_end);
4604 /* In case INSN has been deleted, search forward for the first
4605 non-deleted insn following it. */
4606 while (INSN_DELETED_P (insn))
4607 insn = NEXT_INSN (insn);
4609 /* Check for the case where insn is the last insn in the loop. Deal
4610 with the case where INSN was a deleted loop test insn, in which case
4611 it will now be the NOTE_LOOP_END. */
4612 if (insn == loop_end || insn == orig_loop_end)
4613 return 0;
4615 for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
4617 if (JUMP_P (p))
4619 target_insn = JUMP_LABEL (p);
4621 /* Search from loop_start to insn, to see if one of them is
4622 the target_insn. We can't use INSN_LUID comparisons here,
4623 since insn may not have an LUID entry. */
4624 for (q = loop_start; q != insn; q = NEXT_INSN (q))
4625 if (q == target_insn)
4626 return 1;
4630 return 0;
4633 /* Scan the loop body and call FNCALL for each insn. In addition to the
4634 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4635 callback.
4637 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed at
4638 least once for every loop iteration except for the last one.
4640 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4641 every loop iteration. */
4643 typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int);
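/* In outline, a conforming callback (MY_CALLBACK is a placeholder name;
   check_insn_for_bivs and check_insn_for_givs are the real users)
   examines one insn, and scanning resumes after the insn it returns:

	static rtx
	my_callback (struct loop *loop, rtx insn,
		     int not_every_iteration, int maybe_multiple)
	{
	  ... examine INSN ...
	  return insn;
	}
*/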
4644 static void
4645 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4647 int not_every_iteration = 0;
4648 int maybe_multiple = 0;
4649 int past_loop_latch = 0;
4650 rtx p;
4652 /* If loop_scan_start points to the loop exit test, we have to be wary of
4653 subversive use of gotos inside expression statements. */
4654 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4655 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4657 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4658 for (p = next_insn_in_loop (loop, loop->scan_start);
4659 p != NULL_RTX;
4660 p = next_insn_in_loop (loop, p))
4662 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4664 /* Past CODE_LABEL, we get to insns that may be executed multiple
4665 times. The only way we can be sure that they can't is if every
4666 jump insn between here and the end of the loop either
4667 returns, exits the loop, is a jump to a location that is still
4668 behind the label, or is a jump to the loop start. */
4670 if (LABEL_P (p))
4672 rtx insn = p;
4674 maybe_multiple = 0;
4676 while (1)
4678 insn = NEXT_INSN (insn);
4679 if (insn == loop->scan_start)
4680 break;
4681 if (insn == loop->end)
4683 if (loop->top != 0)
4684 insn = loop->top;
4685 else
4686 break;
4687 if (insn == loop->scan_start)
4688 break;
4691 if (JUMP_P (insn)
4692 && GET_CODE (PATTERN (insn)) != RETURN
4693 && (!any_condjump_p (insn)
4694 || (JUMP_LABEL (insn) != 0
4695 && JUMP_LABEL (insn) != loop->scan_start
4696 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4698 maybe_multiple = 1;
4699 break;
4704 /* Past a jump, we get to insns for which we can't count
4705 on whether they will be executed during each iteration. */
4706 /* This code appears twice in strength_reduce. There is also similar
4707 code in scan_loop. */
4708 if (JUMP_P (p)
4709 /* If we enter the loop in the middle, and scan around to the
4710 beginning, don't set not_every_iteration for that.
4711 This can be any kind of jump, since we want to know if insns
4712 will be executed if the loop is executed. */
4713 && !(JUMP_LABEL (p) == loop->top
4714 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4715 && any_uncondjump_p (p))
4716 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4718 rtx label = 0;
4720 /* If this is a jump outside the loop, then it also doesn't
4721 matter. Check to see if the target of this branch is on the
4722 loop->exits_labels list. */
4724 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4725 if (XEXP (label, 0) == JUMP_LABEL (p))
4726 break;
4728 if (!label)
4729 not_every_iteration = 1;
4732 /* Note if we pass a loop latch. If we do, then we cannot clear
4733 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4734 a loop since a jump before the last CODE_LABEL may have started
4735 a new loop iteration.
4737 Note that LOOP_TOP is only set for rotated loops and we need
4738 this check for all loops, so compare against the CODE_LABEL
4739 which immediately follows LOOP_START. */
4740 if (JUMP_P (p)
4741 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4742 past_loop_latch = 1;
4744 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4745 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4746 or not an insn is known to be executed each iteration of the
4747 loop, whether or not any iterations are known to occur.
4749 Therefore, if we have just passed a label and have no more labels
4750 between here and the test insn of the loop, and we have not passed
4751 a jump to the top of the loop, then we know these insns will be
4752 executed each iteration. */
4754 if (not_every_iteration
4755 && !past_loop_latch
4756 && LABEL_P (p)
4757 && no_labels_between_p (p, loop->end))
4758 not_every_iteration = 0;
4762 static void
4763 loop_bivs_find (struct loop *loop)
4765 struct loop_regs *regs = LOOP_REGS (loop);
4766 struct loop_ivs *ivs = LOOP_IVS (loop);
4767 /* Temporary list pointers for traversing ivs->list. */
4768 struct iv_class *bl, **backbl;
4770 ivs->list = 0;
4772 for_each_insn_in_loop (loop, check_insn_for_bivs);
4774 /* Scan ivs->list to remove all regs that proved not to be bivs.
4775 Make a sanity check against regs->n_times_set. */
4776 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4778 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4779 /* Above happens if register modified by subreg, etc. */
4780 /* Make sure it is not recognized as a basic induction var: */
4781 || regs->array[bl->regno].n_times_set != bl->biv_count
4782 /* If never incremented, it is an invariant that we decided not
4783 to move, so leave it alone. */
4784 || ! bl->incremented)
4786 if (loop_dump_stream)
4787 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4788 bl->regno,
4789 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4790 ? "not induction variable"
4791 : (! bl->incremented ? "never incremented"
4792 : "count error")));
4794 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4795 *backbl = bl->next;
4797 else
4799 backbl = &bl->next;
4801 if (loop_dump_stream)
4802 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4808 /* Determine how BIVs are initialized by looking through the pre-header
4809 extended basic block. */
4810 static void
4811 loop_bivs_init_find (struct loop *loop)
4813 struct loop_ivs *ivs = LOOP_IVS (loop);
4814 /* Temporary list pointers for traversing ivs->list. */
4815 struct iv_class *bl;
4816 int call_seen;
4817 rtx p;
4819 /* Find the initial value for each biv by searching backwards from
4820 loop_start, halting at the first label. Also record any test condition. */
4822 call_seen = 0;
4823 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4825 rtx test;
4827 note_insn = p;
4829 if (CALL_P (p))
4830 call_seen = 1;
4832 if (INSN_P (p))
4833 note_stores (PATTERN (p), record_initial, ivs);
4835 /* Record any test of a biv that branches around the loop if there is
4836 no store between it and the start of the loop. We only care about tests with
4837 constants and registers and only certain of those. */
4838 if (JUMP_P (p)
4839 && JUMP_LABEL (p) != 0
4840 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4841 && (test = get_condition_for_loop (loop, p)) != 0
4842 && REG_P (XEXP (test, 0))
4843 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4844 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4845 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4846 && bl->init_insn == 0)
4848 /* If an NE test, we have an initial value! */
4849 if (GET_CODE (test) == NE)
4851 bl->init_insn = p;
4852 bl->init_set = gen_rtx_SET (VOIDmode,
4853 XEXP (test, 0), XEXP (test, 1));
4855 else
4856 bl->initial_test = test;
4862 /* Look at each biv and see if we can say anything better about its
4863 initial value from any initializing insns set up above. (This is done
4864 in two passes to avoid missing SETs in a PARALLEL.) */
4865 static void
4866 loop_bivs_check (struct loop *loop)
4868 struct loop_ivs *ivs = LOOP_IVS (loop);
4869 /* Temporary list pointers for traversing ivs->list. */
4870 struct iv_class *bl;
4871 struct iv_class **backbl;
4873 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4875 rtx src;
4876 rtx note;
4878 if (! bl->init_insn)
4879 continue;
4881 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4882 is a constant, use the value of that. */
4883 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4884 && CONSTANT_P (XEXP (note, 0)))
4885 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4886 && CONSTANT_P (XEXP (note, 0))))
4887 src = XEXP (note, 0);
4888 else
4889 src = SET_SRC (bl->init_set);
4891 if (loop_dump_stream)
4892 fprintf (loop_dump_stream,
4893 "Biv %d: initialized at insn %d: initial value ",
4894 bl->regno, INSN_UID (bl->init_insn));
4896 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4897 || GET_MODE (src) == VOIDmode)
4898 && valid_initial_value_p (src, bl->init_insn,
4899 LOOP_INFO (loop)->pre_header_has_call,
4900 loop->start))
4902 bl->initial_value = src;
4904 if (loop_dump_stream)
4906 print_simple_rtl (loop_dump_stream, src);
4907 fputc ('\n', loop_dump_stream);
4910 /* If we can't make it a giv,
4911 let the biv keep its initial value of "itself". */
4912 else if (loop_dump_stream)
4913 fprintf (loop_dump_stream, "is complex\n");
4918 /* Search the loop for general induction variables. */
4920 static void
4921 loop_givs_find (struct loop* loop)
4923 for_each_insn_in_loop (loop, check_insn_for_givs);
4927 /* For each giv for which we still don't know whether or not it is
4928 replaceable, check to see if it is replaceable because its final value
4929 can be calculated. */
4931 static void
4932 loop_givs_check (struct loop *loop)
4934 struct loop_ivs *ivs = LOOP_IVS (loop);
4935 struct iv_class *bl;
4937 for (bl = ivs->list; bl; bl = bl->next)
4939 struct induction *v;
4941 for (v = bl->giv; v; v = v->next_iv)
4942 if (! v->replaceable && ! v->not_replaceable)
4943 check_final_value (loop, v);
4947 /* Try to generate the simplest rtx for the expression
4948 (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial
4949 value of giv's. */
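/* For example, with MULT1 == (const_int 4), MULT2 == (reg 100) and
   ADD1 == (const_int 8), the constants are commuted into second
   position and the result is

	(plus (mult (reg 100) (const_int 4)) (const_int 8))

   after simplify_binary_operation has folded whatever it can.  */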
4951 static rtx
4952 fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode)
4954 rtx temp, mult_res;
4955 rtx result;
4957 /* The modes must all be the same. This should always be true. For now,
4958 check to make sure. */
4959 if ((GET_MODE (mult1) != mode && GET_MODE (mult1) != VOIDmode)
4960 || (GET_MODE (mult2) != mode && GET_MODE (mult2) != VOIDmode)
4961 || (GET_MODE (add1) != mode && GET_MODE (add1) != VOIDmode))
4962 abort ();
4964 /* Ensure that if at least one of mult1/mult2 is constant, then mult2
4965 will be a constant. */
4966 if (GET_CODE (mult1) == CONST_INT)
4968 temp = mult2;
4969 mult2 = mult1;
4970 mult1 = temp;
4973 mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
4974 if (! mult_res)
4975 mult_res = gen_rtx_MULT (mode, mult1, mult2);
4977 /* Again, put the constant second. */
4978 if (GET_CODE (add1) == CONST_INT)
4980 temp = add1;
4981 add1 = mult_res;
4982 mult_res = temp;
4985 result = simplify_binary_operation (PLUS, mode, add1, mult_res);
4986 if (! result)
4987 result = gen_rtx_PLUS (mode, add1, mult_res);
4989 return result;
4992 /* Searches the list of induction struct's for the biv BL, to try to calculate
4993 the total increment value for one iteration of the loop as a constant.
4995 Returns the increment value as an rtx, simplified as much as possible,
4996 if it can be calculated. Otherwise, returns 0. */
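/* For example, a biv incremented twice in every iteration, once by 4
   and once by 8, has a total increment of (const_int 12).  If any of
   its sets is conditional, or has a mult_val other than one, the total
   cannot be relied upon and 0 is returned instead.  */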
4998 static rtx
4999 biv_total_increment (const struct iv_class *bl)
5001 struct induction *v;
5002 rtx result;
5004 /* For increment, must check every instruction that sets it. Each
5005 instruction must be executed only once each time through the loop.
5006 To verify this, we check that the insn is always executed, and that
5007 there are no backward branches after the insn that branch to before it.
5008 Also, the insn must have a mult_val of one (to make sure it really is
5009 an increment). */
5011 result = const0_rtx;
5012 for (v = bl->biv; v; v = v->next_iv)
5014 if (v->always_computable && v->mult_val == const1_rtx
5015 && ! v->maybe_multiple
5016 && SCALAR_INT_MODE_P (v->mode))
5018 /* If we have already counted it, skip it. */
5019 if (v->same)
5020 continue;
5022 result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
5024 else
5025 return 0;
5028 return result;
5031 /* Try to prove that the register is dead after the loop exits. Trace every
5032 loop exit looking for an insn that will always be executed, which sets
5033 the register to some value, and appears before the first use of the register
5034 is found. If successful, then return 1, otherwise return 0. */
5036 /* ?? Could be made more intelligent in the handling of jumps, so that
5037 it can search past if statements and other similar structures. */
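/* For example (illustrative), if every exit path overwrites register R,
   as in

	(set (reg R) (const_int 0))

   before any insn that reads R, then the biv in R is dead after the
   loop and no final value needs to be computed for it.  */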
5039 static int
5040 reg_dead_after_loop (const struct loop *loop, rtx reg)
5042 rtx insn, label;
5043 int jump_count = 0;
5044 int label_count = 0;
5046 /* In addition to checking all exits of this loop, we must also check
5047 all exits of inner nested loops that would exit this loop. We don't
5048 have any way to identify those, so we just give up if there are any
5049 such inner loop exits. */
5051 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
5052 label_count++;
5054 if (label_count != loop->exit_count)
5055 return 0;
5057 /* HACK: Must also search the loop fall through exit, create a label_ref
5058 here which points to the loop->end, and append the loop_number_exit_labels
5059 list to it. */
5060 label = gen_rtx_LABEL_REF (VOIDmode, loop->end);
5061 LABEL_NEXTREF (label) = loop->exit_labels;
5063 for (; label; label = LABEL_NEXTREF (label))
5065 /* Succeed if we find an insn which sets the biv or if we reach the
5066 end of the function. Fail if we find an insn that uses the biv,
5067 or if we come to a conditional jump. */
5069 insn = NEXT_INSN (XEXP (label, 0));
5070 while (insn)
5072 if (INSN_P (insn))
5074 rtx set, note;
5076 if (reg_referenced_p (reg, PATTERN (insn)))
5077 return 0;
5079 note = find_reg_equal_equiv_note (insn);
5080 if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0)))
5081 return 0;
5083 set = single_set (insn);
5084 if (set && rtx_equal_p (SET_DEST (set), reg))
5085 break;
5087 if (JUMP_P (insn))
5089 if (GET_CODE (PATTERN (insn)) == RETURN)
5090 break;
5091 else if (!any_uncondjump_p (insn)
5092 /* Prevent infinite loop following infinite loops. */
5093 || jump_count++ > 20)
5094 return 0;
5095 else
5096 insn = JUMP_LABEL (insn);
5100 insn = NEXT_INSN (insn);
5104 /* Success, the register is dead on all loop exits. */
5105 return 1;
5108 /* Try to calculate the final value of the biv, the value it will have at
5109 the end of the loop. If we can do it, return that value. */
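/* In the common case computed below this is

	final = initial_value + n_iterations * increment

   so, with illustrative numbers, a biv that starts at 0 and is bumped
   by 4 in a loop known to iterate 10 times ends at 40.  The value is
   computed into a fresh register after the loop in case it is needed
   later.  */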
5111 static rtx
5112 final_biv_value (const struct loop *loop, struct iv_class *bl)
5114 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
5115 rtx increment, tem;
5117 /* ??? This only works for MODE_INT biv's. Reject all others for now. */
5119 if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
5120 return 0;
5122 /* The final value for reversed bivs must be calculated differently than
5123 for ordinary bivs. In this case, there is already an insn after the
5124 loop which sets this biv's final value (if necessary), and there are
5125 no other loop exits, so we can return any value. */
5126 if (bl->reversed)
5128 if (loop_dump_stream)
5129 fprintf (loop_dump_stream,
5130 "Final biv value for %d, reversed biv.\n", bl->regno);
5132 return const0_rtx;
5135 /* Try to calculate the final value as initial value + (number of iterations
5136 * increment). For this to work, increment must be invariant, the only
5137 exit from the loop must be the fall through at the bottom (otherwise
5138 it may not have its final value when the loop exits), and the initial
5139 value of the biv must be invariant. */
5141 if (n_iterations != 0
5142 && ! loop->exit_count
5143 && loop_invariant_p (loop, bl->initial_value))
5145 increment = biv_total_increment (bl);
5147 if (increment && loop_invariant_p (loop, increment))
5149 /* We can calculate the loop exit value; emit insns after the
5150 loop end to compute this value into a temporary register in
5151 case it is needed later. */
5153 tem = gen_reg_rtx (bl->biv->mode);
5154 record_base_value (REGNO (tem), bl->biv->add_val, 0);
5155 loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations),
5156 bl->initial_value, tem);
5158 if (loop_dump_stream)
5159 fprintf (loop_dump_stream,
5160 "Final biv value for %d, calculated.\n", bl->regno);
5162 return tem;
5166 /* Check to see if the biv is dead at all loop exits. */
5167 if (reg_dead_after_loop (loop, bl->biv->src_reg))
5169 if (loop_dump_stream)
5170 fprintf (loop_dump_stream,
5171 "Final biv value for %d, biv dead after loop exit.\n",
5172 bl->regno);
5174 return const0_rtx;
5177 return 0;
5180 /* Return nonzero if it is possible to eliminate the biv BL provided
5181 all givs are reduced. This is possible if either the reg is not
5182 used outside the loop, or we can compute what its final value will
5183 be. */
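/* For example, a loop counter whose last use is the exit test inside
   the loop is a candidate for elimination once all its givs are
   reduced; a counter whose value is read after the loop qualifies only
   if final_biv_value can compute that value.  */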
5185 static int
5186 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
5187 int threshold, int insn_count)
5189 /* For architectures with a decrement_and_branch_until_zero insn,
5190 don't do this if we put a REG_NONNEG note on the endtest for this
5191 biv. */
5193 #ifdef HAVE_decrement_and_branch_until_zero
5194 if (bl->nonneg)
5196 if (loop_dump_stream)
5197 fprintf (loop_dump_stream,
5198 "Cannot eliminate nonneg biv %d.\n", bl->regno);
5199 return 0;
5201 #endif
5203 /* Check that biv is used outside loop or if it has a final value.
5204 Compare against bl->init_insn rather than loop->start. We aren't
5205 concerned with any uses of the biv between init_insn and
5206 loop->start since these won't be affected by the value of the biv
5207 elsewhere in the function, so long as init_insn doesn't use the
5208 biv itself. */
5210 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
5211 && bl->init_insn
5212 && INSN_UID (bl->init_insn) < max_uid_for_loop
5213 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
5214 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
5215 || (bl->final_value = final_biv_value (loop, bl)))
5216 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
5218 if (loop_dump_stream)
5220 fprintf (loop_dump_stream,
5221 "Cannot eliminate biv %d.\n",
5222 bl->regno);
5223 fprintf (loop_dump_stream,
5224 "First use: insn %d, last use: insn %d.\n",
5225 REGNO_FIRST_UID (bl->regno),
5226 REGNO_LAST_UID (bl->regno));
5228 return 0;
5232 /* Reduce each giv of BL that we have decided to reduce. */
5234 static void
5235 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
5237 struct induction *v;
5239 for (v = bl->giv; v; v = v->next_iv)
5241 struct induction *tv;
5242 if (! v->ignore && v->same == 0)
5244 int auto_inc_opt = 0;
5246 /* If the code for derived givs immediately below has already
5247 allocated a new_reg, we must keep it. */
5248 if (! v->new_reg)
5249 v->new_reg = gen_reg_rtx (v->mode);
5251 #ifdef AUTO_INC_DEC
5252 /* If the target has auto-increment addressing modes, and
5253 this is an address giv, then try to put the increment
5254 immediately after its use, so that flow can create an
5255 auto-increment addressing mode. */
5256 /* Don't do this for loops entered at the bottom, to avoid
5257 this invalid transformation:
5258 jmp L; -> jmp L;
5259 TOP: TOP:
5260 use giv use giv
5261 L: inc giv
5262 inc biv L:
5263 test biv test giv
5264 cbr TOP cbr TOP
5266 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
5267 && bl->biv->always_executed && ! bl->biv->maybe_multiple
5268 /* We don't handle reversed biv's because bl->biv->insn
5269 does not have a valid INSN_LUID. */
5270 && ! bl->reversed
5271 && v->always_executed && ! v->maybe_multiple
5272 && INSN_UID (v->insn) < max_uid_for_loop
5273 && !loop->top)
5275 /* If other giv's have been combined with this one, then
5276 this will work only if all uses of the other giv's occur
5277 before this giv's insn. This is difficult to check.
5279 We simplify this by looking for the common case where
5280 there is one DEST_REG giv, and this giv's insn is the
5281 last use of the dest_reg of that DEST_REG giv. If the
5282 increment occurs after the address giv, then we can
5283 perform the optimization. (Otherwise, the increment
5284 would have to go before other_giv, and we would not be
5285 able to combine it with the address giv to get an
5286 auto-inc address.) */
5287 if (v->combined_with)
5289 struct induction *other_giv = 0;
5291 for (tv = bl->giv; tv; tv = tv->next_iv)
5292 if (tv->same == v)
5294 if (other_giv)
5295 break;
5296 else
5297 other_giv = tv;
5299 if (! tv && other_giv
5300 && REGNO (other_giv->dest_reg) < max_reg_before_loop
5301 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
5302 == INSN_UID (v->insn))
5303 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
5304 auto_inc_opt = 1;
5306 /* Check for the case where the increment is before the
5307 address giv. Do this test in "loop order". */
5308 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
5309 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5310 || (INSN_LUID (bl->biv->insn)
5311 > INSN_LUID (loop->scan_start))))
5312 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
5313 && (INSN_LUID (loop->scan_start)
5314 < INSN_LUID (bl->biv->insn))))
5315 auto_inc_opt = -1;
5316 else
5317 auto_inc_opt = 1;
5319 #ifdef HAVE_cc0
5321 rtx prev;
5323 /* We can't put an insn immediately after one setting
5324 cc0, or immediately before one using cc0. */
5325 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
5326 || (auto_inc_opt == -1
5327 && (prev = prev_nonnote_insn (v->insn)) != 0
5328 && INSN_P (prev)
5329 && sets_cc0_p (PATTERN (prev))))
5330 auto_inc_opt = 0;
5332 #endif
5334 if (auto_inc_opt)
5335 v->auto_inc_opt = 1;
5337 #endif
5339 /* For each place where the biv is incremented, add an insn
5340 to increment the new, reduced reg for the giv. */
5341 for (tv = bl->biv; tv; tv = tv->next_iv)
5343 rtx insert_before;
5345 /* Skip if location is the same as a previous one. */
5346 if (tv->same)
5347 continue;
5348 if (! auto_inc_opt)
5349 insert_before = NEXT_INSN (tv->insn);
5350 else if (auto_inc_opt == 1)
5351 insert_before = NEXT_INSN (v->insn);
5352 else
5353 insert_before = v->insn;
5355 if (tv->mult_val == const1_rtx)
5356 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5357 v->new_reg, v->new_reg,
5358 0, insert_before);
5359 else /* tv->mult_val == const0_rtx */
5360 /* A multiply is acceptable here
5361 since this is presumed to be seldom executed. */
5362 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
5363 v->add_val, v->new_reg,
5364 0, insert_before);
5367 /* Add code at loop start to initialize giv's reduced reg. */
5369 loop_iv_add_mult_hoist (loop,
5370 extend_value_for_giv (v, bl->initial_value),
5371 v->mult_val, v->add_val, v->new_reg);
5377 /* Check for givs whose first use is their definition and whose
5378 last use is the definition of another giv. If so, it is likely
5379 dead and should not be used to derive another giv nor to
5380 eliminate a biv. */
5382 static void
5383 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
5385 struct induction *v;
5387 for (v = bl->giv; v; v = v->next_iv)
5389 if (v->ignore
5390 || (v->same && v->same->ignore))
5391 continue;
5393 if (v->giv_type == DEST_REG
5394 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
5396 struct induction *v1;
5398 for (v1 = bl->giv; v1; v1 = v1->next_iv)
5399 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
5400 v->maybe_dead = 1;
5406 static void
5407 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
5409 struct induction *v;
5411 for (v = bl->giv; v; v = v->next_iv)
5413 if (v->same && v->same->ignore)
5414 v->ignore = 1;
5416 if (v->ignore)
5417 continue;
5419 /* Update expression if this was combined, in case other giv was
5420 replaced. */
5421 if (v->same)
5422 v->new_reg = replace_rtx (v->new_reg,
5423 v->same->dest_reg, v->same->new_reg);
5425 /* See if this register is known to be a pointer to something. If
5426 so, see if we can find the alignment. First see if there is a
5427 destination register that is a pointer. If so, this shares the
5428 alignment too. Next see if we can deduce anything from the
5429 computational information. If not, and this is a DEST_ADDR
5430 giv, at least we know that it's a pointer, though we don't know
5431 the alignment. */
5432 if (REG_P (v->new_reg)
5433 && v->giv_type == DEST_REG
5434 && REG_POINTER (v->dest_reg))
5435 mark_reg_pointer (v->new_reg,
5436 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
5437 else if (REG_P (v->new_reg)
5438 && REG_POINTER (v->src_reg))
5440 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
5442 if (align == 0
5443 || GET_CODE (v->add_val) != CONST_INT
5444 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
5445 align = 0;
5447 mark_reg_pointer (v->new_reg, align);
5449 else if (REG_P (v->new_reg)
5450 && REG_P (v->add_val)
5451 && REG_POINTER (v->add_val))
5453 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
5455 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
5456 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
5457 align = 0;
5459 mark_reg_pointer (v->new_reg, align);
5461 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
5462 mark_reg_pointer (v->new_reg, 0);
5464 if (v->giv_type == DEST_ADDR)
5465 /* Store reduced reg as the address in the memref where we found
5466 this giv. */
5467 validate_change (v->insn, v->location, v->new_reg, 0);
5468 else if (v->replaceable)
5470 reg_map[REGNO (v->dest_reg)] = v->new_reg;
5472 else
5474 rtx original_insn = v->insn;
5475 rtx note;
5477 /* Not replaceable; emit an insn to set the original giv reg from
5478 the reduced giv, same as above. */
5479 v->insn = loop_insn_emit_after (loop, 0, original_insn,
5480 gen_move_insn (v->dest_reg,
5481 v->new_reg));
5483 /* The original insn may have a REG_EQUAL note. This note is
5484 now incorrect and may result in invalid substitutions later.
5485 The original insn is dead, but may be part of a libcall
5486 sequence, which doesn't seem worth the bother of handling. */
5487 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
5488 if (note)
5489 remove_note (original_insn, note);
5492 /* When a loop is reversed, givs which depend on the reversed
5493 biv, and which are live outside the loop, must be set to their
5494 correct final value. This insn is only needed if the giv is
5495 not replaceable. The correct final value is the same as the
5496 value that the giv starts the reversed loop with. */
5497 if (bl->reversed && ! v->replaceable)
5498 loop_iv_add_mult_sink (loop,
5499 extend_value_for_giv (v, bl->initial_value),
5500 v->mult_val, v->add_val, v->dest_reg);
5501 else if (v->final_value)
5502 loop_insn_sink_or_swim (loop,
5503 gen_load_of_final_value (v->dest_reg,
5504 v->final_value));
5506 if (loop_dump_stream)
5508 fprintf (loop_dump_stream, "giv at %d reduced to ",
5509 INSN_UID (v->insn));
5510 print_simple_rtl (loop_dump_stream, v->new_reg);
5511 fprintf (loop_dump_stream, "\n");
5517 static int
5518 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
5519 struct iv_class *bl, struct induction *v,
5520 rtx test_reg)
5522 int add_cost;
5523 int benefit;
5525 benefit = v->benefit;
5526 PUT_MODE (test_reg, v->mode);
5527 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
5528 test_reg, test_reg);
5530 /* Reduce benefit if not replaceable, since we will insert a
5531 move-insn to replace the insn that calculates this giv. Don't do
5532 this unless the giv is a user variable, since it will often be
5533 marked non-replaceable because of the duplication of the exit
5534 code outside the loop. In such a case, the copies we insert are
5535 dead and will be deleted. So they don't have a cost. Similar
5536 situations exist. */
5537 /* ??? The new final_[bg]iv_value code does a much better job of
5538 finding replaceable giv's, and hence this code may no longer be
5539 necessary. */
5540 if (! v->replaceable && ! bl->eliminable
5541 && REG_USERVAR_P (v->dest_reg))
5542 benefit -= copy_cost;
5544 /* Decrease the benefit to count the add-insns that we will insert
5545 to increment the reduced reg for the giv. ??? This can
5546 overestimate the run-time cost of the additional insns, e.g. if
5547 there are multiple basic blocks that increment the biv, but only
5548 one of these blocks is executed during each iteration. There is
5549 no good way to detect cases like this with the current structure
5550 of the loop optimizer. This code is more accurate for
5551 determining code size than run-time benefits. */
5552 benefit -= add_cost * bl->biv_count;
5554 /* Decide whether to strength-reduce this giv or to leave the code
5555 unchanged (recompute it from the biv each time it is used). This
5556 decision can be made independently for each giv. */
5558 #ifdef AUTO_INC_DEC
5559 /* Attempt to guess whether autoincrement will handle some of the
5560 new add insns; if so, increase BENEFIT (undo the subtraction of
5561 add_cost that was done above). */
5562 if (v->giv_type == DEST_ADDR
5563 /* Increasing the benefit is risky, since this is only a guess.
5564 Avoid increasing register pressure in cases where there would
5565 be no other benefit from reducing this giv. */
5566 && benefit > 0
5567 && GET_CODE (v->mult_val) == CONST_INT)
5569 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5571 if (HAVE_POST_INCREMENT
5572 && INTVAL (v->mult_val) == size)
5573 benefit += add_cost * bl->biv_count;
5574 else if (HAVE_PRE_INCREMENT
5575 && INTVAL (v->mult_val) == size)
5576 benefit += add_cost * bl->biv_count;
5577 else if (HAVE_POST_DECREMENT
5578 && -INTVAL (v->mult_val) == size)
5579 benefit += add_cost * bl->biv_count;
5580 else if (HAVE_PRE_DECREMENT
5581 && -INTVAL (v->mult_val) == size)
5582 benefit += add_cost * bl->biv_count;
5584 #endif
5586 return benefit;
5590 /* Free IV structures for LOOP. */
5592 static void
5593 loop_ivs_free (struct loop *loop)
5595 struct loop_ivs *ivs = LOOP_IVS (loop);
5596 struct iv_class *iv = ivs->list;
5598 free (ivs->regs);
5600 while (iv)
5602 struct iv_class *next = iv->next;
5603 struct induction *induction;
5604 struct induction *next_induction;
5606 for (induction = iv->biv; induction; induction = next_induction)
5608 next_induction = induction->next_iv;
5609 free (induction);
5611 for (induction = iv->giv; induction; induction = next_induction)
5613 next_induction = induction->next_iv;
5614 free (induction);
5617 free (iv);
5618 iv = next;
5622 /* Look back before LOOP->START for the insn that sets REG and return
5623 the equivalent constant if there is a REG_EQUAL note; otherwise just
5624 return the SET_SRC of REG. */
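/* For example, with hypothetical RTL: if the last set of (reg 64)
   before the loop is (set (reg 64) (reg 70)) carrying a
   (REG_EQUAL (const_int 10)) note, this returns (const_int 10),
   provided nothing modifies the value between that insn and the
   loop start.  */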
5626 static rtx
5627 loop_find_equiv_value (const struct loop *loop, rtx reg)
5629 rtx loop_start = loop->start;
5630 rtx insn, set;
5631 rtx ret;
5633 ret = reg;
5634 for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn))
5636 if (LABEL_P (insn))
5637 break;
5639 else if (INSN_P (insn) && reg_set_p (reg, insn))
5641 /* We found the last insn before the loop that sets the register.
5642 If it sets the entire register, and has a REG_EQUAL note,
5643 then use the value of the REG_EQUAL note. */
5644 if ((set = single_set (insn))
5645 && (SET_DEST (set) == reg))
5647 rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
5649 /* Only use the REG_EQUAL note if it is a constant.
5650 Other things, divide in particular, will cause
5651 problems later if we use them. */
5652 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
5653 && CONSTANT_P (XEXP (note, 0)))
5654 ret = XEXP (note, 0);
5655 else
5656 ret = SET_SRC (set);
5658 /* We cannot do this if it changes between the
5659 assignment and loop start though. */
5660 if (modified_between_p (ret, insn, loop_start))
5661 ret = reg;
5663 break;
5666 return ret;
5669 /* Find and return register term common to both expressions OP0 and
5670 OP1 or NULL_RTX if no such term exists. Each expression must be a
5671 REG or a PLUS of a REG. */
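/* For example, given OP0 == (plus (reg 60) (const_int 4)) and
   OP1 == (reg 60), the common term is (reg 60); two expressions
   sharing no register yield NULL_RTX.  */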
5673 static rtx
5674 find_common_reg_term (rtx op0, rtx op1)
5676 if ((REG_P (op0) || GET_CODE (op0) == PLUS)
5677 && (REG_P (op1) || GET_CODE (op1) == PLUS))
5679 rtx op00;
5680 rtx op01;
5681 rtx op10;
5682 rtx op11;
5684 if (GET_CODE (op0) == PLUS)
5685 op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
5686 else
5687 op01 = const0_rtx, op00 = op0;
5689 if (GET_CODE (op1) == PLUS)
5690 op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
5691 else
5692 op11 = const0_rtx, op10 = op1;
5694 /* Find and return common register term if present. */
5695 if (REG_P (op00) && (op00 == op10 || op00 == op11))
5696 return op00;
5697 else if (REG_P (op01) && (op01 == op10 || op01 == op11))
5698 return op01;
5701 /* No common register term found. */
5702 return NULL_RTX;
5705 /* Determine the loop iterator and calculate the number of loop
5706 iterations. Returns the exact number of loop iterations if it can
5707 be calculated, otherwise returns zero. */
5709 static unsigned HOST_WIDE_INT
5710 loop_iterations (struct loop *loop)
5712 struct loop_info *loop_info = LOOP_INFO (loop);
5713 struct loop_ivs *ivs = LOOP_IVS (loop);
5714 rtx comparison, comparison_value;
5715 rtx iteration_var, initial_value, increment, final_value;
5716 enum rtx_code comparison_code;
5717 HOST_WIDE_INT inc;
5718 unsigned HOST_WIDE_INT abs_inc;
5719 unsigned HOST_WIDE_INT abs_diff;
5720 int off_by_one;
5721 int increment_dir;
5722 int unsigned_p, compare_dir, final_larger;
5723 rtx last_loop_insn;
5724 struct iv_class *bl;
5726 loop_info->n_iterations = 0;
5727 loop_info->initial_value = 0;
5728 loop_info->initial_equiv_value = 0;
5729 loop_info->comparison_value = 0;
5730 loop_info->final_value = 0;
5731 loop_info->final_equiv_value = 0;
5732 loop_info->increment = 0;
5733 loop_info->iteration_var = 0;
5734 loop_info->iv = 0;
5736 /* We used to use prev_nonnote_insn here, but that fails because it might
5737 accidentally get the branch for a contained loop if the branch for this
5738 loop was deleted. We can only trust branches immediately before the
5739 loop_end. */
5740 last_loop_insn = PREV_INSN (loop->end);
5742 /* ??? We should probably try harder to find the jump insn
5743 at the end of the loop. The following code assumes that
5744 the last loop insn is a jump to the top of the loop. */
5745 if (!JUMP_P (last_loop_insn))
5747 if (loop_dump_stream)
5748 fprintf (loop_dump_stream,
5749 "Loop iterations: No final conditional branch found.\n");
5750 return 0;
5753 /* If there is more than a single jump to the top of the loop
5754 we cannot (easily) determine the iteration count. */
5755 if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1)
5757 if (loop_dump_stream)
5758 fprintf (loop_dump_stream,
5759 "Loop iterations: Loop has multiple back edges.\n");
5760 return 0;
5763 /* Find the iteration variable. If the last insn is a conditional
5764 branch, and the insn before tests a register value, make that the
5765 iteration variable. */
5767 comparison = get_condition_for_loop (loop, last_loop_insn);
5768 if (comparison == 0)
5770 if (loop_dump_stream)
5771 fprintf (loop_dump_stream,
5772 "Loop iterations: No final comparison found.\n");
5773 return 0;
5776 /* ??? Get_condition may switch position of induction variable and
5777 invariant register when it canonicalizes the comparison. */
5779 comparison_code = GET_CODE (comparison);
5780 iteration_var = XEXP (comparison, 0);
5781 comparison_value = XEXP (comparison, 1);
5783 if (!REG_P (iteration_var))
5785 if (loop_dump_stream)
5786 fprintf (loop_dump_stream,
5787 "Loop iterations: Comparison not against register.\n");
5788 return 0;
5791 /* The only new registers that are created before loop_iterations
5792 is called are givs made from biv increments or registers created by
5793 load_mems. In the latter case, it is possible that try_copy_prop
5794 will propagate a new pseudo into the old iteration register but
5795 this will be marked by having the REG_USERVAR_P bit set. */
5797 if ((unsigned) REGNO (iteration_var) >= ivs->n_regs
5798 && ! REG_USERVAR_P (iteration_var))
5799 abort ();
5801 /* Determine the initial value of the iteration variable, and the amount
5802 that it is incremented each loop. Use the tables constructed by
5803 the strength reduction pass to calculate these values. */
5805 /* Clear the result values, in case no answer can be found. */
5806 initial_value = 0;
5807 increment = 0;
5809 /* The iteration variable can be either a giv or a biv. Check to see
5810 which it is, and compute the variable's initial value, and increment
5811 value if possible. */
5813 /* If this is a new register, can't handle it since we don't have any
5814 reg_iv_type entry for it. */
5815 if ((unsigned) REGNO (iteration_var) >= ivs->n_regs)
5817 if (loop_dump_stream)
5818 fprintf (loop_dump_stream,
5819 "Loop iterations: No reg_iv_type entry for iteration var.\n");
5820 return 0;
5823 /* Reject iteration variables larger than the host wide int size, since they
5824 could result in a number of iterations greater than the range of our
5825 `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */
5826 else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
5827 > HOST_BITS_PER_WIDE_INT))
5829 if (loop_dump_stream)
5830 fprintf (loop_dump_stream,
5831 "Loop iterations: Iteration var rejected because mode too large.\n");
5832 return 0;
5834 else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
5836 if (loop_dump_stream)
5837 fprintf (loop_dump_stream,
5838 "Loop iterations: Iteration var not an integer.\n");
5839 return 0;
5842 /* Try swapping the comparison to identify a suitable iv. */
5843 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT
5844 && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT
5845 && REG_P (comparison_value)
5846 && REGNO (comparison_value) < ivs->n_regs)
5848 rtx temp = comparison_value;
5849 comparison_code = swap_condition (comparison_code);
5850 comparison_value = iteration_var;
5851 iteration_var = temp;
5854 if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT)
5856 if (REGNO (iteration_var) >= ivs->n_regs)
5857 abort ();
5859 /* Grab initial value, only useful if it is a constant. */
5860 bl = REG_IV_CLASS (ivs, REGNO (iteration_var));
5861 initial_value = bl->initial_value;
5862 if (!bl->biv->always_executed || bl->biv->maybe_multiple)
5864 if (loop_dump_stream)
5865 fprintf (loop_dump_stream,
5866 "Loop iterations: Basic induction var not set once in each iteration.\n");
5867 return 0;
5870 increment = biv_total_increment (bl);
5872 else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT)
5874 HOST_WIDE_INT offset = 0;
5875 struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var));
5876 rtx biv_initial_value;
5878 if (REGNO (v->src_reg) >= ivs->n_regs)
5879 abort ();
5881 if (!v->always_executed || v->maybe_multiple)
5883 if (loop_dump_stream)
5884 fprintf (loop_dump_stream,
5885 "Loop iterations: General induction var not set once in each iteration.\n");
5886 return 0;
5889 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5891 /* Increment value is mult_val times the increment value of the biv. */
5893 increment = biv_total_increment (bl);
5894 if (increment)
5896 struct induction *biv_inc;
5898 increment = fold_rtx_mult_add (v->mult_val,
5899 extend_value_for_giv (v, increment),
5900 const0_rtx, v->mode);
5901 /* The caller assumes that one full increment has occurred at the
5902 first loop test. But that's not true when the biv is incremented
5903 after the giv is set (which is the usual case), e.g.:
5904 i = 6; do {;} while (i++ < 9).
5905 Therefore, we bias the initial value by subtracting the amount of
5906 the increment that occurs between the giv set and the giv test. */
5907 for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv)
5909 if (loop_insn_first_p (v->insn, biv_inc->insn))
5911 if (REG_P (biv_inc->add_val))
5913 if (loop_dump_stream)
5914 fprintf (loop_dump_stream,
5915 "Loop iterations: Basic induction var add_val is REG %d.\n",
5916 REGNO (biv_inc->add_val));
5917 return 0;
5920 /* If we have already counted it, skip it. */
5921 if (biv_inc->same)
5922 continue;
5924 offset -= INTVAL (biv_inc->add_val);
5928 if (loop_dump_stream)
5929 fprintf (loop_dump_stream,
5930 "Loop iterations: Giv iterator, initial value bias %ld.\n",
5931 (long) offset);
5933 /* Initial value is mult_val times the biv's initial value plus
5934 add_val. Only useful if it is a constant. */
5935 biv_initial_value = extend_value_for_giv (v, bl->initial_value);
5936 initial_value
5937 = fold_rtx_mult_add (v->mult_val,
5938 plus_constant (biv_initial_value, offset),
5939 v->add_val, v->mode);
5941 else
5943 if (loop_dump_stream)
5944 fprintf (loop_dump_stream,
5945 "Loop iterations: Not basic or general induction var.\n");
5946 return 0;
5949 if (initial_value == 0)
5950 return 0;
5952 unsigned_p = 0;
5953 off_by_one = 0;
5954 switch (comparison_code)
5956 case LEU:
5957 unsigned_p = 1;
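/* Fall through.  */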
5958 case LE:
5959 compare_dir = 1;
5960 off_by_one = 1;
5961 break;
5962 case GEU:
5963 unsigned_p = 1;
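/* Fall through.  */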
5964 case GE:
5965 compare_dir = -1;
5966 off_by_one = -1;
5967 break;
5968 case EQ:
5969 /* Cannot determine loop iterations with this case. */
5970 compare_dir = 0;
5971 break;
5972 case LTU:
5973 unsigned_p = 1;
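/* Fall through.  */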
5974 case LT:
5975 compare_dir = 1;
5976 break;
5977 case GTU:
5978 unsigned_p = 1;
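/* Fall through.  */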
5979 case GT:
5980 compare_dir = -1;
5981 break;
5982 case NE:
5983 compare_dir = 0;
5984 break;
5985 default:
5986 abort ();
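/* For example, a branch-back condition "i <= n" (LE) yields
   compare_dir = 1 and off_by_one = 1, since the last successful
   iteration leaves the variable one past the strict "<" bound,
   while "i != n" (NE) yields compare_dir = 0 and is guarded by
   the divisibility test further below.  */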
5989 /* If the comparison value is an invariant register, then try to find
5990 its value from the insns before the start of the loop. */
5992 final_value = comparison_value;
5993 if (REG_P (comparison_value)
5994 && loop_invariant_p (loop, comparison_value))
5996 final_value = loop_find_equiv_value (loop, comparison_value);
5998 /* If we don't get an invariant final value, we are better
5999 off with the original register. */
6000 if (! loop_invariant_p (loop, final_value))
6001 final_value = comparison_value;
6004 /* Calculate the approximate final value of the induction variable
6005 (on the last successful iteration). The exact final value
6006 depends on the branch operator, and increment sign. It will be
6007 wrong if the iteration variable is not incremented by one each
6008 time through the loop and (comparison_value + off_by_one -
6009 initial_value) % increment != 0.
6010 ??? Note that the final_value may overflow and thus final_larger
6011 will be bogus. A potentially infinite loop will be classified
6012 as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */
6013 if (off_by_one)
6014 final_value = plus_constant (final_value, off_by_one);
6016 /* Save the calculated values describing this loop's bounds, in case
6017 precondition_loop_p will need them later. These values can not be
6018 recalculated inside precondition_loop_p because strength reduction
6019 optimizations may obscure the loop's structure.
6021 These values are only required by precondition_loop_p and insert_bct
6022 whenever the number of iterations cannot be computed at compile time.
6023 Only the difference between final_value and initial_value is
6024 important. Note that final_value is only approximate. */
6025 loop_info->initial_value = initial_value;
6026 loop_info->comparison_value = comparison_value;
6027 loop_info->final_value = plus_constant (comparison_value, off_by_one);
6028 loop_info->increment = increment;
6029 loop_info->iteration_var = iteration_var;
6030 loop_info->comparison_code = comparison_code;
6031 loop_info->iv = bl;
6033 /* Try to determine the iteration count for loops such
6034 as (for i = init; i < init + const; i++). When running the
6035 loop optimization twice, the first pass often converts simple
6036 loops into this form. */
6038 if (REG_P (initial_value))
6040 rtx reg1;
6041 rtx reg2;
6042 rtx const2;
6044 reg1 = initial_value;
6045 if (GET_CODE (final_value) == PLUS)
6046 reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
6047 else
6048 reg2 = final_value, const2 = const0_rtx;
6050 /* Check for initial_value = reg1, final_value = reg2 + const2,
6051 where reg1 != reg2. */
6052 if (REG_P (reg2) && reg2 != reg1)
6054 rtx temp;
6056 /* Find what reg1 is equivalent to. Hopefully it will
6057 either be reg2 or reg2 plus a constant. */
6058 temp = loop_find_equiv_value (loop, reg1);
6060 if (find_common_reg_term (temp, reg2))
6061 initial_value = temp;
6062 else if (loop_invariant_p (loop, reg2))
6064 /* Find what reg2 is equivalent to. Hopefully it will
6065 either be reg1 or reg1 plus a constant. Let's ignore
6066 the latter case for now since it is not so common. */
6067 temp = loop_find_equiv_value (loop, reg2);
6069 if (temp == loop_info->iteration_var)
6070 temp = initial_value;
6071 if (temp == reg1)
6072 final_value = (const2 == const0_rtx)
6073 ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
6078 loop_info->initial_equiv_value = initial_value;
6079 loop_info->final_equiv_value = final_value;
6081 /* For EQ comparison loops, we don't have a valid final value.
6082 Check this now so that we won't leave an invalid value if we
6083 return early for any other reason. */
6084 if (comparison_code == EQ)
6085 loop_info->final_equiv_value = loop_info->final_value = 0;
6087 if (increment == 0)
6089 if (loop_dump_stream)
6090 fprintf (loop_dump_stream,
6091 "Loop iterations: Increment value can't be calculated.\n");
6092 return 0;
6095 if (GET_CODE (increment) != CONST_INT)
6097 /* If we have a REG, check to see if REG holds a constant value. */
6098 /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't
6099 clear if it is worthwhile to try to handle such RTL. */
6100 if (REG_P (increment) || GET_CODE (increment) == SUBREG)
6101 increment = loop_find_equiv_value (loop, increment);
6103 if (GET_CODE (increment) != CONST_INT)
6105 if (loop_dump_stream)
6107 fprintf (loop_dump_stream,
6108 "Loop iterations: Increment value not constant ");
6109 print_simple_rtl (loop_dump_stream, increment);
6110 fprintf (loop_dump_stream, ".\n");
6112 return 0;
6114 loop_info->increment = increment;
6117 if (GET_CODE (initial_value) != CONST_INT)
6119 if (loop_dump_stream)
6121 fprintf (loop_dump_stream,
6122 "Loop iterations: Initial value not constant ");
6123 print_simple_rtl (loop_dump_stream, initial_value);
6124 fprintf (loop_dump_stream, ".\n");
6126 return 0;
6128 else if (GET_CODE (final_value) != CONST_INT)
6130 if (loop_dump_stream)
6132 fprintf (loop_dump_stream,
6133 "Loop iterations: Final value not constant ");
6134 print_simple_rtl (loop_dump_stream, final_value);
6135 fprintf (loop_dump_stream, ".\n");
6137 return 0;
6139 else if (comparison_code == EQ)
6141 rtx inc_once;
6143 if (loop_dump_stream)
6144 fprintf (loop_dump_stream, "Loop iterations: EQ comparison loop.\n");
6146 inc_once = gen_int_mode (INTVAL (initial_value) + INTVAL (increment),
6147 GET_MODE (iteration_var));
6149 if (inc_once == final_value)
6151 /* The iterator value once through the loop is equal to the
6152 comparison value. Either we have an infinite loop, or
6153 we'll loop twice. */
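/* For example, with a hypothetical source loop
   "i = 0; do { ... i++; } while (i == 1);": the first pass leaves
   i == 1, so the test succeeds exactly once and the body runs
   twice.  With a zero increment the test would succeed forever,
   hence the infinite-loop check just below.  */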
6154 if (increment == const0_rtx)
6155 return 0;
6156 loop_info->n_iterations = 2;
6158 else
6159 loop_info->n_iterations = 1;
6161 if (GET_CODE (loop_info->initial_value) == CONST_INT)
6162 loop_info->final_value
6163 = gen_int_mode ((INTVAL (loop_info->initial_value)
6164 + loop_info->n_iterations * INTVAL (increment)),
6165 GET_MODE (iteration_var));
6166 else
6167 loop_info->final_value
6168 = plus_constant (loop_info->initial_value,
6169 loop_info->n_iterations * INTVAL (increment));
6170 loop_info->final_equiv_value
6171 = gen_int_mode ((INTVAL (initial_value)
6172 + loop_info->n_iterations * INTVAL (increment)),
6173 GET_MODE (iteration_var));
6174 return loop_info->n_iterations;
6177 /* final_larger is 1 if the final value is larger, 0 if they are equal, otherwise -1. */
6178 if (unsigned_p)
6179 final_larger
6180 = ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6181 > (unsigned HOST_WIDE_INT) INTVAL (initial_value))
6182 - ((unsigned HOST_WIDE_INT) INTVAL (final_value)
6183 < (unsigned HOST_WIDE_INT) INTVAL (initial_value));
6184 else
6185 final_larger = (INTVAL (final_value) > INTVAL (initial_value))
6186 - (INTVAL (final_value) < INTVAL (initial_value));
6188 if (INTVAL (increment) > 0)
6189 increment_dir = 1;
6190 else if (INTVAL (increment) == 0)
6191 increment_dir = 0;
6192 else
6193 increment_dir = -1;
6195 /* There are 27 different cases: compare_dir = -1, 0, 1;
6196 final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
6197 There are 4 normal cases, 4 reverse cases (where the iteration variable
6198 will overflow before the loop exits), 4 infinite loop cases, and 15
6199 immediate exit (0 or 1 iteration depending on loop type) cases.
6200 Only try to optimize the normal cases. */
6202 /* (compare_dir/final_larger/increment_dir)
6203 Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
6204 Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
6205 Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
6206 Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
6208 /* ?? If the meaning of reverse loops (where the iteration variable
6209 will overflow before the loop exits) is undefined, then we could
6210 eliminate all of these special checks, and just always assume
6211 the loops are normal/immediate/infinite. Note that this means
6212 the sign of increment_dir does not have to be known. Also,
6213 since it does not really hurt if immediate exit loops or infinite
6214 loops are optimized, that case could be ignored too, and hence all
6215 loops can be optimized.
6217 According to the ANSI spec, the result in the reverse loop case is
6218 undefined, because the action on signed overflow is undefined.
6220 See also the special test for NE loops below. */
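/* A reverse case, e.g. (1/1/-1): "for (i = 0; i < 100; i--)".
   The variable moves away from the bound, so the exit test cannot
   fail until i wraps around.  */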
6222 if (final_larger == increment_dir && final_larger != 0
6223 && (final_larger == compare_dir || compare_dir == 0))
6224 /* Normal case. */
6226 else
6228 if (loop_dump_stream)
6229 fprintf (loop_dump_stream, "Loop iterations: Not normal loop.\n");
6230 return 0;
6233 /* Calculate the number of iterations, final_value is only an approximation,
6234 so correct for that. Note that abs_diff and n_iterations are
6235 unsigned, because they can be as large as 2^n - 1. */
6237 inc = INTVAL (increment);
6238 if (inc > 0)
6240 abs_diff = INTVAL (final_value) - INTVAL (initial_value);
6241 abs_inc = inc;
6243 else if (inc < 0)
6245 abs_diff = INTVAL (initial_value) - INTVAL (final_value);
6246 abs_inc = -inc;
6248 else
6249 abort ();
6251 /* Given that iteration_var is going to iterate over its own mode,
6252 not HOST_WIDE_INT, disregard higher bits that might have come
6253 into the picture due to sign extension of initial and final
6254 values. */
6255 abs_diff &= ((unsigned HOST_WIDE_INT) 1
6256 << (GET_MODE_BITSIZE (GET_MODE (iteration_var)) - 1)
6257 << 1) - 1;
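/* The shift is done in two steps so that a mode as wide as
   HOST_WIDE_INT does not shift by the full word width.  For example,
   assuming a 64-bit HOST_WIDE_INT and a 32-bit iteration variable,
   the mask is 0xffffffff and only the 32-bit distance survives.  */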
6259 /* For NE tests, make sure that the iteration variable won't miss
6260 the final value. If abs_diff mod abs_incr is not zero, then the
6261 iteration variable will overflow before the loop exits, and we
6262 can not calculate the number of iterations. */
6263 if (compare_dir == 0 && (abs_diff % abs_inc) != 0)
6264 return 0;
6266 /* Note that the number of iterations could be calculated using
6267 (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
6268 handle potential overflow of the summation. */
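/* Worked example: initial value 0, final value 10, increment 3
   gives abs_diff = 10 and n_iterations = 10 / 3 + 1 = 4, covering
   i = 0, 3, 6 and 9.  */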
6269 loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0);
6270 return loop_info->n_iterations;
6273 /* Perform strength reduction and induction variable elimination.
6275 Pseudo registers created during this function will be beyond the
6276 last valid index in several tables including
6277 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
6278 problem here, because the added registers cannot be givs outside of
6279 their loop, and hence will never be reconsidered. But scan_loop
6280 must check regnos to make sure they are in bounds. */
6282 static void
6283 strength_reduce (struct loop *loop, int flags)
6285 struct loop_info *loop_info = LOOP_INFO (loop);
6286 struct loop_regs *regs = LOOP_REGS (loop);
6287 struct loop_ivs *ivs = LOOP_IVS (loop);
6288 rtx p;
6289 /* Temporary list pointer for traversing ivs->list. */
6290 struct iv_class *bl;
6291 /* Ratio of extra register life span we can justify
6292 for saving an instruction. More if loop doesn't call subroutines
6293 since in that case saving an insn makes more difference
6294 and more registers are available. */
6295 /* ??? could set this to last value of threshold in move_movables */
6296 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
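/* For example, on a hypothetical target with 29 non-fixed registers,
   threshold is 2 * (3 + 29) = 64 for a call-free loop and half
   that otherwise.  */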
6297 /* Map of pseudo-register replacements. */
6298 rtx *reg_map = NULL;
6299 int reg_map_size;
6300 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
6301 int insn_count = count_insns_in_loop (loop);
6303 addr_placeholder = gen_reg_rtx (Pmode);
6305 ivs->n_regs = max_reg_before_loop;
6306 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
6308 /* Find all BIVs in loop. */
6309 loop_bivs_find (loop);
6311 /* Exit if there are no bivs. */
6312 if (! ivs->list)
6314 loop_ivs_free (loop);
6315 return;
6318 /* Determine how BIVs are initialized by looking through the
6319 pre-header extended basic block. */
6320 loop_bivs_init_find (loop);
6322 /* Look at each biv and see if we can say anything better about its
6323 initial value from any initializing insns set up above. */
6324 loop_bivs_check (loop);
6326 /* Search the loop for general induction variables. */
6327 loop_givs_find (loop);
6329 /* Try to calculate and save the number of loop iterations. This is
6330 set to zero if the actual number can not be calculated. This must
6331 be called after all giv's have been identified, since otherwise it may
6332 fail if the iteration variable is a giv. */
6333 loop_iterations (loop);
6335 #ifdef HAVE_prefetch
6336 if (flags & LOOP_PREFETCH)
6337 emit_prefetch_instructions (loop);
6338 #endif
6340 /* Now for each giv for which we still don't know whether or not it is
6341 replaceable, check to see if it is replaceable because its final value
6342 can be calculated. This must be done after loop_iterations is called,
6343 so that final_giv_value will work correctly. */
6344 loop_givs_check (loop);
6346 /* Try to prove that the loop counter variable (if any) is always
6347 nonnegative; if so, record that fact with a REG_NONNEG note
6348 so that "decrement and branch until zero" insn can be used. */
6349 check_dbra_loop (loop, insn_count);
6351 /* Create reg_map to hold substitutions for replaceable giv regs.
6352 Some givs might have been made from biv increments, so look at
6353 ivs->reg_iv_type for a suitable size. */
6354 reg_map_size = ivs->n_regs;
6355 reg_map = xcalloc (reg_map_size, sizeof (rtx));
6357 /* Examine each iv class for feasibility of strength reduction/induction
6358 variable elimination. */
6360 for (bl = ivs->list; bl; bl = bl->next)
6362 struct induction *v;
6363 int benefit;
6365 /* Test whether it will be possible to eliminate this biv
6366 provided all givs are reduced. */
6367 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
6369 /* This will be true at the end, if all givs which depend on this
6370 biv have been strength reduced.
6371 We can't (currently) eliminate the biv unless this is so. */
6372 bl->all_reduced = 1;
6374 /* Check each extension dependent giv in this class to see if its
6375 root biv is safe from wrapping in the interior mode. */
6376 check_ext_dependent_givs (loop, bl);
6378 /* Combine all giv's for this iv_class. */
6379 combine_givs (regs, bl);
6381 for (v = bl->giv; v; v = v->next_iv)
6383 struct induction *tv;
6385 if (v->ignore || v->same)
6386 continue;
6388 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
6390 /* If an insn is not to be strength reduced, then set its ignore
6391 flag, and clear bl->all_reduced. */
6393 /* A giv that depends on a reversed biv must be reduced if it is
6394 used after the loop exit, otherwise, it would have the wrong
6395 value after the loop exit. To make it simple, just reduce all
6396 such giv's, whether or not we know they are used after the loop
6397 exit. */
6399 if (v->lifetime * threshold * benefit < insn_count
6400 && ! bl->reversed)
6402 if (loop_dump_stream)
6403 fprintf (loop_dump_stream,
6404 "giv of insn %d not worth while, %d vs %d.\n",
6405 INSN_UID (v->insn),
6406 v->lifetime * threshold * benefit, insn_count);
6407 v->ignore = 1;
6408 bl->all_reduced = 0;
6410 else
6412 /* Check that we can increment the reduced giv without a
6413 multiply insn. If not, reject it. */
6415 for (tv = bl->biv; tv; tv = tv->next_iv)
6416 if (tv->mult_val == const1_rtx
6417 && ! product_cheap_p (tv->add_val, v->mult_val))
6419 if (loop_dump_stream)
6420 fprintf (loop_dump_stream,
6421 "giv of insn %d: would need a multiply.\n",
6422 INSN_UID (v->insn));
6423 v->ignore = 1;
6424 bl->all_reduced = 0;
6425 break;
6430 /* Check for givs whose first use is their definition and whose
6431 last use is the definition of another giv. If so, it is likely
6432 dead and should not be used to derive another giv nor to
6433 eliminate a biv. */
6434 loop_givs_dead_check (loop, bl);
6436 /* Reduce each giv that we decided to reduce. */
6437 loop_givs_reduce (loop, bl);
6439 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
6440 as not reduced.
6442 For each giv register that can be reduced now: if replaceable,
6443 substitute reduced reg wherever the old giv occurs;
6444 else add new move insn "giv_reg = reduced_reg". */
6445 loop_givs_rescan (loop, bl, reg_map);
6447 /* All the givs based on the biv bl have been reduced if they
6448 merit it. */
6450 /* For each giv not marked as maybe dead that has been combined with a
6451 second giv, clear any "maybe dead" mark on that second giv.
6452 v->new_reg will either be or refer to the register of the giv it
6453 combined with.
6455 Doing this clearing avoids problems in biv elimination where
6456 a giv's new_reg is a complex value that can't be put in the
6457 insn but the giv combined with (with a reg as new_reg) is
6458 marked maybe_dead. Since the register will be used in either
6459 case, we'd prefer it be used from the simpler giv. */
6461 for (v = bl->giv; v; v = v->next_iv)
6462 if (! v->maybe_dead && v->same)
6463 v->same->maybe_dead = 0;
6465 /* Try to eliminate the biv, if it is a candidate.
6466 This won't work if ! bl->all_reduced,
6467 since the givs we planned to use might not have been reduced.
6469 We have to be careful that we didn't initially think we could
6470 eliminate this biv because of a giv that we now think may be
6471 dead and shouldn't be used as a biv replacement.
6473 Also, there is the possibility that we may have a giv that looks
6474 like it can be used to eliminate a biv, but the resulting insn
6475 isn't valid. This can happen, for example, on the 88k, where a
6476 JUMP_INSN can compare a register only with zero. Attempts to
6477 replace it with a compare with a constant will fail.
6479 Note that in cases where this call fails, we may have replaced some
6480 of the occurrences of the biv with a giv, but no harm was done in
6481 doing so in the rare cases where it can occur. */
6483 if (bl->all_reduced == 1 && bl->eliminable
6484 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
6486 /* ?? If we created a new test to bypass the loop entirely,
6487 or otherwise drop straight in, based on this test, then
6488 we might want to rewrite it also. This way some later
6489 pass has more hope of removing the initialization of this
6490 biv entirely. */
6492 /* If final_value != 0, then the biv may be used after loop end
6493 and we must emit an insn to set it just in case.
6495 Reversed bivs already have an insn after the loop setting their
6496 value, so we don't need another one. We can't calculate the
6497 proper final value for such a biv here anyways. */
6498 if (bl->final_value && ! bl->reversed)
6499 loop_insn_sink_or_swim (loop,
6500 gen_load_of_final_value (bl->biv->dest_reg,
6501 bl->final_value));
6503 if (loop_dump_stream)
6504 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
6505 bl->regno);
6507 /* See above note wrt final_value. But since we couldn't eliminate
6508 the biv, we must set the value after the loop instead of before. */
6509 else if (bl->final_value && ! bl->reversed)
6510 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
6511 bl->final_value));
6514 /* Go through all the instructions in the loop, making all the
6515 register substitutions scheduled in REG_MAP. */
6517 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
6518 if (INSN_P (p))
6520 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
6521 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
6522 INSN_CODE (p) = -1;
6525 if (loop_dump_stream)
6526 fprintf (loop_dump_stream, "\n");
6528 loop_ivs_free (loop);
6529 if (reg_map)
6530 free (reg_map);
6533 /* Record all basic induction variables calculated in the insn. */
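/* A typical update recorded here is
   (set (reg i) (plus (reg i) (const_int 4))), which
   basic_induction_var accepts with inc_val == (const_int 4) and
   mult_val == const1_rtx.  */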
6534 static rtx
6535 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
6536 int maybe_multiple)
6538 struct loop_ivs *ivs = LOOP_IVS (loop);
6539 rtx set;
6540 rtx dest_reg;
6541 rtx inc_val;
6542 rtx mult_val;
6543 rtx *location;
6545 if (NONJUMP_INSN_P (p)
6546 && (set = single_set (p))
6547 && REG_P (SET_DEST (set)))
6549 dest_reg = SET_DEST (set);
6550 if (REGNO (dest_reg) < max_reg_before_loop
6551 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
6552 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
6554 if (basic_induction_var (loop, SET_SRC (set),
6555 GET_MODE (SET_SRC (set)),
6556 dest_reg, p, &inc_val, &mult_val,
6557 &location))
6559 /* It is a possible basic induction variable.
6560 Create and initialize an induction structure for it. */
6562 struct induction *v = xmalloc (sizeof (struct induction));
6564 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
6565 not_every_iteration, maybe_multiple);
6566 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
6568 else if (REGNO (dest_reg) < ivs->n_regs)
6569 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
6572 return p;
6575 /* Record all givs calculated in the insn.
6576 A register is a giv if: it is only set once, it is a function of a
6577 biv and a constant (or invariant), and it is not a biv. */
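/* For example, in "for (i = 0; i < n; i++) a[i] = 0;" the counter i
   is a biv, and a register holding the scaled index i * 4 (assuming
   4-byte elements) is a giv with src_reg == i and mult_val == 4.  */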
6578 static rtx
6579 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
6580 int maybe_multiple)
6582 struct loop_regs *regs = LOOP_REGS (loop);
6584 rtx set;
6585 /* Look for a general induction variable in a register. */
6586 if (NONJUMP_INSN_P (p)
6587 && (set = single_set (p))
6588 && REG_P (SET_DEST (set))
6589 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
6591 rtx src_reg;
6592 rtx dest_reg;
6593 rtx add_val;
6594 rtx mult_val;
6595 rtx ext_val;
6596 int benefit;
6597 rtx regnote = 0;
6598 rtx last_consec_insn;
6600 dest_reg = SET_DEST (set);
6601 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
6602 return p;
6604 if (/* SET_SRC is a giv. */
6605 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
6606 &mult_val, &ext_val, 0, &benefit, VOIDmode)
6607 /* Equivalent expression is a giv. */
6608 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
6609 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
6610 &add_val, &mult_val, &ext_val, 0,
6611 &benefit, VOIDmode)))
6612 /* Don't try to handle any regs made by loop optimization.
6613 We have nothing on them in regno_first_uid, etc. */
6614 && REGNO (dest_reg) < max_reg_before_loop
6615 /* Don't recognize a BASIC_INDUCT_VAR here. */
6616 && dest_reg != src_reg
6617 /* This must be the only place where the register is set. */
6618 && (regs->array[REGNO (dest_reg)].n_times_set == 1
6619 /* or all sets must be consecutive and make a giv. */
6620 || (benefit = consec_sets_giv (loop, benefit, p,
6621 src_reg, dest_reg,
6622 &add_val, &mult_val, &ext_val,
6623 &last_consec_insn))))
6625 struct induction *v = xmalloc (sizeof (struct induction));
6627 /* If this is a library call, increase benefit. */
6628 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6629 benefit += libcall_benefit (p);
6631 /* Skip the consecutive insns, if there are any. */
6632 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
6633 p = last_consec_insn;
6635 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
6636 ext_val, benefit, DEST_REG, not_every_iteration,
6637 maybe_multiple, (rtx*) 0);
6642 /* Look for givs which are memory addresses. */
6643 if (NONJUMP_INSN_P (p))
6644 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
6645 maybe_multiple);
6647 /* Update the status of whether giv can derive other givs. This can
6648 change when we pass a label or an insn that updates a biv. */
6649 if (INSN_P (p) || LABEL_P (p))
6650 update_giv_derive (loop, p);
6651 return p;
6654 /* Return 1 if X is a valid source for an initial value (or as value being
6655 compared against in an initial test).
6657 X must be either a register or constant and must not be clobbered between
6658 the current insn and the start of the loop.
6660 INSN is the insn containing X. */
6662 static int
6663 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
6665 if (CONSTANT_P (x))
6666 return 1;
6668 /* Only consider pseudos we know about initialized in insns whose luids
6669 we know. */
6670 if (!REG_P (x)
6671 || REGNO (x) >= max_reg_before_loop)
6672 return 0;
6674 /* Don't use call-clobbered registers across a call which clobbers it. On
6675 some machines, don't use any hard registers at all. */
6676 if (REGNO (x) < FIRST_PSEUDO_REGISTER
6677 && (SMALL_REGISTER_CLASSES
6678 || (call_used_regs[REGNO (x)] && call_seen)))
6679 return 0;
6681 /* Don't use registers that have been clobbered before the start of the
6682 loop. */
6683 if (reg_set_between_p (x, insn, loop_start))
6684 return 0;
6686 return 1;
6689 /* Scan X for memory refs and check each memory address
6690 as a possible giv. INSN is the insn whose pattern X comes from.
6691 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
6692 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
6693 more than once in each loop iteration. */
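/* For example, in (set (mem:SI (plus (reg base) (reg i))) (const_int 0))
   the address (plus (reg base) (reg i)) is a candidate DEST_ADDR giv
   when i is an induction variable.  */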
6695 static void
6696 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
6697 int not_every_iteration, int maybe_multiple)
6699 int i, j;
6700 enum rtx_code code;
6701 const char *fmt;
6703 if (x == 0)
6704 return;
6706 code = GET_CODE (x);
6707 switch (code)
6709 case REG:
6710 case CONST_INT:
6711 case CONST:
6712 case CONST_DOUBLE:
6713 case SYMBOL_REF:
6714 case LABEL_REF:
6715 case PC:
6716 case CC0:
6717 case ADDR_VEC:
6718 case ADDR_DIFF_VEC:
6719 case USE:
6720 case CLOBBER:
6721 return;
6723 case MEM:
6725 rtx src_reg;
6726 rtx add_val;
6727 rtx mult_val;
6728 rtx ext_val;
6729 int benefit;
6731 /* This code used to disable creating GIVs with mult_val == 1 and
6732 add_val == 0. However, this leads to lost optimizations when
6733 it comes time to combine a set of related DEST_ADDR GIVs, since
6734 this one would not be seen. */
6736 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
6737 &mult_val, &ext_val, 1, &benefit,
6738 GET_MODE (x)))
6740 /* Found one; record it. */
6741 struct induction *v = xmalloc (sizeof (struct induction));
6743 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
6744 add_val, ext_val, benefit, DEST_ADDR,
6745 not_every_iteration, maybe_multiple, &XEXP (x, 0));
6747 v->mem = x;
6750 return;
6752 default:
6753 break;
6756 /* Recursively scan the subexpressions for other mem refs. */
6758 fmt = GET_RTX_FORMAT (code);
6759 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6760 if (fmt[i] == 'e')
6761 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
6762 maybe_multiple);
6763 else if (fmt[i] == 'E')
6764 for (j = 0; j < XVECLEN (x, i); j++)
6765 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
6766 maybe_multiple);
6769 /* Fill in the data about one biv update.
6770 V is the `struct induction' in which we record the biv. (It is
6771 allocated by the caller, with xmalloc.)
6772 INSN is the insn that sets it.
6773 DEST_REG is the biv's reg.
6775 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
6776 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
6777 being set to INC_VAL.
6779 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
6780 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
6781 can be executed more than once per iteration. If MAYBE_MULTIPLE
6782 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
6783 executed exactly once per iteration. */
6785 static void
6786 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
6787 rtx inc_val, rtx mult_val, rtx *location,
6788 int not_every_iteration, int maybe_multiple)
6790 struct loop_ivs *ivs = LOOP_IVS (loop);
6791 struct iv_class *bl;
6793 v->insn = insn;
6794 v->src_reg = dest_reg;
6795 v->dest_reg = dest_reg;
6796 v->mult_val = mult_val;
6797 v->add_val = inc_val;
6798 v->ext_dependent = NULL_RTX;
6799 v->location = location;
6800 v->mode = GET_MODE (dest_reg);
6801 v->always_computable = ! not_every_iteration;
6802 v->always_executed = ! not_every_iteration;
6803 v->maybe_multiple = maybe_multiple;
6804 v->same = 0;
6806 /* Add this to the reg's iv_class, creating a class
6807 if this is the first incrementation of the reg. */
6809 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
6810 if (bl == 0)
6812 /* Create and initialize new iv_class. */
6814 bl = xmalloc (sizeof (struct iv_class));
6816 bl->regno = REGNO (dest_reg);
6817 bl->biv = 0;
6818 bl->giv = 0;
6819 bl->biv_count = 0;
6820 bl->giv_count = 0;
6822 /* Set initial value to the reg itself. */
6823 bl->initial_value = dest_reg;
6824 bl->final_value = 0;
6825 /* We haven't seen the initializing insn yet. */
6826 bl->init_insn = 0;
6827 bl->init_set = 0;
6828 bl->initial_test = 0;
6829 bl->incremented = 0;
6830 bl->eliminable = 0;
6831 bl->nonneg = 0;
6832 bl->reversed = 0;
6833 bl->total_benefit = 0;
6835 /* Add this class to ivs->list. */
6836 bl->next = ivs->list;
6837 ivs->list = bl;
6839 /* Put it in the array of biv register classes. */
6840 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
6842 else
6844 /* Check if location is the same as a previous one. */
6845 struct induction *induction;
6846 for (induction = bl->biv; induction; induction = induction->next_iv)
6847 if (location == induction->location)
6849 v->same = induction;
6850 break;
6854 /* Update IV_CLASS entry for this biv. */
6855 v->next_iv = bl->biv;
6856 bl->biv = v;
6857 bl->biv_count++;
6858 if (mult_val == const1_rtx)
6859 bl->incremented = 1;
6861 if (loop_dump_stream)
6862 loop_biv_dump (v, loop_dump_stream, 0);
6865 /* Fill in the data about one giv.
6866 V is the `struct induction' in which we record the giv. (It is
6867 allocated by the caller, with xmalloc.)
6868 INSN is the insn that sets it.
6869 BENEFIT estimates the savings from deleting this insn.
6870 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
6871 into a register or is used as a memory address.
6873 SRC_REG is the biv reg which the giv is computed from.
6874 DEST_REG is the giv's reg (if the giv is stored in a reg).
6875 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
6876 LOCATION points to the place where this giv's value appears in INSN. */
6878 static void
6879 record_giv (const struct loop *loop, struct induction *v, rtx insn,
6880 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
6881 rtx ext_val, int benefit, enum g_types type,
6882 int not_every_iteration, int maybe_multiple, rtx *location)
6884 struct loop_ivs *ivs = LOOP_IVS (loop);
6885 struct induction *b;
6886 struct iv_class *bl;
6887 rtx set = single_set (insn);
6888 rtx temp;
6890 /* Attempt to prove constantness of the values. Don't let simplify_rtx
6891 undo the MULT canonicalization that we performed earlier. */
6892 temp = simplify_rtx (add_val);
6893 if (temp
6894 && ! (GET_CODE (add_val) == MULT
6895 && GET_CODE (temp) == ASHIFT))
6896 add_val = temp;
6898 v->insn = insn;
6899 v->src_reg = src_reg;
6900 v->giv_type = type;
6901 v->dest_reg = dest_reg;
6902 v->mult_val = mult_val;
6903 v->add_val = add_val;
6904 v->ext_dependent = ext_val;
6905 v->benefit = benefit;
6906 v->location = location;
6907 v->cant_derive = 0;
6908 v->combined_with = 0;
6909 v->maybe_multiple = maybe_multiple;
6910 v->maybe_dead = 0;
6911 v->derive_adjustment = 0;
6912 v->same = 0;
6913 v->ignore = 0;
6914 v->new_reg = 0;
6915 v->final_value = 0;
6916 v->same_insn = 0;
6917 v->auto_inc_opt = 0;
6918 v->shared = 0;
6920 /* The v->always_computable field is used in update_giv_derive, to
6921 determine whether a giv can be used to derive another giv. For a
6922 DEST_REG giv, INSN computes a new value for the giv, so its value
6923 isn't computable if INSN isn't executed every iteration.
6924 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
6925 it does not compute a new value. Hence the value is always computable
6926 regardless of whether INSN is executed each iteration. */
6928 if (type == DEST_ADDR)
6929 v->always_computable = 1;
6930 else
6931 v->always_computable = ! not_every_iteration;
6933 v->always_executed = ! not_every_iteration;
6935 if (type == DEST_ADDR)
6937 v->mode = GET_MODE (*location);
6938 v->lifetime = 1;
6940 else /* type == DEST_REG */
6942 v->mode = GET_MODE (SET_DEST (set));
6944 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
6946 /* If the lifetime is zero, it means that this register is
6947 really a dead store. So mark this as a giv that can be
6948 ignored. This will not prevent the biv from being eliminated. */
6949 if (v->lifetime == 0)
6950 v->ignore = 1;
6952 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6953 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6956 /* Add the giv to the class of givs computed from one biv. */
6958 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
6959 if (bl)
6961 v->next_iv = bl->giv;
6962 bl->giv = v;
6963 /* Don't count DEST_ADDR. This is supposed to count the number of
6964 insns that calculate givs. */
6965 if (type == DEST_REG)
6966 bl->giv_count++;
6967 bl->total_benefit += benefit;
6969 else
6970 /* Fatal error, biv missing for this giv? */
6971 abort ();
6973 if (type == DEST_ADDR)
6975 v->replaceable = 1;
6976 v->not_replaceable = 0;
6978 else
6980 /* The giv can be replaced outright by the reduced register only if all
6981 of the following conditions are true:
6982 - the insn that sets the giv is always executed on any iteration
6983 on which the giv is used at all
6984 (there are two ways to deduce this:
6985 either the insn is executed on every iteration,
6986 or all uses follow that insn in the same basic block),
6987 - the giv is not used outside the loop
6988 - no assignments to the biv occur during the giv's lifetime. */
6990 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
6991 /* Previous line always fails if INSN was moved by loop opt. */
6992 && REGNO_LAST_LUID (REGNO (dest_reg))
6993 < INSN_LUID (loop->end)
6994 && (! not_every_iteration
6995 || last_use_this_basic_block (dest_reg, insn)))
6997 /* Now check that there are no assignments to the biv within the
6998 giv's lifetime. This requires two separate checks. */
7000 /* Check each biv update, and fail if any are between the first
7001 and last use of the giv.
7003 If this loop contains an inner loop that was unrolled, then
7004 the insn modifying the biv may have been emitted by the loop
7005 unrolling code, and hence does not have a valid luid. Just
7006 mark the biv as not replaceable in this case. It is not very
7007 useful as a biv, because it is used in two different loops.
7008 It is very unlikely that we would be able to optimize the giv
7009 using this biv anyways. */
7011 v->replaceable = 1;
7012 v->not_replaceable = 0;
7013 for (b = bl->biv; b; b = b->next_iv)
7015 if (INSN_UID (b->insn) >= max_uid_for_loop
7016 || ((INSN_LUID (b->insn)
7017 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
7018 && (INSN_LUID (b->insn)
7019 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
7021 v->replaceable = 0;
7022 v->not_replaceable = 1;
7023 break;
7027 /* If there are any backwards branches that go from after the
7028 biv update to before it, then this giv is not replaceable. */
7029 if (v->replaceable)
7030 for (b = bl->biv; b; b = b->next_iv)
7031 if (back_branch_in_range_p (loop, b->insn))
7033 v->replaceable = 0;
7034 v->not_replaceable = 1;
7035 break;
7038 else
7040 /* May still be replaceable, we don't have enough info here to
7041 decide. */
7042 v->replaceable = 0;
7043 v->not_replaceable = 0;
7047 /* Record whether the add_val contains a const_int, for later use by
7048 combine_givs. */
7050 rtx tem = add_val;
7052 v->no_const_addval = 1;
7053 if (tem == const0_rtx)
7055 else if (CONSTANT_P (add_val))
7056 v->no_const_addval = 0;
7057 if (GET_CODE (tem) == PLUS)
7059 while (1)
7061 if (GET_CODE (XEXP (tem, 0)) == PLUS)
7062 tem = XEXP (tem, 0);
7063 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
7064 tem = XEXP (tem, 1);
7065 else
7066 break;
7068 if (CONSTANT_P (XEXP (tem, 1)))
7069 v->no_const_addval = 0;
7073 if (loop_dump_stream)
7074 loop_giv_dump (v, loop_dump_stream, 0);
7077 /* Try to calculate the final value of the giv, the value it will have at
7078 the end of the loop. If we can do it, return that value. */
7080 static rtx
7081 final_giv_value (const struct loop *loop, struct induction *v)
7083 struct loop_ivs *ivs = LOOP_IVS (loop);
7084 struct iv_class *bl;
7085 rtx insn;
7086 rtx increment, tem;
7087 rtx seq;
7088 rtx loop_end = loop->end;
7089 unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
7091 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
7093 /* The final value for givs which depend on reversed bivs must be calculated
7094 differently than for ordinary givs. In this case, there is already an
7095 insn after the loop which sets this giv's final value (if necessary),
7096 and there are no other loop exits, so we can return any value. */
7097 if (bl->reversed)
7099 if (loop_dump_stream)
7100 fprintf (loop_dump_stream,
7101 "Final giv value for %d, depends on reversed biv\n",
7102 REGNO (v->dest_reg));
7103 return const0_rtx;
7106 /* Try to calculate the final value as a function of the biv it depends
7107 upon. The only exit from the loop must be the fall through at the bottom
7108 and the insn that sets the giv must be executed on every iteration
7109 (otherwise the giv may not have its final value when the loop exits). */
7111 /* ??? Can calculate the final giv value by subtracting off the
7112 extra biv increments times the giv's mult_val. The loop must have
7113 only one exit for this to work, but the number of loop iterations
7114 does not need to be known. */
7116 if (n_iterations != 0
7117 && ! loop->exit_count
7118 && v->always_executed)
7120 /* ?? It is tempting to use the biv's value here since these insns will
7121 be put after the loop, and hence the biv will have its final value
7122 then. However, this fails if the biv is subsequently eliminated.
7123 Perhaps determine whether biv's are eliminable before trying to
7124 determine whether giv's are replaceable so that we can use the
7125 biv value here if it is not eliminable. */
7127 /* We are emitting code after the end of the loop, so we must make
7128 sure that bl->initial_value is still valid then. It will still
7129 be valid if it is invariant. */
7131 increment = biv_total_increment (bl);
7133 if (increment && loop_invariant_p (loop, increment)
7134 && loop_invariant_p (loop, bl->initial_value))
7136 /* Can calculate the loop exit value of its biv as
7137 (n_iterations * increment) + initial_value */
7139 /* The loop exit value of the giv is then
7140 (final_biv_value - extra increments) * mult_val + add_val.
7141 The extra increments are any increments to the biv which
7142 occur in the loop after the giv's value is calculated.
7143 We must search from the insn that sets the giv to the end
7144 of the loop to calculate this value. */
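/* Worked example, with hypothetical figures: a biv starting at 0
   with a single increment of 4 per iteration and n_iterations == 10
   exits with the value 40.  A giv 8 * biv + 100 whose insn precedes
   that increment sees one extra increment per pass, so its final
   value is (40 - 4) * 8 + 100.  */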
7146 /* Put the final biv value in tem. */
7147 tem = gen_reg_rtx (v->mode);
7148 record_base_value (REGNO (tem), bl->biv->add_val, 0);
7149 loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment),
7150 GEN_INT (n_iterations),
7151 extend_value_for_giv (v, bl->initial_value),
7152 tem);
7154 /* Subtract off extra increments as we find them. */
7155 for (insn = NEXT_INSN (v->insn); insn != loop_end;
7156 insn = NEXT_INSN (insn))
7158 struct induction *biv;
7160 for (biv = bl->biv; biv; biv = biv->next_iv)
7161 if (biv->insn == insn)
7163 start_sequence ();
7164 tem = expand_simple_binop (GET_MODE (tem), MINUS, tem,
7165 biv->add_val, NULL_RTX, 0,
7166 OPTAB_LIB_WIDEN);
7167 seq = get_insns ();
7168 end_sequence ();
7169 loop_insn_sink (loop, seq);
7173 /* Now calculate the giv's final value. */
7174 loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem);
7176 if (loop_dump_stream)
7177 fprintf (loop_dump_stream,
7178 "Final giv value for %d, calc from biv's value.\n",
7179 REGNO (v->dest_reg));
7181 return tem;
7185 /* Replaceable giv's should never reach here. */
7186 if (v->replaceable)
7187 abort ();
7189 /* Check to see if the biv is dead at all loop exits. */
7190 if (reg_dead_after_loop (loop, v->dest_reg))
7192 if (loop_dump_stream)
7193 fprintf (loop_dump_stream,
7194 "Final giv value for %d, giv dead after loop exit.\n",
7195 REGNO (v->dest_reg));
7197 return const0_rtx;
7200 return 0;
7203 /* All this does is determine whether a giv can be made replaceable because
7204 its final value can be calculated. This code cannot be part of record_giv
7205 above, because final_giv_value requires that the number of loop iterations
7206 be known, and that cannot be accurately calculated until after all givs
7207 have been identified. */
7209 static void
7210 check_final_value (const struct loop *loop, struct induction *v)
7212 rtx final_value = 0;
7214 /* DEST_ADDR givs will never reach here, because they are always marked
7215 replaceable above in record_giv. */
7217 /* The giv can be replaced outright by the reduced register only if all
7218 of the following conditions are true:
7219 - the insn that sets the giv is always executed on any iteration
7220 on which the giv is used at all
7221 (there are two ways to deduce this:
7222 either the insn is executed on every iteration,
7223 or all uses follow that insn in the same basic block),
7224 - its final value can be calculated (this condition is different
7225 than the one above in record_giv)
7226 - it's not used before it's set
7227 - no assignments to the biv occur during the giv's lifetime. */
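/* A hypothetical illustration: in a loop body of the shape
     G = B * 4;  ...  B = B + 1;  ...  use (G);
   the biv B is assigned inside G's lifetime, so G fails the last
   condition and is not replaceable; had the increment preceded the set
   of G or followed its last use, G could be replaced outright.  */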
7229 #if 0
7230 /* This is only called now when replaceable is known to be false. */
7231 /* Clear replaceable, so that it won't confuse final_giv_value. */
7232 v->replaceable = 0;
7233 #endif
7235 if ((final_value = final_giv_value (loop, v))
7236 && (v->always_executed
7237 || last_use_this_basic_block (v->dest_reg, v->insn)))
7239 int biv_increment_seen = 0, before_giv_insn = 0;
7240 rtx p = v->insn;
7241 rtx last_giv_use;
7243 v->replaceable = 1;
7244 v->not_replaceable = 0;
7246 /* When trying to determine whether or not a biv increment occurs
7247 during the lifetime of the giv, we can ignore uses of the variable
7248 outside the loop because final_value is true. Hence we cannot
7249 use regno_last_uid and regno_first_uid as above in record_giv. */
7251 /* Search the loop to determine whether any assignments to the
7252 biv occur during the giv's lifetime. Start with the insn
7253 that sets the giv, and search around the loop until we come
7254 back to that insn again.
7256 Also fail if there is a jump within the giv's lifetime that jumps
7257 to somewhere outside the lifetime but still within the loop. This
7258 catches spaghetti code where the execution order is not linear, and
7259 hence the above test fails. Here we assume that the giv lifetime
7260 does not extend from one iteration of the loop to the next, so as
7261 to make the test easier. Since the lifetime isn't known yet,
7262 this requires two loops. See also record_giv above. */
7264 last_giv_use = v->insn;
7266 while (1)
7268 p = NEXT_INSN (p);
7269 if (p == loop->end)
7271 before_giv_insn = 1;
7272 p = NEXT_INSN (loop->start);
7274 if (p == v->insn)
7275 break;
7277 if (INSN_P (p))
7279 /* It is possible for the BIV increment to use the GIV if we
7280 have a cycle. Thus we must be sure to check each insn for
7281 both BIV and GIV uses, and we must check for BIV uses
7282 first. */
7284 if (! biv_increment_seen
7285 && reg_set_p (v->src_reg, PATTERN (p)))
7286 biv_increment_seen = 1;
7288 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
7290 if (biv_increment_seen || before_giv_insn)
7292 v->replaceable = 0;
7293 v->not_replaceable = 1;
7294 break;
7296 last_giv_use = p;
7301 /* Now that the lifetime of the giv is known, check for branches
7302 from within the lifetime to outside the lifetime if it is still
7303 replaceable. */
7305 if (v->replaceable)
7307 p = v->insn;
7308 while (1)
7310 p = NEXT_INSN (p);
7311 if (p == loop->end)
7312 p = NEXT_INSN (loop->start);
7313 if (p == last_giv_use)
7314 break;
7316 if (JUMP_P (p) && JUMP_LABEL (p)
7317 && LABEL_NAME (JUMP_LABEL (p))
7318 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
7319 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
7320 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
7321 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
7323 v->replaceable = 0;
7324 v->not_replaceable = 1;
7326 if (loop_dump_stream)
7327 fprintf (loop_dump_stream,
7328 "Found branch outside giv lifetime.\n");
7330 break;
7335 /* If it is replaceable, then save the final value. */
7336 if (v->replaceable)
7337 v->final_value = final_value;
7340 if (loop_dump_stream && v->replaceable)
7341 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
7342 INSN_UID (v->insn), REGNO (v->dest_reg));
7345 /* Update the status of whether a giv can derive other givs.
7347 We need to do something special if there is or may be an update to the biv
7348 between the time the giv is defined and the time it is used to derive
7349 another giv.
7351 In addition, a giv that is only conditionally set is not allowed to
7352 derive another giv once a label has been passed.
7354 The cases we look at are when a label or an update to a biv is passed. */
7356 static void
7357 update_giv_derive (const struct loop *loop, rtx p)
7359 struct loop_ivs *ivs = LOOP_IVS (loop);
7360 struct iv_class *bl;
7361 struct induction *biv, *giv;
7362 rtx tem;
7363 int dummy;
7365 /* Search all IV classes, then all bivs, and finally all givs.
7367 There are three cases we are concerned with. First we have the situation
7368 of a giv that is only updated conditionally. In that case, it may not
7369 derive any givs after a label is passed.
7371 The second case is when a biv update occurs, or may occur, after the
7372 definition of a giv. For certain biv updates (see below) that are
7373 known to occur between the giv definition and use, we can adjust the
7374 giv definition. For others, or when the biv update is conditional,
7375 we must prevent the giv from deriving any other givs. There are two
7376 sub-cases within this case.
7378 If this is a label, we are concerned with any biv update that is done
7379 conditionally, since it may be done after the giv is defined followed by
7380 a branch here (actually, we need to pass both a jump and a label, but
7381 this extra tracking doesn't seem worth it).
7383 If this is a jump, we are concerned about any biv update that may be
7384 executed multiple times. We are actually only concerned about
7385 backward jumps, but it is probably not worth performing the test
7386 on the jump again here.
7388 If this is a biv update, we must adjust the giv status to show that a
7389 subsequent biv update was performed. If this adjustment cannot be done,
7390 the giv cannot derive further givs. */
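/* Hypothetical illustration: if G = B * M + A has been computed and a
   later insn in the same iteration performs B = B + D, a giv derived
   from G after that point sees the updated B; the compensation recorded
   in derive_adjustment is the product M * D, provided simplify_giv_expr
   can form it.  */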
7392 for (bl = ivs->list; bl; bl = bl->next)
7393 for (biv = bl->biv; biv; biv = biv->next_iv)
7394 if (LABEL_P (p) || JUMP_P (p)
7395 || biv->insn == p)
7397 /* Skip if location is the same as a previous one. */
7398 if (biv->same)
7399 continue;
7401 for (giv = bl->giv; giv; giv = giv->next_iv)
7403 /* If cant_derive is already true, there is no point in
7404 checking all of these conditions again. */
7405 if (giv->cant_derive)
7406 continue;
7408 /* If this giv is conditionally set and we have passed a label,
7409 it cannot derive anything. */
7410 if (LABEL_P (p) && ! giv->always_computable)
7411 giv->cant_derive = 1;
7413 /* Skip givs that have mult_val == 0, since
7414 they are really invariants. Also skip those that are
7415 replaceable, since we know their lifetime doesn't contain
7416 any biv update. */
7417 else if (giv->mult_val == const0_rtx || giv->replaceable)
7418 continue;
7420 /* The only way we can allow this giv to derive another
7421 is if this is a biv increment and we can form the product
7422 of biv->add_val and giv->mult_val. In this case, we will
7423 be able to compute a compensation. */
7424 else if (biv->insn == p)
7426 rtx ext_val_dummy;
7428 tem = 0;
7429 if (biv->mult_val == const1_rtx)
7430 tem = simplify_giv_expr (loop,
7431 gen_rtx_MULT (giv->mode,
7432 biv->add_val,
7433 giv->mult_val),
7434 &ext_val_dummy, &dummy);
7436 if (tem && giv->derive_adjustment)
7437 tem = simplify_giv_expr
7438 (loop,
7439 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
7440 &ext_val_dummy, &dummy);
7442 if (tem)
7443 giv->derive_adjustment = tem;
7444 else
7445 giv->cant_derive = 1;
7447 else if ((LABEL_P (p) && ! biv->always_computable)
7448 || (JUMP_P (p) && biv->maybe_multiple))
7449 giv->cant_derive = 1;
7454 /* Check whether an insn is an increment legitimate for a basic induction var.
7455 X is the source of insn P, or a part of it.
7456 MODE is the mode in which X should be interpreted.
7458 DEST_REG is the putative biv, also the destination of the insn.
7459 We accept patterns of these forms:
7460 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
7461 REG = INVARIANT + REG
7463 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
7464 store the additive term into *INC_VAL, and store the place where
7465 we found the additive term into *LOCATION.
7467 If X is an assignment of an invariant into DEST_REG, we set
7468 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
7470 We also want to detect a BIV when it corresponds to a variable
7471 whose mode was promoted. In that case, an increment
7472 of the variable may be a PLUS that adds a SUBREG of that variable to
7473 an invariant and then sign- or zero-extends the result of the PLUS
7474 into the variable.
7476 Most GIVs in such cases will be in the promoted mode, since that is
7477 probably the natural computation mode (and almost certainly the mode
7478 used for addresses) on the machine. So we view the pseudo-reg containing
7479 the variable as the BIV, as if it were simply incremented.
7481 Note that treating the entire pseudo as a BIV will result in making
7482 simple increments to any GIVs based on it. However, if the variable
7483 overflows in its declared mode but not its promoted mode, the result will
7484 be incorrect. This is acceptable if the variable is signed, since
7485 overflows in such cases are undefined, but not if it is unsigned, since
7486 those overflows are defined. So we only check for SIGN_EXTEND and
7487 not ZERO_EXTEND.
7489 If we cannot find a biv, we return 0. */
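/* Source-level sketch (illustrative only): for the C loop
     for (i = 0; i < n; i++) ...
   the insn "i = i + 1" matches REG = REG + INVARIANT, giving
   *MULT_VAL == const1_rtx and *INC_VAL == const1_rtx, while an insn
   "i = 0" inside an innermost loop matches the invariant-assignment
   form, giving *MULT_VAL == const0_rtx and *INC_VAL == const0_rtx.  */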
7491 static int
7492 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
7493 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
7494 rtx **location)
7496 enum rtx_code code;
7497 rtx *argp, arg;
7498 rtx insn, set = 0, last, inc;
7500 code = GET_CODE (x);
7501 *location = NULL;
7502 switch (code)
7504 case PLUS:
7505 if (rtx_equal_p (XEXP (x, 0), dest_reg)
7506 || (GET_CODE (XEXP (x, 0)) == SUBREG
7507 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
7508 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
7510 argp = &XEXP (x, 1);
7512 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
7513 || (GET_CODE (XEXP (x, 1)) == SUBREG
7514 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
7515 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
7517 argp = &XEXP (x, 0);
7519 else
7520 return 0;
7522 arg = *argp;
7523 if (loop_invariant_p (loop, arg) != 1)
7524 return 0;
7526 /* convert_modes can emit new instructions, e.g. when arg is a loop
7527 invariant MEM and dest_reg has a different mode.
7528 These instructions would be emitted after the end of the function
7529 and then *inc_val would be an uninitialized pseudo.
7530 Detect this and bail in this case.
7531 Other alternatives to solve this can be introducing a convert_modes
7532 variant which is allowed to fail but not allowed to emit new
7533 instructions, emit these instructions before loop start and let
7534 it be garbage collected if *inc_val is never used or saving the
7535 *inc_val initialization sequence generated here and when *inc_val
7536 is going to be actually used, emit it at some suitable place. */
7537 last = get_last_insn ();
7538 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
7539 if (get_last_insn () != last)
7541 delete_insns_since (last);
7542 return 0;
7545 *inc_val = inc;
7546 *mult_val = const1_rtx;
7547 *location = argp;
7548 return 1;
7550 case SUBREG:
7551 /* If what's inside the SUBREG is a BIV, then treat the SUBREG itself
7552 as that BIV. This will handle addition of promoted variables.
7553 ??? The comment at the start of this function is wrong: promoted
7554 variable increments don't look like it says they do. */
7555 return basic_induction_var (loop, SUBREG_REG (x),
7556 GET_MODE (SUBREG_REG (x)),
7557 dest_reg, p, inc_val, mult_val, location);
7559 case REG:
7560 /* If this register is assigned in a previous insn, look at its
7561 source, but don't go outside the loop or past a label. */
7563 /* If this sets a register to itself, we would repeat any previous
7564 biv increment if we applied this strategy blindly. */
7565 if (rtx_equal_p (dest_reg, x))
7566 return 0;
7568 insn = p;
7569 while (1)
7571 rtx dest;
7573 do
7574 insn = PREV_INSN (insn);
7576 while (insn && NOTE_P (insn)
7577 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7579 if (!insn)
7580 break;
7581 set = single_set (insn);
7582 if (set == 0)
7583 break;
7584 dest = SET_DEST (set);
7585 if (dest == x
7586 || (GET_CODE (dest) == SUBREG
7587 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
7588 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
7589 && SUBREG_REG (dest) == x))
7590 return basic_induction_var (loop, SET_SRC (set),
7591 (GET_MODE (SET_SRC (set)) == VOIDmode
7592 ? GET_MODE (x)
7593 : GET_MODE (SET_SRC (set))),
7594 dest_reg, insn,
7595 inc_val, mult_val, location);
7597 while (GET_CODE (dest) == SIGN_EXTRACT
7598 || GET_CODE (dest) == ZERO_EXTRACT
7599 || GET_CODE (dest) == SUBREG
7600 || GET_CODE (dest) == STRICT_LOW_PART)
7601 dest = XEXP (dest, 0);
7602 if (dest == x)
7603 break;
7605 /* Fall through. */
7607 /* Can accept constant setting of biv only when inside innermost loop.
7608 Otherwise, a biv of an inner loop may be incorrectly recognized
7609 as a biv of the outer loop,
7610 causing code to be moved INTO the inner loop. */
7611 case MEM:
7612 if (loop_invariant_p (loop, x) != 1)
7613 return 0;
7614 case CONST_INT:
7615 case SYMBOL_REF:
7616 case CONST:
7617 /* convert_modes aborts if we try to convert to or from CCmode, so just
7618 exclude that case. It is very unlikely that a condition code value
7619 would be a useful iterator anyway. convert_modes also aborts if we try
7620 to convert a float mode to non-float or vice versa. */
7621 if (loop->level == 1
7622 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
7623 && GET_MODE_CLASS (mode) != MODE_CC)
7625 /* Possible bug here? Perhaps we don't know the mode of X. */
7626 last = get_last_insn ();
7627 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
7628 if (get_last_insn () != last)
7630 delete_insns_since (last);
7631 return 0;
7634 *inc_val = inc;
7635 *mult_val = const0_rtx;
7636 return 1;
7638 else
7639 return 0;
7641 case SIGN_EXTEND:
7642 /* Ignore this BIV if signed arithmetic overflow is defined. */
7643 if (flag_wrapv)
7644 return 0;
7645 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
7646 dest_reg, p, inc_val, mult_val, location);
7648 case ASHIFTRT:
7649 /* Similar, since this can be a sign extension. */
7650 for (insn = PREV_INSN (p);
7651 (insn && NOTE_P (insn)
7652 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
7653 insn = PREV_INSN (insn))
7654 ;
7656 if (insn)
7657 set = single_set (insn);
7659 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
7660 && set && SET_DEST (set) == XEXP (x, 0)
7661 && GET_CODE (XEXP (x, 1)) == CONST_INT
7662 && INTVAL (XEXP (x, 1)) >= 0
7663 && GET_CODE (SET_SRC (set)) == ASHIFT
7664 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
7665 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
7666 GET_MODE (XEXP (x, 0)),
7667 dest_reg, insn, inc_val, mult_val,
7668 location);
7669 return 0;
7671 default:
7672 return 0;
7676 /* A general induction variable (giv) is any quantity that is a linear
7677 function of a basic induction variable,
7678 i.e. giv = biv * mult_val + add_val.
7679 The coefficients can be any loop invariant quantity.
7680 A giv need not be computed directly from the biv;
7681 it can be computed by way of other givs. */
7683 /* Determine whether X computes a giv.
7684 If it does, return a nonzero value
7685 which is the benefit from eliminating the computation of X;
7686 set *SRC_REG to the register of the biv that it is computed from;
7687 set *ADD_VAL and *MULT_VAL to the coefficients,
7688 such that the value of X is biv * mult + add; */
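/* Source-level sketch (illustrative only): in
     for (i = 0; i < n; i++)
       q = p + i * 4;
   `i' is the biv, and `q' is a giv with *SRC_REG == `i',
   *MULT_VAL == (const_int 4) and *ADD_VAL == `p'.  */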
7690 static int
7691 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
7692 rtx *add_val, rtx *mult_val, rtx *ext_val,
7693 int is_addr, int *pbenefit,
7694 enum machine_mode addr_mode)
7696 struct loop_ivs *ivs = LOOP_IVS (loop);
7697 rtx orig_x = x;
7699 /* If this is an invariant, forget it, it isn't a giv. */
7700 if (loop_invariant_p (loop, x) == 1)
7701 return 0;
7703 *pbenefit = 0;
7704 *ext_val = NULL_RTX;
7705 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
7706 if (x == 0)
7707 return 0;
7709 switch (GET_CODE (x))
7711 case USE:
7712 case CONST_INT:
7713 /* Since this is now an invariant and wasn't before, it must be a giv
7714 with MULT_VAL == 0. It doesn't matter which BIV we associate this
7715 with. */
7716 *src_reg = ivs->list->biv->dest_reg;
7717 *mult_val = const0_rtx;
7718 *add_val = x;
7719 break;
7721 case REG:
7722 /* This is equivalent to a BIV. */
7723 *src_reg = x;
7724 *mult_val = const1_rtx;
7725 *add_val = const0_rtx;
7726 break;
7728 case PLUS:
7729 /* Either (plus (biv) (invar)) or
7730 (plus (mult (biv) (invar_1)) (invar_2)). */
7731 if (GET_CODE (XEXP (x, 0)) == MULT)
7733 *src_reg = XEXP (XEXP (x, 0), 0);
7734 *mult_val = XEXP (XEXP (x, 0), 1);
7736 else
7738 *src_reg = XEXP (x, 0);
7739 *mult_val = const1_rtx;
7741 *add_val = XEXP (x, 1);
7742 break;
7744 case MULT:
7745 /* ADD_VAL is zero. */
7746 *src_reg = XEXP (x, 0);
7747 *mult_val = XEXP (x, 1);
7748 *add_val = const0_rtx;
7749 break;
7751 default:
7752 abort ();
7755 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
7756 one unless they are CONST_INT). */
7757 if (GET_CODE (*add_val) == USE)
7758 *add_val = XEXP (*add_val, 0);
7759 if (GET_CODE (*mult_val) == USE)
7760 *mult_val = XEXP (*mult_val, 0);
7762 if (is_addr)
7763 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
7764 else
7765 *pbenefit += rtx_cost (orig_x, SET);
7767 /* Always return true if this is a giv so it will be detected as such,
7768 even if the benefit is zero or negative. This allows elimination
7769 of bivs that might otherwise not be eliminated. */
7770 return 1;
7773 /* Given an expression, X, try to form it as a linear function of a biv.
7774 We will canonicalize it to be of the form
7775 (plus (mult (BIV) (invar_1))
7776 (invar_2))
7777 with possible degeneracies.
7779 The invariant expressions must each be of a form that can be used as a
7780 machine operand. We surround them with a USE rtx (a hack, but localized
7781 and certainly unambiguous!) if not a CONST_INT for simplicity in this
7782 routine; it is the caller's responsibility to strip them.
7784 If no such canonicalization is possible (i.e., two biv's are used or an
7785 expression that is neither invariant nor a biv or giv), this routine
7786 returns 0.
7788 For a nonzero return, the result will have a code of CONST_INT, USE,
7789 REG (for a BIV), PLUS, or MULT. No other codes will occur.
7791 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
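/* Sketch of the canonicalization on a hypothetical input: for a biv B
   and loop invariants p and c,
     (B + p) * c
   distributes through the MULT and PLUS cases below to roughly
     (plus (mult B c) (use (mult p c))),
   i.e. a MULT of the biv plus a USE-wrapped invariant addend.  */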
7793 static rtx sge_plus (enum machine_mode, rtx, rtx);
7794 static rtx sge_plus_constant (rtx, rtx);
7796 static rtx
7797 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
7799 struct loop_ivs *ivs = LOOP_IVS (loop);
7800 struct loop_regs *regs = LOOP_REGS (loop);
7801 enum machine_mode mode = GET_MODE (x);
7802 rtx arg0, arg1;
7803 rtx tem;
7805 /* If this is not an integer mode, or if we cannot do arithmetic in this
7806 mode, this can't be a giv. */
7807 if (mode != VOIDmode
7808 && (GET_MODE_CLASS (mode) != MODE_INT
7809 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
7810 return NULL_RTX;
7812 switch (GET_CODE (x))
7814 case PLUS:
7815 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7816 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7817 if (arg0 == 0 || arg1 == 0)
7818 return NULL_RTX;
7820 /* Put constant last, CONST_INT last if both constant. */
7821 if ((GET_CODE (arg0) == USE
7822 || GET_CODE (arg0) == CONST_INT)
7823 && ! ((GET_CODE (arg0) == USE
7824 && GET_CODE (arg1) == USE)
7825 || GET_CODE (arg1) == CONST_INT))
7826 tem = arg0, arg0 = arg1, arg1 = tem;
7828 /* Handle addition of zero, then addition of an invariant. */
7829 if (arg1 == const0_rtx)
7830 return arg0;
7831 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
7832 switch (GET_CODE (arg0))
7834 case CONST_INT:
7835 case USE:
7836 /* Adding two invariants must result in an invariant, so enclose
7837 addition operation inside a USE and return it. */
7838 if (GET_CODE (arg0) == USE)
7839 arg0 = XEXP (arg0, 0);
7840 if (GET_CODE (arg1) == USE)
7841 arg1 = XEXP (arg1, 0);
7843 if (GET_CODE (arg0) == CONST_INT)
7844 tem = arg0, arg0 = arg1, arg1 = tem;
7845 if (GET_CODE (arg1) == CONST_INT)
7846 tem = sge_plus_constant (arg0, arg1);
7847 else
7848 tem = sge_plus (mode, arg0, arg1);
7850 if (GET_CODE (tem) != CONST_INT)
7851 tem = gen_rtx_USE (mode, tem);
7852 return tem;
7854 case REG:
7855 case MULT:
7856 /* biv + invar or mult + invar. Return sum. */
7857 return gen_rtx_PLUS (mode, arg0, arg1);
7859 case PLUS:
7860 /* (a + invar_1) + invar_2. Associate. */
7861 return
7862 simplify_giv_expr (loop,
7863 gen_rtx_PLUS (mode,
7864 XEXP (arg0, 0),
7865 gen_rtx_PLUS (mode,
7866 XEXP (arg0, 1),
7867 arg1)),
7868 ext_val, benefit);
7870 default:
7871 abort ();
7874 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
7875 MULT to reduce cases. */
7876 if (REG_P (arg0))
7877 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
7878 if (REG_P (arg1))
7879 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
7881 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
7882 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
7883 Recurse to associate the second PLUS. */
7884 if (GET_CODE (arg1) == MULT)
7885 tem = arg0, arg0 = arg1, arg1 = tem;
7887 if (GET_CODE (arg1) == PLUS)
7888 return
7889 simplify_giv_expr (loop,
7890 gen_rtx_PLUS (mode,
7891 gen_rtx_PLUS (mode, arg0,
7892 XEXP (arg1, 0)),
7893 XEXP (arg1, 1)),
7894 ext_val, benefit);
7896 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
7897 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
7898 return NULL_RTX;
7900 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
7901 return NULL_RTX;
7903 return simplify_giv_expr (loop,
7904 gen_rtx_MULT (mode,
7905 XEXP (arg0, 0),
7906 gen_rtx_PLUS (mode,
7907 XEXP (arg0, 1),
7908 XEXP (arg1, 1))),
7909 ext_val, benefit);
7911 case MINUS:
7912 /* Handle "a - b" as "a + b * (-1)". */
7913 return simplify_giv_expr (loop,
7914 gen_rtx_PLUS (mode,
7915 XEXP (x, 0),
7916 gen_rtx_MULT (mode,
7917 XEXP (x, 1),
7918 constm1_rtx)),
7919 ext_val, benefit);
7921 case MULT:
7922 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
7923 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
7924 if (arg0 == 0 || arg1 == 0)
7925 return NULL_RTX;
7927 /* Put constant last, CONST_INT last if both constant. */
7928 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
7929 && GET_CODE (arg1) != CONST_INT)
7930 tem = arg0, arg0 = arg1, arg1 = tem;
7932 /* If second argument is not now constant, not giv. */
7933 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
7934 return NULL_RTX;
7936 /* Handle multiply by 0 or 1. */
7937 if (arg1 == const0_rtx)
7938 return const0_rtx;
7940 else if (arg1 == const1_rtx)
7941 return arg0;
7943 switch (GET_CODE (arg0))
7945 case REG:
7946 /* biv * invar. Done. */
7947 return gen_rtx_MULT (mode, arg0, arg1);
7949 case CONST_INT:
7950 /* Product of two constants. */
7951 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
7953 case USE:
7954 /* invar * invar is a giv, but attempt to simplify it somehow. */
7955 if (GET_CODE (arg1) != CONST_INT)
7956 return NULL_RTX;
7958 arg0 = XEXP (arg0, 0);
7959 if (GET_CODE (arg0) == MULT)
7961 /* (invar_0 * invar_1) * invar_2. Associate. */
7962 return simplify_giv_expr (loop,
7963 gen_rtx_MULT (mode,
7964 XEXP (arg0, 0),
7965 gen_rtx_MULT (mode,
7966 XEXP (arg0, 1),
7968 arg1)),
7969 ext_val, benefit);
7971 /* Propagate the MULT expressions to the innermost nodes. */
7972 else if (GET_CODE (arg0) == PLUS)
7974 /* (invar_0 + invar_1) * invar_2. Distribute. */
7975 return simplify_giv_expr (loop,
7976 gen_rtx_PLUS (mode,
7977 gen_rtx_MULT (mode,
7978 XEXP (arg0, 0),
7980 arg1),
7981 gen_rtx_MULT (mode,
7982 XEXP (arg0, 1),
7984 arg1)),
7985 ext_val, benefit);
7987 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
7989 case MULT:
7990 /* (a * invar_1) * invar_2. Associate. */
7991 return simplify_giv_expr (loop,
7992 gen_rtx_MULT (mode,
7993 XEXP (arg0, 0),
7994 gen_rtx_MULT (mode,
7995 XEXP (arg0, 1),
7996 arg1)),
7997 ext_val, benefit);
7999 case PLUS:
8000 /* (a + invar_1) * invar_2. Distribute. */
8001 return simplify_giv_expr (loop,
8002 gen_rtx_PLUS (mode,
8003 gen_rtx_MULT (mode,
8004 XEXP (arg0, 0),
8005 arg1),
8006 gen_rtx_MULT (mode,
8007 XEXP (arg0, 1),
8008 arg1)),
8009 ext_val, benefit);
8011 default:
8012 abort ();
8015 case ASHIFT:
8016 /* Shift by constant is multiply by power of two. */
8017 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8018 return 0;
8020 return
8021 simplify_giv_expr (loop,
8022 gen_rtx_MULT (mode,
8023 XEXP (x, 0),
8024 GEN_INT ((HOST_WIDE_INT) 1
8025 << INTVAL (XEXP (x, 1)))),
8026 ext_val, benefit);
8028 case NEG:
8029 /* "-a" is "a * (-1)" */
8030 return simplify_giv_expr (loop,
8031 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
8032 ext_val, benefit);
8034 case NOT:
8035 /* "~a" is "-a - 1". Silly, but easy. */
8036 return simplify_giv_expr (loop,
8037 gen_rtx_MINUS (mode,
8038 gen_rtx_NEG (mode, XEXP (x, 0)),
8039 const1_rtx),
8040 ext_val, benefit);
8042 case USE:
8043 /* Already in proper form for invariant. */
8044 return x;
8046 case SIGN_EXTEND:
8047 case ZERO_EXTEND:
8048 case TRUNCATE:
8049 /* Conditionally recognize extensions of simple IVs. After we've
8050 computed loop traversal counts and verified the range of the
8051 source IV, we'll reevaluate this as a GIV. */
8052 if (*ext_val == NULL_RTX)
8054 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
8055 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
8057 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
8058 return arg0;
8061 goto do_default;
8063 case REG:
8064 /* If this is a new register, we can't deal with it. */
8065 if (REGNO (x) >= max_reg_before_loop)
8066 return 0;
8068 /* Check for biv or giv. */
8069 switch (REG_IV_TYPE (ivs, REGNO (x)))
8071 case BASIC_INDUCT:
8072 return x;
8073 case GENERAL_INDUCT:
8075 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
8077 /* Form expression from giv and add benefit. Ensure this giv
8078 can derive another and subtract any needed adjustment if so. */
8080 /* Increasing the benefit here is risky. The only case in which it
8081 is arguably correct is if this is the only use of V. In other
8082 cases, this will artificially inflate the benefit of the current
8083 giv, and lead to suboptimal code. Thus, it is disabled, since
8084 potentially not reducing an only marginally beneficial giv is
8085 less harmful than reducing many givs that are not really
8086 beneficial. */
8088 rtx single_use = regs->array[REGNO (x)].single_usage;
8089 if (single_use && single_use != const0_rtx)
8090 *benefit += v->benefit;
8093 if (v->cant_derive)
8094 return 0;
8096 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
8097 v->src_reg, v->mult_val),
8098 v->add_val);
8100 if (v->derive_adjustment)
8101 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
8102 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
8103 if (*ext_val)
8105 if (!v->ext_dependent)
8106 return arg0;
8108 else
8110 *ext_val = v->ext_dependent;
8111 return arg0;
8113 return 0;
8116 default:
8117 do_default:
8118 /* If it isn't an induction variable, and it is invariant, we
8119 may be able to simplify things further by looking through
8120 the bits we just moved outside the loop. */
8121 if (loop_invariant_p (loop, x) == 1)
8123 struct movable *m;
8124 struct loop_movables *movables = LOOP_MOVABLES (loop);
8126 for (m = movables->head; m; m = m->next)
8127 if (rtx_equal_p (x, m->set_dest))
8129 /* Ok, we found a match. Substitute and simplify. */
8131 /* If we match another movable, we must use that, as
8132 this one is going away. */
8133 if (m->match)
8134 return simplify_giv_expr (loop, m->match->set_dest,
8135 ext_val, benefit);
8137 /* If consec is nonzero, this is a member of a group of
8138 instructions that were moved together. We handle this
8139 case only to the point of seeking to the last insn and
8140 looking for a REG_EQUAL. Fail if we don't find one. */
8141 if (m->consec != 0)
8143 int i = m->consec;
8144 tem = m->insn;
8145 do
8147 tem = NEXT_INSN (tem);
8149 while (--i > 0);
8151 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
8152 if (tem)
8153 tem = XEXP (tem, 0);
8155 else
8157 tem = single_set (m->insn);
8158 if (tem)
8159 tem = SET_SRC (tem);
8162 if (tem)
8164 /* What we are most interested in is pointer
8165 arithmetic on invariants -- only take
8166 patterns we may be able to do something with. */
8167 if (GET_CODE (tem) == PLUS
8168 || GET_CODE (tem) == MULT
8169 || GET_CODE (tem) == ASHIFT
8170 || GET_CODE (tem) == CONST_INT
8171 || GET_CODE (tem) == SYMBOL_REF)
8173 tem = simplify_giv_expr (loop, tem, ext_val,
8174 benefit);
8175 if (tem)
8176 return tem;
8178 else if (GET_CODE (tem) == CONST
8179 && GET_CODE (XEXP (tem, 0)) == PLUS
8180 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
8181 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
8183 tem = simplify_giv_expr (loop, XEXP (tem, 0),
8184 ext_val, benefit);
8185 if (tem)
8186 return tem;
8189 break;
8192 break;
8195 /* Fall through to general case. */
8196 default:
8197 /* If invariant, return as USE (unless CONST_INT).
8198 Otherwise, not giv. */
8199 if (GET_CODE (x) == USE)
8200 x = XEXP (x, 0);
8202 if (loop_invariant_p (loop, x) == 1)
8204 if (GET_CODE (x) == CONST_INT)
8205 return x;
8206 if (GET_CODE (x) == CONST
8207 && GET_CODE (XEXP (x, 0)) == PLUS
8208 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8209 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
8210 x = XEXP (x, 0);
8211 return gen_rtx_USE (mode, x);
8213 else
8214 return 0;
8218 /* This routine folds invariants such that there is only ever one
8219 CONST_INT in the summation. It is only used by simplify_giv_expr. */
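/* E.g. (illustrative): sge_plus_constant applied to
   (plus sym (const_int 3)) and (const_int 4) folds to
   (plus sym (const_int 7)) rather than nesting a second CONST_INT
   into the sum.  */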
8221 static rtx
8222 sge_plus_constant (rtx x, rtx c)
8224 if (GET_CODE (x) == CONST_INT)
8225 return GEN_INT (INTVAL (x) + INTVAL (c));
8226 else if (GET_CODE (x) != PLUS)
8227 return gen_rtx_PLUS (GET_MODE (x), x, c);
8228 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8230 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8231 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
8233 else if (GET_CODE (XEXP (x, 0)) == PLUS
8234 || GET_CODE (XEXP (x, 1)) != PLUS)
8236 return gen_rtx_PLUS (GET_MODE (x),
8237 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
8239 else
8241 return gen_rtx_PLUS (GET_MODE (x),
8242 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
8246 static rtx
8247 sge_plus (enum machine_mode mode, rtx x, rtx y)
8249 while (GET_CODE (y) == PLUS)
8251 rtx a = XEXP (y, 0);
8252 if (GET_CODE (a) == CONST_INT)
8253 x = sge_plus_constant (x, a);
8254 else
8255 x = gen_rtx_PLUS (mode, x, a);
8256 y = XEXP (y, 1);
8258 if (GET_CODE (y) == CONST_INT)
8259 x = sge_plus_constant (x, y);
8260 else
8261 x = gen_rtx_PLUS (mode, x, y);
8262 return x;
8265 /* Help detect a giv that is calculated by several consecutive insns;
8266 for example,
8267 giv = biv * M
8268 giv = giv + A
8269 The caller has already identified the first insn P as having a giv as dest;
8270 we check that all other insns that set the same register follow
8271 immediately after P, that they alter nothing else,
8272 and that the result of the last is still a giv.
8274 The value is 0 if the reg set in P is not really a giv.
8275 Otherwise, the value is the amount gained by eliminating
8276 all the consecutive insns that compute the value.
8278 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
8279 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
8281 The coefficients of the ultimate giv value are stored in
8282 *MULT_VAL and *ADD_VAL. */
8284 static int
8285 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
8286 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
8287 rtx *ext_val, rtx *last_consec_insn)
8289 struct loop_ivs *ivs = LOOP_IVS (loop);
8290 struct loop_regs *regs = LOOP_REGS (loop);
8291 int count;
8292 enum rtx_code code;
8293 int benefit;
8294 rtx temp;
8295 rtx set;
8297 /* Indicate that this is a giv so that we can update the value produced in
8298 each insn of the multi-insn sequence.
8300 This induction structure will be used only by the call to
8301 general_induction_var below, so we can allocate it on our stack.
8302 If this is a giv, our caller will replace the induct var entry with
8303 a new induction structure. */
8304 struct induction *v;
8306 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
8307 return 0;
8309 v = alloca (sizeof (struct induction));
8310 v->src_reg = src_reg;
8311 v->mult_val = *mult_val;
8312 v->add_val = *add_val;
8313 v->benefit = first_benefit;
8314 v->cant_derive = 0;
8315 v->derive_adjustment = 0;
8316 v->ext_dependent = NULL_RTX;
8318 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
8319 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
8321 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
8323 while (count > 0)
8325 p = NEXT_INSN (p);
8326 code = GET_CODE (p);
8328 /* If libcall, skip to end of call sequence. */
8329 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
8330 p = XEXP (temp, 0);
8332 if (code == INSN
8333 && (set = single_set (p))
8334 && REG_P (SET_DEST (set))
8335 && SET_DEST (set) == dest_reg
8336 && (general_induction_var (loop, SET_SRC (set), &src_reg,
8337 add_val, mult_val, ext_val, 0,
8338 &benefit, VOIDmode)
8339 /* Giv created by equivalent expression. */
8340 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
8341 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
8342 add_val, mult_val, ext_val, 0,
8343 &benefit, VOIDmode)))
8344 && src_reg == v->src_reg)
8346 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
8347 benefit += libcall_benefit (p);
8349 count--;
8350 v->mult_val = *mult_val;
8351 v->add_val = *add_val;
8352 v->benefit += benefit;
8354 else if (code != NOTE)
8356 /* Allow insns that set something other than this giv to a
8357 constant. Such insns are needed on machines which cannot
8358 include long constants and should not disqualify a giv. */
8359 if (code == INSN
8360 && (set = single_set (p))
8361 && SET_DEST (set) != dest_reg
8362 && CONSTANT_P (SET_SRC (set)))
8363 continue;
8365 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8366 return 0;
8370 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8371 *last_consec_insn = p;
8372 return v->benefit;
8375 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8376 represented by G1. If no such expression can be found, or it is clear that
8377 it cannot possibly be a valid address, 0 is returned.
8379 To perform the computation, we note that
8380 G1 = x * v + a and
8381 G2 = y * v + b
8382 where `v' is the biv.
8384 So G2 = (y/x) * G1 + (b - a*y/x).
8386 Note that MULT = y/x.
8388 Update: A and B are now allowed to be additive expressions such that
8389 B contains all variables in A. That is, computing B-A will not require
8390 subtracting variables. */
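/* Worked numbers (hypothetical): with G1 = 2*v + 4 and G2 = 6*v + 20,
   MULT = 6/2 = 3 and the addend is 20 - 4*3 = 8, so G2 = 3*G1 + 8.  */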
8392 static rtx
8393 express_from_1 (rtx a, rtx b, rtx mult)
8395 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
8397 if (mult == const0_rtx)
8398 return b;
8400 /* If MULT is not 1, we cannot handle A with non-constants, since we
8401 would then be required to subtract multiples of the registers in A.
8402 This is theoretically possible, and may even apply to some Fortran
8403 constructs, but it is a lot of work and we do not attempt it here. */
8405 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
8406 return NULL_RTX;
8408 /* In general these structures are sorted top to bottom (down the PLUS
8409 chain), but not left to right across the PLUS. If B is a higher
8410 order giv than A, we can strip one level and recurse. If A is higher
8411 order, we'll eventually bail out, but won't know that until the end.
8412 If they are the same, we'll strip one level around this loop. */
8414 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
8416 rtx ra, rb, oa, ob, tmp;
8418 ra = XEXP (a, 0), oa = XEXP (a, 1);
8419 if (GET_CODE (ra) == PLUS)
8420 tmp = ra, ra = oa, oa = tmp;
8422 rb = XEXP (b, 0), ob = XEXP (b, 1);
8423 if (GET_CODE (rb) == PLUS)
8424 tmp = rb, rb = ob, ob = tmp;
8426 if (rtx_equal_p (ra, rb))
8427 /* We matched: remove one reg completely. */
8428 a = oa, b = ob;
8429 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
8430 /* An alternate match. */
8431 a = oa, b = rb;
8432 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
8433 /* An alternate match. */
8434 a = ra, b = ob;
8435 else
8437 /* Indicates an extra register in B. Strip one level from B and
8438 recurse, hoping B was the higher order expression. */
8439 ob = express_from_1 (a, ob, mult);
8440 if (ob == NULL_RTX)
8441 return NULL_RTX;
8442 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
8446 /* Here we are at the last level of A; go through the cases hoping to
8447 get rid of everything but a constant. */
8449 if (GET_CODE (a) == PLUS)
8451 rtx ra, oa;
8453 ra = XEXP (a, 0), oa = XEXP (a, 1);
8454 if (rtx_equal_p (oa, b))
8455 oa = ra;
8456 else if (!rtx_equal_p (ra, b))
8457 return NULL_RTX;
8459 if (GET_CODE (oa) != CONST_INT)
8460 return NULL_RTX;
8462 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
8464 else if (GET_CODE (a) == CONST_INT)
8466 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
8468 else if (CONSTANT_P (a))
8470 enum machine_mode mode_a = GET_MODE (a);
8471 enum machine_mode mode_b = GET_MODE (b);
8472 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
8473 return simplify_gen_binary (MINUS, mode, b, a);
8475 else if (GET_CODE (b) == PLUS)
8477 if (rtx_equal_p (a, XEXP (b, 0)))
8478 return XEXP (b, 1);
8479 else if (rtx_equal_p (a, XEXP (b, 1)))
8480 return XEXP (b, 0);
8481 else
8482 return NULL_RTX;
8484 else if (rtx_equal_p (a, b))
8485 return const0_rtx;
8487 return NULL_RTX;
8490 static rtx
8491 express_from (struct induction *g1, struct induction *g2)
8493 rtx mult, add;
8495 /* The value that G1 will be multiplied by must be a constant integer. Also,
8496 the only chance we have of getting a valid address is if y/x (see above
8497 for notation) is also an integer. */
8498 if (GET_CODE (g1->mult_val) == CONST_INT
8499 && GET_CODE (g2->mult_val) == CONST_INT)
8501 if (g1->mult_val == const0_rtx
8502 || (g1->mult_val == constm1_rtx
8503 && INTVAL (g2->mult_val)
8504 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
8505 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
8506 return NULL_RTX;
8507 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
8509 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
8510 mult = const1_rtx;
8511 else
8513 /* ??? Find out if the one is a multiple of the other? */
8514 return NULL_RTX;
8517 add = express_from_1 (g1->add_val, g2->add_val, mult);
8518 if (add == NULL_RTX)
8520 /* Failed. If we've got a multiplication factor between G1 and G2,
8521 scale G1's addend and try again. */
8522 if (INTVAL (mult) > 1)
8524 rtx g1_add_val = g1->add_val;
8525 if (GET_CODE (g1_add_val) == MULT
8526 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
8528 HOST_WIDE_INT m;
8529 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
8530 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
8531 XEXP (g1_add_val, 0), GEN_INT (m));
8533 else
8535 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
8536 mult);
8539 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
8542 if (add == NULL_RTX)
8543 return NULL_RTX;
8545 /* Form simplified final result. */
8546 if (mult == const0_rtx)
8547 return add;
8548 else if (mult == const1_rtx)
8549 mult = g1->dest_reg;
8550 else
8551 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
8553 if (add == const0_rtx)
8554 return mult;
8555 else
8557 if (GET_CODE (add) == PLUS
8558 && CONSTANT_P (XEXP (add, 1)))
8560 rtx tem = XEXP (add, 1);
8561 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
8562 add = tem;
8565 return gen_rtx_PLUS (g2->mode, mult, add);
8569 /* Return an rtx, if any, that expresses giv G2 as a function of the register
8570 represented by G1. This indicates that G2 should be combined with G1 and
8571 that G2 can use (either directly or via an address expression) a register
8572 used to represent G1. */
8574 static rtx
8575 combine_givs_p (struct induction *g1, struct induction *g2)
8577 rtx comb, ret;
8579 /* With the introduction of ext dependent givs, we must be careful about modes:
8580 G2 must not use a wider mode than G1. */
8581 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
8582 return NULL_RTX;
8584 ret = comb = express_from (g1, g2);
8585 if (comb == NULL_RTX)
8586 return NULL_RTX;
8587 if (g1->mode != g2->mode)
8588 ret = gen_lowpart (g2->mode, comb);
8590 /* If these givs are identical, they can be combined. We use the results
8591 of express_from because the addends are not in a canonical form, so
8592 rtx_equal_p is a weaker test. */
8593 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
8594 combination to be the other way round. */
8595 if (comb == g1->dest_reg
8596 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
8598 return ret;
8601 /* If G2 can be expressed as a function of G1 and that function is valid
8602 as an address and no more expensive than using a register for G2,
8603 the expression of G2 in terms of G1 can be used. */
8604 if (ret != NULL_RTX
8605 && g2->giv_type == DEST_ADDR
8606 && memory_address_p (GET_MODE (g2->mem), ret))
8607 return ret;
8609 return NULL_RTX;
8612 /* Check each extension dependent giv in this class to see if its
8613 root biv is safe from wrapping in the interior mode, which would
8614 make the giv illegal. */
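/* Hypothetical instance: a QImode biv running from 0 by +1 for 200
   iterations ends at 200, which fits the unsigned range [0, 255] but
   not the signed range [-128, 127]; a ZERO_EXTEND giv of that biv is
   safe, while a SIGN_EXTEND giv must be invalidated.  */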
8616 static void
8617 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
8619 struct loop_info *loop_info = LOOP_INFO (loop);
8620 int ze_ok = 0, se_ok = 0, info_ok = 0;
8621 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
8622 HOST_WIDE_INT start_val;
8623 unsigned HOST_WIDE_INT u_end_val = 0;
8624 unsigned HOST_WIDE_INT u_start_val = 0;
8625 rtx incr = pc_rtx;
8626 struct induction *v;
8628 /* Make sure the iteration data is available. We must have
8629 constants in order to be certain of no overflow. */
8630 if (loop_info->n_iterations > 0
8631 && bl->initial_value
8632 && GET_CODE (bl->initial_value) == CONST_INT
8633 && (incr = biv_total_increment (bl))
8634 && GET_CODE (incr) == CONST_INT
8635 /* Make sure the host can represent the arithmetic. */
8636 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
8638 unsigned HOST_WIDE_INT abs_incr, total_incr;
8639 HOST_WIDE_INT s_end_val;
8640 int neg_incr;
8642 info_ok = 1;
8643 start_val = INTVAL (bl->initial_value);
8644 u_start_val = start_val;
8646 neg_incr = 0, abs_incr = INTVAL (incr);
8647 if (INTVAL (incr) < 0)
8648 neg_incr = 1, abs_incr = -abs_incr;
8649 total_incr = abs_incr * loop_info->n_iterations;
8651 /* Check for host arithmetic overflow. */
8652 if (total_incr / loop_info->n_iterations == abs_incr)
8654 unsigned HOST_WIDE_INT u_max;
8655 HOST_WIDE_INT s_max;
8657 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
8658 s_end_val = u_end_val;
8659 u_max = GET_MODE_MASK (biv_mode);
8660 s_max = u_max >> 1;
8662 /* Check zero extension of biv ok. */
8663 if (start_val >= 0
8664 /* Check for host arithmetic overflow. */
8665 && (neg_incr
8666 ? u_end_val < u_start_val
8667 : u_end_val > u_start_val)
8668 /* Check for target arithmetic overflow. */
8669 && (neg_incr
8670 ? 1 /* taken care of with host overflow */
8671 : u_end_val <= u_max))
8673 ze_ok = 1;
8676 /* Check sign extension of biv ok. */
8677 /* ??? While it is true that overflow with signed and pointer
8678 arithmetic is undefined, I fear too many programmers don't
8679 keep this fact in mind -- myself included on occasion.
8680 So be conservative with the signed overflow optimizations. */
8681 if (start_val >= -s_max - 1
8682 /* Check for host arithmetic overflow. */
8683 && (neg_incr
8684 ? s_end_val < start_val
8685 : s_end_val > start_val)
8686 /* Check for target arithmetic overflow. */
8687 && (neg_incr
8688 ? s_end_val >= -s_max - 1
8689 : s_end_val <= s_max))
8691 se_ok = 1;
8696 /* If we know the BIV is compared at run-time against an
8697 invariant value, and the increment is +/- 1, we may also
8698 be able to prove that the BIV cannot overflow. */
8699 else if (bl->biv->src_reg == loop_info->iteration_var
8700 && loop_info->comparison_value
8701 && loop_invariant_p (loop, loop_info->comparison_value)
8702 && (incr = biv_total_increment (bl))
8703 && GET_CODE (incr) == CONST_INT)
8705 /* If the increment is +1, and the exit test is a <,
8706 the BIV cannot overflow. (For <=, we have the
8707 problematic case that the comparison value might
8708 be the maximum value of the range.) */
8709 if (INTVAL (incr) == 1)
8711 if (loop_info->comparison_code == LT)
8712 se_ok = ze_ok = 1;
8713 else if (loop_info->comparison_code == LTU)
8714 ze_ok = 1;
8717 /* Likewise for increment -1 and exit test >. */
8718 if (INTVAL (incr) == -1)
8720 if (loop_info->comparison_code == GT)
8721 se_ok = ze_ok = 1;
8722 else if (loop_info->comparison_code == GTU)
8723 ze_ok = 1;
8727 /* Invalidate givs that fail the tests. */
8728 for (v = bl->giv; v; v = v->next_iv)
8729 if (v->ext_dependent)
8731 enum rtx_code code = GET_CODE (v->ext_dependent);
8732 int ok = 0;
8734 switch (code)
8736 case SIGN_EXTEND:
8737 ok = se_ok;
8738 break;
8739 case ZERO_EXTEND:
8740 ok = ze_ok;
8741 break;
8743 case TRUNCATE:
8744 /* We don't know whether this value is being used as either
8745 signed or unsigned, so to safely truncate we must satisfy
8746 both. The initial check here verifies the BIV itself;
8747 once that is successful we may check its range wrt the
8748 derived GIV. This works only if we were able to determine
8749 constant start and end values above. */
8750 if (se_ok && ze_ok && info_ok)
8752 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
8753 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
8755 /* We know from the above that both endpoints are nonnegative,
8756 and that there is no wrapping. Verify that both endpoints
8757 are within the (signed) range of the outer mode. */
8758 if (u_start_val <= max && u_end_val <= max)
8759 ok = 1;
8761 break;
8763 default:
8764 abort ();
8767 if (ok)
8769 if (loop_dump_stream)
8771 fprintf (loop_dump_stream,
8772 "Verified ext dependent giv at %d of reg %d\n",
8773 INSN_UID (v->insn), bl->regno);
8776 else
8778 if (loop_dump_stream)
8780 const char *why;
8782 if (info_ok)
8783 why = "biv iteration values overflowed";
8784 else
8786 if (incr == pc_rtx)
8787 incr = biv_total_increment (bl);
8788 if (incr == const1_rtx)
8789 why = "biv iteration info incomplete; incr by 1";
8790 else
8791 why = "biv iteration info incomplete";
8794 fprintf (loop_dump_stream,
8795 "Failed ext dependent giv at %d, %s\n",
8796 INSN_UID (v->insn), why);
8798 v->ignore = 1;
8799 bl->all_reduced = 0;
8804 /* Generate a version of VALUE in a mode appropriate for initializing V. */
8806 static rtx
8807 extend_value_for_giv (struct induction *v, rtx value)
8809 rtx ext_dep = v->ext_dependent;
8811 if (! ext_dep)
8812 return value;
8814 /* Recall that check_ext_dependent_givs verified that the known bounds
8815 of a biv did not overflow or wrap with respect to the extension for
8816 the giv. Therefore, constants need no additional adjustment. */
8817 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
8818 return value;
8820 /* Otherwise, we must adjust the value to compensate for the
8821 differing modes of the biv and the giv. */
8822 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
8825 struct combine_givs_stats
8827 int giv_number;
8828 int total_benefit;
8831 static int
8832 cmp_combine_givs_stats (const void *xp, const void *yp)
8834 const struct combine_givs_stats * const x =
8835 (const struct combine_givs_stats *) xp;
8836 const struct combine_givs_stats * const y =
8837 (const struct combine_givs_stats *) yp;
8838 int d;
8839 d = y->total_benefit - x->total_benefit;
8840 /* Stabilize the sort. */
8841 if (!d)
8842 d = x->giv_number - y->giv_number;
8843 return d;
8846 /* Check all pairs of givs for iv_class BL and see if any can be combined with
8847 any other. If so, point SAME to the giv combined with and set NEW_REG to
8848 be an expression (in terms of the other giv's DEST_REG) equivalent to the
8849 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
8851 static void
8852 combine_givs (struct loop_regs *regs, struct iv_class *bl)
8854 /* Additional benefit to add for being combined multiple times. */
8855 const int extra_benefit = 3;
8857 struct induction *g1, *g2, **giv_array;
8858 int i, j, k, giv_count;
8859 struct combine_givs_stats *stats;
8860 rtx *can_combine;
8862 /* Count givs, because bl->giv_count is incorrect here. */
8863 giv_count = 0;
8864 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8865 if (!g1->ignore)
8866 giv_count++;
8868 giv_array = alloca (giv_count * sizeof (struct induction *));
8869 i = 0;
8870 for (g1 = bl->giv; g1; g1 = g1->next_iv)
8871 if (!g1->ignore)
8872 giv_array[i++] = g1;
8874 stats = xcalloc (giv_count, sizeof (*stats));
8875 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
8877 for (i = 0; i < giv_count; i++)
8879 int this_benefit;
8880 rtx single_use;
8882 g1 = giv_array[i];
8883 stats[i].giv_number = i;
8885 /* If a DEST_REG GIV is used only once, do not allow it to combine
8886 with anything, for in doing so we will gain nothing that cannot
8887 be had by simply letting the GIV with which we would have combined
8888 to be reduced on its own. The lossage shows up in particular with
8889 DEST_ADDR targets on hosts with reg+reg addressing, though it can
8890 be seen elsewhere as well. */
8891 if (g1->giv_type == DEST_REG
8892 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
8893 && single_use != const0_rtx)
8894 continue;
8896 this_benefit = g1->benefit;
8897 /* Add an additional weight for zero addends. */
8898 if (g1->no_const_addval)
8899 this_benefit += 1;
8901 for (j = 0; j < giv_count; j++)
8903 rtx this_combine;
8905 g2 = giv_array[j];
8906 if (g1 != g2
8907 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
8909 can_combine[i * giv_count + j] = this_combine;
8910 this_benefit += g2->benefit + extra_benefit;
8913 stats[i].total_benefit = this_benefit;
8916 /* Iterate, combining until we can't. */
8917 restart:
8918 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
8920 if (loop_dump_stream)
8922 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
8923 for (k = 0; k < giv_count; k++)
8925 g1 = giv_array[stats[k].giv_number];
8926 if (!g1->combined_with && !g1->same)
8927 fprintf (loop_dump_stream, " {%d, %d}",
8928 INSN_UID (giv_array[stats[k].giv_number]->insn),
8929 stats[k].total_benefit);
8931 putc ('\n', loop_dump_stream);
8934 for (k = 0; k < giv_count; k++)
8936 int g1_add_benefit = 0;
8938 i = stats[k].giv_number;
8939 g1 = giv_array[i];
8941 /* If it has already been combined, skip. */
8942 if (g1->combined_with || g1->same)
8943 continue;
8945 for (j = 0; j < giv_count; j++)
8947 g2 = giv_array[j];
8948 if (g1 != g2 && can_combine[i * giv_count + j]
8949 /* If it has already been combined, skip. */
8950 && ! g2->same && ! g2->combined_with)
8952 int l;
8954 g2->new_reg = can_combine[i * giv_count + j];
8955 g2->same = g1;
8956 /* For the destination, we may now substitute a mem expression for
8957 the register. This changes the costs considerably, so add the
8958 compensation. */
8959 if (g2->giv_type == DEST_ADDR)
8960 g2->benefit = (g2->benefit + reg_address_cost
8961 - address_cost (g2->new_reg,
8962 GET_MODE (g2->mem)));
8963 g1->combined_with++;
8964 g1->lifetime += g2->lifetime;
8966 g1_add_benefit += g2->benefit;
8968 /* ??? The new final_[bg]iv_value code does a much better job
8969 of finding replaceable giv's, and hence this code may no
8970 longer be necessary. */
8971 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
8972 g1_add_benefit -= copy_cost;
8974 /* To help optimize the next set of combinations, remove
8975 this giv from the benefits of other potential mates. */
8976 for (l = 0; l < giv_count; ++l)
8978 int m = stats[l].giv_number;
8979 if (can_combine[m * giv_count + j])
8980 stats[l].total_benefit -= g2->benefit + extra_benefit;
8983 if (loop_dump_stream)
8984 fprintf (loop_dump_stream,
8985 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
8986 INSN_UID (g2->insn), INSN_UID (g1->insn),
8987 g1->benefit, g1_add_benefit, g1->lifetime);
8991 /* To help optimize the next set of combinations, remove
8992 this giv from the benefits of other potential mates. */
8993 if (g1->combined_with)
8995 for (j = 0; j < giv_count; ++j)
8997 int m = stats[j].giv_number;
8998 if (can_combine[m * giv_count + i])
8999 stats[j].total_benefit -= g1->benefit + extra_benefit;
9002 g1->benefit += g1_add_benefit;
9004 /* We've finished with this giv, and everything it touched.
9005 Restart the combination so that the weights for the
9006 rest of the givs are properly taken into account. */
9007 /* ??? Ideally we would compact the arrays at this point, so
9008 as to not cover old ground. But sanely compacting
9009 can_combine is tricky. */
9010 goto restart;
9014 /* Clean up. */
9015 free (stats);
9016 free (can_combine);
9019 /* Generate sequence for REG = B * M + A. B is the initial value of
9020 the basic induction variable, M a multiplicative constant, A an
9021 additive constant and REG the destination register. */
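/* A caller might use this roughly as (hypothetical sketch):
     seq = gen_add_mult (copy_rtx (bl->initial_value),
                         copy_rtx (v->mult_val),
                         copy_rtx (v->add_val), v->new_reg);
   and then emit SEQ in the loop pre-header, as the loop_iv_add_mult_*
   wrappers below do.  */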
9023 static rtx
9024 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
9026 rtx seq;
9027 rtx result;
9029 start_sequence ();
9030 /* Use unsigned arithmetic. */
9031 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9032 if (reg != result)
9033 emit_move_insn (reg, result);
9034 seq = get_insns ();
9035 end_sequence ();
9037 return seq;
9041 /* Update registers created in insn sequence SEQ. */
9043 static void
9044 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
9046 rtx insn;
9048 /* Update register info for alias analysis. */
9050 insn = seq;
9051 while (insn != NULL_RTX)
9053 rtx set = single_set (insn);
9055 if (set && REG_P (SET_DEST (set)))
9056 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
9058 insn = NEXT_INSN (insn);
9063 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
9064 is the initial value of the basic induction variable, M a
9065 multiplicative constant, A an additive constant and REG the
9066 destination register. */
9068 static void
9069 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
9070 rtx reg, basic_block before_bb, rtx before_insn)
9072 rtx seq;
9074 if (! before_insn)
9076 loop_iv_add_mult_hoist (loop, b, m, a, reg);
9077 return;
9080 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9081 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9083 /* Increase the lifetime of any invariants moved further in code. */
9084 update_reg_last_use (a, before_insn);
9085 update_reg_last_use (b, before_insn);
9086 update_reg_last_use (m, before_insn);
9088 /* It is possible that the expansion created lots of new registers.
9089 Iterate over the sequence we just created and record them all. We
9090 must do this before inserting the sequence. */
9091 loop_regs_update (loop, seq);
9093 loop_insn_emit_before (loop, before_bb, before_insn, seq);
9097 /* Emit insns in loop pre-header to set REG = B * M + A. B is the
9098 initial value of the basic induction variable, M a multiplicative
9099 constant, A an additive constant and REG the destination
9100 register. */
9102 static void
9103 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9105 rtx seq;
9107 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9108 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9110 /* Increase the lifetime of any invariants moved further in code.
9111 ???? Is this really necessary? */
9112 update_reg_last_use (a, loop->sink);
9113 update_reg_last_use (b, loop->sink);
9114 update_reg_last_use (m, loop->sink);
9116 /* It is possible that the expansion created lots of new registers.
9117 Iterate over the sequence we just created and record them all. We
9118 must do this before inserting the sequence. */
9119 loop_regs_update (loop, seq);
9121 loop_insn_sink (loop, seq);
9125 /* Emit insns in the loop pre-header to set REG = B * M + A. B is the
9126 initial value of the basic induction variable, M a multiplicative
9127 constant, A an additive constant and REG the destination register. */
9129 static void
9130 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
9132 rtx seq;
9134 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
9135 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
9137 /* It is possible that the expansion created lots of new registers.
9138 Iterate over the sequence we just created and record them all. We
9139 must do this before inserting the sequence. */
9140 loop_regs_update (loop, seq);
9142 loop_insn_hoist (loop, seq);
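/* Illustrative placement of the three loop_iv_add_mult_* emitters,
   assuming a conventional loop layout (a sketch, not a guarantee):

       pre-header:  REG = B * M + A   <- loop_iv_add_mult_hoist
       loop body:   REG = B * M + A   <- loop_iv_add_mult_emit_before
                    (immediately before BEFORE_INSN)
       exit (sink): REG = B * M + A   <- loop_iv_add_mult_sink  */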
9147 /* Similar to gen_add_mult, but compute the cost rather than generating
9148 the sequence. */
9150 static int
9151 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
9153 int cost = 0;
9154 rtx last, result;
9156 start_sequence ();
9157 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
9158 if (reg != result)
9159 emit_move_insn (reg, result);
9160 last = get_last_insn ();
9161 while (last)
9163 rtx t = single_set (last);
9164 if (t)
9165 cost += rtx_cost (SET_SRC (t), SET);
9166 last = PREV_INSN (last);
9168 end_sequence ();
9169 return cost;
9172 /* Test whether A * B can be computed without
9173 an actual multiply insn. Value is 1 if so.
9175 ??? This function stinks because it generates a ton of wasted RTL
9176 ??? and as a result fragments GC memory to no end. There are other
9177 ??? places in the compiler which are invoked a lot and do the same
9178 ??? thing, generate wasted RTL just to see if something is possible. */
9180 static int
9181 product_cheap_p (rtx a, rtx b)
9183 rtx tmp;
9184 int win, n_insns;
9186 /* If only one is constant, make it B. */
9187 if (GET_CODE (a) == CONST_INT)
9188 tmp = a, a = b, b = tmp;
9190 /* If the first is constant, both are constant, so we don't need a multiply. */
9191 if (GET_CODE (a) == CONST_INT)
9192 return 1;
9194 /* If the second is not constant, neither is, so we would need a multiply. */
9195 if (GET_CODE (b) != CONST_INT)
9196 return 0;
9198 /* One operand is constant, so we might not need a multiply insn. Generate
9199 the code for the multiply and see whether a call, a multiply, or a long
9200 sequence of insns is generated. */
9202 start_sequence ();
9203 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
9204 tmp = get_insns ();
9205 end_sequence ();
9207 win = 1;
9208 if (tmp == NULL_RTX)
9210 else if (INSN_P (tmp))
9212 n_insns = 0;
9213 while (tmp != NULL_RTX)
9215 rtx next = NEXT_INSN (tmp);
9217 if (++n_insns > 3
9218 || !NONJUMP_INSN_P (tmp)
9219 || (GET_CODE (PATTERN (tmp)) == SET
9220 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
9221 || (GET_CODE (PATTERN (tmp)) == PARALLEL
9222 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
9223 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
9225 win = 0;
9226 break;
9229 tmp = next;
9232 else if (GET_CODE (tmp) == SET
9233 && GET_CODE (SET_SRC (tmp)) == MULT)
9234 win = 0;
9235 else if (GET_CODE (tmp) == PARALLEL
9236 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
9237 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
9238 win = 0;
9240 return win;
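/* For example (a sketch; the exact expansion is target-dependent):
   product_cheap_p (x, GEN_INT (5)) will usually return 1, because
   5*x expands to something like (x << 2) + x -- at most three cheap
   insns -- whereas two non-constant operands always return 0, since
   they would force a real multiply.  */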
9243 /* Check to see if the loop can be terminated by a "decrement and branch
9244 until zero" instruction. If so, add a REG_NONNEG note to the branch insn.
9245 Also try reversing an increment loop into a decrement loop
9246 to see if the optimization can be performed.
9247 Value is nonzero if the optimization was performed. */
9249 /* This is useful even if the architecture doesn't have such an insn,
9250 because it might change a loop that increments from 0 to n into a loop
9251 which decrements from n to 0. A loop that decrements to zero is usually
9252 faster than one that increments from zero. */
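/* A source-level sketch of the transformation (assuming I is used
   only to count iterations):

       for (i = 0; i < n; i++)    becomes    for (i = n; --i >= 0; )
         body;                                 body;

   so the exit test can use a decrement-and-branch-until-zero insn.  */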
9254 /* ??? This could be rewritten to use some of the loop unrolling procedures,
9255 such as approx_final_value, biv_total_increment, loop_iterations, and
9256 final_[bg]iv_value. */
9258 static int
9259 check_dbra_loop (struct loop *loop, int insn_count)
9261 struct loop_info *loop_info = LOOP_INFO (loop);
9262 struct loop_regs *regs = LOOP_REGS (loop);
9263 struct loop_ivs *ivs = LOOP_IVS (loop);
9264 struct iv_class *bl;
9265 rtx reg;
9266 enum machine_mode mode;
9267 rtx jump_label;
9268 rtx final_value;
9269 rtx start_value;
9270 rtx new_add_val;
9271 rtx comparison;
9272 rtx before_comparison;
9273 rtx p;
9274 rtx jump;
9275 rtx first_compare;
9276 int compare_and_branch;
9277 rtx loop_start = loop->start;
9278 rtx loop_end = loop->end;
9280 /* If last insn is a conditional branch, and the insn before tests a
9281 register value, try to optimize it. Otherwise, we can't do anything. */
9283 jump = PREV_INSN (loop_end);
9284 comparison = get_condition_for_loop (loop, jump);
9285 if (comparison == 0)
9286 return 0;
9287 if (!onlyjump_p (jump))
9288 return 0;
9290 /* Try to compute whether the compare/branch at the loop end is one or
9291 two instructions. */
9292 get_condition (jump, &first_compare, false, true);
9293 if (first_compare == jump)
9294 compare_and_branch = 1;
9295 else if (first_compare == prev_nonnote_insn (jump))
9296 compare_and_branch = 2;
9297 else
9298 return 0;
9301 /* If more than one condition is present to control the loop, then
9302 do not proceed, as this function does not know how to rewrite
9303 loop tests with more than one condition.
9305 Look backwards from the first insn in the last comparison
9306 sequence and see if we've got another comparison sequence. */
9308 rtx jump1;
9309 if ((jump1 = prev_nonnote_insn (first_compare))
9310 && JUMP_P (jump1))
9311 return 0;
9314 /* Check all of the bivs to see if the compare uses one of them.
9315 Skip bivs set more than once because we can't guarantee that
9316 they will be zero on the last iteration. Also skip if the biv is
9317 used between its update and the test insn. */
9319 for (bl = ivs->list; bl; bl = bl->next)
9321 if (bl->biv_count == 1
9322 && ! bl->biv->maybe_multiple
9323 && bl->biv->dest_reg == XEXP (comparison, 0)
9324 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9325 first_compare))
9326 break;
9329 /* Try swapping the comparison to identify a suitable biv. */
9330 if (!bl)
9331 for (bl = ivs->list; bl; bl = bl->next)
9332 if (bl->biv_count == 1
9333 && ! bl->biv->maybe_multiple
9334 && bl->biv->dest_reg == XEXP (comparison, 1)
9335 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
9336 first_compare))
9338 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
9339 VOIDmode,
9340 XEXP (comparison, 1),
9341 XEXP (comparison, 0));
9342 break;
9345 if (! bl)
9346 return 0;
9348 /* Look for the case where the basic induction variable is always
9349 nonnegative, and equals zero on the last iteration.
9350 In this case, add a reg_note REG_NONNEG, which allows the
9351 m68k DBRA instruction to be used. */
9353 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
9354 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
9355 && GET_CODE (bl->biv->add_val) == CONST_INT
9356 && INTVAL (bl->biv->add_val) < 0)
9358 /* The initial value must be greater than 0, and
9359 init_val % -dec_value == 0 must hold, to ensure that the biv
9360 equals zero on the last iteration. */
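/* E.g., with illustrative numbers: init_val == 12 and add_val == -4
   satisfy 12 % 4 == 0, so the biv takes the values 12, 8, 4, 0 and
   is zero exactly when the loop exits.  */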
9362 if (GET_CODE (bl->initial_value) == CONST_INT
9363 && INTVAL (bl->initial_value) > 0
9364 && (INTVAL (bl->initial_value)
9365 % (-INTVAL (bl->biv->add_val))) == 0)
9367 /* Register always nonnegative, add REG_NOTE to branch. */
9368 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9369 REG_NOTES (jump)
9370 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9371 REG_NOTES (jump));
9372 bl->nonneg = 1;
9374 return 1;
9377 /* If the decrement is 1 and the value was tested as >= 0 before
9378 the loop, then we can safely optimize. */
9379 for (p = loop_start; p; p = PREV_INSN (p))
9381 if (LABEL_P (p))
9382 break;
9383 if (!JUMP_P (p))
9384 continue;
9386 before_comparison = get_condition_for_loop (loop, p);
9387 if (before_comparison
9388 && XEXP (before_comparison, 0) == bl->biv->dest_reg
9389 && (GET_CODE (before_comparison) == LT
9390 || GET_CODE (before_comparison) == LTU)
9391 && XEXP (before_comparison, 1) == const0_rtx
9392 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
9393 && INTVAL (bl->biv->add_val) == -1)
9395 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
9396 REG_NOTES (jump)
9397 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
9398 REG_NOTES (jump));
9399 bl->nonneg = 1;
9401 return 1;
9405 else if (GET_CODE (bl->biv->add_val) == CONST_INT
9406 && INTVAL (bl->biv->add_val) > 0)
9408 /* Try to change the increment into a decrement, so we can apply the optimization above. */
9409 /* Can do this if:
9410 all registers modified are induction variables or invariant,
9411 all memory references have non-overlapping addresses
9412 (obviously true if only one write)
9413 allow 2 insns for the compare/jump at the end of the loop. */
9414 /* Also, we must avoid any instructions which use both the reversed
9415 biv and another biv. Such instructions will fail if the loop is
9416 reversed. We meet this condition by requiring that either
9417 no_use_except_counting is true, or else that there is only
9418 one biv. */
9419 int num_nonfixed_reads = 0;
9420 /* 1 if the iteration var is used only to count iterations. */
9421 int no_use_except_counting = 0;
9422 /* 1 if the loop has no memory store, or it has a single memory store
9423 which is reversible. */
9424 int reversible_mem_store = 1;
9426 if (bl->giv_count == 0
9427 && !loop->exit_count
9428 && !loop_info->has_multiple_exit_targets)
9430 rtx bivreg = regno_reg_rtx[bl->regno];
9431 struct iv_class *blt;
9433 /* If there are no givs for this biv, and the only exit is the
9434 fall through at the end of the loop, then
9435 see if perhaps there are no uses except to count. */
9436 no_use_except_counting = 1;
9437 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9438 if (INSN_P (p))
9440 rtx set = single_set (p);
9442 if (set && REG_P (SET_DEST (set))
9443 && REGNO (SET_DEST (set)) == bl->regno)
9444 /* An insn that sets the biv is okay. */
9446 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
9447 /* An insn that doesn't mention the biv is okay. */
9449 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
9450 || p == prev_nonnote_insn (loop_end))
9452 /* If either of these insns uses the biv and sets a pseudo
9453 that has more than one usage, then the biv has uses
9454 other than counting since it's used to derive a value
9455 that is used more than one time. */
9456 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
9457 regs);
9458 if (regs->multiple_uses)
9460 no_use_except_counting = 0;
9461 break;
9464 else
9466 no_use_except_counting = 0;
9467 break;
9471 /* A biv has uses besides counting if it is used to set
9472 another biv. */
9473 for (blt = ivs->list; blt; blt = blt->next)
9474 if (blt->init_set
9475 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
9477 no_use_except_counting = 0;
9478 break;
9482 if (no_use_except_counting)
9483 /* No need to worry about MEMs. */
9485 else if (loop_info->num_mem_sets <= 1)
9487 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9488 if (INSN_P (p))
9489 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
9491 /* If the loop has a single store, and the destination address is
9492 invariant, then we can't reverse the loop, because this address
9493 might then have the wrong value at loop exit.
9494 This would work if the source was invariant also, however, in that
9495 case, the insn should have been moved out of the loop. */
9497 if (loop_info->num_mem_sets == 1)
9499 struct induction *v;
9501 /* If we could prove that each of the memory locations
9502 written to was different, then we could reverse the
9503 store -- but we don't presently have any way of
9504 knowing that. */
9505 reversible_mem_store = 0;
9507 /* If the store depends on a register that is set after the
9508 store, it depends on the initial value, and is thus not
9509 reversible. */
9510 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
9512 if (v->giv_type == DEST_REG
9513 && reg_mentioned_p (v->dest_reg,
9514 PATTERN (loop_info->first_loop_store_insn))
9515 && loop_insn_first_p (loop_info->first_loop_store_insn,
9516 v->insn))
9517 reversible_mem_store = 0;
9521 else
9522 return 0;
9524 /* This code only acts for innermost loops. Also it simplifies
9525 the memory address check by only reversing loops with
9526 zero or one memory access.
9527 Two memory accesses could involve parts of the same array,
9528 and that can't be reversed.
9529 If the biv is used only for counting, then we don't need to worry
9530 about all these things. */
9532 if ((num_nonfixed_reads <= 1
9533 && ! loop_info->has_nonconst_call
9534 && ! loop_info->has_prefetch
9535 && ! loop_info->has_volatile
9536 && reversible_mem_store
9537 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
9538 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
9539 && (bl == ivs->list && bl->next == 0))
9540 || (no_use_except_counting && ! loop_info->has_prefetch))
9542 rtx tem;
9544 /* Loop can be reversed. */
9545 if (loop_dump_stream)
9546 fprintf (loop_dump_stream, "Can reverse loop\n");
9548 /* Now check other conditions:
9550 The increment must be a constant, as must the initial value,
9551 and the comparison code must be LT.
9553 This test can probably be improved since +/- 1 in the constant
9554 can be obtained by changing LT to LE and vice versa; this is
9555 confusing. */
9557 if (comparison
9558 /* for constants, LE gets turned into LT */
9559 && (GET_CODE (comparison) == LT
9560 || (GET_CODE (comparison) == LE
9561 && no_use_except_counting)
9562 || GET_CODE (comparison) == LTU))
9564 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
9565 rtx initial_value, comparison_value;
9566 int nonneg = 0;
9567 enum rtx_code cmp_code;
9568 int comparison_const_width;
9569 unsigned HOST_WIDE_INT comparison_sign_mask;
9570 bool keep_first_compare;
9572 add_val = INTVAL (bl->biv->add_val);
9573 comparison_value = XEXP (comparison, 1);
9574 if (GET_MODE (comparison_value) == VOIDmode)
9575 comparison_const_width
9576 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
9577 else
9578 comparison_const_width
9579 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
9580 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
9581 comparison_const_width = HOST_BITS_PER_WIDE_INT;
9582 comparison_sign_mask
9583 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
9585 /* If the comparison value is not a loop invariant, then we
9586 cannot reverse this loop.
9588 ??? If the insns which initialize the comparison value as
9589 a whole compute an invariant result, then we could move
9590 them out of the loop and proceed with loop reversal. */
9591 if (! loop_invariant_p (loop, comparison_value))
9592 return 0;
9594 if (GET_CODE (comparison_value) == CONST_INT)
9595 comparison_val = INTVAL (comparison_value);
9596 initial_value = bl->initial_value;
9598 /* Normalize the initial value if it is an integer and
9599 has no other use except as a counter. This will allow
9600 a few more loops to be reversed. */
9601 if (no_use_except_counting
9602 && GET_CODE (comparison_value) == CONST_INT
9603 && GET_CODE (initial_value) == CONST_INT)
9605 comparison_val = comparison_val - INTVAL (bl->initial_value);
9606 /* The code below requires comparison_val to be a multiple
9607 of add_val in order to do the loop reversal, so
9608 round up comparison_val to a multiple of add_val.
9609 Since comparison_value is constant, we know that the
9610 current comparison code is LT. */
9611 comparison_val = comparison_val + add_val - 1;
9612 comparison_val
9613 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
9614 /* We postpone overflow checks for COMPARISON_VAL here;
9615 even if there is an overflow, we might still be able to
9616 reverse the loop, if converting the loop exit test to
9617 NE is possible. */
9618 initial_value = const0_rtx;
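/* Worked example with made-up values: initial_value == 3,
   add_val == 4, comparison_value == 10. comparison_val becomes
   10 - 3 = 7 and is rounded up to 8, so the loop is treated as
   running from 0 to 8 by 4 -- the same two iterations as the
   original 3, 7 sequence.  */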
9621 /* First check if we can do a vanilla loop reversal. */
9622 if (initial_value == const0_rtx
9623 && GET_CODE (comparison_value) == CONST_INT
9624 /* Now do postponed overflow checks on COMPARISON_VAL. */
9625 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
9626 & comparison_sign_mask))
9628 /* Register will always be nonnegative, with value
9629 0 on last iteration */
9630 add_adjust = add_val;
9631 nonneg = 1;
9632 cmp_code = GE;
9634 else
9635 return 0;
9637 if (GET_CODE (comparison) == LE)
9638 add_adjust -= add_val;
9640 /* If the initial value is not zero, or if the comparison
9641 value is not an exact multiple of the increment, then we
9642 cannot reverse this loop. */
9643 if (initial_value == const0_rtx
9644 && GET_CODE (comparison_value) == CONST_INT)
9646 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
9647 return 0;
9649 else
9651 if (! no_use_except_counting || add_val != 1)
9652 return 0;
9655 final_value = comparison_value;
9657 /* Reset these in case we normalized the initial value
9658 and comparison value above. */
9659 if (GET_CODE (comparison_value) == CONST_INT
9660 && GET_CODE (initial_value) == CONST_INT)
9662 comparison_value = GEN_INT (comparison_val);
9663 final_value
9664 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
9666 bl->initial_value = initial_value;
9668 /* Save some info needed to produce the new insns. */
9669 reg = bl->biv->dest_reg;
9670 mode = GET_MODE (reg);
9671 jump_label = condjump_label (PREV_INSN (loop_end));
9672 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
9674 /* Set start_value; if this is not a CONST_INT, we need
9675 to generate a SUB.
9676 Initialize biv to start_value before loop start.
9677 The old initializing insn will be deleted as a
9678 dead store by flow.c. */
9679 if (initial_value == const0_rtx
9680 && GET_CODE (comparison_value) == CONST_INT)
9682 start_value
9683 = gen_int_mode (comparison_val - add_adjust, mode);
9684 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
9686 else if (GET_CODE (initial_value) == CONST_INT)
9688 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
9689 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
9691 if (add_insn == 0)
9692 return 0;
9694 start_value
9695 = gen_rtx_PLUS (mode, comparison_value, offset);
9696 loop_insn_hoist (loop, add_insn);
9697 if (GET_CODE (comparison) == LE)
9698 final_value = gen_rtx_PLUS (mode, comparison_value,
9699 GEN_INT (add_val));
9701 else if (! add_adjust)
9703 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
9704 initial_value);
9706 if (sub_insn == 0)
9707 return 0;
9708 start_value
9709 = gen_rtx_MINUS (mode, comparison_value, initial_value);
9710 loop_insn_hoist (loop, sub_insn);
9712 else
9713 /* We could handle the other cases too, but it'll be
9714 better to have a testcase first. */
9715 return 0;
9717 /* We may not have a single insn which can increment a reg, so
9718 create a sequence to hold all the insns from expand_inc. */
9719 start_sequence ();
9720 expand_inc (reg, new_add_val);
9721 tem = get_insns ();
9722 end_sequence ();
9724 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
9725 delete_insn (bl->biv->insn);
9727 /* Update biv info to reflect its new status. */
9728 bl->biv->insn = p;
9729 bl->initial_value = start_value;
9730 bl->biv->add_val = new_add_val;
9732 /* Update loop info. */
9733 loop_info->initial_value = reg;
9734 loop_info->initial_equiv_value = reg;
9735 loop_info->final_value = const0_rtx;
9736 loop_info->final_equiv_value = const0_rtx;
9737 loop_info->comparison_value = const0_rtx;
9738 loop_info->comparison_code = cmp_code;
9739 loop_info->increment = new_add_val;
9741 /* Inc LABEL_NUSES so that delete_insn will
9742 not delete the label. */
9743 LABEL_NUSES (XEXP (jump_label, 0))++;
9745 /* If we have a separate comparison insn that does more
9746 than just set cc0, the result of the comparison might
9747 be used outside the loop. */
9748 keep_first_compare = (compare_and_branch == 2
9749 #ifdef HAVE_cc0
9750 && sets_cc0_p (first_compare) <= 0
9751 #endif
9754 /* Emit an insn after the end of the loop to set the biv's
9755 proper exit value if it is used anywhere outside the loop. */
9756 if (keep_first_compare
9757 || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
9758 || ! bl->init_insn
9759 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
9760 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
9762 if (keep_first_compare)
9763 loop_insn_sink (loop, PATTERN (first_compare));
9765 /* Delete compare/branch at end of loop. */
9766 delete_related_insns (PREV_INSN (loop_end));
9767 if (compare_and_branch == 2)
9768 delete_related_insns (first_compare);
9770 /* Add new compare/branch insn at end of loop. */
9771 start_sequence ();
9772 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
9773 mode, 0,
9774 XEXP (jump_label, 0));
9775 tem = get_insns ();
9776 end_sequence ();
9777 emit_jump_insn_before (tem, loop_end);
9779 for (tem = PREV_INSN (loop_end);
9780 tem && !JUMP_P (tem);
9781 tem = PREV_INSN (tem))
9784 if (tem)
9785 JUMP_LABEL (tem) = XEXP (jump_label, 0);
9787 if (nonneg)
9789 if (tem)
9791 /* Increment of LABEL_NUSES done above. */
9792 /* Register is now always nonnegative,
9793 so add REG_NONNEG note to the branch. */
9794 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
9795 REG_NOTES (tem));
9797 bl->nonneg = 1;
9800 /* No insn may reference both the reversed and another biv or it
9801 will fail (see comment near the top of the loop reversal
9802 code).
9803 Earlier on, we have verified that the biv has no use except
9804 counting, or it is the only biv in this function.
9805 However, the code that computes no_use_except_counting does
9806 not verify reg notes. It's possible to have an insn that
9807 references another biv, and has a REG_EQUAL note with an
9808 expression based on the reversed biv. To avoid this case,
9809 remove all REG_EQUAL notes based on the reversed biv
9810 here. */
9811 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9812 if (INSN_P (p))
9814 rtx *pnote;
9815 rtx set = single_set (p);
9816 /* If this is a set of a GIV based on the reversed biv, any
9817 REG_EQUAL notes should still be correct. */
9818 if (! set
9819 || !REG_P (SET_DEST (set))
9820 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
9821 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
9822 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
9823 for (pnote = &REG_NOTES (p); *pnote;)
9825 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
9826 && reg_mentioned_p (regno_reg_rtx[bl->regno],
9827 XEXP (*pnote, 0)))
9828 *pnote = XEXP (*pnote, 1);
9829 else
9830 pnote = &XEXP (*pnote, 1);
9834 /* Mark that this biv has been reversed. Each giv which depends
9835 on this biv, and which is also live past the end of the loop
9836 will have to be fixed up. */
9838 bl->reversed = 1;
9840 if (loop_dump_stream)
9842 fprintf (loop_dump_stream, "Reversed loop");
9843 if (bl->nonneg)
9844 fprintf (loop_dump_stream, " and added reg_nonneg\n");
9845 else
9846 fprintf (loop_dump_stream, "\n");
9849 return 1;
9854 return 0;
9857 /* Verify whether the biv BL appears to be eliminable,
9858 based on the insns in the loop that refer to it.
9860 If ELIMINATE_P is nonzero, actually do the elimination.
9862 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
9863 determine whether invariant insns should be placed inside or at the
9864 start of the loop. */
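/* A sketch of the typical case (hypothetical source):

       for (i = 0, p = a; i < n; i++, p += 4)
         ... *p ...

   I is a biv used only in the exit test. Once the giv P has been
   reduced, the test I < N can be rewritten in terms of P (compare
   P against A + 4*N) and I eliminated altogether.  */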
9866 static int
9867 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
9868 int eliminate_p, int threshold, int insn_count)
9870 struct loop_ivs *ivs = LOOP_IVS (loop);
9871 rtx reg = bl->biv->dest_reg;
9872 rtx p;
9874 /* Scan all insns in the loop, stopping if we find one that uses the
9875 biv in a way that we cannot eliminate. */
9877 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9879 enum rtx_code code = GET_CODE (p);
9880 basic_block where_bb = 0;
9881 rtx where_insn = threshold >= insn_count ? 0 : p;
9882 rtx note;
9884 /* If this is a libcall that sets a giv, skip ahead to its end. */
9885 if (INSN_P (p))
9887 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
9889 if (note)
9891 rtx last = XEXP (note, 0);
9892 rtx set = single_set (last);
9894 if (set && REG_P (SET_DEST (set)))
9896 unsigned int regno = REGNO (SET_DEST (set));
9898 if (regno < ivs->n_regs
9899 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
9900 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
9901 p = last;
9906 /* Closely examine the insn if the biv is mentioned. */
9907 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
9908 && reg_mentioned_p (reg, PATTERN (p))
9909 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
9910 eliminate_p, where_bb, where_insn))
9912 if (loop_dump_stream)
9913 fprintf (loop_dump_stream,
9914 "Cannot eliminate biv %d: biv used in insn %d.\n",
9915 bl->regno, INSN_UID (p));
9916 break;
9919 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
9920 if (eliminate_p
9921 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
9922 && reg_mentioned_p (reg, XEXP (note, 0)))
9923 remove_note (p, note);
9926 if (p == loop->end)
9928 if (loop_dump_stream)
9929 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
9930 bl->regno, eliminate_p ? "was" : "can be");
9931 return 1;
9934 return 0;
9937 /* INSN and REFERENCE are instructions in the same insn chain.
9938 Return nonzero if INSN is first. */
9940 static int
9941 loop_insn_first_p (rtx insn, rtx reference)
9943 rtx p, q;
9945 for (p = insn, q = reference;;)
9947 /* Start with the test for "not first", so that INSN == REFERENCE
9948 yields "not first". */
9949 if (q == insn || ! p)
9950 return 0;
9951 if (p == reference || ! q)
9952 return 1;
9954 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
9955 previous insn, hence the <= comparison below does not work if
9956 P is a note. */
9957 if (INSN_UID (p) < max_uid_for_loop
9958 && INSN_UID (q) < max_uid_for_loop
9959 && !NOTE_P (p))
9960 return INSN_LUID (p) <= INSN_LUID (q);
9962 if (INSN_UID (p) >= max_uid_for_loop
9963 || NOTE_P (p))
9964 p = NEXT_INSN (p);
9965 if (INSN_UID (q) >= max_uid_for_loop)
9966 q = NEXT_INSN (q);
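/* E.g., with hypothetical luids: if INSN has luid 10 and REFERENCE
   luid 12, INSN is first. Insns added during loop optimization have
   UID >= max_uid_for_loop and no luid, so the walk above steps past
   them with NEXT_INSN until the luid test can decide.  */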
9970 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
9971 the offset that we have to take into account due to auto-increment /
9972 giv derivation is zero. */
9973 static int
9974 biv_elimination_giv_has_0_offset (struct induction *biv,
9975 struct induction *giv, rtx insn)
9977 /* If the giv V had the auto-inc address optimization applied
9978 to it, and INSN occurs between the giv insn and the biv
9979 insn, then we'd have to adjust the value used here.
9980 This is rare, so we don't bother to make this possible. */
9981 if (giv->auto_inc_opt
9982 && ((loop_insn_first_p (giv->insn, insn)
9983 && loop_insn_first_p (insn, biv->insn))
9984 || (loop_insn_first_p (biv->insn, insn)
9985 && loop_insn_first_p (insn, giv->insn))))
9986 return 0;
9988 return 1;
9991 /* If BL appears in X (part of the pattern of INSN), see if we can
9992 eliminate its use. If so, return 1. If not, return 0.
9994 If BIV does not appear in X, return 1.
9996 If ELIMINATE_P is nonzero, actually do the elimination.
9997 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
9998 Depending on how many items have been moved out of the loop, it
9999 will either be before INSN (when WHERE_INSN is nonzero) or at the
10000 start of the loop (when WHERE_INSN is zero). */
10002 static int
10003 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
10004 struct iv_class *bl, int eliminate_p,
10005 basic_block where_bb, rtx where_insn)
10007 enum rtx_code code = GET_CODE (x);
10008 rtx reg = bl->biv->dest_reg;
10009 enum machine_mode mode = GET_MODE (reg);
10010 struct induction *v;
10011 rtx arg, tem;
10012 #ifdef HAVE_cc0
10013 rtx new;
10014 #endif
10015 int arg_operand;
10016 const char *fmt;
10017 int i, j;
10019 switch (code)
10021 case REG:
10022 /* If we haven't already been able to do something with this BIV,
10023 we can't eliminate it. */
10024 if (x == reg)
10025 return 0;
10026 return 1;
10028 case SET:
10029 /* If this sets the BIV, it is not a problem. */
10030 if (SET_DEST (x) == reg)
10031 return 1;
10033 /* If this is an insn that defines a giv, it is also ok because
10034 it will go away when the giv is reduced. */
10035 for (v = bl->giv; v; v = v->next_iv)
10036 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
10037 return 1;
10039 #ifdef HAVE_cc0
10040 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
10042 /* Can replace with any giv that was reduced and
10043 that has (MULT_VAL != 0) and (ADD_VAL == 0).
10044 Require a constant for MULT_VAL, so we know it's nonzero.
10045 ??? We disable this optimization to avoid potential
10046 overflows. */
10048 for (v = bl->giv; v; v = v->next_iv)
10049 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
10050 && v->add_val == const0_rtx
10051 && ! v->ignore && ! v->maybe_dead && v->always_computable
10052 && v->mode == mode
10053 && 0)
10055 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10056 continue;
10058 if (! eliminate_p)
10059 return 1;
10061 /* If the giv has the opposite direction of change,
10062 then reverse the comparison. */
10063 if (INTVAL (v->mult_val) < 0)
10064 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
10065 const0_rtx, v->new_reg);
10066 else
10067 new = v->new_reg;
10069 /* We can probably test that giv's reduced reg. */
10070 if (validate_change (insn, &SET_SRC (x), new, 0))
10071 return 1;
10074 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
10075 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
10076 Require a constant for MULT_VAL, so we know it's nonzero.
10077 ??? Do this only if ADD_VAL is a pointer to avoid a potential
10078 overflow problem. */
10080 for (v = bl->giv; v; v = v->next_iv)
10081 if (GET_CODE (v->mult_val) == CONST_INT
10082 && v->mult_val != const0_rtx
10083 && ! v->ignore && ! v->maybe_dead && v->always_computable
10084 && v->mode == mode
10085 && (GET_CODE (v->add_val) == SYMBOL_REF
10086 || GET_CODE (v->add_val) == LABEL_REF
10087 || GET_CODE (v->add_val) == CONST
10088 || (REG_P (v->add_val)
10089 && REG_POINTER (v->add_val))))
10091 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10092 continue;
10094 if (! eliminate_p)
10095 return 1;
10097 /* If the giv has the opposite direction of change,
10098 then reverse the comparison. */
10099 if (INTVAL (v->mult_val) < 0)
10100 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
10101 v->new_reg);
10102 else
10103 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
10104 copy_rtx (v->add_val));
10106 /* Replace biv with the giv's reduced register. */
10107 update_reg_last_use (v->add_val, insn);
10108 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10109 return 1;
10111 /* Insn doesn't support that constant or invariant. Copy it
10112 into a register (it will be a loop invariant). */
10113 tem = gen_reg_rtx (GET_MODE (v->new_reg));
10115 loop_insn_emit_before (loop, 0, where_insn,
10116 gen_move_insn (tem,
10117 copy_rtx (v->add_val)));
10119 /* Substitute the new register for its invariant value in
10120 the compare expression. */
10121 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
10122 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
10123 return 1;
10126 #endif
10127 break;
10129 case COMPARE:
10130 case EQ: case NE:
10131 case GT: case GE: case GTU: case GEU:
10132 case LT: case LE: case LTU: case LEU:
10133 /* See if either argument is the biv. */
10134 if (XEXP (x, 0) == reg)
10135 arg = XEXP (x, 1), arg_operand = 1;
10136 else if (XEXP (x, 1) == reg)
10137 arg = XEXP (x, 0), arg_operand = 0;
10138 else
10139 break;
10141 if (CONSTANT_P (arg))
10143 /* First try to replace with any giv that has constant positive
10144 mult_val and constant add_val. We might be able to support
10145 negative mult_val, but it seems complex to do it in general. */
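/* Sketch with made-up values: if the giv is G = 3 * BIV + 7 and the
   insn tests BIV < 10, the test can be rewritten as G < 3*10 + 7,
   i.e. G < 37, provided the multiply-add does not overflow in MODE
   (the const_mult_add_overflow_p check below).  */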
10147 for (v = bl->giv; v; v = v->next_iv)
10148 if (GET_CODE (v->mult_val) == CONST_INT
10149 && INTVAL (v->mult_val) > 0
10150 && (GET_CODE (v->add_val) == SYMBOL_REF
10151 || GET_CODE (v->add_val) == LABEL_REF
10152 || GET_CODE (v->add_val) == CONST
10153 || (REG_P (v->add_val)
10154 && REG_POINTER (v->add_val)))
10155 && ! v->ignore && ! v->maybe_dead && v->always_computable
10156 && v->mode == mode)
10158 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10159 continue;
10161 /* Don't eliminate if the linear combination that makes up
10162 the giv overflows when it is applied to ARG. */
10163 if (GET_CODE (arg) == CONST_INT)
10165 rtx add_val;
10167 if (GET_CODE (v->add_val) == CONST_INT)
10168 add_val = v->add_val;
10169 else
10170 add_val = const0_rtx;
10172 if (const_mult_add_overflow_p (arg, v->mult_val,
10173 add_val, mode, 1))
10174 continue;
10177 if (! eliminate_p)
10178 return 1;
10180 /* Replace biv with the giv's reduced reg. */
10181 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
10183 /* If all constants are actually constant integers and
10184 the derived constant can be directly placed in the COMPARE,
10185 do so. */
10186 if (GET_CODE (arg) == CONST_INT
10187 && GET_CODE (v->add_val) == CONST_INT)
10189 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
10190 v->add_val, mode, 1);
10192 else
10194 /* Otherwise, load it into a register. */
10195 tem = gen_reg_rtx (mode);
10196 loop_iv_add_mult_emit_before (loop, arg,
10197 v->mult_val, v->add_val,
10198 tem, where_bb, where_insn);
10201 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10203 if (apply_change_group ())
10204 return 1;
10207 /* Look for giv with positive constant mult_val and nonconst add_val.
10208 Insert insns to calculate new compare value.
10209 ??? Turn this off due to possible overflow. */
10211 for (v = bl->giv; v; v = v->next_iv)
10212 if (GET_CODE (v->mult_val) == CONST_INT
10213 && INTVAL (v->mult_val) > 0
10214 && ! v->ignore && ! v->maybe_dead && v->always_computable
10215 && v->mode == mode
10216 && 0)
10218 rtx tem;
10220 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10221 continue;
10223 if (! eliminate_p)
10224 return 1;
10226 tem = gen_reg_rtx (mode);
10228 /* Replace biv with giv's reduced register. */
10229 validate_change (insn, &XEXP (x, 1 - arg_operand),
10230 v->new_reg, 1);
10232 /* Compute value to compare against. */
10233 loop_iv_add_mult_emit_before (loop, arg,
10234 v->mult_val, v->add_val,
10235 tem, where_bb, where_insn);
10236 /* Use it in this insn. */
10237 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10238 if (apply_change_group ())
10239 return 1;
10242 else if (REG_P (arg) || MEM_P (arg))
10244 if (loop_invariant_p (loop, arg) == 1)
10246 /* Look for giv with constant positive mult_val and nonconst
10247 add_val. Insert insns to compute new compare value.
10248 ??? Turn this off due to possible overflow. */
10250 for (v = bl->giv; v; v = v->next_iv)
10251 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
10252 && ! v->ignore && ! v->maybe_dead && v->always_computable
10253 && v->mode == mode
10254 && 0)
10256 rtx tem;
10258 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10259 continue;
10261 if (! eliminate_p)
10262 return 1;
10264 tem = gen_reg_rtx (mode);
10266 /* Replace biv with giv's reduced register. */
10267 validate_change (insn, &XEXP (x, 1 - arg_operand),
10268 v->new_reg, 1);
10270 /* Compute value to compare against. */
10271 loop_iv_add_mult_emit_before (loop, arg,
10272 v->mult_val, v->add_val,
10273 tem, where_bb, where_insn);
10274 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
10275 if (apply_change_group ())
10276 return 1;
10280 /* This code has problems. Basically, when deciding whether we
10281 will eliminate BL, we can't know whether a particular giv
10282 of ARG will be reduced. If it isn't going to be reduced,
10283 we can't eliminate BL. We can try forcing it to be reduced,
10284 but that can generate poor code.
10286 The problem is that the benefit of reducing TV, below, should
10287 be increased if BL can actually be eliminated, but this means
10288 we might have to do a topological sort of the order in which
10289 we try to process bivs. It doesn't seem worthwhile to do
10290 this sort of thing now. */
10292 #if 0
10293 /* Otherwise the reg compared with had better be a biv. */
10294 if (!REG_P (arg)
10295 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
10296 return 0;
10298 /* Look for a pair of givs, one for each biv,
10299 with identical coefficients. */
10300 for (v = bl->giv; v; v = v->next_iv)
10302 struct induction *tv;
10304 if (v->ignore || v->maybe_dead || v->mode != mode)
10305 continue;
10307 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
10308 tv = tv->next_iv)
10309 if (! tv->ignore && ! tv->maybe_dead
10310 && rtx_equal_p (tv->mult_val, v->mult_val)
10311 && rtx_equal_p (tv->add_val, v->add_val)
10312 && tv->mode == mode)
10314 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
10315 continue;
10317 if (! eliminate_p)
10318 return 1;
10320 /* Replace biv with its giv's reduced reg. */
10321 XEXP (x, 1 - arg_operand) = v->new_reg;
10322 /* Replace other operand with the other giv's
10323 reduced reg. */
10324 XEXP (x, arg_operand) = tv->new_reg;
10325 return 1;
10328 #endif
10331 /* If we get here, the biv can't be eliminated. */
10332 return 0;
10334 case MEM:
10335 /* If this address is a DEST_ADDR giv, it doesn't matter if the
10336 biv is used in it, since it will be replaced. */
10337 for (v = bl->giv; v; v = v->next_iv)
10338 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
10339 return 1;
10340 break;
10342 default:
10343 break;
10346 /* See if any subexpression fails elimination. */
10347 fmt = GET_RTX_FORMAT (code);
10348 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
10350 switch (fmt[i])
10352 case 'e':
10353 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
10354 eliminate_p, where_bb, where_insn))
10355 return 0;
10356 break;
10358 case 'E':
10359 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10360 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
10361 eliminate_p, where_bb, where_insn))
10362 return 0;
10363 break;
10367 return 1;
10370 /* Return nonzero if the last use of REG
10371 is in an insn following INSN in the same basic block. */
10373 static int
10374 last_use_this_basic_block (rtx reg, rtx insn)
10376 rtx n;
10377 for (n = insn;
10378 n && !LABEL_P (n) && !JUMP_P (n);
10379 n = NEXT_INSN (n))
10381 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
10382 return 1;
10384 return 0;
10387 /* Called via `note_stores' to record the initial value of a biv. Here we
10388 just record the location of the set and process it later. */
10390 static void
10391 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
10393 struct loop_ivs *ivs = (struct loop_ivs *) data;
10394 struct iv_class *bl;
10396 if (!REG_P (dest)
10397 || REGNO (dest) >= ivs->n_regs
10398 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
10399 return;
10401 bl = REG_IV_CLASS (ivs, REGNO (dest));
10403 /* If this is the first set found, record it. */
10404 if (bl->init_insn == 0)
10406 bl->init_insn = note_insn;
10407 bl->init_set = set;
10411 /* If any of the registers in X are "old" and currently have a last use earlier
10412 than INSN, update them to have a last use of INSN. Their actual last use
10413 will be the previous insn but it will not have a valid uid_luid so we can't
10414 use it. X must be a source expression only. */
10416 static void
10417 update_reg_last_use (rtx x, rtx insn)
10419 /* Check for the case where INSN does not have a valid luid. In this case,
10420 there is no need to modify the regno_last_uid, as this can only happen
10421 when code is inserted after the loop_end to set a pseudo's final value,
10422 and hence this insn will never be the last use of x.
10423 ??? This comment is not correct. See for example loop_givs_reduce.
10424 This may insert an insn before another new insn. */
10425 if (REG_P (x) && REGNO (x) < max_reg_before_loop
10426 && INSN_UID (insn) < max_uid_for_loop
10427 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
10429 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
10431 else
10433 int i, j;
10434 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
10435 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
10437 if (fmt[i] == 'e')
10438 update_reg_last_use (XEXP (x, i), insn);
10439 else if (fmt[i] == 'E')
10440 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10441 update_reg_last_use (XVECEXP (x, i, j), insn);
10446 /* Similar to rtlanal.c:get_condition, except that we also put an
10447 invariant last unless both operands are invariants. */
10449 static rtx
10450 get_condition_for_loop (const struct loop *loop, rtx x)
10452 rtx comparison = get_condition (x, (rtx*) 0, false, true);
10454 if (comparison == 0
10455 || ! loop_invariant_p (loop, XEXP (comparison, 0))
10456 || loop_invariant_p (loop, XEXP (comparison, 1)))
10457 return comparison;
10459 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
10460 XEXP (comparison, 1), XEXP (comparison, 0));
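/* E.g.: given (lt n i) where N is loop-invariant and I is not, the
   result is the swapped form (gt i n), so callers may assume that
   any invariant operand appears second.  */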
10463 /* Scan the function and determine whether it has indirect (computed) jumps.
10465 This is taken mostly from flow.c; similar code exists elsewhere
10466 in the compiler. It may be useful to put this into rtlanal.c. */
10467 static int
10468 indirect_jump_in_function_p (rtx start)
10470 rtx insn;
10472 for (insn = start; insn; insn = NEXT_INSN (insn))
10473 if (computed_jump_p (insn))
10474 return 1;
10476 return 0;
10479 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
10480 documentation for LOOP_MEMS for the definition of `appropriate'.
10481 This function is called from prescan_loop via for_each_rtx. */
10483 static int
10484 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
10486 struct loop_info *loop_info = data;
10487 int i;
10488 rtx m = *mem;
10490 if (m == NULL_RTX)
10491 return 0;
10493 switch (GET_CODE (m))
10495 case MEM:
10496 break;
10498 case CLOBBER:
10499 /* We're not interested in MEMs that are only clobbered. */
10500 return -1;
10502 case CONST_DOUBLE:
10503 /* We're not interested in the MEM associated with a
10504 CONST_DOUBLE, so there's no need to traverse into this. */
10505 return -1;
10507 case EXPR_LIST:
10508 /* We're not interested in any MEMs that only appear in notes. */
10509 return -1;
10511 default:
10512 /* This is not a MEM. */
10513 return 0;
10516 /* See if we've already seen this MEM. */
10517 for (i = 0; i < loop_info->mems_idx; ++i)
10518 if (rtx_equal_p (m, loop_info->mems[i].mem))
10520 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
10521 loop_info->mems[i].mem = m;
10522 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
10523 /* The modes of the two memory accesses are different. If
10524 this happens, something tricky is going on, and we just
10525 don't optimize accesses to this MEM. */
10526 loop_info->mems[i].optimize = 0;
10528 return 0;
10531 /* Resize the array, if necessary. */
10532 if (loop_info->mems_idx == loop_info->mems_allocated)
10534 if (loop_info->mems_allocated != 0)
10535 loop_info->mems_allocated *= 2;
10536 else
10537 loop_info->mems_allocated = 32;
10539 loop_info->mems = xrealloc (loop_info->mems,
10540 loop_info->mems_allocated * sizeof (loop_mem_info));
10543 /* Actually insert the MEM. */
10544 loop_info->mems[loop_info->mems_idx].mem = m;
10545 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
10546 because we can't put it in a register. We still store it in the
10547 table, though, so that if we see the same address later, but in a
10548 non-BLK mode, we'll not think we can optimize it at that point. */
10549 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
10550 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
10551 ++loop_info->mems_idx;
10553 return 0;
10557 /* Allocate REGS->ARRAY or reallocate it if it is too small.
10559 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
10560 register that is modified by an insn between FROM and TO. If the
10561 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
10562 more, stop incrementing it, to avoid overflow.
10564 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
10565 register I is used, if it is only used once. Otherwise, it is set
10566 to 0 (for no uses) or const0_rtx for more than one use. This
10567 parameter may be zero, in which case this processing is not done.
10569 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
10570 optimize register I. */
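/* E.g.: after the scan below, regs->array[R].single_usage is the
   unique insn using R if R is used exactly once, NULL_RTX if R is
   never used, and const0_rtx if R is used more than once.  */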
10572 static void
10573 loop_regs_scan (const struct loop *loop, int extra_size)
10575 struct loop_regs *regs = LOOP_REGS (loop);
10576 int old_nregs;
10577 /* last_set[n] is nonzero iff reg n has been set in the current
10578 basic block. In that case, it is the insn that last set reg n. */
10579 rtx *last_set;
10580 rtx insn;
10581 int i;
10583 old_nregs = regs->num;
10584 regs->num = max_reg_num ();
10586 /* Grow the regs array if not allocated or too small. */
10587 if (regs->num >= regs->size)
10589 regs->size = regs->num + extra_size;
10591 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
10593 /* Zero the new elements. */
10594 memset (regs->array + old_nregs, 0,
10595 (regs->size - old_nregs) * sizeof (*regs->array));
10598 /* Clear previously scanned fields but do not clear n_times_set. */
10599 for (i = 0; i < old_nregs; i++)
10601 regs->array[i].set_in_loop = 0;
10602 regs->array[i].may_not_optimize = 0;
10603 regs->array[i].single_usage = NULL_RTX;
10606 last_set = xcalloc (regs->num, sizeof (rtx));
10608 /* Scan the loop, recording register usage. */
10609 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10610 insn = NEXT_INSN (insn))
10612 if (INSN_P (insn))
10614 /* Record registers that have exactly one use. */
10615 find_single_use_in_loop (regs, insn, PATTERN (insn));
10617 /* Include uses in REG_EQUAL notes. */
10618 if (REG_NOTES (insn))
10619 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
10621 if (GET_CODE (PATTERN (insn)) == SET
10622 || GET_CODE (PATTERN (insn)) == CLOBBER)
10623 count_one_set (regs, insn, PATTERN (insn), last_set);
10624 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
10626 int i;
10627 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
10628 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
10629 last_set);
10633 if (LABEL_P (insn) || JUMP_P (insn))
10634 memset (last_set, 0, regs->num * sizeof (rtx));
10636 /* Invalidate all registers used for function argument passing.
10637 We check rtx_varies_p for the same reason as below, to allow
10638 optimizing PIC calculations. */
10639 if (CALL_P (insn))
10641 rtx link;
10642 for (link = CALL_INSN_FUNCTION_USAGE (insn);
10643 link;
10644 link = XEXP (link, 1))
10646 rtx op, reg;
10648 if (GET_CODE (op = XEXP (link, 0)) == USE
10649 && REG_P (reg = XEXP (op, 0))
10650 && rtx_varies_p (reg, 1))
10651 regs->array[REGNO (reg)].may_not_optimize = 1;
10656 /* Invalidate all hard registers clobbered by calls. With one exception:
10657 a call-clobbered PIC register is still function-invariant for our
10658 purposes, since we can hoist any PIC calculations out of the loop.
10659 Thus the call to rtx_varies_p. */
10660 if (LOOP_INFO (loop)->has_call)
10661 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
10662 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
10663 && rtx_varies_p (regno_reg_rtx[i], 1))
10665 regs->array[i].may_not_optimize = 1;
10666 regs->array[i].set_in_loop = 1;
10669 #ifdef AVOID_CCMODE_COPIES
10670 /* Don't try to move insns which set CC registers if we should not
10671 create CCmode register copies. */
10672 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
10673 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
10674 regs->array[i].may_not_optimize = 1;
10675 #endif
10677 /* Set regs->array[I].n_times_set for the new registers. */
10678 for (i = old_nregs; i < regs->num; i++)
10679 regs->array[i].n_times_set = regs->array[i].set_in_loop;
10681 free (last_set);
10684 /* Returns the number of real INSNs in the LOOP. */
10686 static int
10687 count_insns_in_loop (const struct loop *loop)
10689 int count = 0;
10690 rtx insn;
10692 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
10693 insn = NEXT_INSN (insn))
10694 if (INSN_P (insn))
10695 ++count;
10697 return count;
10700 /* Move MEMs into registers for the duration of the loop. */
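/* A source-level sketch of what follows (assuming *P is the only
   memory reference and P is loop-invariant):

       while (c)  { s += *p; *p = s; }

   becomes, roughly,

       r = *p;  while (c) { s += r; r = s; }  *p = r;

   where R is the shadow pseudo allocated below, and the final store
   is emitted after the loop only when the MEM is actually written.  */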
10702 static void
10703 load_mems (const struct loop *loop)
10705 struct loop_info *loop_info = LOOP_INFO (loop);
10706 struct loop_regs *regs = LOOP_REGS (loop);
10707 int maybe_never = 0;
10708 int i;
10709 rtx p, prev_ebb_head;
10710 rtx label = NULL_RTX;
10711 rtx end_label;
10712 /* Nonzero if the next instruction may never be executed. */
10713 int next_maybe_never = 0;
10714 unsigned int last_max_reg = max_reg_num ();
10716 if (loop_info->mems_idx == 0)
10717 return;
10719 /* We cannot use next_label here because it skips over normal insns. */
10720 end_label = next_nonnote_insn (loop->end);
10721 if (end_label && !LABEL_P (end_label))
10722 end_label = NULL_RTX;
10724 /* Check to see if it's possible that some instructions in the loop are
10725 never executed. Also check if there is a goto out of the loop other
10726 than right after the end of the loop. */
10727 for (p = next_insn_in_loop (loop, loop->scan_start);
10728 p != NULL_RTX;
10729 p = next_insn_in_loop (loop, p))
10731 if (LABEL_P (p))
10732 maybe_never = 1;
10733 else if (JUMP_P (p)
10734 /* If we enter the loop in the middle, and scan
10735 around to the beginning, don't set maybe_never
10736 for that. This must be an unconditional jump,
10737 otherwise the code at the top of the loop might
10738 never be executed. Unconditional jumps are
10739 followed by a barrier and then the loop end. */
10740 && ! (JUMP_P (p)
10741 && JUMP_LABEL (p) == loop->top
10742 && NEXT_INSN (NEXT_INSN (p)) == loop->end
10743 && any_uncondjump_p (p)))
10745 /* If this is a jump outside of the loop but not right
10746 after the end of the loop, we would have to emit new fixup
10747 sequences for each such label. */
10748 if (/* If we can't tell where control might go when this
10749 JUMP_INSN is executed, we must be conservative. */
10750 !JUMP_LABEL (p)
10751 || (JUMP_LABEL (p) != end_label
10752 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
10753 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
10754 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
10755 return;
10757 if (!any_condjump_p (p))
10758 /* Something complicated. */
10759 maybe_never = 1;
10760 else
10761 /* If there are any more instructions in the loop, they
10762 might not be reached. */
10763 next_maybe_never = 1;
10765 else if (next_maybe_never)
10766 maybe_never = 1;
10769 /* Find start of the extended basic block that enters the loop. */
10770 for (p = loop->start;
10771 PREV_INSN (p) && !LABEL_P (p);
10772 p = PREV_INSN (p))
10774 prev_ebb_head = p;
10776 cselib_init (true);
10778 /* Build table of mems that get set to constant values before the
10779 loop. */
10780 for (; p != loop->start; p = NEXT_INSN (p))
10781 cselib_process_insn (p);
10783 /* Actually move the MEMs. */
10784 for (i = 0; i < loop_info->mems_idx; ++i)
10786 regset_head load_copies;
10787 regset_head store_copies;
10788 int written = 0;
10789 rtx reg;
10790 rtx mem = loop_info->mems[i].mem;
10791 rtx mem_list_entry;
10793 if (MEM_VOLATILE_P (mem)
10794 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
10795 /* There's no telling whether or not MEM is modified. */
10796 loop_info->mems[i].optimize = 0;
10798 /* Go through the MEMs written to in the loop to see if this
10799 one is aliased by one of them. */
10800 mem_list_entry = loop_info->store_mems;
10801 while (mem_list_entry)
10803 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
10804 written = 1;
10805 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
10806 mem, rtx_varies_p))
10808 /* MEM is indeed aliased by this store. */
10809 loop_info->mems[i].optimize = 0;
10810 break;
10812 mem_list_entry = XEXP (mem_list_entry, 1);
10815 if (flag_float_store && written
10816 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
10817 loop_info->mems[i].optimize = 0;
10819 /* If this MEM is written to, we must be sure that there
10820 are no reads from another MEM that aliases this one. */
10821 if (loop_info->mems[i].optimize && written)
10823 int j;
10825 for (j = 0; j < loop_info->mems_idx; ++j)
10827 if (j == i)
10828 continue;
10829 else if (true_dependence (mem,
10830 VOIDmode,
10831 loop_info->mems[j].mem,
10832 rtx_varies_p))
10834 /* It's not safe to hoist loop_info->mems[i] out of
10835 the loop because writes to it might not be
10836 seen by reads from loop_info->mems[j]. */
10837 loop_info->mems[i].optimize = 0;
10838 break;
10843 if (maybe_never && may_trap_p (mem))
10844 /* We can't access the MEM outside the loop; it might
10845 cause a trap that wouldn't have happened otherwise. */
10846 loop_info->mems[i].optimize = 0;
10848 if (!loop_info->mems[i].optimize)
10849 /* We thought we were going to lift this MEM out of the
10850 loop, but later discovered that we could not. */
10851 continue;
10853 INIT_REG_SET (&load_copies);
10854 INIT_REG_SET (&store_copies);
10856 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
10857 order to keep scan_loop from moving stores to this MEM
10858 out of the loop just because this REG is neither a
10859 user-variable nor used in the loop test. */
10860 reg = gen_reg_rtx (GET_MODE (mem));
10861 REG_USERVAR_P (reg) = 1;
10862 loop_info->mems[i].reg = reg;
10864 /* Now, replace all references to the MEM with the
10865 corresponding pseudos. */
10866 maybe_never = 0;
10867 for (p = next_insn_in_loop (loop, loop->scan_start);
10868 p != NULL_RTX;
10869 p = next_insn_in_loop (loop, p))
10871 if (INSN_P (p))
10873 rtx set;
10875 set = single_set (p);
10877 /* See if this copies the mem into a register that isn't
10878 modified afterwards. We'll try to do copy propagation
10879 a little further on. */
10880 if (set
10881 /* @@@ This test is _way_ too conservative. */
10882 && ! maybe_never
10883 && REG_P (SET_DEST (set))
10884 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
10885 && REGNO (SET_DEST (set)) < last_max_reg
10886 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
10887 && rtx_equal_p (SET_SRC (set), mem))
10888 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
10890 /* See if this copies the mem from a register that isn't
10891 modified afterwards. We'll try to remove the
10892 redundant copy later on by doing a little register
10893 renaming and copy propagation. This will help
10894 to untangle things for the BIV detection code. */
10895 if (set
10896 && ! maybe_never
10897 && REG_P (SET_SRC (set))
10898 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
10899 && REGNO (SET_SRC (set)) < last_max_reg
10900 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
10901 && rtx_equal_p (SET_DEST (set), mem))
10902 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
10904 /* If this is a call which uses / clobbers this memory
10905 location, we must not change the interface here. */
10906 if (CALL_P (p)
10907 && reg_mentioned_p (loop_info->mems[i].mem,
10908 CALL_INSN_FUNCTION_USAGE (p)))
10910 cancel_changes (0);
10911 loop_info->mems[i].optimize = 0;
10912 break;
10914 else
10915 /* Replace the memory reference with the shadow register. */
10916 replace_loop_mems (p, loop_info->mems[i].mem,
10917 loop_info->mems[i].reg, written);
10920 if (LABEL_P (p)
10921 || JUMP_P (p))
10922 maybe_never = 1;
10925 if (! loop_info->mems[i].optimize)
10926 ; /* We found we couldn't do the replacement, so do nothing. */
10927 else if (! apply_change_group ())
10928 /* We couldn't replace all occurrences of the MEM. */
10929 loop_info->mems[i].optimize = 0;
10930 else
10932 /* Load the memory immediately before LOOP->START, which is
10933 the NOTE_LOOP_BEG. */
10934 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
10935 rtx set;
10936 rtx best = mem;
10937 unsigned j;
10938 struct elt_loc_list *const_equiv = 0;
10939 reg_set_iterator rsi;
10941 if (e)
10943 struct elt_loc_list *equiv;
10944 struct elt_loc_list *best_equiv = 0;
10945 for (equiv = e->locs; equiv; equiv = equiv->next)
10947 if (CONSTANT_P (equiv->loc))
10948 const_equiv = equiv;
10949 else if (REG_P (equiv->loc)
10950 /* Extending hard register lifetimes causes a crash
10951 on SRC targets. Doing so on non-SRC targets is
10952 probably also not a good idea, since we most
10953 probably have a pseudo-register equivalence as
10954 well. */
10955 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
10956 best_equiv = equiv;
10958 /* Use the constant equivalence if that is cheap enough. */
10959 if (! best_equiv)
10960 best_equiv = const_equiv;
10961 else if (const_equiv
10962 && (rtx_cost (const_equiv->loc, SET)
10963 <= rtx_cost (best_equiv->loc, SET)))
10965 best_equiv = const_equiv;
10966 const_equiv = 0;
10969 /* If best_equiv is nonzero, we know that MEM is set to a
10970 constant or register before the loop. We will use this
10971 knowledge to initialize the shadow register with that
10972 constant or reg rather than by loading from MEM. */
10973 if (best_equiv)
10974 best = copy_rtx (best_equiv->loc);
10977 set = gen_move_insn (reg, best);
10978 set = loop_insn_hoist (loop, set);
10979 if (REG_P (best))
10981 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
10982 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
10984 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
10985 break;
10989 if (const_equiv)
10990 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
10992 if (written)
10994 if (label == NULL_RTX)
10996 label = gen_label_rtx ();
10997 emit_label_after (label, loop->end);
11000 /* Store the memory immediately after END, which is
11001 the NOTE_INSN_LOOP_END. */
11002 set = gen_move_insn (copy_rtx (mem), reg);
11003 loop_insn_emit_after (loop, 0, label, set);
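/* Sketch of the net effect for a read/write MEM (pseudo 117 is an
   invented example number):

   (set (reg:SI 117) (mem:SI ...)) hoisted before NOTE_INSN_LOOP_BEG
   ... loop body, now using (reg:SI 117) in place of the MEM ...
   LABEL: (set (mem:SI ...) (reg:SI 117)) emitted after NOTE_INSN_LOOP_END

   Jumps to the old exit label are redirected to LABEL further below,
   so the store is executed on every path out of the loop. */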
11006 if (loop_dump_stream)
11008 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
11009 REGNO (reg), (written ? "r/w" : "r/o"));
11010 print_rtl (loop_dump_stream, mem);
11011 fputc ('\n', loop_dump_stream);
11014 /* Attempt a bit of copy propagation. This helps untangle the
11015 data flow, and enables {basic,general}_induction_var to find
11016 more bivs/givs. */
11017 EXECUTE_IF_SET_IN_REG_SET
11018 (&load_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11020 try_copy_prop (loop, reg, j);
11022 CLEAR_REG_SET (&load_copies);
11024 EXECUTE_IF_SET_IN_REG_SET
11025 (&store_copies, FIRST_PSEUDO_REGISTER, j, rsi)
11027 try_swap_copy_prop (loop, reg, j);
11029 CLEAR_REG_SET (&store_copies);
11033 /* Now, we need to replace all references to the previous exit
11034 label with the new one. */
11035 if (label != NULL_RTX && end_label != NULL_RTX)
11036 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
11037 if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
11038 redirect_jump (p, label, false);
11040 cselib_finish ();
11043 /* For communication between note_reg_stored and its caller. */
11044 struct note_reg_stored_arg
11046 int set_seen;
11047 rtx reg;
11050 /* Called via note_stores; record in ARG->set_seen whether X, which
11051 is being written, is equal to ARG->reg. */
11052 static void
11053 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
11055 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
11056 if (t->reg == x)
11057 t->set_seen = 1;
11060 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
11061 There must be exactly one insn that sets this pseudo; it will be
11062 deleted if all replacements succeed and we can prove that the register
11063 is not used after the loop. */
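/* A sketch of the transformation, with invented register numbers:
   after load_mems has rewritten an in-loop copy into

   (set (reg 117) (reg 200)) <- init_insn, REGNO == 117

   this function rewrites each later use of (reg 117) in the same
   extended basic block to (reg 200). If init_insn is also the
   register's first recorded use and its last recorded use is among
   the insns rewritten, init_insn (or its enclosing libcall sequence)
   is deleted below. */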
11065 static void
11066 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
11068 /* This is the reg that we are copying from. */
11069 rtx reg_rtx = regno_reg_rtx[regno];
11070 rtx init_insn = 0;
11071 rtx insn;
11072 /* These help keep track of whether we replaced all uses of the reg. */
11073 int replaced_last = 0;
11074 int store_is_first = 0;
11076 for (insn = next_insn_in_loop (loop, loop->scan_start);
11077 insn != NULL_RTX;
11078 insn = next_insn_in_loop (loop, insn))
11080 rtx set;
11082 /* Only substitute within one extended basic block from the initializing
11083 insn. */
11084 if (LABEL_P (insn) && init_insn)
11085 break;
11087 if (! INSN_P (insn))
11088 continue;
11090 /* Is this the initializing insn? */
11091 set = single_set (insn);
11092 if (set
11093 && REG_P (SET_DEST (set))
11094 && REGNO (SET_DEST (set)) == regno)
11096 if (init_insn)
11097 abort ();
11099 init_insn = insn;
11100 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
11101 store_is_first = 1;
11104 /* Only substitute after seeing the initializing insn. */
11105 if (init_insn && insn != init_insn)
11107 struct note_reg_stored_arg arg;
11109 replace_loop_regs (insn, reg_rtx, replacement);
11110 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
11111 replaced_last = 1;
11113 /* Stop replacing when REPLACEMENT is modified. */
11114 arg.reg = replacement;
11115 arg.set_seen = 0;
11116 note_stores (PATTERN (insn), note_reg_stored, &arg);
11117 if (arg.set_seen)
11119 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
11121 /* It is possible that we've turned a previously valid REG_EQUAL note
11122 into an invalid one: we changed REGNO to REPLACEMENT and, unlike
11123 REGNO, REPLACEMENT is modified here, so the note would take on a different meaning. */
11124 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
11125 remove_note (insn, note);
11126 break;
11130 if (! init_insn)
11131 abort ();
11132 if (apply_change_group ())
11134 if (loop_dump_stream)
11135 fprintf (loop_dump_stream, " Replaced reg %d", regno);
11136 if (store_is_first && replaced_last)
11138 rtx first;
11139 rtx retval_note;
11141 /* Assume we're just deleting INIT_INSN. */
11142 first = init_insn;
11143 /* Look for REG_RETVAL note. If we're deleting the end of
11144 the libcall sequence, the whole sequence can go. */
11145 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
11146 /* If we found a REG_RETVAL note, find the first instruction
11147 in the sequence. */
11148 if (retval_note)
11149 first = XEXP (retval_note, 0);
11151 /* Delete the instructions. */
11152 loop_delete_insns (first, init_insn);
11154 if (loop_dump_stream)
11155 fprintf (loop_dump_stream, ".\n");
11159 /* Replace all the instructions from FIRST up to and including LAST
11160 with NOTE_INSN_DELETED notes. */
11162 static void
11163 loop_delete_insns (rtx first, rtx last)
11165 while (1)
11167 if (loop_dump_stream)
11168 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
11169 INSN_UID (first));
11170 delete_insn (first);
11172 /* If this was the LAST instruction we're supposed to delete,
11173 we're done. */
11174 if (first == last)
11175 break;
11177 first = NEXT_INSN (first);
11181 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
11182 loop LOOP if the order of the sets of these registers can be
11183 swapped. There must be exactly one insn within the loop that sets
11184 this pseudo followed immediately by a move insn that sets
11185 REPLACEMENT with REGNO. */
11186 static void
11187 try_swap_copy_prop (const struct loop *loop, rtx replacement,
11188 unsigned int regno)
11190 rtx insn;
11191 rtx set = NULL_RTX;
11192 unsigned int new_regno;
11194 new_regno = REGNO (replacement);
11196 for (insn = next_insn_in_loop (loop, loop->scan_start);
11197 insn != NULL_RTX;
11198 insn = next_insn_in_loop (loop, insn))
11200 /* Search for the insn that copies REGNO to NEW_REGNO. */
11201 if (INSN_P (insn)
11202 && (set = single_set (insn))
11203 && REG_P (SET_DEST (set))
11204 && REGNO (SET_DEST (set)) == new_regno
11205 && REG_P (SET_SRC (set))
11206 && REGNO (SET_SRC (set)) == regno)
11207 break;
11210 if (insn != NULL_RTX)
11212 rtx prev_insn;
11213 rtx prev_set;
11215 /* Some DEF-USE info would come in handy here to make this
11216 function more general. For now, just check the previous insn,
11217 which is the most likely candidate for setting REGNO. */
11219 prev_insn = PREV_INSN (insn);
11221 if (INSN_P (prev_insn)
11222 && (prev_set = single_set (prev_insn))
11223 && REG_P (SET_DEST (prev_set))
11224 && REGNO (SET_DEST (prev_set)) == regno)
11226 /* We have:
11227 (set (reg regno) (expr))
11228 (set (reg new_regno) (reg regno))
11230 so try converting this to:
11231 (set (reg new_regno) (expr))
11232 (set (reg regno) (reg new_regno))
11234 The former construct is often generated when a global
11235 variable used for an induction variable is shadowed by a
11236 register (NEW_REGNO). The latter construct improves the
11237 chances of GIV replacement and BIV elimination. */
11239 validate_change (prev_insn, &SET_DEST (prev_set),
11240 replacement, 1);
11241 validate_change (insn, &SET_DEST (set),
11242 SET_SRC (set), 1);
11243 validate_change (insn, &SET_SRC (set),
11244 replacement, 1);
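/* The three changes above are only queued (the trailing `1' argument
   means "in group"); nothing is committed until apply_change_group
   below verifies that every modified insn still matches its pattern,
   so a failed swap leaves both insns untouched. */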
11246 if (apply_change_group ())
11248 if (loop_dump_stream)
11249 fprintf (loop_dump_stream,
11250 " Swapped set of reg %d at %d with reg %d at %d.\n",
11251 regno, INSN_UID (insn),
11252 new_regno, INSN_UID (prev_insn));
11254 /* Update first use of REGNO. */
11255 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
11256 REGNO_FIRST_UID (regno) = INSN_UID (insn);
11258 /* Now perform copy propagation to hopefully
11259 remove all uses of REGNO within the loop. */
11260 try_copy_prop (loop, replacement, regno);
11266 /* Worker function for find_mem_in_note, called via for_each_rtx. */
11268 static int
11269 find_mem_in_note_1 (rtx *x, void *data)
11271 if (*x != NULL_RTX && MEM_P (*x))
11273 rtx *res = (rtx *) data;
11274 *res = *x;
11275 return 1;
11277 return 0;
11280 /* Returns the first MEM found in NOTE by depth-first search. */
11282 static rtx
11283 find_mem_in_note (rtx note)
11285 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
11286 return note;
11287 return NULL_RTX;
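/* E.g. for a note operand of (plus:SI (mem:SI (reg:SI 100))
   (const_int 4)) (an invented example), the walk stops at and returns
   the inner (mem:SI (reg:SI 100)). */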
11290 /* Replace MEM with its associated pseudo register. This function is
11291 called from load_mems via for_each_rtx. DATA is actually a pointer
11292 to a structure describing the instruction currently being scanned
11293 and the MEM we are currently replacing. */
11295 static int
11296 replace_loop_mem (rtx *mem, void *data)
11298 loop_replace_args *args = (loop_replace_args *) data;
11299 rtx m = *mem;
11301 if (m == NULL_RTX)
11302 return 0;
11304 switch (GET_CODE (m))
11306 case MEM:
11307 break;
11309 case CONST_DOUBLE:
11310 /* We're not interested in the MEM associated with a
11311 CONST_DOUBLE, so there's no need to traverse into one. */
11312 return -1;
11314 default:
11315 /* This is not a MEM. */
11316 return 0;
11319 if (!rtx_equal_p (args->match, m))
11320 /* This is not the MEM we are currently replacing. */
11321 return 0;
11323 /* Actually replace the MEM. */
11324 validate_change (args->insn, mem, args->replacement, 1);
11326 return 0;
11329 static void
11330 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
11332 loop_replace_args args;
11334 args.insn = insn;
11335 args.match = mem;
11336 args.replacement = reg;
11338 for_each_rtx (&insn, replace_loop_mem, &args);
11340 /* If we hoist a mem write out of the loop, then REG_EQUAL
11341 notes referring to the mem are no longer valid. */
11342 if (written)
11344 rtx note, sub;
11345 rtx *link;
11347 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
11349 if (REG_NOTE_KIND (note) == REG_EQUAL
11350 && (sub = find_mem_in_note (note))
11351 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
11353 /* Remove the note. */
11354 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
11355 break;
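/* For instance (with invented pseudo numbers), an insn such as

   (insn ... (set (reg:SI 117) (reg:SI 90))
   (expr_list:REG_EQUAL (mem:SI (symbol_ref ("x"))) ...))

   must lose its note once the store to that memory is deferred until
   after the loop: the MEM no longer holds the value the note claims
   at this point in the loop body. */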
11361 /* Replace one register with another. Called through for_each_rtx; PX points
11362 to the rtx being scanned. DATA is actually a pointer to
11363 a structure of arguments. */
11365 static int
11366 replace_loop_reg (rtx *px, void *data)
11368 rtx x = *px;
11369 loop_replace_args *args = (loop_replace_args *) data;
11371 if (x == NULL_RTX)
11372 return 0;
11374 if (x == args->match)
11375 validate_change (args->insn, px, args->replacement, 1);
11377 return 0;
11380 static void
11381 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
11383 loop_replace_args args;
11385 args.insn = insn;
11386 args.match = reg;
11387 args.replacement = replacement;
11389 for_each_rtx (&insn, replace_loop_reg, &args);
11392 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
11393 (ignored in the interim). */
11395 static rtx
11396 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
11397 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
11398 rtx pattern)
11400 return emit_insn_after (pattern, where_insn);
11404 /* If WHERE_INSN is nonzero, emit an insn for PATTERN before WHERE_INSN
11405 in basic block WHERE_BB (ignored in the interim) within the loop;
11406 otherwise, hoist PATTERN into the loop pre-header. */
11408 static rtx
11409 loop_insn_emit_before (const struct loop *loop,
11410 basic_block where_bb ATTRIBUTE_UNUSED,
11411 rtx where_insn, rtx pattern)
11413 if (! where_insn)
11414 return loop_insn_hoist (loop, pattern);
11415 return emit_insn_before (pattern, where_insn);
11419 /* Emit call insn for PATTERN before WHERE_INSN in basic block
11420 WHERE_BB (ignored in the interim) within the loop. */
11422 static rtx
11423 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
11424 basic_block where_bb ATTRIBUTE_UNUSED,
11425 rtx where_insn, rtx pattern)
11427 return emit_call_insn_before (pattern, where_insn);
11431 /* Hoist insn for PATTERN into the loop pre-header. */
11433 static rtx
11434 loop_insn_hoist (const struct loop *loop, rtx pattern)
11436 return loop_insn_emit_before (loop, 0, loop->start, pattern);
11440 /* Hoist call insn for PATTERN into the loop pre-header. */
11442 static rtx
11443 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
11445 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
11449 /* Sink insn for PATTERN after the loop end. */
11451 static rtx
11452 loop_insn_sink (const struct loop *loop, rtx pattern)
11454 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
11457 /* bl->final_value can be either a general_operand or a PLUS of a
11458 general_operand and a constant. Emit a sequence of instructions to load it into REG. */
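/* A sketch of the two accepted shapes (pseudo 100 is an invented
   example number):

   final_value == (reg:SI 100)
   => the sequence is the single move (set REG (reg:SI 100))

   final_value == (plus:SI (reg:SI 100) (const_int 40))
   => force_operand emits the addition, typically straight into REG,
   in which case the final move is skipped as redundant. */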
11459 static rtx
11460 gen_load_of_final_value (rtx reg, rtx final_value)
11462 rtx seq;
11463 start_sequence ();
11464 final_value = force_operand (final_value, reg);
11465 if (final_value != reg)
11466 emit_move_insn (reg, final_value);
11467 seq = get_insns ();
11468 end_sequence ();
11469 return seq;
11472 /* If the loop has multiple exits, emit insn for PATTERN before the
11473 loop to ensure that it will always be executed no matter how the
11474 loop exits. Otherwise, emit the insn for PATTERN after the loop,
11475 since this is slightly more efficient. */
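/* E.g. when emitting the final-value assignment of an induction
   variable: with no recorded exits it can simply be sunk after the
   loop, but if there are early exits it is hoisted into the
   pre-header, which every path through the loop executes. */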
11477 static rtx
11478 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
11480 if (loop->exit_count)
11481 return loop_insn_hoist (loop, pattern);
11482 else
11483 return loop_insn_sink (loop, pattern);
11486 static void
11487 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
11489 struct iv_class *bl;
11490 int iv_num = 0;
11492 if (! loop || ! file)
11493 return;
11495 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11496 iv_num++;
11498 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
11500 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
11502 loop_iv_class_dump (bl, file, verbose);
11503 fputc ('\n', file);
11508 static void
11509 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
11510 int verbose ATTRIBUTE_UNUSED)
11512 struct induction *v;
11513 rtx incr;
11514 int i;
11516 if (! bl || ! file)
11517 return;
11519 fprintf (file, "IV class for reg %d, benefit %d\n",
11520 bl->regno, bl->total_benefit);
11522 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
11523 if (bl->initial_value)
11525 fprintf (file, ", init val: ");
11526 print_simple_rtl (file, bl->initial_value);
11528 if (bl->initial_test)
11530 fprintf (file, ", init test: ");
11531 print_simple_rtl (file, bl->initial_test);
11533 fputc ('\n', file);
11535 if (bl->final_value)
11537 fprintf (file, " Final val: ");
11538 print_simple_rtl (file, bl->final_value);
11539 fputc ('\n', file);
11542 if ((incr = biv_total_increment (bl)))
11544 fprintf (file, " Total increment: ");
11545 print_simple_rtl (file, incr);
11546 fputc ('\n', file);
11549 /* List the increments. */
11550 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
11552 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
11553 print_simple_rtl (file, v->add_val);
11554 fputc ('\n', file);
11557 /* List the givs. */
11558 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
11560 fprintf (file, " Giv%d: insn %d, benefit %d, ",
11561 i, INSN_UID (v->insn), v->benefit);
11562 if (v->giv_type == DEST_ADDR)
11563 print_simple_rtl (file, v->mem);
11564 else
11565 print_simple_rtl (file, single_set (v->insn));
11566 fputc ('\n', file);
11571 static void
11572 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
11574 if (! v || ! file)
11575 return;
11577 fprintf (file,
11578 "Biv %d: insn %d",
11579 REGNO (v->dest_reg), INSN_UID (v->insn));
11580 fprintf (file, " const ");
11581 print_simple_rtl (file, v->add_val);
11583 if (verbose && v->final_value)
11585 fputc ('\n', file);
11586 fprintf (file, " final ");
11587 print_simple_rtl (file, v->final_value);
11590 fputc ('\n', file);
11594 static void
11595 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
11597 if (! v || ! file)
11598 return;
11600 if (v->giv_type == DEST_REG)
11601 fprintf (file, "Giv %d: insn %d",
11602 REGNO (v->dest_reg), INSN_UID (v->insn));
11603 else
11604 fprintf (file, "Dest address: insn %d",
11605 INSN_UID (v->insn));
11607 fprintf (file, " src reg %d benefit %d",
11608 REGNO (v->src_reg), v->benefit);
11609 fprintf (file, " lifetime %d",
11610 v->lifetime);
11612 if (v->replaceable)
11613 fprintf (file, " replaceable");
11615 if (v->no_const_addval)
11616 fprintf (file, " ncav");
11618 if (v->ext_dependent)
11620 switch (GET_CODE (v->ext_dependent))
11622 case SIGN_EXTEND:
11623 fprintf (file, " ext se");
11624 break;
11625 case ZERO_EXTEND:
11626 fprintf (file, " ext ze");
11627 break;
11628 case TRUNCATE:
11629 fprintf (file, " ext tr");
11630 break;
11631 default:
11632 abort ();
11636 fputc ('\n', file);
11637 fprintf (file, " mult ");
11638 print_simple_rtl (file, v->mult_val);
11640 fputc ('\n', file);
11641 fprintf (file, " add ");
11642 print_simple_rtl (file, v->add_val);
11644 if (verbose && v->final_value)
11646 fputc ('\n', file);
11647 fprintf (file, " final ");
11648 print_simple_rtl (file, v->final_value);
11651 fputc ('\n', file);
11655 void
11656 debug_ivs (const struct loop *loop)
11658 loop_ivs_dump (loop, stderr, 1);
11662 void
11663 debug_iv_class (const struct iv_class *bl)
11665 loop_iv_class_dump (bl, stderr, 1);
11669 void
11670 debug_biv (const struct induction *v)
11672 loop_biv_dump (v, stderr, 1);
11676 void
11677 debug_giv (const struct induction *v)
11679 loop_giv_dump (v, stderr, 1);
11683 #define LOOP_BLOCK_NUM_1(INSN) \
11684 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
11686 /* The notes do not have an assigned block, so look at the next insn. */
11687 #define LOOP_BLOCK_NUM(INSN) \
11688 ((INSN) ? (NOTE_P (INSN) \
11689 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
11690 : LOOP_BLOCK_NUM_1 (INSN)) \
11691 : -1)
11693 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
11695 static void
11696 loop_dump_aux (const struct loop *loop, FILE *file,
11697 int verbose ATTRIBUTE_UNUSED)
11699 rtx label;
11701 if (! loop || ! file || !BB_HEAD (loop->first))
11702 return;
11704 /* Print diagnostics to compare our concept of a loop with
11705 what the loop notes say. */
11706 if (! PREV_INSN (BB_HEAD (loop->first))
11707 || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
11708 || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
11709 != NOTE_INSN_LOOP_BEG)
11710 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
11711 INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
11712 if (! NEXT_INSN (BB_END (loop->last))
11713 || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
11714 || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
11715 != NOTE_INSN_LOOP_END)
11716 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
11717 INSN_UID (NEXT_INSN (BB_END (loop->last))));
11719 if (loop->start)
11721 fprintf (file,
11722 ";; start %d (%d), end %d (%d)\n",
11723 LOOP_BLOCK_NUM (loop->start),
11724 LOOP_INSN_UID (loop->start),
11725 LOOP_BLOCK_NUM (loop->end),
11726 LOOP_INSN_UID (loop->end));
11727 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
11728 LOOP_BLOCK_NUM (loop->top),
11729 LOOP_INSN_UID (loop->top),
11730 LOOP_BLOCK_NUM (loop->scan_start),
11731 LOOP_INSN_UID (loop->scan_start));
11732 fprintf (file, ";; exit_count %d", loop->exit_count);
11733 if (loop->exit_count)
11735 fputs (", labels:", file);
11736 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
11738 fprintf (file, " %d ",
11739 LOOP_INSN_UID (XEXP (label, 0)));
11742 fputs ("\n", file);
11746 /* Call this function from the debugger to dump LOOP. */
11748 void
11749 debug_loop (const struct loop *loop)
11751 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
11754 /* Call this function from the debugger to dump LOOPS. */
11756 void
11757 debug_loops (const struct loops *loops)
11759 flow_loops_dump (loops, stderr, loop_dump_aux, 1);