/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995,
   1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.

   Basic induction variables (BIVs) are pseudo registers that are set
   within a loop only by incrementing or decrementing their value.
   General induction variables (GIVs) are pseudo registers whose value
   is a linear function of a basic induction variable.  BIVs are
   recognized by `basic_induction_var'; GIVs by `general_induction_var'.

   Once induction variables are identified, strength reduction is applied
   to the general induction variables, and induction variable elimination
   is applied to the basic induction variables.

   The pass also finds cases where a register is set within the loop
   by zero-extending a narrower value, and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in the heuristics that decide when it is
   worthwhile to do these things.  */
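
/* An illustrative example (a sketch, not taken from the original
   sources): in a C loop such as

       for (i = 0; i < n; i++)
	 a[i] = 0;

   the counter `i' is a biv, since it changes only via the constant
   increment, while the address `a + i * sizeof (*a)' is a giv, a
   linear function of `i'.  Strength reduction rewrites the giv as a
   pointer bumped by a constant each iteration, roughly

       for (p = a; p < a + n; p++)
	 *p = 0;

   after which induction variable elimination can often remove `i'
   entirely.  */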
48 #include "coretypes.h"
54 #include "hard-reg-set.h"
55 #include "basic-block.h"
56 #include "insn-config.h"
65 #include "insn-flags.h"
/* Get the loop info pointer of a loop.  */
#define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux)

/* Get a pointer to the loop movables structure.  */
#define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables)

/* Get a pointer to the loop registers structure.  */
#define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs)

/* Get a pointer to the loop induction variables structure.  */
#define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs)
/* Get the luid of an insn.  Catch the error of trying to reference the LUID
   of an insn added during loop, since these don't have LUIDs.  */

#define INSN_LUID(INSN) \
  (gcc_assert (INSN_UID (INSN) < max_uid_for_loop), uid_luid[INSN_UID (INSN)])

#define REGNO_FIRST_LUID(REGNO) \
  (REGNO_FIRST_UID (REGNO) < max_uid_for_loop \
   ? uid_luid[REGNO_FIRST_UID (REGNO)] \
   : 0)
#define REGNO_LAST_LUID(REGNO) \
  (REGNO_LAST_UID (REGNO) < max_uid_for_loop \
   ? uid_luid[REGNO_LAST_UID (REGNO)] \
   : INT_MAX)
97 /* A "basic induction variable" or biv is a pseudo reg that is set
98 (within this loop) only by incrementing or decrementing it. */
99 /* A "general induction variable" or giv is a pseudo reg whose
100 value is a linear function of a biv. */
102 /* Bivs are recognized by `basic_induction_var';
103 Givs by `general_induction_var'. */
105 /* An enum for the two different types of givs, those that are used
106 as memory addresses and those that are calculated into registers. */
/* A `struct induction' is created for every instruction that sets
   an induction variable (either a biv or a giv).  */

struct induction
{
  rtx insn;			/* The insn that sets a biv or giv.  */
  rtx new_reg;			/* New register, containing strength reduced
				   version of this giv.  */
  rtx src_reg;			/* Biv from which this giv is computed.
				   (If this is a biv, then this is the biv.)  */
  enum g_types giv_type;	/* Indicates whether DEST_ADDR or DEST_REG.  */
  rtx dest_reg;			/* Destination register for insn: this is the
				   register which was the biv or giv.
				   For a biv, this equals src_reg.
				   For a DEST_ADDR type giv, this is 0.  */
  rtx *location;		/* Place in the insn where this giv occurs.
				   If GIV_TYPE is DEST_REG, this is 0.  */
				/* For a biv, this is the place where add_val
				   was found.  */
  enum machine_mode mode;	/* The mode of this biv or giv.  */
  rtx mem;			/* For DEST_ADDR, the memory object.  */
  rtx mult_val;			/* Multiplicative factor for src_reg.  */
  rtx add_val;			/* Additive constant for that product.  */
  int benefit;			/* Gain from eliminating this insn.  */
  rtx final_value;		/* If the giv is used outside the loop, and its
				   final value could be calculated, it is put
				   here, and the giv is made replaceable.  Set
				   the giv to this value before the loop.  */
  unsigned combined_with;	/* The number of givs this giv has been
				   combined with.  If nonzero, this giv
				   cannot combine with any other giv.  */
  unsigned replaceable : 1;	/* 1 if we can substitute the strength-reduced
				   variable for the original variable.
				   0 means they must be kept separate and the
				   new one must be copied into the old pseudo
				   reg each time the old one is set.  */
  unsigned not_replaceable : 1;	/* Used to prevent duplicating work.  This is
				   1 if we know that the giv definitely can
				   not be made replaceable, in which case we
				   don't bother checking the variable again
				   even if further info is available.
				   Both this and the above can be zero.  */
  unsigned ignore : 1;		/* 1 prohibits further processing of giv.  */
  unsigned always_computable : 1;
				/* 1 if this value is computable every
				   iteration.  */
  unsigned always_executed : 1; /* 1 if this set occurs each iteration.  */
  unsigned maybe_multiple : 1;	/* Only used for a biv and 1 if this biv
				   update may be done multiple times per
				   iteration.  */
  unsigned cant_derive : 1;	/* For givs, 1 if this giv cannot derive
				   another giv.  This occurs in many cases
				   where a giv's lifetime spans an update to
				   a biv.  */
  unsigned maybe_dead : 1;	/* 1 if this giv might be dead.  In that case,
				   we won't use it to eliminate a biv, it
				   would probably lose.  */
  unsigned auto_inc_opt : 1;	/* 1 if this giv had its increment output next
				   to it to try to form an auto-inc address.  */
  unsigned no_const_addval : 1; /* 1 if add_val does not contain a const.  */
  int lifetime;			/* Length of life of this giv.  */
  rtx derive_adjustment;	/* If nonzero, is an adjustment to be
				   subtracted from add_val when this giv
				   derives another.  This occurs when the
				   giv spans a biv update by incrementation.  */
  rtx ext_dependent;		/* If nonzero, is a sign or zero extension
				   of a biv on which this giv is dependent.  */
  struct induction *next_iv;	/* For givs, links together all givs that are
				   based on the same biv.  For bivs, links
				   together all biv entries that refer to the
				   same biv register.  */
  struct induction *same;	/* For givs, if the giv has been combined with
				   another giv, this points to the base giv.
				   The base giv will have COMBINED_WITH nonzero.
				   For bivs, if the biv has the same LOCATION
				   as another biv, this points to the base
				   biv.  */
  struct induction *same_insn;	/* If there are multiple identical givs in
				   the same insn, then all but one have this
				   field set, and they all point to the giv
				   that doesn't have this field set.  */
  rtx last_use;			/* For a giv made from a biv increment, this is
				   a substitute for the lifetime information.  */
};
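
/* Illustrative field values (an assumed example): for a DEST_ADDR giv
   describing the address `a + i*4' of a 4-byte element accessed in
   the loop, src_reg is the biv `i', mult_val is (const_int 4),
   add_val is the invariant address `a', and mem is the MEM rtx that
   uses the address.  */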
/* A `struct iv_class' is created for each biv.  */

struct iv_class
{
  unsigned int regno;		/* Pseudo reg which is the biv.  */
  int biv_count;		/* Number of insns setting this reg.  */
  struct induction *biv;	/* List of all insns that set this reg.  */
  int giv_count;		/* Number of DEST_REG givs computed from this
				   biv.  The resulting count is only used in
				   check_dbra_loop.  */
  struct induction *giv;	/* List of all insns that compute a giv
				   from this reg.  */
  int total_benefit;		/* Sum of BENEFITs of all those givs.  */
  rtx initial_value;		/* Value of reg at loop start.  */
  rtx initial_test;		/* Test performed on BIV before loop.  */
  rtx final_value;		/* Value of reg at loop end, if known.  */
  struct iv_class *next;	/* Links all class structures together.  */
  rtx init_insn;		/* insn which initializes biv, 0 if none.  */
  rtx init_set;			/* SET of INIT_INSN, if any.  */
  unsigned incremented : 1;	/* 1 if somewhere incremented/decremented.  */
  unsigned eliminable : 1;	/* 1 if plausible candidate for
				   elimination.  */
  unsigned nonneg : 1;		/* 1 if we added a REG_NONNEG note for
				   this.  */
  unsigned reversed : 1;	/* 1 if we reversed the loop that this
				   biv controls.  */
  unsigned all_reduced : 1;	/* 1 if all givs using this biv have
				   been reduced.  */
};
/* Definitions used by the basic induction variable discovery code.  */
enum iv_mode
{
  UNKNOWN_INDUCT,
  BASIC_INDUCT,
  NOT_BASIC_INDUCT,
  GENERAL_INDUCT
};

/* A `struct iv' is created for every register.  */

struct iv
{
  enum iv_mode type;
  union
  {
    struct iv_class *class;
    struct induction *info;
  } iv;
};

#define REG_IV_TYPE(ivs, n) ivs->regs[n].type
#define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info
#define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class

struct loop_ivs
{
  /* Indexed by register number, contains pointer to `struct
     iv' if register is an induction variable.  */
  struct iv *regs;

  /* Size of regs array.  */
  unsigned int n_regs;

  /* The head of a list which links together (via the next field)
     every iv class for the current loop.  */
  struct iv_class *list;
};
typedef struct loop_mem_info
{
  rtx mem;			/* The MEM itself.  */
  rtx reg;			/* Corresponding pseudo, if any.  */
  int optimize;			/* Nonzero if we can optimize access to
				   this MEM.  */
} loop_mem_info;
struct loop_reg
{
  /* Number of times the reg is set during the loop being scanned.
     During code motion, a negative value indicates a reg that has
     been made a candidate; in particular -2 means that it is a
     candidate that we know is equal to a constant and -1 means that
     it is a candidate not known equal to a constant.  After code
     motion, regs moved have 0 (which is accurate now) while the
     failed candidates have the original number of times set.

     Therefore, at all times, == 0 indicates an invariant register;
     < 0 a conditionally invariant one.  */
  int set_in_loop;

  /* Original value of set_in_loop; same except that this value
     is not set negative for a reg whose sets have been made candidates
     and not set to 0 for a reg that is moved.  */
  int n_times_set;

  /* Contains the insn in which a register was used if it was used
     exactly once; contains const0_rtx if it was used more than once.  */
  rtx single_usage;

  /* Nonzero indicates that the register cannot be moved or strength
     reduced.  */
  char may_not_optimize;

  /* Nonzero means reg N has already been moved out of one loop.
     This reduces the desire to move it out of another.  */
  char moved_once;
};

struct loop_regs
{
  int num;			/* Number of regs used in table.  */
  int size;			/* Size of table.  */
  struct loop_reg *array;	/* Register usage info array.  */
  int multiple_uses;		/* Nonzero if a reg has multiple uses.  */
};
struct loop_movables
{
  /* Head of movable chain.  */
  struct movable *head;
  /* Last movable in chain.  */
  struct movable *last;
};
/* Information pertaining to a loop.  */

struct loop_info
{
  /* Nonzero if there is a subroutine call in the current loop.  */
  int has_call;
  /* Nonzero if there is a libcall in the current loop.  */
  int has_libcall;
  /* Nonzero if there is a non-constant call in the current loop.  */
  int has_nonconst_call;
  /* Nonzero if there is a prefetch instruction in the current loop.  */
  int has_prefetch;
  /* Nonzero if there is a volatile memory reference in the current
     loop.  */
  int has_volatile;
  /* Nonzero if there is a tablejump in the current loop.  */
  int has_tablejump;
  /* Nonzero if there are ways to leave the loop other than falling
     off the end.  */
  int has_multiple_exit_targets;
  /* Nonzero if there is an indirect jump in the current function.  */
  int has_indirect_jump;
  /* Register or constant initial loop value.  */
  rtx initial_value;
  /* Register or constant value used for comparison test.  */
  rtx comparison_value;
  /* Register or constant approximate final value.  */
  rtx final_value;
  /* Register or constant initial loop value with term common to
     final_value removed.  */
  rtx initial_equiv_value;
  /* Register or constant final loop value with term common to
     initial_value removed.  */
  rtx final_equiv_value;
  /* Register corresponding to iteration variable.  */
  rtx iteration_var;
  /* Constant loop increment.  */
  rtx increment;
  enum rtx_code comparison_code;
  /* Holds the number of loop iterations.  It is zero if the number
     could not be calculated.  Must be unsigned since the number of
     iterations can be as high as 2^wordsize - 1.  For loops with a
     wider iterator, this number will be zero if the number of loop
     iterations is too large for an unsigned integer to hold.  */
  unsigned HOST_WIDE_INT n_iterations;
  int used_count_register;
  /* The loop iterator induction variable.  */
  struct iv_class *iv;
  /* List of MEMs that are stored in this loop.  */
  rtx store_mems;
  /* Array of MEMs that are used (read or written) in this loop, but
     cannot be aliased by anything in this loop, except perhaps
     themselves.  In other words, if mems[i] is altered during
     the loop, it is altered by an expression that is rtx_equal_p to
     it.  */
  loop_mem_info *mems;
  /* The index of the next available slot in MEMS.  */
  int mems_idx;
  /* The number of elements allocated in MEMS.  */
  int mems_allocated;
  /* Nonzero if we don't know what MEMs were changed in the current
     loop.  This happens if the loop contains a call (in which case
     `has_call' will also be set) or if we store into more than
     NUM_STORES MEMs.  */
  int unknown_address_altered;
  /* The above doesn't count any readonly memory locations that are
     stored.  This does.  */
  int unknown_constant_address_altered;
  /* Count of memory write instructions discovered in the loop.  */
  int num_mem_sets;
  /* The insn where the first of these was found.  */
  rtx first_loop_store_insn;
  /* The chain of movable insns in loop.  */
  struct loop_movables movables;
  /* The registers used in the loop.  */
  struct loop_regs regs;
  /* The induction variable information in the loop.  */
  struct loop_ivs ivs;
  /* Nonzero if call is in pre_header extended basic block.  */
  int pre_header_has_call;
};
/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (gcc_unreachable (), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum percentage of memory references that is worth
   prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif
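
/* Illustrative arithmetic only: with the default of 220, memory
   references must make up at least 220/256, i.e. roughly 86%, of the
   relevant references before prefetching is considered worthwhile.  */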

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif
#define LOOP_REG_LIFETIME(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
((REGNO) < FIRST_PSEUDO_REGISTER \
 ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

static int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

static struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

static int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
static unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn.  */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets.  */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;
				/* Nonzero means it is a mode for
				   a low part that we should avoid changing when
				   clearing the rest of the reg.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable.  */
  unsigned int force : 1;	/* 1 means MUST move this insn.  */
  unsigned int global : 1;	/* 1 means reg is live outside this loop.  */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this.  */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first : 1;
				/* Same as above, if this is necessary for the
				   first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1;	/* 1 means we copy to a new pseudo and replace
				   the original insn with a copy from that
				   pseudo, rather than deleting it.  */
  struct movable *match;	/* First entry for same value.  */
  struct movable *forces;	/* An insn that must be moved if this one is.  */
  struct movable *next;
};
static FILE *loop_dump_stream;
/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static rtx find_regs_nested (rtx, rtx);
static void scan_loop (struct loop *, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static rtx libcall_other_reg (rtx, rtx);
static void record_excess_regs (rtx, rtx, rtx *);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static HOST_WIDE_INT get_monotonic_increment (struct iv_class *);
static bool biased_biv_fits_mode_p (const struct loop *, struct iv_class *,
				    HOST_WIDE_INT, enum machine_mode,
				    unsigned HOST_WIDE_INT);
static bool biv_fits_mode_p (const struct loop *, struct iv_class *,
			     HOST_WIDE_INT, enum machine_mode, bool);
static bool extension_within_bounds_p (const struct loop *, struct iv_class *,
				       HOST_WIDE_INT, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
static int loop_invariant_p (const struct loop *, rtx);
static rtx loop_insn_hoist (const struct loop *, rtx);
static void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx,
					  rtx, basic_block, rtx);
static rtx loop_insn_emit_before (const struct loop *, basic_block,
				  rtx, rtx);
static int loop_insn_first_p (rtx, rtx);
static rtx get_condition_for_loop (const struct loop *, rtx);
static void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx);
static void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx);
static rtx extend_value_for_giv (struct induction *, rtx);
static rtx loop_insn_sink (const struct loop *, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);
typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;
/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END) \
  (INSN_UID (INSN) < max_uid_for_loop \
   && INSN_LUID (INSN) >= INSN_LUID (START) \
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);
/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */

static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (!NOTE_P (insn)
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  return i + 1;
}
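
/* Illustrative example (not part of the pass itself): if three insns
   appear in stream order with uids 7, 3 and 9, compute_luids assigns
   them luids 1, 2 and 3, so luid comparisons answer "which insn comes
   first in the stream?" even though the uids are not ordered.  */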
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (NOTE_P (insn)
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  gcc_assert (get_max_uid () <= max_uid_for_loop);
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	{
	  scan_loop (loop, flags);
	  ggc_collect ();
	}
    }

  end_alias_analysis ();

  /* Clean up.  */
  for (i = 0; i < (int) loops->num; i++)
    free (loops_info[i].mems);

  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
/* Find any register references hidden inside X and add them to
   the dependency list DEPS.  This is used to look inside CLOBBER (MEM ...)
   when checking whether a PARALLEL can be pulled out of a loop.  */

static rtx
find_regs_nested (rtx deps, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  if (code == REG)
    deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
  else
    {
      const char *fmt = GET_RTX_FORMAT (code);
      int i, j;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    deps = find_regs_nested (deps, XEXP (x, i));
	  else if (fmt[i] == 'E')
	    for (j = 0; j < XVECLEN (x, i); j++)
	      deps = find_regs_nested (deps, XVECEXP (x, i, j));
	}
    }
  return deps;
}
/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
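
  /* Illustrative shape of such a loop (a sketch, not a guaranteed
     pattern): a while-loop is often emitted as

	      jump TEST
	 TOP: body
	TEST: if (cond) jump TOP

     so the first real insn after NOTE_INSN_LOOP_BEG is an
     unconditional jump down to the exit test.  */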

  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && !LABEL_P (p) && ! INSN_P (p)
	 && (!NOTE_P (p)
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (JUMP_P (p)
      /* Loop entry must be unconditional jump (and not a RETURN).  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
	 jumps out of the loop (meaning it's no loop).
	 This case can happen for things like
	 do {..} while (0).  If this label was generated previously
	 by loop, we can't tell anything about it and have to reject
	 the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || !LABEL_P (loop->scan_start))
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	     INSN_UID (loop_start), INSN_UID (loop_end), insn_count);

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
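
  /* Illustrative case for MAYBE_NEVER (an assumed example): in

	 for (i = 0; i < n; i++)
	   if (p)
	     x = 1 / d;

     once the conditional jump for `if (p)' has been passed, the
     division might execute zero times per entry to the loop, so it
     must not be hoisted to a place where it would execute
     unconditionally; it can trap.  */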

  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall--;
      if (NONJUMP_INSN_P (p))
	{
	  /* Do not scan past an optimization barrier.  */
	  if (GET_CODE (PATTERN (p)) == ASM_INPUT)
	    break;
	  temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
	  if (temp)
	    in_libcall++;
	  if (! in_libcall
	      && (set = single_set (p))
	      && REG_P (SET_DEST (set))
	      && SET_DEST (set) != frame_pointer_rtx
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	      && SET_DEST (set) != pic_offset_table_rtx
#endif
	      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	    {
	      int tem1 = 0;
	      int tem2 = 0;
	      int move_insn = 0;
	      int insert_temp = 0;
	      rtx src = SET_SRC (set);
	      rtx dependencies = 0;

	      /* Figure out what to use as a source of this insn.  If a
		 REG_EQUIV note is given or if a REG_EQUAL note with a
		 constant operand is specified, use it as the source and
		 mark that we should move this insn by calling
		 emit_move_insn rather than duplicating the insn.

		 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
		 note is present.  */
	      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	      if (temp)
		src = XEXP (temp, 0), move_insn = 1;
	      else
		{
		  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		  if (temp && CONSTANT_P (XEXP (temp, 0)))
		    src = XEXP (temp, 0), move_insn = 1;
		  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		    {
		      src = XEXP (temp, 0);
		      /* A libcall block can use regs that don't appear in
			 the equivalent expression.  To move the libcall,
			 we must move those regs too.  */
		      dependencies = libcall_other_reg (p, src);
		    }
		}

	      /* For parallels, add any possible uses to the dependencies, as
		 we can't move the insn without resolving them first.
		 MEMs inside CLOBBERs may also reference registers; these
		 count as implicit uses.  */
	      if (GET_CODE (PATTERN (p)) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		    {
		      rtx x = XVECEXP (PATTERN (p), 0, i);
		      if (GET_CODE (x) == USE)
			dependencies
			  = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					       dependencies);
		      else if (GET_CODE (x) == CLOBBER
			       && MEM_P (XEXP (x, 0)))
			dependencies = find_regs_nested (dependencies,
						 XEXP (XEXP (x, 0), 0));
		    }
		}

	      if (/* The register is used in basic blocks other
		     than the one where it is set (meaning that
		     something after this point in the loop might
		     depend on its value before the set).  */
		  ! reg_in_basic_block_p (p, SET_DEST (set))
		  /* And the set is not guaranteed to be executed once
		     the loop starts, or the value before the set is
		     needed before the set occurs...

		     ??? Note we have quadratic behavior here, mitigated
		     by the fact that the previous test will often fail for
		     large loops.  Rather than re-scanning the entire loop
		     each time for register usage, we should build tables
		     of the register usage and use them here instead.  */
		  && (maybe_never
		      || loop_reg_used_before_p (loop, set, p)))
		/* It is unsafe to move the set.  However, it may be OK to
		   move the source into a new pseudo, and substitute a
		   reg-to-reg copy for the original insn.

		   This code used to consider it OK to move a set of a variable
		   which was not created by the user and not used in an exit
		   test.
		   That behavior is incorrect and was removed.  */
		insert_temp = 1;

	      /* Don't try to optimize a MODE_CC set with a constant
		 source.  It probably will be combined with a conditional
		 jump.  */
	      if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
		  && CONSTANT_P (src))
		;
	      /* Don't try to optimize a register that was made
		 by loop-optimization for an inner loop.
		 We don't know its life-span, so we can't compute
		 the benefit.  */
	      else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
		;
	      /* Don't move the source and add a reg-to-reg copy:
		 - with -Os (this certainly increases size),
		 - if the mode doesn't support copy operations (obviously),
		 - if the source is already a reg (the motion will gain nothing),
		 - if the source is a legitimate constant (likewise).  */
	      else if (insert_temp
		       && (optimize_size
			   || ! can_copy_p (GET_MODE (SET_SRC (set)))
			   || REG_P (SET_SRC (set))
			   || (CONSTANT_P (SET_SRC (set))
			       && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
		;
	      else if ((tem = loop_invariant_p (loop, src))
		       && (dependencies == 0
			   || (tem2
			       = loop_invariant_p (loop, dependencies)) != 0)
		       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
			   || (tem1
			       = consec_sets_invariant_p
			       (loop, SET_DEST (set),
				regs->array[REGNO (SET_DEST (set))].set_in_loop,
				p)))
		       /* If the insn can cause a trap (such as divide by zero),
			  can't move it unless it's guaranteed to be executed
			  once loop is entered.  Even a function call might
			  prevent the trap insn from being reached
			  (since it might exit!)  */
		       && ! ((maybe_never || call_passed)
			     && may_trap_p (src)))
		{
		  struct movable *m;
		  int regno = REGNO (SET_DEST (set));

		  /* A potential lossage is where we have a case where two insns
		     can be combined as long as they are both in the loop, but
		     we move one of them outside the loop.  For large loops,
		     this can lose.  The most common case of this is the address
		     of a function being called.

		     Therefore, if this register is marked as being used
		     exactly once, and we are in a loop with calls
		     (a "large loop"), see if we can replace the usage of
		     this register with the source of this SET.  If we can,
		     delete this insn.

		     Don't do this if P has a REG_RETVAL note or if we have
		     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
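
		  /* A sketch of the situation described above (an
		     assumed example): with

			 r = &f;
			 ...
			 call [r]

		     both inside the loop, hoisting `r = &f' would keep
		     the address from being folded back into the call,
		     so we substitute `&f' into the single use and
		     delete the set instead.  */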

		  if (loop_info->has_call
		      && regs->array[regno].single_usage != 0
		      && regs->array[regno].single_usage != const0_rtx
		      && REGNO_FIRST_UID (regno) == INSN_UID (p)
		      && (REGNO_LAST_UID (regno)
			  == INSN_UID (regs->array[regno].single_usage))
		      && regs->array[regno].set_in_loop == 1
		      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		      && ! side_effects_p (SET_SRC (set))
		      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		      && (! SMALL_REGISTER_CLASSES
			  || (! (REG_P (SET_SRC (set))
				 && (REGNO (SET_SRC (set))
				     < FIRST_PSEUDO_REGISTER))))
		      && regno >= FIRST_PSEUDO_REGISTER
		      /* This test is not redundant; SET_SRC (set) might be
			 a call-clobbered register and the life of REGNO
			 might span a call.  */
		      && ! modified_between_p (SET_SRC (set), p,
					       regs->array[regno].single_usage)
		      && no_labels_between_p (p,
					      regs->array[regno].single_usage)
		      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					       regs->array[regno].single_usage))
		    {
		      /* Replace any usage in a REG_EQUAL note.  Must copy
			 the new source, so that we don't get rtx sharing
			 between the SET_SOURCE and REG_NOTES of insn p.  */
		      REG_NOTES (regs->array[regno].single_usage)
			= (replace_rtx
			   (REG_NOTES (regs->array[regno].single_usage),
			    SET_DEST (set), copy_rtx (SET_SRC (set))));

		      delete_insn (p);
		      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = 0;
		      continue;
		    }

		  m = xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_src = src;
		  m->dependencies = dependencies;
		  m->set_dest = SET_DEST (set);
		  m->force = 0;
		  m->consec
		    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
		  m->done = 0;
		  m->forces = 0;
		  m->partial = 0;
		  m->move_insn = move_insn;
		  m->move_insn_first = 0;
		  m->insert_temp = insert_temp;
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		  m->savemode = VOIDmode;
		  m->regno = regno;
		  /* Set M->cond if either loop_invariant_p
		     or consec_sets_invariant_p returned 2
		     (only conditionally invariant).  */
		  m->cond = ((tem | tem1 | tem2) > 1);
		  m->global = LOOP_REG_GLOBAL_P (loop, regno);
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = regs->array[regno].n_times_set;
		  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		    m->savings += libcall_benefit (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);

		  if (m->consec > 0)
		    {
		      /* It is possible for the first instruction to have a
			 REG_EQUAL note but a non-invariant SET_SRC, so we must
			 remember the status of the first instruction in case
			 the last instruction doesn't have a REG_EQUAL note.  */
		      m->move_insn_first = m->move_insn;

		      /* Skip this insn, not checking REG_LIBCALL notes.  */
		      p = next_nonnote_insn (p);
		      /* Skip the consecutive insns, if there are any.  */
		      p = skip_consec_insns (p, m->consec);
		      /* Back up to the last insn of the consecutive group.  */
		      p = prev_nonnote_insn (p);

		      /* We must now reset m->move_insn, m->is_equiv, and
			 possibly m->set_src to correspond to the effects of
			 all the insns.  */
		      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		      if (temp)
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			{
			  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
			  if (temp && CONSTANT_P (XEXP (temp, 0)))
			    m->set_src = XEXP (temp, 0), m->move_insn = 1;
			  else
			    m->move_insn = 0;
			}
		      m->is_equiv
			= (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		    }
		}
	      /* If this register is always set within a STRICT_LOW_PART
		 or set to zero, then its high bytes are constant.
		 So clear them outside the loop and within the loop
		 just load the low bytes.
		 We must check that the machine has an instruction to do so.
		 Also, if the value loaded into the register
		 depends on the same register, this cannot be done.  */
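	      /* An RTL sketch of the pattern handled below (assumed
		 shapes, for illustration only):

		     (set (reg:SI r) (const_int 0))
		     (set (strict_low_part (subreg:HI (reg:SI r) 0))
			  (mem:HI ...))

		 The clearing insn can be hoisted before the loop,
		 leaving only the narrow low-part load inside it.  */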
	      else if (SET_SRC (set) == const0_rtx
		       && NONJUMP_INSN_P (NEXT_INSN (p))
		       && (set1 = single_set (NEXT_INSN (p)))
		       && GET_CODE (set1) == SET
		       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
			   == SET_DEST (set))
		       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
		{
		  int regno = REGNO (SET_DEST (set));
		  if (regs->array[regno].set_in_loop == 2)
		    {
		      struct movable *m;
		      m = xmalloc (sizeof (struct movable));
		      m->next = 0;
		      m->insn = p;
		      m->set_dest = SET_DEST (set);
		      m->dependencies = 0;
		      m->force = 0;
		      m->consec = 0;
		      m->done = 0;
		      m->forces = 0;
		      m->move_insn = 0;
		      m->move_insn_first = 0;
		      m->insert_temp = insert_temp;
		      m->partial = 1;
		      /* If the insn may not be executed on some cycles,
			 we can't clear the whole reg; clear just high part.
			 Not even if the reg is used only within this loop.
			 Consider this:
			 while (1)
			   while (s != t) {
			     if (foo ()) x = *s;
			     use (x);
			   }
			 Clearing x before the inner loop could clobber a value
			 being saved from the last time around the outer loop.
			 However, if the reg is not used outside this loop
			 and all uses of the register are in the same
			 basic block as the store, there is no problem.

			 If this insn was made by loop, we don't know its
			 INSN_LUID and hence must make a conservative
			 assumption.  */
		      m->global = (INSN_UID (p) >= max_uid_for_loop
				   || LOOP_REG_GLOBAL_P (loop, regno)
				   || (labels_in_range_p
				       (p, REGNO_FIRST_LUID (regno))));
		      if (maybe_never && m->global)
			m->savemode = GET_MODE (SET_SRC (set1));
		      else
			m->savemode = VOIDmode;
		      m->regno = regno;
		      m->cond = 0;
		      m->match = 0;
		      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		      m->savings = 1;
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = -1;
		      /* Add M to the end of the chain MOVABLES.  */
		      loop_movables_add (movables, m);
		    }
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((LABEL_P (p) || JUMP_P (p))
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop_end.  */
	       && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     For machines with few registers this increases code size, so do not
     move movables when optimizing for code size on such machines.
     (The 18 below is the value for i386.)  */

  if (!optimize_size
      || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
    {
      move_movables (loop, movables, threshold, insn_count);

      /* Recalculate regs->array if move_movables has created new
	 registers.  */
      if (max_reg_num () > regs->num)
	{
	  loop_regs_scan (loop, 0);
	  for (update_start = loop_start;
	       PREV_INSN (update_start)
		 && !LABEL_P (PREV_INSN (update_start));
	       update_start = PREV_INSN (update_start))
	    ;
	  update_end = NEXT_INSN (loop_end);

	  reg_scan_update (update_start, update_end, loop_max_reg);
	  loop_max_reg = max_reg_num ();
	}
    }

  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not actually
     invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
	 && !LABEL_P (PREV_INSN (update_start));
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && LABEL_P (update_end))
	/* Ensure our label doesn't go away.  */
	LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && LABEL_P (update_end)
	  && --LABEL_NUSES (update_end) == 0)
	delete_related_insns (update_end);
    }

  /* The movable information is required for strength reduction.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

static void
record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (rtx insn, rtx equiv)
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (INSN_P (p))
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (rtx insn, rtx reg)
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;

	default:
	  break;
	}
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (rtx last)
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (CALL_P (insn))
        benefit += 10;          /* Assume at least this many insns in a
                                   library routine.  */
      else if (NONJUMP_INSN_P (insn)
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
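
/* Illustrative sketch (an assumption for exposition, not from the original
   sources): for a libcall block consisting of two argument-setup insns, the
   call itself, and one insn copying back the result, the loop above computes

       benefit = 1 + 1 + 10 + 1 = 13

   since the call contributes the flat estimate of 10 and each ordinary
   non-USE, non-CLOBBER insn contributes 1.  */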
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (rtx insn, int count)
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (!NOTE_P (insn)
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do
        insn = NEXT_INSN (insn);
      while (NOTE_P (insn));
    }

  return insn;
}
/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (struct loop_movables *movables)
{
  struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;

          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables->head; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}
/* For each movable insn, see if the reg that it loads leads, when it
   dies, right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (struct loop_movables *movables)
{
  struct movable *m, *m1;

  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          {
            struct movable *m2;

            m->forces = m1;

            /* Increase the priority of moving the first insn
               since it permits the second to be moved as well.
               Likewise for insns already forced by the first insn.  */
            for (m2 = m1; m2; m2 = m2->forces)
              {
                m2->lifetime += m->lifetime;
                m2->savings += m->savings;
              }
          }
      }
}
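
/* A hedged source-level sketch of the "forces" relation (the variable
   names below are invented for illustration):

       t1 = invariant_a;         <- movable M1
       t2 = t1;                  <- movable M, at the last use of t1

   M can be hoisted only if M1 is hoisted first, so M forces M1, and M1's
   priority is raised by M's lifetime and savings.  */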
/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (struct loop_movables *movables, struct loop_regs *regs)
{
  struct movable *m, *m1;
  char *matched_regs = xmalloc (regs->num);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Only pseudo registers are allowed to match or be matched,
     since move_movables does not validate the change.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables->head; m; m = m->next)
    if (m->match == 0 && regs->array[m->regno].n_times_set == 1
        && m->regno >= FIRST_PSEUDO_REGISTER
        && !m->insert_temp
        && !m->partial)
      {
        int regno = m->regno;

        memset (matched_regs, 0, regs->num);
        matched_regs[regno] = 1;

        /* We want later insns to match the first one.  Don't make the first
           one match any later ones.  So start this loop at m->next.  */
        for (m1 = m->next; m1; m1 = m1->next)
          if (m != m1 && m1->match == 0
              && !m1->insert_temp
              && regs->array[m1->regno].n_times_set == 1
              && m1->regno >= FIRST_PSEUDO_REGISTER
              /* A reg used outside the loop mustn't be eliminated.  */
              && !m1->global
              /* A reg used for zero-extending mustn't be eliminated.  */
              && !m1->partial
              && (matched_regs[m1->regno]
                  ||
                  (
                   /* Can combine regs with different modes loaded from the
                      same constant only if the modes are the same or
                      if both are integer modes with M wider or the same
                      width as M1.  The check for integer is redundant, but
                      safe, since the only case of differing destination
                      modes with equal sources is when both sources are
                      VOIDmode, i.e., CONST_INT.  */
                   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
                    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
                        && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
                        && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
                            >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
                   /* See if the source of M1 says it matches M.  */
                   && ((REG_P (m1->set_src)
                        && matched_regs[REGNO (m1->set_src)])
                       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
                                                movables, regs))))
              && ((m->dependencies == m1->dependencies)
                  || rtx_equal_p (m->dependencies, m1->dependencies)))
            {
              m->lifetime += m1->lifetime;
              m->savings += m1->savings;
              m1->done = 1;
              m1->match = m;
              matched_regs[m1->regno] = 1;
            }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
         Don't combine any that are used outside this loop.  */
      for (m = movables->head; m; m = m->next)
        if (m->partial && ! m->global
            && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
          {
            int first = REGNO_FIRST_LUID (m->regno);
            int last = REGNO_LAST_LUID (m->regno);

            if (m0 == 0)
              {
                /* First one: don't check for overlap, just record it.  */
                m0 = m;
              }
            else
              {
                /* Make sure they extend to the same mode.
                   (Almost always true.)  */
                if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
                  continue;

                /* We already have one: check for overlap with those
                   already combined together.  */
                for (m1 = movables->head; m1 != m; m1 = m1->next)
                  if (m1 == m0 || (m1->partial && m1->match == m0))
                    if (! (REGNO_FIRST_LUID (m1->regno) > last
                           || REGNO_LAST_LUID (m1->regno) < first))
                      goto overlap;

                /* No overlap: we can combine this with the others.  */
                m0->lifetime += m->lifetime;
                m0->savings += m->savings;
                m->done = 1;
                m->match = m0;

              overlap:
                ;
              }
          }
    }

  /* Clean up.  */
  free (matched_regs);
}
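
/* For illustration only (the register numbers are hypothetical): if the
   loop contains

       (set (reg 101) (const_int 10))
       (set (reg 107) (const_int 10))

   and both pseudos are set exactly once, the second movable is marked as
   matching the first; move_movables will later hoist a single load of 10
   and redirect uses of (reg 107) to (reg 101).  */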
/* Returns the number of movable instructions in LOOP that were not
   moved outside the loop.  */

static int
num_unmoved_movables (const struct loop *loop)
{
  int num = 0;
  struct movable *m;

  for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
    if (!m->done)
      ++num;

  return num;
}
/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (rtx x, rtx y, struct loop_movables *movables)
{
  unsigned int xn = REGNO (x);
  unsigned int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables->head; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables->head; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
          && ((mx->match == my->match && mx->match != 0)
              || mx->match == my
              || mx == my->match));
}
/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
                      struct loop_regs *regs)
{
  int i;
  int j;
  struct movable *m;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
      && CONSTANT_P (y))
    {
      for (m = movables->head; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (x)
            && rtx_equal_p (m->set_src, y))
          return 1;
    }
  else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
           && CONSTANT_P (x))
    {
      for (m = movables->head; m; m = m->next)
        if (m->move_insn && m->regno == REGNO (y)
            && rtx_equal_p (m->set_src, x))
          return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
                                      movables, regs) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
              == 0)
            return 0;
          break;

        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          gcc_unreachable ();
        }
    }
  return 1;
}
/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use the reference.  LABEL_NUSES for CODE_LABEL
   references is incremented once for each added note.  */

static void
add_label_notes (rtx x, rtx insns)
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables to
         avoid flow generating (slightly) worse code.

         We no longer ignore such label references (see LABEL_REF handling in
         mark_jump_label for additional information).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
        if (reg_mentioned_p (XEXP (x, 0), insn))
          {
            REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
                                                  REG_NOTES (insn));
            if (LABEL_P (XEXP (x, 0)))
              LABEL_NUSES (XEXP (x, 0))++;
          }

      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          add_label_notes (XVECEXP (x, i, j), insns);
    }
}
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

static void
move_movables (struct loop *loop, struct loop_movables *movables,
               int threshold, int insn_count)
{
  struct loop_regs *regs = LOOP_REGS (loop);
  int nregs = regs->num;
  rtx new_start = 0;
  struct movable *m;
  rtx p;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = xcalloc (nregs, sizeof (rtx));
  char *already_moved = xcalloc (nregs, sizeof (char));

  for (m = movables->head; m; m = m->next)
    {
      /* Describe this movable insn.  */

      if (loop_dump_stream)
        {
          fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
                   INSN_UID (m->insn), m->regno, m->lifetime);
          if (m->consec > 0)
            fprintf (loop_dump_stream, "consec %d, ", m->consec);
          if (m->cond)
            fprintf (loop_dump_stream, "cond ");
          if (m->force)
            fprintf (loop_dump_stream, "force ");
          if (m->global)
            fprintf (loop_dump_stream, "global ");
          if (m->done)
            fprintf (loop_dump_stream, "done ");
          if (m->move_insn)
            fprintf (loop_dump_stream, "move-insn ");
          if (m->match)
            fprintf (loop_dump_stream, "matches %d ",
                     INSN_UID (m->match->insn));
          if (m->forces)
            fprintf (loop_dump_stream, "forces %d ",
                     INSN_UID (m->forces->insn));
        }

      /* Ignore the insn if it's already done (it matched something else).
         Otherwise, see if it is now safe to move.  */

      if (!m->done
          && (! m->cond
              || (1 == loop_invariant_p (loop, m->set_src)
                  && (m->dependencies == 0
                      || 1 == loop_invariant_p (loop, m->dependencies))
                  && (m->consec == 0
                      || 1 == consec_sets_invariant_p (loop, m->set_dest,
                                                       m->consec + 1,
                                                       m->insn))))
          && (! m->forces || m->forces->done))
        {
          int regno;
          rtx p;
          int savings = m->savings;

          /* We have an insn that is safe to move.
             Compute its desirability.  */

          p = m->insn;
          regno = m->regno;

          if (loop_dump_stream)
            fprintf (loop_dump_stream, "savings %d ", savings);

          if (regs->array[regno].moved_once && loop_dump_stream)
            fprintf (loop_dump_stream, "halved since already moved ");

          /* An insn MUST be moved if we already moved something else
             which is safe only if this one is moved too: that is,
             if already_moved[REGNO] is nonzero.  */

          /* An insn is desirable to move if the new lifetime of the
             register is no more than THRESHOLD times the old lifetime.
             If it's not desirable, it means the loop is so big
             that moving won't speed things up much,
             and it is liable to make register usage worse.  */

          /* It is also desirable to move if it can be moved at no
             extra cost because something else was already moved.  */

          if (already_moved[regno]
              || (threshold * savings * m->lifetime) >=
                 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
              || (m->forces && m->forces->done
                  && regs->array[m->forces->regno].n_times_set == 1))
            {
              int count;
              struct movable *m1;
              rtx first = NULL_RTX;
              rtx newreg = NULL_RTX;

              if (m->insert_temp)
                newreg = gen_reg_rtx (GET_MODE (m->set_dest));

              /* Now move the insns that set the reg.  */

              if (m->partial && m->match)
                {
                  rtx newpat, i1;
                  rtx r1, r2;

                  /* Find the end of this chain of matching regs.
                     Thus, we load each reg in the chain from that one reg.
                     And that reg is loaded with 0 directly,
                     since it has ->match == 0.  */
                  for (m1 = m; m1->match; m1 = m1->match);
                  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
                                          SET_DEST (PATTERN (m1->insn)));
                  i1 = loop_insn_hoist (loop, newpat);

                  /* Mark the moved, invariant reg as being allowed to
                     share a hard reg with the other matching invariant.  */
                  REG_NOTES (i1) = REG_NOTES (m->insn);
                  r1 = SET_DEST (PATTERN (m->insn));
                  r2 = SET_DEST (PATTERN (m1->insn));
                  regs_may_share
                    = gen_rtx_EXPR_LIST (VOIDmode, r1,
                                         gen_rtx_EXPR_LIST (VOIDmode, r2,
                                                            regs_may_share));
                  delete_insn (m->insn);

                  if (new_start == 0)
                    new_start = i1;

                  if (loop_dump_stream)
                    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
                }
              /* If we are to re-generate the item being moved with a
                 new move insn, first delete what we have and then emit
                 the move insn before the loop.  */
              else if (m->move_insn)
                {
                  rtx i1, temp, seq;

                  for (count = m->consec; count >= 0; count--)
                    {
                      if (!NOTE_P (p))
                        {
                          /* If this is the first insn of a library
                             call sequence, something is very
                             wrong.  */
                          gcc_assert (!find_reg_note
                                      (p, REG_LIBCALL, NULL_RTX));

                          /* If this is the last insn of a libcall
                             sequence, then delete every insn in the
                             sequence except the last.  The last insn
                             is handled in the normal manner.  */
                          temp = find_reg_note (p, REG_RETVAL, NULL_RTX);

                          if (temp)
                            {
                              temp = XEXP (temp, 0);
                              while (temp != p)
                                temp = delete_insn (temp);
                            }
                        }

                      temp = p;
                      p = delete_insn (p);

                      /* simplify_giv_expr expects that it can walk the insns
                         at m->insn forwards and see this old sequence we are
                         tossing here.  delete_insn does preserve the next
                         pointers, but when we skip over a NOTE we must fix
                         it up.  Otherwise that code walks into the non-deleted
                         insn stream.  */
                      while (p && NOTE_P (p))
                        p = NEXT_INSN (temp) = NEXT_INSN (p);

                      if (m->insert_temp)
                        {
                          /* Replace the original insn with a move from
                             our newly created temp.  */
                          start_sequence ();
                          emit_move_insn (m->set_dest, newreg);
                          seq = get_insns ();
                          end_sequence ();

                          emit_insn_before (seq, p);
                        }
                    }

                  start_sequence ();
                  emit_move_insn (m->insert_temp ? newreg : m->set_dest,
                                  m->set_src);
                  seq = get_insns ();
                  end_sequence ();

                  add_label_notes (m->set_src, seq);

                  i1 = loop_insn_hoist (loop, seq);
                  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
                    set_unique_reg_note (i1,
                                         m->is_equiv ? REG_EQUIV : REG_EQUAL,
                                         m->set_src);

                  if (loop_dump_stream)
                    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));

                  /* The more regs we move, the less we like moving them.  */
                  threshold -= 3;
                }
              else
                {
                  for (count = m->consec; count >= 0; count--)
                    {
                      rtx i1, temp;

                      /* If first insn of libcall sequence, skip to end.  */
                      /* Do this at start of loop, since p is guaranteed to
                         be an insn here.  */
                      if (!NOTE_P (p)
                          && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
                        p = XEXP (temp, 0);

                      /* If last insn of libcall sequence, move all
                         insns except the last before the loop.  The last
                         insn is handled in the normal manner.  */
                      if (!NOTE_P (p)
                          && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
                        {
                          rtx fn_address = 0;
                          rtx fn_reg = 0;
                          rtx fn_address_insn = 0;

                          first = 0;
                          for (temp = XEXP (temp, 0); temp != p;
                               temp = NEXT_INSN (temp))
                            {
                              rtx body;
                              rtx n;
                              rtx next;

                              if (NOTE_P (temp))
                                continue;

                              body = PATTERN (temp);

                              /* Find the next insn after TEMP,
                                 not counting USE or NOTE insns.  */
                              for (next = NEXT_INSN (temp); next != p;
                                   next = NEXT_INSN (next))
                                if (! (NONJUMP_INSN_P (next)
                                       && GET_CODE (PATTERN (next)) == USE)
                                    && !NOTE_P (next))
                                  break;

                              /* If that is the call, this may be the insn
                                 that loads the function address.

                                 Extract the function address from the insn
                                 that loads it into a register.
                                 If this insn was cse'd, we get incorrect code.

                                 So emit a new move insn that copies the
                                 function address into the register that the
                                 call insn will use.  flow.c will delete any
                                 redundant stores that we have created.  */
                              if (CALL_P (next)
                                  && GET_CODE (body) == SET
                                  && REG_P (SET_DEST (body))
                                  && (n = find_reg_note (temp, REG_EQUAL,
                                                         NULL_RTX)))
                                {
                                  fn_reg = SET_SRC (body);
                                  if (!REG_P (fn_reg))
                                    fn_reg = SET_DEST (body);
                                  fn_address = XEXP (n, 0);
                                  fn_address_insn = temp;
                                }
                              /* We have the call insn.
                                 If it uses the register we suspect it might,
                                 load it with the correct address directly.  */
                              if (CALL_P (temp)
                                  && fn_address != 0
                                  && reg_referenced_p (fn_reg, body))
                                loop_insn_emit_after (loop, 0, fn_address_insn,
                                                      gen_move_insn
                                                      (fn_reg, fn_address));

                              if (CALL_P (temp))
                                {
                                  i1 = loop_call_insn_hoist (loop, body);
                                  /* Because the USAGE information potentially
                                     contains objects other than hard registers
                                     we need to copy it.  */
                                  if (CALL_INSN_FUNCTION_USAGE (temp))
                                    CALL_INSN_FUNCTION_USAGE (i1)
                                      = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
                                }
                              else
                                i1 = loop_insn_hoist (loop, body);
                              if (first == 0)
                                first = i1;
                              if (temp == fn_address_insn)
                                fn_address_insn = i1;
                              REG_NOTES (i1) = REG_NOTES (temp);
                              REG_NOTES (temp) = NULL;
                              delete_insn (temp);
                            }
                        }
                      if (m->savemode != VOIDmode)
                        {
                          /* P sets REG to zero; but we should clear only
                             the bits that are not covered by the mode
                             m->savemode.  */
                          rtx reg = m->set_dest;
                          rtx sequence;
                          rtx tem;

                          start_sequence ();
                          tem = expand_simple_binop
                            (GET_MODE (reg), AND, reg,
                             GEN_INT ((((HOST_WIDE_INT) 1
                                        << GET_MODE_BITSIZE (m->savemode)))
                                      - 1),
                             reg, 1, OPTAB_LIB_WIDEN);
                          gcc_assert (tem);
                          if (tem != reg)
                            emit_move_insn (reg, tem);
                          sequence = get_insns ();
                          end_sequence ();
                          i1 = loop_insn_hoist (loop, sequence);
                        }
                      else if (CALL_P (p))
                        {
                          i1 = loop_call_insn_hoist (loop, PATTERN (p));
                          /* Because the USAGE information potentially
                             contains objects other than hard registers
                             we need to copy it.  */
                          if (CALL_INSN_FUNCTION_USAGE (p))
                            CALL_INSN_FUNCTION_USAGE (i1)
                              = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
                        }
                      else if (count == m->consec && m->move_insn_first)
                        {
                          rtx seq;

                          /* The SET_SRC might not be invariant, so we must
                             use the REG_EQUAL note.  */
                          start_sequence ();
                          emit_move_insn (m->insert_temp ? newreg : m->set_dest,
                                          m->set_src);
                          seq = get_insns ();
                          end_sequence ();

                          add_label_notes (m->set_src, seq);

                          i1 = loop_insn_hoist (loop, seq);
                          if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
                            set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
                                                 : REG_EQUAL, m->set_src);
                        }
                      else if (m->insert_temp)
                        {
                          rtx *reg_map2 = xcalloc (REGNO (newreg),
                                                   sizeof (rtx));
                          reg_map2[m->regno] = newreg;

                          i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
                          replace_regs (i1, reg_map2, REGNO (newreg), 1);
                          free (reg_map2);
                        }
                      else
                        i1 = loop_insn_hoist (loop, PATTERN (p));

                      if (REG_NOTES (i1) == 0)
                        {
                          REG_NOTES (i1) = REG_NOTES (p);
                          REG_NOTES (p) = NULL;

                          /* If there is a REG_EQUAL note present whose value
                             is not loop invariant, then delete it, since it
                             may cause problems with later optimization passes.
                             It is possible for cse to create such notes
                             like this as a result of record_jump_cond.  */

                          if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
                              && ! loop_invariant_p (loop, XEXP (temp, 0)))
                            remove_note (i1, temp);
                        }

                      if (new_start == 0)
                        new_start = i1;

                      if (loop_dump_stream)
                        fprintf (loop_dump_stream, " moved to %d",
                                 INSN_UID (i1));

                      /* If library call, now fix the REG_NOTES that contain
                         insn pointers, namely REG_LIBCALL on FIRST
                         and REG_RETVAL on I1.  */
                      if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
                        {
                          XEXP (temp, 0) = first;
                          temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
                          XEXP (temp, 0) = i1;
                        }

                      temp = p;
                      p = delete_insn (p);

                      /* simplify_giv_expr expects that it can walk the insns
                         at m->insn forwards and see this old sequence we are
                         tossing here.  delete_insn does preserve the next
                         pointers, but when we skip over a NOTE we must fix
                         it up.  Otherwise that code walks into the non-deleted
                         insn stream.  */
                      while (p && NOTE_P (p))
                        p = NEXT_INSN (temp) = NEXT_INSN (p);

                      if (m->insert_temp)
                        {
                          rtx seq;

                          /* Replace the original insn with a move from
                             our newly created temp.  */
                          start_sequence ();
                          emit_move_insn (m->set_dest, newreg);
                          seq = get_insns ();
                          end_sequence ();

                          emit_insn_before (seq, p);
                        }
                    }

                  /* The more regs we move, the less we like moving them.  */
                  threshold -= 3;
                }

              m->done = 1;

              if (!m->insert_temp)
                {
                  /* Any other movable that loads the same register
                     MUST be moved.  */
                  already_moved[regno] = 1;

                  /* This reg has been moved out of one loop.  */
                  regs->array[regno].moved_once = 1;

                  /* The reg set here is now invariant.  */
                  if (! m->partial)
                    {
                      int i;
                      for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
                        regs->array[regno+i].set_in_loop = 0;
                    }

                  /* Change the length-of-life info for the register
                     to say it lives at least the full length of this loop.
                     This will help guide optimizations in outer loops.  */

                  if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
                    /* This is the old insn before all the moved insns.
                       We can't use the moved insn because it is out of range
                       in uid_luid.  Only the old insns have luids.  */
                    REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
                  if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
                    REGNO_LAST_UID (regno) = INSN_UID (loop_end);
                }

              /* Combine with this moved insn any other matching movables.  */

              if (! m->partial)
                for (m1 = movables->head; m1; m1 = m1->next)
                  if (m1->match == m)
                    {
                      rtx temp;

                      /* Schedule the reg loaded by M1
                         for replacement so that it shares the reg of M.
                         If the modes differ (only possible in restricted
                         circumstances), make a SUBREG.

                         Note this assumes that the target dependent files
                         treat REG and SUBREG equally, including within
                         GO_IF_LEGITIMATE_ADDRESS and in all the
                         predicates since we never verify that replacing the
                         original register with a SUBREG results in a
                         recognizable insn.  */
                      if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
                        reg_map[m1->regno] = m->set_dest;
                      else
                        reg_map[m1->regno]
                          = gen_lowpart_common (GET_MODE (m1->set_dest),
                                                m->set_dest);

                      /* Get rid of the matching insn
                         and prevent further processing of it.  */
                      m1->done = 1;

                      /* If library call, delete all insns.  */
                      if ((temp = find_reg_note (m1->insn, REG_RETVAL,
                                                 NULL_RTX)))
                        delete_insn_chain (XEXP (temp, 0), m1->insn);
                      else
                        delete_insn (m1->insn);

                      /* Any other movable that loads the same register
                         MUST be moved.  */
                      already_moved[m1->regno] = 1;

                      /* The reg merged here is now invariant,
                         if the reg it matches is invariant.  */
                      if (! m->partial)
                        {
                          int i;
                          for (i = 0;
                               i < LOOP_REGNO_NREGS (regno, m1->set_dest);
                               i++)
                            regs->array[m1->regno+i].set_in_loop = 0;
                        }
                    }
            }
          else if (loop_dump_stream)
            fprintf (loop_dump_stream, "not desirable");
        }
      else if (loop_dump_stream && !m->match)
        fprintf (loop_dump_stream, "not safe");

      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\n");
    }

  if (new_start == 0)
    new_start = loop_start;

  /* Go through all the instructions in the loop, making
     all the register substitutions scheduled in REG_MAP.  */
  for (p = new_start; p != loop_end; p = NEXT_INSN (p))
    if (INSN_P (p))
      {
        replace_regs (PATTERN (p), reg_map, nregs, 0);
        replace_regs (REG_NOTES (p), reg_map, nregs, 0);
        INSN_CODE (p) = -1;
      }

  /* Clean up.  */
  free (reg_map);
  free (already_moved);
}
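
/* A simplified picture of the overall effect, with invented names: a loop
   body containing

       x = c * 8;           <- invariant, safe and desirable to move
       a[i] = x;

   is rewritten so that the multiplication is emitted once before the loop
   (via loop_insn_hoist) and only the store executes per iteration; matched
   movables are deleted and their registers redirected through reg_map.  */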
static void
loop_movables_add (struct loop_movables *movables, struct movable *m)
{
  if (movables->head == 0)
    movables->head = m;
  else
    movables->last->next = m;
  movables->last = m;
}

static void
loop_movables_free (struct loop_movables *movables)
{
  struct movable *m;
  struct movable *m_next;

  for (m = movables->head; m; m = m_next)
    {
      m_next = m->next;
      free (m);
    }
}
/* Scan X and replace the address of any MEM in it with ADDR.
   REG is the address that MEM should have before the replacement.  */

static void
replace_call_address (rtx x, rtx reg, rtx addr)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case SET:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 1), reg, addr);
      return;

    case CALL:
      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 0), reg, addr);
      return;

    case MEM:
      /* If this MEM uses a reg other than the one we expected,
         something is wrong.  */
      gcc_assert (XEXP (x, 0) == reg);
      XEXP (x, 0) = addr;
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        replace_call_address (XEXP (x, i), reg, addr);
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            replace_call_address (XVECEXP (x, i, j), reg, addr);
        }
    }
}
/* Return the number of memory refs to addresses that vary
   in the rtx X.  */

static int
count_nonfixed_reads (const struct loop *loop, rtx x)
{
  enum rtx_code code;
  int i;
  const char *fmt;
  int value;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
              + count_nonfixed_reads (loop, XEXP (x, 0)));

    default:
      break;
    }

  value = 0;
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        value += count_nonfixed_reads (loop, XEXP (x, i));
      if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
        }
    }
  return value;
}
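
/* Example (hypothetical rtx): for x = (mem (plus (reg 64) (const_int 4))),
   the MEM case adds 1 when the address is not known to be loop invariant
   (loop_invariant_p != 1) and then recurses into the address, so a varying
   single-level address yields a count of 1.  */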
/* Scan a loop setting the elements `loops_enclosed',
   `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
   `unknown_address_altered', `unknown_constant_address_altered', and
   `num_mem_sets' in LOOP.  Also, fill in the array `mems' and the
   list `store_mems' in LOOP.  */

static void
prescan_loop (struct loop *loop)
{
  int level = 1;
  rtx insn;
  struct loop_info *loop_info = LOOP_INFO (loop);
  rtx start = loop->start;
  rtx end = loop->end;
  /* The label after END.  Jumping here is just like falling off the
     end of the loop.  We use next_nonnote_insn instead of next_label
     as a hedge against the (pathological) case where some actual insn
     might end up between the two.  */
  rtx exit_target = next_nonnote_insn (end);

  loop_info->has_indirect_jump = indirect_jump_in_function;
  loop_info->pre_header_has_call = 0;
  loop_info->has_call = 0;
  loop_info->has_nonconst_call = 0;
  loop_info->has_prefetch = 0;
  loop_info->has_volatile = 0;
  loop_info->has_tablejump = 0;
  loop_info->has_multiple_exit_targets = 0;
  loop->level = 1;

  loop_info->unknown_address_altered = 0;
  loop_info->unknown_constant_address_altered = 0;
  loop_info->store_mems = NULL_RTX;
  loop_info->first_loop_store_insn = NULL_RTX;
  loop_info->mems_idx = 0;
  loop_info->num_mem_sets = 0;

  for (insn = start; insn && !LABEL_P (insn);
       insn = PREV_INSN (insn))
    {
      if (CALL_P (insn))
        {
          loop_info->pre_header_has_call = 1;
          break;
        }
    }

  for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
       insn = NEXT_INSN (insn))
    {
      switch (GET_CODE (insn))
        {
        case NOTE:
          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
            {
              ++level;
              /* Count number of loops contained in this one.  */
              loop->level++;
            }
          else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
            --level;
          break;

        case CALL_INSN:
          if (! CONST_OR_PURE_CALL_P (insn))
            {
              loop_info->unknown_address_altered = 1;
              loop_info->has_nonconst_call = 1;
            }
          else if (pure_call_p (insn))
            loop_info->has_nonconst_call = 1;
          loop_info->has_call = 1;
          if (can_throw_internal (insn))
            loop_info->has_multiple_exit_targets = 1;
          break;

        case JUMP_INSN:
          if (! loop_info->has_multiple_exit_targets)
            {
              rtx set = pc_set (insn);

              if (set)
                {
                  rtx src = SET_SRC (set);
                  rtx label1, label2;

                  if (GET_CODE (src) == IF_THEN_ELSE)
                    {
                      label1 = XEXP (src, 1);
                      label2 = XEXP (src, 2);
                    }
                  else
                    {
                      label1 = src;
                      label2 = NULL_RTX;
                    }

                  do
                    {
                      if (label1 && label1 != pc_rtx)
                        {
                          if (GET_CODE (label1) != LABEL_REF)
                            {
                              /* Something tricky.  */
                              loop_info->has_multiple_exit_targets = 1;
                              break;
                            }
                          else if (XEXP (label1, 0) != exit_target
                                   && LABEL_OUTSIDE_LOOP_P (label1))
                            {
                              /* A jump outside the current loop.  */
                              loop_info->has_multiple_exit_targets = 1;
                              break;
                            }
                        }

                      label1 = label2;
                      label2 = NULL_RTX;
                    }
                  while (label1);
                }
              else
                {
                  /* A return, or something tricky.  */
                  loop_info->has_multiple_exit_targets = 1;
                }
            }
          /* Fall through.  */

        case INSN:
          if (volatile_refs_p (PATTERN (insn)))
            loop_info->has_volatile = 1;

          if (JUMP_P (insn)
              && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
                  || GET_CODE (PATTERN (insn)) == ADDR_VEC))
            loop_info->has_tablejump = 1;

          note_stores (PATTERN (insn), note_addr_stored, loop_info);
          if (! loop_info->first_loop_store_insn && loop_info->store_mems)
            loop_info->first_loop_store_insn = insn;

          if (flag_non_call_exceptions && can_throw_internal (insn))
            loop_info->has_multiple_exit_targets = 1;
          break;

        default:
          break;
        }
    }

  /* Now, rescan the loop, setting up the LOOP_MEMS array.  */
  if (/* An exception thrown by a called function might land us
         anywhere.  */
      ! loop_info->has_nonconst_call
      /* We don't want loads for MEMs moved to a location before the
         one at which their stack memory becomes allocated.  (Note
         that this is not a problem for malloc, etc., since those
         require actual function calls.)  */
      && ! current_function_calls_alloca
      /* There are ways to leave the loop other than falling off the
         end.  */
      && ! loop_info->has_multiple_exit_targets)
    for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
         insn = NEXT_INSN (insn))
      for_each_rtx (&insn, insert_loop_mem, loop_info);

  /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
     that loop_invariant_p and load_mems can use true_dependence
     to determine what is really clobbered.  */
  if (loop_info->unknown_address_altered)
    {
      rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);

      loop_info->store_mems
        = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
    }
  if (loop_info->unknown_constant_address_altered)
    {
      rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
      MEM_READONLY_P (mem) = 1;
      loop_info->store_mems
        = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
    }
}
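
/* As a hedged example of the flags this computes: a loop whose body performs

       *p++ = f (i);

   with F an ordinary (not const or pure) function sets has_call,
   has_nonconst_call and unknown_address_altered, which in turn blocks the
   LOOP_MEMS scan above and adds a BLKmode wildcard to store_mems.  */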
/* Invalidate all loops containing LABEL.  */

static void
invalidate_loops_containing_label (rtx label)
{
  struct loop *loop;
  for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
    loop->invalid = 1;
}
/* Scan the function looking for loops.  Record the start and end of each loop.
   Also mark as invalid loops any loops that contain a setjmp or are branched
   to from outside the loop.  */

static void
find_and_verify_loops (rtx f, struct loops *loops)
{
  rtx insn;
  rtx label;
  int num_loops;
  struct loop *current_loop;
  struct loop *next_loop;
  struct loop *loop;

  num_loops = loops->num;

  compute_luids (f, NULL_RTX, 0);

  /* If there are jumps to undefined labels,
     treat them as jumps out of any/all loops.
     This also avoids writing past end of tables when there are no loops.  */
  uid_loop[0] = NULL;

  /* Find boundaries of loops, mark which loops are contained within
     loops, and invalidate loops that have setjmp.  */

  num_loops = 0;
  current_loop = NULL;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (NOTE_P (insn))
        switch (NOTE_LINE_NUMBER (insn))
          {
          case NOTE_INSN_LOOP_BEG:
            next_loop = loops->array + num_loops;
            next_loop->num = num_loops;
            num_loops++;
            next_loop->start = insn;
            next_loop->outer = current_loop;
            current_loop = next_loop;
            break;

          case NOTE_INSN_LOOP_END:
            gcc_assert (current_loop);

            current_loop->end = insn;
            current_loop = current_loop->outer;
            break;

          default:
            break;
          }

      if (CALL_P (insn)
          && find_reg_note (insn, REG_SETJMP, NULL))
        {
          /* In this case, we must invalidate our current loop and any
             enclosing loops.  */
          for (loop = current_loop; loop; loop = loop->outer)
            {
              loop->invalid = 1;
              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "\nLoop at %d ignored due to setjmp.\n",
                         INSN_UID (loop->start));
            }
        }

      /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
         enclosing loop, but this doesn't matter.  */
      uid_loop[INSN_UID (insn)] = current_loop;
    }

  /* Any loop containing a label used in an initializer must be invalidated,
     because it can be jumped into from anywhere.  */
  for (label = forced_labels; label; label = XEXP (label, 1))
    invalidate_loops_containing_label (XEXP (label, 0));

  /* Any loop containing a label used for an exception handler must be
     invalidated, because it can be jumped into from anywhere.  */
  for_each_eh_label (invalidate_loops_containing_label);

  /* Now scan all insn's in the function.  If any JUMP_INSN branches into a
     loop that it is not contained within, that loop is marked invalid.
     If any INSN or CALL_INSN uses a label's address, then the loop containing
     that label is marked invalid, because it could be jumped into from
     anywhere.

     Also look for blocks of code ending in an unconditional branch that
     exits the loop.  If such a block is surrounded by a conditional
     branch around the block, move the block elsewhere (see below) and
     invert the jump to point to the code block.  This may eliminate a
     label in our loop and will simplify processing by both us and a
     possible second cse pass.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        struct loop *this_loop = uid_loop[INSN_UID (insn)];

        if (NONJUMP_INSN_P (insn) || CALL_P (insn))
          {
            rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
            if (note)
              invalidate_loops_containing_label (XEXP (note, 0));
          }

        if (!JUMP_P (insn))
          continue;

        mark_loop_jump (PATTERN (insn), this_loop);

        /* See if this is an unconditional branch outside the loop.  */
        if (this_loop
            && (GET_CODE (PATTERN (insn)) == RETURN
                || (any_uncondjump_p (insn)
                    && onlyjump_p (insn)
                    && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
                        != this_loop)))
            && get_max_uid () < max_uid_for_loop)
          {
            rtx p;
            rtx our_next = next_real_insn (insn);
            rtx last_insn_to_move = NEXT_INSN (insn);
            struct loop *dest_loop;
            struct loop *outer_loop = NULL;

            /* Go backwards until we reach the start of the loop, a label,
               or a JUMP_INSN.  */
            for (p = PREV_INSN (insn);
                 !LABEL_P (p)
                 && ! (NOTE_P (p)
                       && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
                 && !JUMP_P (p);
                 p = PREV_INSN (p))
              ;

            /* Check for the case where we have a jump to an inner nested
               loop, and do not perform the optimization in that case.  */

            if (JUMP_LABEL (insn))
              {
                dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
                if (dest_loop)
                  {
                    for (outer_loop = dest_loop; outer_loop;
                         outer_loop = outer_loop->outer)
                      if (outer_loop == this_loop)
                        break;
                  }
              }

            /* Make sure that the target of P is within the current loop.  */

            if (JUMP_P (p) && JUMP_LABEL (p)
                && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
              outer_loop = this_loop;

            /* If we stopped on a JUMP_INSN to the next insn after INSN,
               we have a block of code to try to move.

               We look backward and then forward from the target of INSN
               to find a BARRIER at the same loop depth as the target.
               If we find such a BARRIER, we make a new label for the start
               of the block, invert the jump in P and point it to that label,
               and move the block of code to the spot we found.  */

            if (! outer_loop
                && JUMP_P (p)
                && JUMP_LABEL (p) != 0
                /* Just ignore jumps to labels that were never emitted.
                   These always indicate compilation errors.  */
                && INSN_UID (JUMP_LABEL (p)) != 0
                && any_condjump_p (p) && onlyjump_p (p)
                && next_real_insn (JUMP_LABEL (p)) == our_next
                /* If it's not safe to move the sequence, then we
                   must not do it.  */
                && insns_safe_to_move_p (p, NEXT_INSN (insn),
                                         &last_insn_to_move))
              {
                rtx target
                  = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
                struct loop *target_loop = uid_loop[INSN_UID (target)];
                rtx loc, loc2;
                rtx tmp;

                /* Search for possible garbage past the conditional jumps
                   and look for the last barrier.  */
                for (tmp = last_insn_to_move;
                     tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
                  if (BARRIER_P (tmp))
                    last_insn_to_move = tmp;

                for (loc = target; loc; loc = PREV_INSN (loc))
                  if (BARRIER_P (loc)
                      /* Don't move things inside a tablejump.  */
                      && ((loc2 = next_nonnote_insn (loc)) == 0
                          || !LABEL_P (loc2)
                          || (loc2 = next_nonnote_insn (loc2)) == 0
                          || !JUMP_P (loc2)
                          || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
                              && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
                      && uid_loop[INSN_UID (loc)] == target_loop)
                    break;

                if (loc == 0)
                  for (loc = target; loc; loc = NEXT_INSN (loc))
                    if (BARRIER_P (loc)
                        /* Don't move things inside a tablejump.  */
                        && ((loc2 = next_nonnote_insn (loc)) == 0
                            || !LABEL_P (loc2)
                            || (loc2 = next_nonnote_insn (loc2)) == 0
                            || !JUMP_P (loc2)
                            || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
                                && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
                        && uid_loop[INSN_UID (loc)] == target_loop)
                      break;

                if (loc)
                  {
                    rtx cond_label = JUMP_LABEL (p);
                    rtx new_label = get_label_after (p);

                    /* Ensure our label doesn't go away.  */
                    LABEL_NUSES (cond_label)++;

                    /* Verify that uid_loop is large enough and that
                       we can invert P.  */
                    if (invert_jump (p, new_label, 1))
                      {
                        rtx q, r;
                        int only_notes;

                        /* If no suitable BARRIER was found, create a suitable
                           one before TARGET.  Since TARGET is a fall through
                           path, we'll need to insert a jump around our block
                           and add a BARRIER before TARGET.

                           This creates an extra unconditional jump outside
                           the loop.  However, the benefits of removing rarely
                           executed instructions from inside the loop usually
                           outweigh the cost of the extra unconditional jump
                           outside the loop.  */
                        if (loc == 0)
                          {
                            rtx temp;

                            temp = gen_jump (JUMP_LABEL (insn));
                            temp = emit_jump_insn_before (temp, target);
                            JUMP_LABEL (temp) = JUMP_LABEL (insn);
                            LABEL_NUSES (JUMP_LABEL (insn))++;
                            loc = emit_barrier_before (target);
                          }

                        /* Include the BARRIER after INSN and copy the
                           block after LOC.  */
                        only_notes = squeeze_notes (&new_label,
                                                    &last_insn_to_move);
                        gcc_assert (!only_notes);

                        reorder_insns (new_label, last_insn_to_move, loc);

                        /* All those insns are now in TARGET_LOOP.  */
                        for (q = new_label;
                             q != NEXT_INSN (last_insn_to_move);
                             q = NEXT_INSN (q))
                          uid_loop[INSN_UID (q)] = target_loop;

                        /* The label jumped to by INSN is no longer a loop
                           exit.  Unless INSN does not have a label (e.g.,
                           it is a RETURN insn), search loop->exit_labels
                           to find its label_ref, and remove it.  Also turn
                           off LABEL_OUTSIDE_LOOP_P bit.  */
                        if (JUMP_LABEL (insn))
                          {
                            for (q = 0, r = this_loop->exit_labels;
                                 r;
                                 q = r, r = LABEL_NEXTREF (r))
                              if (XEXP (r, 0) == JUMP_LABEL (insn))
                                {
                                  LABEL_OUTSIDE_LOOP_P (r) = 0;
                                  if (q)
                                    LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
                                  else
                                    this_loop->exit_labels = LABEL_NEXTREF (r);
                                  break;
                                }

                            for (loop = this_loop; loop && loop != target_loop;
                                 loop = loop->outer)
                              loop->exit_count--;

                            /* If we didn't find it, then something is
                               wrong.  */
                            gcc_assert (r);
                          }

                        /* P is now a jump outside the loop, so it must be put
                           in loop->exit_labels, and marked as such.
                           The easiest way to do this is to just call
                           mark_loop_jump again for P.  */
                        mark_loop_jump (PATTERN (p), this_loop);

                        /* If INSN now jumps to the insn after it,
                           delete INSN.  */
                        if (JUMP_LABEL (insn) != 0
                            && (next_real_insn (JUMP_LABEL (insn))
                                == next_real_insn (insn)))
                          delete_related_insns (insn);
                      }

                    /* Continue the loop after where the conditional
                       branch used to jump, since the only branch insn
                       in the block (if it still remains) is an inter-loop
                       branch and hence needs no processing.  */
                    insn = NEXT_INSN (cond_label);

                    if (--LABEL_NUSES (cond_label) == 0)
                      delete_related_insns (cond_label);

                    /* This loop will be continued with NEXT_INSN (insn).  */
                    insn = PREV_INSN (insn);
                  }
              }
          }
      }
}
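
/* A sketch of the block-motion case handled above (labels invented):

       beq L1               <- conditional jump P around the block
       jmp L_exit           <- unconditional exit from the loop (INSN)
     L1:
       ...

   becomes, after invert_jump and reorder_insns,

       bne L_new            <- inverted branch to the moved block
       ...
     (elsewhere, at a BARRIER outside the loop)
     L_new:
       jmp L_exit

   removing a label and a rarely executed jump from the loop body.  */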
/* If any label in X jumps to a loop different from LOOP_NUM and any of the
   loops it is contained in, mark the target loop invalid.

   For speed, we assume that X is part of a pattern of a JUMP_INSN.  */

static void
mark_loop_jump (rtx x, struct loop *loop)
{
  struct loop *dest_loop;
  struct loop *outer_loop;
  int i;

  switch (GET_CODE (x))
    {
    case PC:
    case USE:
    case CLOBBER:
    case REG:
    case MEM:
    case CONST_INT:
    case CONST_DOUBLE:
    case RETURN:
      return;

    case CONST:
      /* There could be a label reference in here.  */
      mark_loop_jump (XEXP (x, 0), loop);
      return;

    case PLUS:
    case MINUS:
    case MULT:
      mark_loop_jump (XEXP (x, 0), loop);
      mark_loop_jump (XEXP (x, 1), loop);
      return;

    case LO_SUM:
      /* This may refer to a LABEL_REF or SYMBOL_REF.  */
      mark_loop_jump (XEXP (x, 1), loop);
      return;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      mark_loop_jump (XEXP (x, 0), loop);
      return;

    case LABEL_REF:
      dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];

      /* Link together all labels that branch outside the loop.  This
         is used by final_[bg]iv_value and the loop unrolling code.  Also
         mark this LABEL_REF so we know that this branch should predict
         false.  */

      /* A check to make sure the label is not in an inner nested loop,
         since this does not count as a loop exit.  */
      if (dest_loop)
        {
          for (outer_loop = dest_loop; outer_loop;
               outer_loop = outer_loop->outer)
            if (outer_loop == loop)
              break;
        }
      else
        outer_loop = NULL;

      if (loop && ! outer_loop)
        {
          LABEL_OUTSIDE_LOOP_P (x) = 1;
          LABEL_NEXTREF (x) = loop->exit_labels;
          loop->exit_labels = x;

          for (outer_loop = loop;
               outer_loop && outer_loop != dest_loop;
               outer_loop = outer_loop->outer)
            outer_loop->exit_count++;
        }

      /* If this is inside a loop, but not in the current loop or one enclosed
         by it, it invalidates at least one loop.  */

      if (! dest_loop)
        return;

      /* We must invalidate every nested loop containing the target of this
         label, except those that also contain the jump insn.  */

      for (; dest_loop; dest_loop = dest_loop->outer)
        {
          /* Stop when we reach a loop that also contains the jump insn.  */
          for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
            if (dest_loop == outer_loop)
              return;

          /* If we get here, we know we need to invalidate a loop.  */
          if (loop_dump_stream && ! dest_loop->invalid)
            fprintf (loop_dump_stream,
                     "\nLoop at %d ignored due to multiple entry points.\n",
                     INSN_UID (dest_loop->start));

          dest_loop->invalid = 1;
        }
      return;

    case SET:
      /* If this is not setting pc, ignore.  */
      if (SET_DEST (x) == pc_rtx)
        mark_loop_jump (SET_SRC (x), loop);
      return;

    case IF_THEN_ELSE:
      mark_loop_jump (XEXP (x, 1), loop);
      mark_loop_jump (XEXP (x, 2), loop);
      return;

    case PARALLEL:
    case ADDR_VEC:
      for (i = 0; i < XVECLEN (x, 0); i++)
        mark_loop_jump (XVECEXP (x, 0, i), loop);
      return;

    case ADDR_DIFF_VEC:
      for (i = 0; i < XVECLEN (x, 1); i++)
        mark_loop_jump (XVECEXP (x, 1, i), loop);
      return;

    default:
      /* Strictly speaking this is not a jump into the loop, only a possible
         jump out of the loop.  However, we have no way to link the destination
         of this jump onto the list of exit labels.  To be safe we mark this
         loop and any containing loops as invalid.  */
      if (loop)
        {
          for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
            {
              if (loop_dump_stream && ! outer_loop->invalid)
                fprintf (loop_dump_stream,
                         "\nLoop at %d ignored due to unknown exit jump.\n",
                         INSN_UID (outer_loop->start));
              outer_loop->invalid = 1;
            }
        }
      return;
    }
}
/* Return nonzero if there is a label in the range from
   insn INSN to and including the insn whose luid is END.
   INSN must have an assigned luid (i.e., it must not have
   been previously created by loop.c).  */

static int
labels_in_range_p (rtx insn, int end)
{
  while (insn && INSN_LUID (insn) <= end)
    {
      if (LABEL_P (insn))
        return 1;
      insn = NEXT_INSN (insn);
    }

  return 0;
}
/* Record that a memory reference X is being set.  */

static void
note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
                  void *data ATTRIBUTE_UNUSED)
{
  struct loop_info *loop_info = data;

  if (x == 0 || !MEM_P (x))
    return;

  /* Count number of memory writes.
     This affects heuristics in strength_reduce.  */
  loop_info->num_mem_sets++;

  /* BLKmode MEM means all memory is clobbered.  */
  if (GET_MODE (x) == BLKmode)
    {
      if (MEM_READONLY_P (x))
        loop_info->unknown_constant_address_altered = 1;
      else
        loop_info->unknown_address_altered = 1;

      return;
    }

  loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
                                             loop_info->store_mems);
}
/* X is a value modified by an INSN that references a biv inside a loop
   exit test (i.e., X is somehow related to the value of the biv).  If X
   is a pseudo that is used more than once, then the biv is (effectively)
   used more than once.  DATA is a pointer to a loop_regs structure.  */

static void
note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
{
  struct loop_regs *regs = (struct loop_regs *) data;

  if (x == 0)
    return;

  while (GET_CODE (x) == STRICT_LOW_PART
         || GET_CODE (x) == SIGN_EXTRACT
         || GET_CODE (x) == ZERO_EXTRACT
         || GET_CODE (x) == SUBREG)
    x = XEXP (x, 0);

  if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
    return;

  /* If we do not have usage information, or if we know the register
     is used more than once, note that fact for check_dbra_loop.  */
  if (REGNO (x) >= max_reg_before_loop
      || ! regs->array[REGNO (x)].single_usage
      || regs->array[REGNO (x)].single_usage == const0_rtx)
    regs->multiple_uses = 1;
}
/* Return nonzero if the rtx X is invariant over the current loop.

   The value is 2 if we refer to something only conditionally invariant.

   A memory ref is invariant if it is not volatile and does not conflict
   with anything stored in `loop_info->store_mems'.  */

int
loop_invariant_p (const struct loop *loop, rtx x)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  enum rtx_code code;
  const char *fmt;
  int conditional = 0;
  rtx mem_list_entry;

  if (x == 0)
    return 1;
  code = GET_CODE (x);
  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      return 1;

    case PC:
    case CC0:
    case UNSPEC_VOLATILE:
      return 0;

    case REG:
      if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
           || x == arg_pointer_rtx || x == pic_offset_table_rtx)
          && ! current_function_has_nonlocal_goto)
        return 1;

      if (LOOP_INFO (loop)->has_call
          && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
        return 0;

      /* Out-of-range regs can occur when we are called from unrolling.
         These registers created by the unroller are set in the loop,
         hence are never invariant.
         Other out-of-range regs can be generated by load_mems; those that
         are written to in the loop are not invariant, while those that are
         not written to are invariant.  It would be easy for load_mems
         to set n_times_set correctly for these registers, however, there
         is no easy way to distinguish them from registers created by the
         unroller.  */

      if (REGNO (x) >= (unsigned) regs->num)
        return 0;

      if (regs->array[REGNO (x)].set_in_loop < 0)
        return 2;

      return regs->array[REGNO (x)].set_in_loop == 0;

    case MEM:
      /* Volatile memory references must be rejected.  Do this before
         checking for read-only items, so that volatile read-only items
         will be rejected also.  */
      if (MEM_VOLATILE_P (x))
        return 0;

      /* See if there is any dependence between a store and this load.  */
      mem_list_entry = loop_info->store_mems;
      while (mem_list_entry)
        {
          if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
                               x, rtx_varies_p))
            return 0;

          mem_list_entry = XEXP (mem_list_entry, 1);
        }

      /* It's not invalidated by a store in memory
         but we must still verify the address is invariant.  */
      break;

    case ASM_OPERANDS:
      /* Don't mess with insns declared volatile.  */
      if (MEM_VOLATILE_P (x))
        return 0;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          int tem = loop_invariant_p (loop, XEXP (x, i));
          if (tem == 0)
            return 0;
          if (tem == 2)
            conditional = 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            {
              int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
              if (tem == 0)
                return 0;
              if (tem == 2)
                conditional = 1;
            }
        }
    }

  return 1 + conditional;
}
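
/* Return-value examples (hedged, with hypothetical registers): a pseudo
   never set in the loop yields 1; a pseudo whose only set is still a
   candidate movable (set_in_loop < 0) yields 2, i.e. "only conditionally
   invariant"; a pseudo stored in the loop, or a volatile MEM, yields 0.  */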
/* Return nonzero if all the insns in the loop that set REG
   are INSN and the immediately following insns,
   and if each of those insns sets REG in an invariant way
   (not counting uses of REG in them).

   The value is 2 if some of these insns are only conditionally invariant.

   We assume that INSN itself is the first set of REG
   and that its source is invariant.  */

static int
consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
                         rtx insn)
{
  struct loop_regs *regs = LOOP_REGS (loop);
  rtx p = insn;
  unsigned int regno = REGNO (reg);
  rtx temp;
  /* Number of sets we have to insist on finding after INSN.  */
  int count = n_sets - 1;
  int old = regs->array[regno].set_in_loop;
  int value = 0;
  int this;

  /* If N_SETS hit the limit, we can't rely on its value.  */
  if (n_sets == 127)
    return 0;

  regs->array[regno].set_in_loop = 0;

  while (count > 0)
    {
      enum rtx_code code;
      rtx set;

      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If library call, skip to end of it.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
        p = XEXP (temp, 0);

      this = 0;
      if (code == INSN
          && (set = single_set (p))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) == regno)
        {
          this = loop_invariant_p (loop, SET_SRC (set));
          if (this != 0)
            value |= this;
          else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
            {
              /* If this is a libcall, then any invariant REG_EQUAL note is OK.
                 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
                 notes are OK.  */
              this = (CONSTANT_P (XEXP (temp, 0))
                      || (find_reg_note (p, REG_RETVAL, NULL_RTX)
                          && loop_invariant_p (loop, XEXP (temp, 0))));
              if (this != 0)
                value |= this;
            }
        }
      if (this != 0)
        count--;
      else if (code != NOTE)
        {
          regs->array[regno].set_in_loop = old;
          return 0;
        }
    }

  regs->array[regno].set_in_loop = old;
  /* If loop_invariant_p ever returned 2, we return 2.  */
  return 1 + (value & 2);
}
/* Look at all uses (not sets) of registers in X.  For each, if it is
   the single use, set USAGE[REGNO] to INSN; if there was a previous use in
   a different insn, set USAGE[REGNO] to const0_rtx.  */

static void
find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    regs->array[REGNO (x)].single_usage
      = (regs->array[REGNO (x)].single_usage != 0
         && regs->array[REGNO (x)].single_usage != insn)
        ? const0_rtx : insn;

  else if (code == SET)
    {
      /* Don't count SET_DEST if it is a REG; otherwise count things
         in SET_DEST because if a register is partially modified, it won't
         show up as a potential movable so we don't care how USAGE is set
         for it.  */
      if (!REG_P (SET_DEST (x)))
        find_single_use_in_loop (regs, insn, SET_DEST (x));
      find_single_use_in_loop (regs, insn, SET_SRC (x));
    }
  else
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e' && XEXP (x, i) != 0)
          find_single_use_in_loop (regs, insn, XEXP (x, i));
        else if (fmt[i] == 'E')
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
      }
}
/* Count and record any set in X which is contained in INSN.  Update
   REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
   in X.  */

static void
count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
{
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    /* Don't move a reg that has an explicit clobber.
       It's not worth the pain to try to do it correctly.  */
    regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);
      while (GET_CODE (dest) == SUBREG
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);
      if (REG_P (dest))
        {
          int i;
          int regno = REGNO (dest);
          for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
            {
              /* If this is the first setting of this reg
                 in current basic block, and it was set before,
                 it must be set in two basic blocks, so it cannot
                 be moved out of the loop.  */
              if (regs->array[regno].set_in_loop > 0
                  && last_set[regno] == 0)
                regs->array[regno+i].may_not_optimize = 1;
              /* If this is not first setting in current basic block,
                 see if reg was used in between previous one and this.
                 If so, neither one can be moved.  */
              if (last_set[regno] != 0
                  && reg_used_between_p (dest, last_set[regno], insn))
                regs->array[regno+i].may_not_optimize = 1;
              if (regs->array[regno+i].set_in_loop < 127)
                ++regs->array[regno+i].set_in_loop;
              last_set[regno+i] = insn;
            }
        }
    }
}
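
/* For instance (a hypothetical basic-block trace): two SETs of (reg 70)
   in the same basic block with an intervening use of the register leave
   set_in_loop == 2 and may_not_optimize == 1 for reg 70, so scan_loop will
   not treat either set as a movable.  */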
/* Given a loop that is bounded by LOOP->START and LOOP->END and that
   is entered at LOOP->SCAN_START, return 1 if the register set in SET
   contained in insn INSN is used by any insn that precedes INSN in
   cyclic order starting from the loop entry point.

   We don't want to use INSN_LUID here because if we restrict INSN to those
   that have a valid INSN_LUID, it means we cannot move an invariant out
   from an inner loop past two loops.  */

static int
loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
{
  rtx reg = SET_DEST (set);
  rtx p;

  /* Scan forward checking for register usage.  If we hit INSN, we
     are done.  Otherwise, if we hit LOOP->END, wrap around to LOOP->START.  */
  for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
    {
      if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
        return 1;

      if (p == loop->end)
        p = loop->start;
    }

  return 0;
}
/* Information we collect about arrays that we might want to prefetch.  */
struct prefetch_info
{
  struct iv_class *class;       /* Class this prefetch is based on.  */
  struct induction *giv;        /* GIV this prefetch is based on.  */
  rtx base_address;             /* Start prefetching from this address plus
                                   index.  */
  HOST_WIDE_INT index;
  HOST_WIDE_INT stride;         /* Prefetch stride in bytes in each
                                   iteration.  */
  unsigned int bytes_accessed;  /* Sum of sizes of all accesses to this
                                   prefetch area in one iteration.  */
  unsigned int total_bytes;     /* Total bytes loop will access in this block.
                                   This is set only for loops with known
                                   iteration counts and is 0xffffffff
                                   otherwise.  */
  int prefetch_in_loop;         /* Number of prefetch insns in loop.  */
  int prefetch_before_loop;     /* Number of prefetch insns before loop.  */
  unsigned int write : 1;       /* 1 for read/write prefetches.  */
};
/* Data used by check_store function.  */
struct check_store_data
{
  rtx mem_address;
  int mem_write;
};

static void check_store (rtx, rtx, void *);
static void emit_prefetch_instructions (struct loop *);
static int rtx_equal_for_prefetch_p (rtx, rtx);

/* Set mem_write when mem_address is found.  Used as callback to
   note_stores.  */
static void
check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct check_store_data *d = (struct check_store_data *) data;

  if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
    d->mem_write = 1;
}
/* Like rtx_equal_p, but attempts to swap commutative operands.  This is
   important to get some addresses combined.  Later more sophisticated
   transformations can be added when necessary.

   ??? The same trick with swapping operands is done at several other places.
   It would be nice to develop some common way to handle this.  */

static int
rtx_equal_for_prefetch_p (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code = GET_CODE (x);
  const char *fmt;

  if (x == y)
    return 1;
  if (code != GET_CODE (y))
    return 0;

  if (COMMUTATIVE_ARITH_P (x))
    {
      return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
               && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
              || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
                  && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
    }

  /* Compare the elements.  If any pair of corresponding elements fails to
     match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
                                          XVECEXP (y, i, j)) == 0)
              return 0;
          break;

        case 'e':
          if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
            return 0;
          break;

        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          gcc_unreachable ();
        }
    }
  return 1;
}
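/* Example (illustrative): the addresses (plus (reg 100) (reg 101)) and
   (plus (reg 101) (reg 100)) compare unequal under rtx_equal_p but equal
   here, so the two prefetch candidates can be merged.  */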
/* Remove constant addition value from the expression X (when present)
   and return it.  */

static HOST_WIDE_INT
remove_constant_addition (rtx *x)
{
  HOST_WIDE_INT addval = 0;
  rtx exp = *x;

  /* Avoid clobbering a shared CONST expression.  */
  if (GET_CODE (exp) == CONST)
    {
      if (GET_CODE (XEXP (exp, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
          && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
        {
          *x = XEXP (XEXP (exp, 0), 0);
          return INTVAL (XEXP (XEXP (exp, 0), 1));
        }
      return 0;
    }

  if (GET_CODE (exp) == CONST_INT)
    {
      addval = INTVAL (exp);
      *x = const0_rtx;
    }

  /* For plus expression recurse on ourself.  */
  else if (GET_CODE (exp) == PLUS)
    {
      addval += remove_constant_addition (&XEXP (exp, 0));
      addval += remove_constant_addition (&XEXP (exp, 1));

      /* In case our parameter was constant, remove extra zero from the
         expression.  */
      if (XEXP (exp, 0) == const0_rtx)
        *x = XEXP (exp, 1);
      else if (XEXP (exp, 1) == const0_rtx)
        *x = XEXP (exp, 0);
    }

  return addval;
}
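/* Worked example (illustrative): given *X == (plus (reg 100) (const_int 16)),
   the PLUS case rewrites *X to (reg 100) and returns 16; given
   *X == (const (plus (symbol_ref "a") (const_int 8))), *X becomes
   (symbol_ref "a") and 8 is returned.  */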
/* Attempt to identify accesses to arrays that are most likely to cause cache
   misses, and emit prefetch instructions a few prefetch blocks forward.

   To detect the arrays we use the GIV information that was collected by the
   strength reduction pass.

   The prefetch instructions are generated after the GIV information is done
   and before the strength reduction process.  The new GIVs are injected into
   the strength reduction tables, so the prefetch addresses are optimized as
   well.

   GIVs are split into base address, stride, and constant addition values.
   GIVs with the same address, stride and close addition values are combined
   into a single prefetch.  Also writes to GIVs are detected, so that prefetch
   for write instructions can be used for the block we write to, on machines
   that support write prefetches.

   Several heuristics are used to determine when to prefetch.  They are
   controlled by defined symbols that can be overridden for each target.  */

static void
emit_prefetch_instructions (struct loop *loop)
{
  int num_prefetches = 0;
  int num_real_prefetches = 0;
  int num_real_write_prefetches = 0;
  int num_prefetches_before = 0;
  int num_write_prefetches_before = 0;
  int ahead = 0;
  int i;
  struct iv_class *bl;
  struct induction *iv;
  struct prefetch_info info[MAX_PREFETCHES];
  struct loop_ivs *ivs = LOOP_IVS (loop);

  if (!HAVE_prefetch || PREFETCH_BLOCK == 0)
    return;
  /* Consider only loops w/o calls.  When a call is done, the loop is probably
     slow enough to read the memory.  */
  if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");

      return;
    }

  /* Don't prefetch in loops known to have few iterations.  */
  if (PREFETCH_NO_LOW_LOOPCNT
      && LOOP_INFO (loop)->n_iterations
      && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Prefetch: ignoring loop: not enough iterations.\n");
      return;
    }
  /* Search all induction variables and pick those interesting for the
     prefetch machinery.  */
  for (bl = ivs->list; bl; bl = bl->next)
    {
      struct induction *biv = bl->biv, *biv1;
      int basestride = 0;

      biv1 = biv;

      /* Expect all BIVs to be executed in each iteration.  This makes our
         analysis more conservative.  */
      while (biv1)
        {
          /* Discard non-constant additions that we can't handle well yet, and
             BIVs that are executed multiple times; such BIVs ought to be
             handled in the nested loop.  We accept not_every_iteration BIVs,
             since these only result in larger strides and make our
             heuristics more conservative.  */
          if (GET_CODE (biv->add_val) != CONST_INT)
            {
              if (loop_dump_stream)
                {
                  fprintf (loop_dump_stream,
                    "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
                           REGNO (biv->src_reg), INSN_UID (biv->insn));
                  print_rtl (loop_dump_stream, biv->add_val);
                  fprintf (loop_dump_stream, "\n");
                }
              break;
            }

          if (biv->maybe_multiple)
            {
              if (loop_dump_stream)
                {
                  fprintf (loop_dump_stream,
                           "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
                           REGNO (biv->src_reg), INSN_UID (biv->insn));
                  print_rtl (loop_dump_stream, biv->add_val);
                  fprintf (loop_dump_stream, "\n");
                }
              break;
            }

          basestride += INTVAL (biv1->add_val);
          biv1 = biv1->next_iv;
        }

      if (biv1 || !basestride)
        continue;
      for (iv = bl->giv; iv; iv = iv->next_iv)
        {
          rtx address;
          rtx temp;
          HOST_WIDE_INT index = 0;
          int add = 1;
          HOST_WIDE_INT stride = 0;
          int stride_sign = 1;
          struct check_store_data d;
          const char *ignore_reason = NULL;
          int size = GET_MODE_SIZE (GET_MODE (iv));

          /* See whether an induction variable is interesting to us and if
             not, report the reason.  */
          if (iv->giv_type != DEST_ADDR)
            ignore_reason = "giv is not a destination address";

          /* We are interested only in constant stride memory references
             in order to be able to compute density easily.  */
          else if (GET_CODE (iv->mult_val) != CONST_INT)
            ignore_reason = "stride is not constant";

          else
            {
              stride = INTVAL (iv->mult_val) * basestride;
              if (stride < 0)
                {
                  stride = -stride;
                  stride_sign = -1;
                }

              /* On some targets, reversed order prefetches are not
                 worthwhile.  */
              if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
                ignore_reason = "reversed order stride";

              /* Prefetch of accesses with an extreme stride might not be
                 worthwhile, either.  */
              else if (PREFETCH_NO_EXTREME_STRIDE
                       && stride > PREFETCH_EXTREME_STRIDE)
                ignore_reason = "extreme stride";

              /* Ignore GIVs with varying add values; we can't predict the
                 value for the next iteration.  */
              else if (!loop_invariant_p (loop, iv->add_val))
                ignore_reason = "giv has varying add value";

              /* Ignore GIVs in the nested loops; they ought to have been
                 handled already.  */
              else if (iv->maybe_multiple)
                ignore_reason = "giv is in nested loop";
            }

          if (ignore_reason != NULL)
            {
              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "Prefetch: ignoring giv at %d: %s.\n",
                         INSN_UID (iv->insn), ignore_reason);
              continue;
            }
          /* Determine the pointer to the basic array we are examining.  It is
             the sum of the BIV's initial value and the GIV's add_val.  */
          address = copy_rtx (iv->add_val);
          temp = copy_rtx (bl->initial_value);

          address = simplify_gen_binary (PLUS, Pmode, temp, address);
          index = remove_constant_addition (&address);

          d.mem_write = 0;
          d.mem_address = *iv->location;

          /* When the GIV is not always executed, we might be better off by
             not dirtying the cache pages.  */
          if (PREFETCH_CONDITIONAL || iv->always_executed)
            note_stores (PATTERN (iv->insn), check_store, &d);
          else
            {
              if (loop_dump_stream)
                fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
                         INSN_UID (iv->insn), "in conditional code.");
              continue;
            }
          /* Attempt to find another prefetch to the same array and see if we
             can merge this one.  */
          for (i = 0; i < num_prefetches; i++)
            if (rtx_equal_for_prefetch_p (address, info[i].base_address)
                && stride == info[i].stride)
              {
                /* In case both access the same array (same location, just
                   with a small difference in constant indexes), merge the
                   prefetches.  Just do the later one; the earlier one will
                   get prefetched from the previous iteration.  The
                   artificial threshold should not be too small, but also
                   not bigger than the small portion of memory usually
                   traversed by a single loop.  */
                if (index >= info[i].index
                    && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
                  {
                    info[i].write |= d.mem_write;
                    info[i].bytes_accessed += size;
                    info[i].index = index;
                    info[i].giv = iv;
                    info[i].class = bl;
                    info[num_prefetches].base_address = address;
                    add = 0;
                    break;
                  }

                if (index < info[i].index
                    && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
                  {
                    info[i].write |= d.mem_write;
                    info[i].bytes_accessed += size;
                    add = 0;
                    break;
                  }
              }

          /* Merging failed.  */
          if (add)
            {
              info[num_prefetches].giv = iv;
              info[num_prefetches].class = bl;
              info[num_prefetches].index = index;
              info[num_prefetches].stride = stride;
              info[num_prefetches].base_address = address;
              info[num_prefetches].write = d.mem_write;
              info[num_prefetches].bytes_accessed = size;
              num_prefetches++;
              if (num_prefetches >= MAX_PREFETCHES)
                {
                  if (loop_dump_stream)
                    fprintf (loop_dump_stream,
                             "Maximal number of prefetches exceeded.\n");
                  return;
                }
            }
        }
    }
  for (i = 0; i < num_prefetches; i++)
    {
      int density;

      /* Attempt to calculate the total number of bytes fetched by all
         iterations of the loop.  Avoid overflow.  */
      if (LOOP_INFO (loop)->n_iterations
          && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
              >= LOOP_INFO (loop)->n_iterations))
        info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
      else
        info[i].total_bytes = 0xffffffff;

      density = info[i].bytes_accessed * 100 / info[i].stride;

      /* Prefetch might be worthwhile only when the loads/stores are dense.  */
      if (PREFETCH_ONLY_DENSE_MEM)
        {
          if (density * 256 > PREFETCH_DENSE_MEM * 100
              && (info[i].total_bytes / PREFETCH_BLOCK
                  >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
            {
              info[i].prefetch_before_loop = 1;
              info[i].prefetch_in_loop
                = (info[i].total_bytes / PREFETCH_BLOCK
                   > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
            }
          else
            {
              info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
                         INSN_UID (info[i].giv->insn), density);
            }
        }
      else
        info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;

      /* Find how many prefetch instructions we'll use within the loop.  */
      if (info[i].prefetch_in_loop != 0)
        {
          info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
                                      / PREFETCH_BLOCK);
          num_real_prefetches += info[i].prefetch_in_loop;
          if (info[i].write)
            num_real_write_prefetches += info[i].prefetch_in_loop;
        }
    }
  /* Determine how many iterations ahead to prefetch within the loop, based
     on how many prefetches we currently expect to do within the loop.  */
  if (num_real_prefetches != 0)
    {
      if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                     "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
                     SIMULTANEOUS_PREFETCHES, num_real_prefetches);
          num_real_prefetches = 0, num_real_write_prefetches = 0;
        }
    }
  /* We'll also use AHEAD to determine how many prefetch instructions to
     emit before a loop, so don't leave it zero.  */
  if (ahead == 0)
    ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
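/* Worked arithmetic (hypothetical numbers): with PREFETCH_BLOCK == 32 and a
   giv of stride 8 accessing 8 bytes per iteration, density is
   8 * 100 / 8 == 100%, and the stride needs (8 + 32 - 1) / 32 == 1 prefetch
   insn per iteration.  If SIMULTANEOUS_PREFETCHES is 3 and two such
   prefetches remain, AHEAD becomes 3 / 2 == 1, i.e. we prefetch one
   iteration's worth of blocks ahead.  */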
  for (i = 0; i < num_prefetches; i++)
    {
      /* Update if we've decided not to prefetch anything within the loop.  */
      if (num_real_prefetches == 0)
        info[i].prefetch_in_loop = 0;

      /* Find how many prefetch instructions we'll use before the loop.  */
      if (info[i].prefetch_before_loop != 0)
        {
          int n = info[i].total_bytes / PREFETCH_BLOCK;

          if (n > ahead)
            n = ahead;
          info[i].prefetch_before_loop = n;
          num_prefetches_before += n;
          if (info[i].write)
            num_write_prefetches_before += n;
        }

      if (loop_dump_stream)
        {
          if (info[i].prefetch_in_loop == 0
              && info[i].prefetch_before_loop == 0)
            continue;
          fprintf (loop_dump_stream, "Prefetch insn: %d",
                   INSN_UID (info[i].giv->insn));
          fprintf (loop_dump_stream,
                   "; in loop: %d; before: %d; %s\n",
                   info[i].prefetch_in_loop,
                   info[i].prefetch_before_loop,
                   info[i].write ? "read/write" : "read only");
          fprintf (loop_dump_stream,
                   " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
                   (int) (info[i].bytes_accessed * 100 / info[i].stride),
                   info[i].bytes_accessed, info[i].total_bytes);
          fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
                   "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
                   info[i].index, info[i].stride);
          print_rtl (loop_dump_stream, info[i].base_address);
          fprintf (loop_dump_stream, "\n");
        }
    }
  if (num_real_prefetches + num_prefetches_before > 0)
    {
      /* Record that this loop uses prefetch instructions.  */
      LOOP_INFO (loop)->has_prefetch = 1;

      if (loop_dump_stream)
        {
          fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
                   num_real_prefetches, num_real_write_prefetches);
          fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
                   num_prefetches_before, num_write_prefetches_before);
        }
    }
  for (i = 0; i < num_prefetches; i++)
    {
      int y;

      for (y = 0; y < info[i].prefetch_in_loop; y++)
        {
          rtx loc = copy_rtx (*info[i].giv->location);
          rtx insn;
          int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
          rtx before_insn = info[i].giv->insn;
          rtx prev_insn = PREV_INSN (info[i].giv->insn);
          rtx seq;

          /* We can save some effort by offsetting the address on
             architectures with offsettable memory references.  */
          if (offsettable_address_p (0, VOIDmode, loc))
            loc = plus_constant (loc, bytes_ahead);
          else
            {
              rtx reg = gen_reg_rtx (Pmode);
              loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
                                            GEN_INT (bytes_ahead), reg,
                                            0, before_insn);
              loc = reg;
            }

          start_sequence ();
          /* Make sure the address operand is valid for prefetch.  */
          if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
                  (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
            loc = force_reg (Pmode, loc);
          emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
                                   GEN_INT (3)));
          seq = get_insns ();
          end_sequence ();
          emit_insn_before (seq, before_insn);

          /* Check all insns emitted and record the new GIV
             information.  */
          insn = NEXT_INSN (prev_insn);
          while (insn != before_insn)
            {
              insn = check_insn_for_givs (loop, insn,
                                          info[i].giv->always_executed,
                                          info[i].giv->maybe_multiple);
              insn = NEXT_INSN (insn);
            }
        }
      if (PREFETCH_BEFORE_LOOP)
        {
          /* Emit insns before the loop to fetch the first cache lines or,
             if we're not prefetching within the loop, everything we expect
             to need.  */
          for (y = 0; y < info[i].prefetch_before_loop; y++)
            {
              rtx reg = gen_reg_rtx (Pmode);
              rtx loop_start = loop->start;
              rtx init_val = info[i].class->initial_value;
              rtx add_val = simplify_gen_binary (PLUS, Pmode,
                                                 info[i].giv->add_val,
                                                 GEN_INT (y * PREFETCH_BLOCK));

              /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
                 non-constant INIT_VAL to have the same mode as REG, which
                 in this case we know to be Pmode.  */
              if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
                {
                  rtx seq;

                  start_sequence ();
                  init_val = convert_to_mode (Pmode, init_val, 0);
                  seq = get_insns ();
                  end_sequence ();
                  loop_insn_emit_before (loop, 0, loop_start, seq);
                }
              loop_iv_add_mult_emit_before (loop, init_val,
                                            info[i].giv->mult_val,
                                            add_val, reg, 0, loop_start);
              emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
                                              GEN_INT (3)),
                                loop_start);
            }
        }
    }
}
/* Communication with routines called via `note_stores'.  */

static rtx note_insn;

/* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs.  */

static rtx addr_placeholder;

/* ??? Unfinished optimizations, and possible future optimizations,
   for the strength reduction code.  */

/* ??? The interaction of biv elimination, and recognition of 'constant'
   bivs, may cause problems.  */

/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
   performance problems.

   Perhaps don't eliminate things that can be combined with an addressing
   mode.  Find all givs that have the same biv, mult_val, and add_val;
   then for each giv, check to see if its only use dies in a following
   memory address.  If so, generate a new memory address and check to see
   if it is valid.  If it is valid, then store the modified memory address,
   otherwise, mark the giv as not done so that it will get its own iv.  */

/* ??? Could try to optimize branches when it is known that a biv is always
   positive.  */

/* ??? When replacing a biv in a compare insn, we should replace with the
   closest giv so that an optimized branch can still be recognized by the
   combiner, e.g. the VAX acb insn.  */

/* ??? Many of the checks involving uid_luid could be simplified if regscan
   was rerun in loop_optimize whenever a register was added or moved.
   Also, some of the optimizations could be a little less conservative.  */

/* Searches the insns between INSN and LOOP->END.  Returns 1 if there
   is a backward branch in that range that branches to somewhere between
   LOOP->START and INSN.  Returns 0 otherwise.  */

/* ??? This is a quadratic algorithm.  It could be rewritten to be linear.
   In practice, this is not a problem, because this function is seldom called,
   and uses a negligible amount of CPU time on average.  */
static int
back_branch_in_range_p (const struct loop *loop, rtx insn)
{
  rtx p, q, target_insn;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx orig_loop_end = loop->end;

  /* Stop before we get to the backward branch at the end of the loop.  */
  loop_end = prev_nonnote_insn (loop_end);
  if (BARRIER_P (loop_end))
    loop_end = PREV_INSN (loop_end);

  /* Check in case insn has been deleted, search forward for first non
     deleted insn following it.  */
  while (INSN_DELETED_P (insn))
    insn = NEXT_INSN (insn);

  /* Check for the case where insn is the last insn in the loop.  Deal
     with the case where INSN was a deleted loop test insn, in which case
     it will now be the NOTE_LOOP_END.  */
  if (insn == loop_end || insn == orig_loop_end)
    return 0;

  for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p))
    {
      if (JUMP_P (p))
        {
          target_insn = JUMP_LABEL (p);

          /* Search from loop_start to insn, to see if one of them is
             the target_insn.  We can't use INSN_LUID comparisons here,
             since insn may not have an LUID entry.  */
          for (q = loop_start; q != insn; q = NEXT_INSN (q))
            if (q == target_insn)
              return 1;
        }
    }

  return 0;
}
/* Scan the loop body and call FNCALL for each insn.  In addition to the
   LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
   the callback.

   NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at
   least once for every loop iteration except for the last one.

   MAYBE_MULTIPLE is 1 if current insn may be executed more than once for
   every loop iteration.  */

typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int);
static void
for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
{
  int not_every_iteration = 0;
  int maybe_multiple = 0;
  int past_loop_latch = 0;
  bool exit_test_is_entry = false;
  rtx p;

  /* If loop_scan_start points to the loop exit test, the loop body
     cannot be counted on running on every iteration, and we have to
     be wary of subversive use of gotos inside expression
     statements.  */
  if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
    {
      exit_test_is_entry = true;
      maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
    }

  /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE.  */
  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      p = fncall (loop, p, not_every_iteration, maybe_multiple);

      /* Past CODE_LABEL, we get to insns that may be executed multiple
         times.  The only way we can be sure that they can't is if every
         jump insn between here and the end of the loop either
         returns, exits the loop, is a jump to a location that is still
         behind the label, or is a jump to the loop start.  */

      if (LABEL_P (p))
        {
          rtx insn = p;

          maybe_multiple = 0;

          while (1)
            {
              insn = NEXT_INSN (insn);
              if (insn == loop->scan_start)
                break;
              if (insn == loop->end)
                {
                  if (loop->top != 0)
                    insn = loop->top;
                  else
                    break;
                  if (insn == loop->scan_start)
                    break;
                }

              if (JUMP_P (insn)
                  && GET_CODE (PATTERN (insn)) != RETURN
                  && (!any_condjump_p (insn)
                      || (JUMP_LABEL (insn) != 0
                          && JUMP_LABEL (insn) != loop->scan_start
                          && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
                {
                  maybe_multiple = 1;
                  break;
                }
            }
        }

      /* Past a jump, we get to insns for which we can't count
         on whether they will be executed during each iteration.  */
      /* This code appears twice in strength_reduce.  There is also similar
         code in scan_loop.  */
      if (JUMP_P (p)
          /* If we enter the loop in the middle, and scan around to the
             beginning, don't set not_every_iteration for that.
             This can be any kind of jump, since we want to know if insns
             will be executed if the loop is executed.  */
          && (exit_test_is_entry
              || !(JUMP_LABEL (p) == loop->top
                   && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
                        && any_uncondjump_p (p))
                       || (NEXT_INSN (p) == loop->end
                           && any_condjump_p (p))))))
        {
          rtx label = 0;

          /* If this is a jump outside the loop, then it also doesn't
             matter.  Check to see if the target of this branch is on the
             loop->exits_labels list.  */

          for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
            if (XEXP (label, 0) == JUMP_LABEL (p))
              break;

          if (!label)
            not_every_iteration = 1;
        }

      /* Note if we pass a loop latch.  If we do, then we can not clear
         NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
         a loop since a jump before the last CODE_LABEL may have started
         a new loop iteration.

         Note that LOOP_TOP is only set for rotated loops and we need
         this check for all loops, so compare against the CODE_LABEL
         which immediately follows LOOP_START.  */
      if (JUMP_P (p)
          && JUMP_LABEL (p) == NEXT_INSN (loop->start))
        past_loop_latch = 1;

      /* Unlike in the code motion pass where MAYBE_NEVER indicates that
         an insn may never be executed, NOT_EVERY_ITERATION indicates whether
         or not an insn is known to be executed each iteration of the
         loop, whether or not any iterations are known to occur.

         Therefore, if we have just passed a label and have no more labels
         between here and the test insn of the loop, and we have not passed
         a jump to the top of the loop, then we know these insns will be
         executed each iteration.  */

      if (not_every_iteration
          && !past_loop_latch
          && LABEL_P (p)
          && no_labels_between_p (p, loop->end))
        not_every_iteration = 0;
    }
}
static void
loop_bivs_find (struct loop *loop)
{
  struct loop_regs *regs = LOOP_REGS (loop);
  struct loop_ivs *ivs = LOOP_IVS (loop);
  /* Temporary list pointers for traversing ivs->list.  */
  struct iv_class *bl, **backbl;

  ivs->list = 0;

  for_each_insn_in_loop (loop, check_insn_for_bivs);

  /* Scan ivs->list to remove all regs that proved not to be bivs.
     Make a sanity check against regs->n_times_set.  */
  for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
    {
      if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
          /* Above happens if register modified by subreg, etc.  */
          /* Make sure it is not recognized as a basic induction var: */
          || regs->array[bl->regno].n_times_set != bl->biv_count
          /* If never incremented, it is invariant that we decided not to
             move.  So leave it alone.  */
          || ! bl->incremented)
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
                     bl->regno,
                     (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
                      ? "not induction variable"
                      : (! bl->incremented ? "never incremented"
                         : "count error")));

          REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
          *backbl = bl->next;
        }
      else
        {
          backbl = &bl->next;

          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
        }
    }
}
/* Determine how BIVS are initialized by looking through the pre-header
   extended basic block.  */
static void
loop_bivs_init_find (struct loop *loop)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  /* Temporary list pointers for traversing ivs->list.  */
  struct iv_class *bl;
  int call_seen;
  rtx p;

  /* Find initial value for each biv by searching backwards from loop_start,
     halting at first label.  Also record any test condition.  */

  call_seen = 0;
  for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
    {
      rtx test;

      note_insn = p;

      if (CALL_P (p))
        call_seen = 1;

      if (INSN_P (p))
        note_stores (PATTERN (p), record_initial, ivs);

      /* Record any test of a biv that branches around the loop if no store
         between it and the start of loop.  We only care about tests with
         constants and registers and only certain of those.  */
      if (JUMP_P (p)
          && JUMP_LABEL (p) != 0
          && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
          && (test = get_condition_for_loop (loop, p)) != 0
          && REG_P (XEXP (test, 0))
          && REGNO (XEXP (test, 0)) < max_reg_before_loop
          && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
          && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
          && bl->init_insn == 0)
        {
          /* If an NE test, we have an initial value!  */
          if (GET_CODE (test) == NE)
            {
              bl->init_insn = p;
              bl->init_set = gen_rtx_SET (VOIDmode,
                                          XEXP (test, 0), XEXP (test, 1));
            }
          else
            bl->initial_test = test;
        }
    }
}
/* Look at each biv and see if we can say anything better about its
   initial value from any initializing insns set up above.  (This is done
   in two passes to avoid missing SETs in a PARALLEL.)  */
static void
loop_bivs_check (struct loop *loop)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  /* Temporary list pointers for traversing ivs->list.  */
  struct iv_class *bl;
  struct iv_class **backbl;

  for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
    {
      rtx src;
      rtx note;

      if (! bl->init_insn)
        continue;

      /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
         is a constant, use the value of that.  */
      if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
           && CONSTANT_P (XEXP (note, 0)))
          || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
              && CONSTANT_P (XEXP (note, 0))))
        src = XEXP (note, 0);
      else
        src = SET_SRC (bl->init_set);

      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Biv %d: initialized at insn %d: initial value ",
                 bl->regno, INSN_UID (bl->init_insn));

      if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
           || GET_MODE (src) == VOIDmode)
          && valid_initial_value_p (src, bl->init_insn,
                                    LOOP_INFO (loop)->pre_header_has_call,
                                    loop->start))
        {
          bl->initial_value = src;

          if (loop_dump_stream)
            {
              print_simple_rtl (loop_dump_stream, src);
              fputc ('\n', loop_dump_stream);
            }
        }
      /* If we can't make it a giv,
         let biv keep initial value of "itself".  */
      else if (loop_dump_stream)
        fprintf (loop_dump_stream, "is complex\n");
    }
}
/* Search the loop for general induction variables.  */

static void
loop_givs_find (struct loop *loop)
{
  for_each_insn_in_loop (loop, check_insn_for_givs);
}

/* For each giv for which we still don't know whether or not it is
   replaceable, check to see if it is replaceable because its final value
   can be calculated.  */

static void
loop_givs_check (struct loop *loop)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;

  for (bl = ivs->list; bl; bl = bl->next)
    {
      struct induction *v;

      for (v = bl->giv; v; v = v->next_iv)
        if (! v->replaceable && ! v->not_replaceable)
          check_final_value (loop, v);
    }
}
/* Try to generate the simplest rtx for the expression
   (PLUS (MULT mult1 mult2) add1).  This is used to calculate the initial
   value of givs.  */

static rtx
fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode)
{
  rtx temp, mult_res;
  rtx result;

  /* The modes must all be the same.  This should always be true.  For now,
     check to make sure.  */
  gcc_assert (GET_MODE (mult1) == mode || GET_MODE (mult1) == VOIDmode);
  gcc_assert (GET_MODE (mult2) == mode || GET_MODE (mult2) == VOIDmode);
  gcc_assert (GET_MODE (add1) == mode || GET_MODE (add1) == VOIDmode);

  /* Ensure that if at least one of mult1/mult2 are constant, then mult2
     will be a constant.  */
  if (GET_CODE (mult1) == CONST_INT)
    {
      temp = mult2;
      mult2 = mult1;
      mult1 = temp;
    }

  mult_res = simplify_binary_operation (MULT, mode, mult1, mult2);
  if (! mult_res)
    mult_res = gen_rtx_MULT (mode, mult1, mult2);

  /* Again, put the constant second.  */
  if (GET_CODE (add1) == CONST_INT)
    {
      temp = add1;
      add1 = mult_res;
      mult_res = temp;
    }

  result = simplify_binary_operation (PLUS, mode, add1, mult_res);
  if (! result)
    result = gen_rtx_PLUS (mode, add1, mult_res);

  return result;
}
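/* Worked example (illustrative): fold_rtx_mult_add ((reg 100), GEN_INT (3),
   GEN_INT (4), SImode) yields
   (plus:SI (mult:SI (reg 100) (const_int 3)) (const_int 4)),
   while fully constant inputs fold to a single CONST_INT, e.g.
   mult1 == 2, mult2 == 3, add1 == 4 gives (const_int 10).  */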
/* Searches the list of induction struct's for the biv BL, to try to calculate
   the total increment value for one iteration of the loop as a constant.

   Returns the increment value as an rtx, simplified as much as possible,
   if it can be calculated.  Otherwise, returns 0.  */

static rtx
biv_total_increment (const struct iv_class *bl)
{
  struct induction *v;
  rtx result;

  /* For increment, must check every instruction that sets it.  Each
     instruction must be executed only once each time through the loop.
     To verify this, we check that the insn is always executed, and that
     there are no backward branches after the insn that branch to before it.
     Also, the insn must have a mult_val of one (to make sure it really is
     an increment).  */

  result = const0_rtx;
  for (v = bl->biv; v; v = v->next_iv)
    {
      if (v->always_computable && v->mult_val == const1_rtx
          && ! v->maybe_multiple
          && SCALAR_INT_MODE_P (v->mode))
        {
          /* If we have already counted it, skip it.  */
          if (v->same)
            continue;

          result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode);
        }
      else
        return 0;
    }

  return result;
}
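/* Example (illustrative): a biv incremented by 4 in one insn and by 8 in
   another, both executed exactly once per iteration, has a total increment
   of (const_int 12); if any increment has a mult_val other than one or may
   execute more than once, 0 is returned instead.  */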
/* Try to prove that the register is dead after the loop exits.  Trace every
   loop exit looking for an insn that will always be executed, which sets
   the register to some value, and appears before the first use of the register
   is found.  If successful, then return 1, otherwise return 0.  */

/* ?? Could be made more intelligent in the handling of jumps, so that
   it can search past if statements and other similar structures.  */

static int
reg_dead_after_loop (const struct loop *loop, rtx reg)
{
  rtx insn, label;
  int jump_count = 0;
  int label_count = 0;

  /* In addition to checking all exits of this loop, we must also check
     all exits of inner nested loops that would exit this loop.  We don't
     have any way to identify those, so we just give up if there are any
     such inner loop exits.  */

  for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
    label_count++;

  if (label_count != loop->exit_count)
    return 0;

  /* HACK: Must also search the loop fall through exit, create a label_ref
     here which points to the loop->end, and append the loop_number_exit_labels
     list to it.  */
  label = gen_rtx_LABEL_REF (Pmode, loop->end);
  LABEL_NEXTREF (label) = loop->exit_labels;

  for (; label; label = LABEL_NEXTREF (label))
    {
      /* Succeed if we find an insn which sets the biv or if we reach the
         end of the function.  Fail if we find an insn that uses the biv,
         or if we come to a conditional jump.  */

      insn = NEXT_INSN (XEXP (label, 0));
      while (insn)
        {
          if (INSN_P (insn))
            {
              rtx set, note;

              if (reg_referenced_p (reg, PATTERN (insn)))
                return 0;

              note = find_reg_equal_equiv_note (insn);
              if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0)))
                return 0;

              set = single_set (insn);
              if (set && rtx_equal_p (SET_DEST (set), reg))
                break;
            }

          if (JUMP_P (insn))
            {
              if (GET_CODE (PATTERN (insn)) == RETURN)
                break;
              else if (!any_uncondjump_p (insn)
                       /* Prevent infinite loop following infinite loops.  */
                       || jump_count++ > 20)
                return 0;
              else
                insn = JUMP_LABEL (insn);
            }

          insn = NEXT_INSN (insn);
        }
    }

  /* Success, the register is dead on all loop exits.  */
  return 1;
}
/* Try to calculate the final value of the biv, the value it will have at
   the end of the loop.  If we can do it, return that value.  */

static rtx
final_biv_value (const struct loop *loop, struct iv_class *bl)
{
  unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;
  rtx increment, tem;

  /* ??? This only works for MODE_INT biv's.  Reject all others for now.  */

  if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT)
    return 0;

  /* The final value for reversed bivs must be calculated differently than
     for ordinary bivs.  In this case, there is already an insn after the
     loop which sets this biv's final value (if necessary), and there are
     no other loop exits, so we can return any value.  */
  if (bl->reversed)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Final biv value for %d, reversed biv.\n", bl->regno);

      return const0_rtx;
    }

  /* Try to calculate the final value as initial value + (number of iterations
     * increment).  For this to work, increment must be invariant, the only
     exit from the loop must be the fall through at the bottom (otherwise
     it may not have its final value when the loop exits), and the initial
     value of the biv must be invariant.  */

  if (n_iterations != 0
      && ! loop->exit_count
      && loop_invariant_p (loop, bl->initial_value))
    {
      increment = biv_total_increment (bl);

      if (increment && loop_invariant_p (loop, increment))
        {
          /* Can calculate the loop exit value, emit insns after loop
             end to calculate this value into a temporary register in
             case it is needed later.  */

          tem = gen_reg_rtx (bl->biv->mode);
          record_base_value (REGNO (tem), bl->biv->add_val, 0);
          loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations),
                                 bl->initial_value, tem);

          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                     "Final biv value for %d, calculated.\n", bl->regno);

          return tem;
        }
    }

  /* Check to see if the biv is dead at all loop exits.  */
  if (reg_dead_after_loop (loop, bl->biv->src_reg))
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Final biv value for %d, biv dead after loop exit.\n",
                 bl->regno);

      return const0_rtx;
    }

  return 0;
}
/* Return nonzero if it is possible to eliminate the biv BL provided
   all givs are reduced.  This is possible if either the reg is not
   used outside the loop, or we can compute what its final value will
   be.  */

static int
loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
                       int threshold, int insn_count)
{
  /* For architectures with a decrement_and_branch_until_zero insn,
     don't do this if we put a REG_NONNEG note on the endtest for this
     biv.  */

#ifdef HAVE_decrement_and_branch_until_zero
  if (bl->nonneg)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Cannot eliminate nonneg biv %d.\n", bl->regno);
      return 0;
    }
#endif

  /* Check that biv is used outside loop or if it has a final value.
     Compare against bl->init_insn rather than loop->start.  We aren't
     concerned with any uses of the biv between init_insn and
     loop->start since these won't be affected by the value of the biv
     elsewhere in the function, so long as init_insn doesn't use the
     biv itself.  */

  if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
       && bl->init_insn
       && INSN_UID (bl->init_insn) < max_uid_for_loop
       && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
       && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
      || (bl->final_value = final_biv_value (loop, bl)))
    return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream,
               "Cannot eliminate biv %d.\n",
               bl->regno);
      fprintf (loop_dump_stream,
               "First use: insn %d, last use: insn %d.\n",
               REGNO_FIRST_UID (bl->regno),
               REGNO_LAST_UID (bl->regno));
    }

  return 0;
}
/* Reduce each giv of BL that we have decided to reduce.  */

static void
loop_givs_reduce (struct loop *loop, struct iv_class *bl)
{
  struct induction *v;

  for (v = bl->giv; v; v = v->next_iv)
    {
      struct induction *tv;
      if (! v->ignore && v->same == 0)
        {
          int auto_inc_opt = 0;

          /* If the code for derived givs immediately below has already
             allocated a new_reg, we must keep it.  */
          if (! v->new_reg)
            v->new_reg = gen_reg_rtx (v->mode);

#ifdef AUTO_INC_DEC
          /* If the target has auto-increment addressing modes, and
             this is an address giv, then try to put the increment
             immediately after its use, so that flow can create an
             auto-increment addressing mode.  */
          /* Don't do this for loops entered at the bottom, to avoid
             an invalid transformation: when the loop is entered at the
             bottom, the increment could be reached before the use, and
             flow would combine it into an auto-inc address for the
             wrong iteration.  */
          if (v->giv_type == DEST_ADDR && bl->biv_count == 1
              && bl->biv->always_executed && ! bl->biv->maybe_multiple
              /* We don't handle reversed biv's because bl->biv->insn
                 does not have a valid INSN_LUID.  */
              && ! bl->reversed
              && v->always_executed && ! v->maybe_multiple
              && INSN_UID (v->insn) < max_uid_for_loop)
            {
              /* If other giv's have been combined with this one, then
                 this will work only if all uses of the other giv's occur
                 before this giv's insn.  This is difficult to check.

                 We simplify this by looking for the common case where
                 there is one DEST_REG giv, and this giv's insn is the
                 last use of the dest_reg of that DEST_REG giv.  If the
                 increment occurs after the address giv, then we can
                 perform the optimization.  (Otherwise, the increment
                 would have to go before other_giv, and we would not be
                 able to combine it with the address giv to get an
                 auto-inc address.)  */
              if (v->combined_with)
                {
                  struct induction *other_giv = 0;

                  for (tv = bl->giv; tv; tv = tv->next_iv)
                    if (tv->same == v)
                      {
                        if (other_giv)
                          break;
                        else
                          other_giv = tv;
                      }
                  if (! tv && other_giv
                      && REGNO (other_giv->dest_reg) < max_reg_before_loop
                      && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
                          == INSN_UID (v->insn))
                      && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
                    auto_inc_opt = 1;
                }
              /* Check for case where increment is before the address
                 giv.  Do this test in "loop order".  */
              else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
                        && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
                            || (INSN_LUID (bl->biv->insn)
                                > INSN_LUID (loop->scan_start))))
                       || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
                           && (INSN_LUID (loop->scan_start)
                               < INSN_LUID (bl->biv->insn))))
                auto_inc_opt = -1;
              else
                auto_inc_opt = 1;

#ifdef HAVE_cc0
              {
                rtx prev;

                /* We can't put an insn immediately after one setting
                   cc0, or immediately before one using cc0.  */
                if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
                    || (auto_inc_opt == -1
                        && (prev = prev_nonnote_insn (v->insn)) != 0
                        && INSN_P (prev)
                        && sets_cc0_p (PATTERN (prev))))
                  auto_inc_opt = 0;
              }
#endif

              if (auto_inc_opt)
                v->auto_inc_opt = 1;
            }
#endif

          /* For each place where the biv is incremented, add an insn
             to increment the new, reduced reg for the giv.  */
          for (tv = bl->biv; tv; tv = tv->next_iv)
            {
              rtx insert_before;

              /* Skip if location is the same as a previous one.  */
              if (tv->same)
                continue;
              if (! auto_inc_opt)
                insert_before = NEXT_INSN (tv->insn);
              else if (auto_inc_opt == 1)
                insert_before = NEXT_INSN (v->insn);
              else
                insert_before = v->insn;

              if (tv->mult_val == const1_rtx)
                loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
                                              v->new_reg, v->new_reg,
                                              0, insert_before);
              else /* tv->mult_val == const0_rtx */
                /* A multiply is acceptable here
                   since this is presumed to be seldom executed.  */
                loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
                                              v->add_val, v->new_reg,
                                              0, insert_before);
            }

          /* Add code at loop start to initialize giv's reduced reg.  */

          loop_iv_add_mult_hoist (loop,
                                  extend_value_for_giv (v, bl->initial_value),
                                  v->mult_val, v->add_val, v->new_reg);
        }
    }
}
/* Check for givs whose first use is their definition and whose
   last use is the definition of another giv.  If so, it is likely
   dead and should not be used to derive another giv nor to
   eliminate a biv.  */

static void
loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
{
  struct induction *v;

  for (v = bl->giv; v; v = v->next_iv)
    {
      if (v->ignore
          || (v->same && v->same->ignore))
        continue;

      if (v->giv_type == DEST_REG
          && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
        {
          struct induction *v1;

          for (v1 = bl->giv; v1; v1 = v1->next_iv)
            if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
              v->maybe_dead = 1;
        }
    }
}

static void
loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
{
  struct induction *v;

  for (v = bl->giv; v; v = v->next_iv)
    {
      if (v->same && v->same->ignore)
        v->ignore = 1;

      if (v->ignore)
        continue;

      /* Update expression if this was combined, in case other giv was
         replaced.  */
      if (v->same)
        v->new_reg = replace_rtx (v->new_reg,
                                  v->same->dest_reg, v->same->new_reg);

      /* See if this register is known to be a pointer to something.  If
         so, see if we can find the alignment.  First see if there is a
         destination register that is a pointer.  If so, this shares the
         alignment too.  Next see if we can deduce anything from the
         computational information.  If not, and this is a DEST_ADDR
         giv, at least we know that it's a pointer, though we don't know
         the alignment.  */
      if (REG_P (v->new_reg)
          && v->giv_type == DEST_REG
          && REG_POINTER (v->dest_reg))
        mark_reg_pointer (v->new_reg,
                          REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
      else if (REG_P (v->new_reg)
               && REG_POINTER (v->src_reg))
        {
          unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));

          if (align == 0
              || GET_CODE (v->add_val) != CONST_INT
              || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
            align = 0;

          mark_reg_pointer (v->new_reg, align);
        }
      else if (REG_P (v->new_reg)
               && REG_P (v->add_val)
               && REG_POINTER (v->add_val))
        {
          unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));

          if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
              || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
            align = 0;

          mark_reg_pointer (v->new_reg, align);
        }
      else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
        mark_reg_pointer (v->new_reg, 0);

      if (v->giv_type == DEST_ADDR)
        {
          /* Store reduced reg as the address in the memref where we found
             this giv.  */
          if (validate_change_maybe_volatile (v->insn, v->location,
                                              v->new_reg))
            /* Yay, it worked!  */;
          /* Not replaceable; emit an insn to set the original
             giv reg from the reduced giv.  */
          else if (REG_P (*v->location))
            loop_insn_emit_before (loop, 0, v->insn,
                                   gen_move_insn (*v->location,
                                                  v->new_reg));
          else if (GET_CODE (*v->location) == PLUS
                   && REG_P (XEXP (*v->location, 0))
                   && CONSTANT_P (XEXP (*v->location, 1)))
            loop_insn_emit_before (loop, 0, v->insn,
                                   gen_move_insn (XEXP (*v->location, 0),
                                                  gen_rtx_MINUS
                                                  (GET_MODE (*v->location),
                                                   v->new_reg,
                                                   XEXP (*v->location, 1))));
          else
            {
              /* If it wasn't a reg, create a pseudo and use that.  */
              rtx reg, seq;
              start_sequence ();
              reg = force_reg (v->mode, *v->location);
              seq = get_insns ();
              end_sequence ();
              loop_insn_emit_before (loop, 0, v->insn, seq);
              if (!validate_change_maybe_volatile (v->insn, v->location, reg))
                gcc_unreachable ();
            }
        }
      else if (v->replaceable)
        {
          reg_map[REGNO (v->dest_reg)] = v->new_reg;
        }
      else
        {
          rtx original_insn = v->insn;
          rtx note;

          /* Not replaceable; emit an insn to set the original giv reg from
             the reduced giv, same as above.  */
          v->insn = loop_insn_emit_after (loop, 0, original_insn,
                                          gen_move_insn (v->dest_reg,
                                                         v->new_reg));

          /* The original insn may have a REG_EQUAL note.  This note is
             now incorrect and may result in invalid substitutions later.
             The original insn is dead, but may be part of a libcall
             sequence, which doesn't seem worth the bother of handling.  */
          note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
          if (note)
            remove_note (original_insn, note);
        }

      /* When a loop is reversed, givs which depend on the reversed
         biv, and which are live outside the loop, must be set to their
         correct final value.  This insn is only needed if the giv is
         not replaceable.  The correct final value is the same as the
         value that the giv starts the reversed loop with.  */
      if (bl->reversed && ! v->replaceable)
        loop_iv_add_mult_sink (loop,
                               extend_value_for_giv (v, bl->initial_value),
                               v->mult_val, v->add_val, v->dest_reg);
      else if (v->final_value)
        loop_insn_sink_or_swim (loop,
                                gen_load_of_final_value (v->dest_reg,
                                                         v->final_value));

      if (loop_dump_stream)
        {
          fprintf (loop_dump_stream, "giv at %d reduced to ",
                   INSN_UID (v->insn));
          print_simple_rtl (loop_dump_stream, v->new_reg);
          fprintf (loop_dump_stream, "\n");
        }
    }
}

static int
loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
                         struct iv_class *bl, struct induction *v,
                         rtx test_reg)
{
  int add_cost;
  int benefit;

  benefit = v->benefit;
  PUT_MODE (test_reg, v->mode);
  add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
                               test_reg, test_reg);

  /* Reduce benefit if not replaceable, since we will insert a
     move-insn to replace the insn that calculates this giv.  Don't do
     this unless the giv is a user variable, since it will often be
     marked non-replaceable because of the duplication of the exit
     code outside the loop.  In such a case, the copies we insert are
     dead and will be deleted.  So they don't have a cost.  Similar
     situations exist.  */
  /* ??? The new final_[bg]iv_value code does a much better job of
     finding replaceable giv's, and hence this code may no longer be
     necessary.  */
  if (! v->replaceable && ! bl->eliminable
      && REG_USERVAR_P (v->dest_reg))
    benefit -= copy_cost;

  /* Decrease the benefit to count the add-insns that we will insert
     to increment the reduced reg for the giv.  ??? This can
     overestimate the run-time cost of the additional insns, e.g. if
     there are multiple basic blocks that increment the biv, but only
     one of these blocks is executed during each iteration.  There is
     no good way to detect cases like this with the current structure
     of the loop optimizer.  This code is more accurate for
     determining code size than run-time benefits.  */
  benefit -= add_cost * bl->biv_count;

  /* Decide whether to strength-reduce this giv or to leave the code
     unchanged (recompute it from the biv each time it is used).  This
     decision can be made independently for each giv.  */

#ifdef AUTO_INC_DEC
  /* Attempt to guess whether autoincrement will handle some of the
     new add insns; if so, increase BENEFIT (undo the subtraction of
     add_cost that was done above).  */
  if (v->giv_type == DEST_ADDR
      /* Increasing the benefit is risky, since this is only a guess.
         Avoid increasing register pressure in cases where there would
         be no other benefit from reducing this giv.  */
      && benefit > 0
      && GET_CODE (v->mult_val) == CONST_INT)
    {
      int size = GET_MODE_SIZE (GET_MODE (v->mem));

      if (HAVE_POST_INCREMENT
          && INTVAL (v->mult_val) == size)
        benefit += add_cost * bl->biv_count;
      else if (HAVE_PRE_INCREMENT
               && INTVAL (v->mult_val) == size)
        benefit += add_cost * bl->biv_count;
      else if (HAVE_POST_DECREMENT
               && -INTVAL (v->mult_val) == size)
        benefit += add_cost * bl->biv_count;
      else if (HAVE_PRE_DECREMENT
               && -INTVAL (v->mult_val) == size)
        benefit += add_cost * bl->biv_count;
    }
#endif

  return benefit;
}
/* Free IV structures for LOOP.  */

static void
loop_ivs_free (struct loop *loop)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *iv = ivs->list;

  free (ivs->regs);

  while (iv)
    {
      struct iv_class *next = iv->next;
      struct induction *induction;
      struct induction *next_induction;

      for (induction = iv->biv; induction; induction = next_induction)
        {
          next_induction = induction->next_iv;
          free (induction);
        }
      for (induction = iv->giv; induction; induction = next_induction)
        {
          next_induction = induction->next_iv;
          free (induction);
        }

      free (iv);
      iv = next;
    }
}
/* Look back before LOOP->START for the insn that sets REG and return
   the equivalent constant if there is a REG_EQUAL note otherwise just
   the SET_SRC of REG.  */

static rtx
loop_find_equiv_value (const struct loop *loop, rtx reg)
{
  rtx loop_start = loop->start;
  rtx insn, set;
  rtx ret;

  ret = reg;
  for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn))
    {
      if (LABEL_P (insn))
        break;

      else if (INSN_P (insn) && reg_set_p (reg, insn))
        {
          /* We found the last insn before the loop that sets the register.
             If it sets the entire register, and has a REG_EQUAL note,
             then use the value of the REG_EQUAL note.  */
          if ((set = single_set (insn))
              && (SET_DEST (set) == reg))
            {
              rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);

              /* Only use the REG_EQUAL note if it is a constant.
                 Other things, divide in particular, will cause
                 problems later if we use them.  */
              if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST
                  && CONSTANT_P (XEXP (note, 0)))
                ret = XEXP (note, 0);
              else
                ret = SET_SRC (set);

              /* We cannot do this if it changes between the
                 assignment and loop start though.  */
              if (modified_between_p (ret, insn, loop_start))
                ret = reg;
            }
          break;
        }
    }
  return ret;
}
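/* Example (illustrative): if the pre-header contains
       (set (reg 100) (plus (reg 101) (const_int 1)))
   and the source is not modified before the loop starts,
   loop_find_equiv_value (loop, (reg 100)) returns
   (plus (reg 101) (const_int 1)); with a constant REG_EQUAL note, the
   constant is returned instead.  */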
/* Find and return register term common to both expressions OP0 and
   OP1 or NULL_RTX if no such term exists.  Each expression must be a
   REG or a PLUS of a REG.  */

static rtx
find_common_reg_term (rtx op0, rtx op1)
{
  if ((REG_P (op0) || GET_CODE (op0) == PLUS)
      && (REG_P (op1) || GET_CODE (op1) == PLUS))
    {
      rtx op00;
      rtx op01;
      rtx op10;
      rtx op11;

      if (GET_CODE (op0) == PLUS)
        op01 = XEXP (op0, 1), op00 = XEXP (op0, 0);
      else
        op01 = const0_rtx, op00 = op0;

      if (GET_CODE (op1) == PLUS)
        op11 = XEXP (op1, 1), op10 = XEXP (op1, 0);
      else
        op11 = const0_rtx, op10 = op1;

      /* Find and return common register term if present.  */
      if (REG_P (op00) && (op00 == op10 || op00 == op11))
        return op00;
      else if (REG_P (op01) && (op01 == op10 || op01 == op11))
        return op01;
    }

  /* No common register term found.  */
  return NULL_RTX;
}
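/* Example (illustrative): find_common_reg_term ((reg 100),
   (plus (reg 100) (const_int 12))) returns (reg 100); for two expressions
   with no shared register, NULL_RTX is returned.  */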
/* Determine the loop iterator and calculate the number of loop
   iterations.  Returns the exact number of loop iterations if it can
   be calculated, otherwise returns zero.  */

static unsigned HOST_WIDE_INT
loop_iterations (struct loop *loop)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_ivs *ivs = LOOP_IVS (loop);
  rtx comparison, comparison_value;
  rtx iteration_var, initial_value, increment, final_value;
  enum rtx_code comparison_code;
  HOST_WIDE_INT inc;
  unsigned HOST_WIDE_INT abs_inc;
  unsigned HOST_WIDE_INT abs_diff;
  int off_by_one;
  int increment_dir;
  int unsigned_p, compare_dir, final_larger;
  rtx last_loop_insn;
  struct iv_class *bl;

  loop_info->n_iterations = 0;
  loop_info->initial_value = 0;
  loop_info->initial_equiv_value = 0;
  loop_info->comparison_value = 0;
  loop_info->final_value = 0;
  loop_info->final_equiv_value = 0;
  loop_info->increment = 0;
  loop_info->iteration_var = 0;

  /* We used to use prev_nonnote_insn here, but that fails because it might
     accidentally get the branch for a contained loop if the branch for this
     loop was deleted.  We can only trust branches immediately before the
     loop_end.  */
  last_loop_insn = PREV_INSN (loop->end);

  /* ??? We should probably try harder to find the jump insn
     at the end of the loop.  The following code assumes that
     the last loop insn is a jump to the top of the loop.  */
  if (!JUMP_P (last_loop_insn))
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: No final conditional branch found.\n");
      return 0;
    }

  /* If there is more than a single jump to the top of the loop
     we cannot (easily) determine the iteration count.  */
  if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: Loop has multiple back edges.\n");
      return 0;
    }

  /* Find the iteration variable.  If the last insn is a conditional
     branch, and the insn before tests a register value, make that the
     iteration variable.  */

  comparison = get_condition_for_loop (loop, last_loop_insn);
  if (comparison == 0)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: No final comparison found.\n");
      return 0;
    }

  /* ??? Get_condition may switch position of induction variable and
     invariant register when it canonicalizes the comparison.  */

  comparison_code = GET_CODE (comparison);
  iteration_var = XEXP (comparison, 0);
  comparison_value = XEXP (comparison, 1);

  if (!REG_P (iteration_var))
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: Comparison not against register.\n");
      return 0;
    }

  /* The only new registers that are created before loop iterations
     are givs made from biv increments or registers created by
     load_mems.  In the latter case, it is possible that try_copy_prop
     will propagate a new pseudo into the old iteration register but
     this will be marked by having the REG_USERVAR_P bit set.  */

  gcc_assert ((unsigned) REGNO (iteration_var) < ivs->n_regs
              || REG_USERVAR_P (iteration_var));

  /* Determine the initial value of the iteration variable, and the amount
     that it is incremented each loop.  Use the tables constructed by
     the strength reduction pass to calculate these values.  */

  /* Clear the result values, in case no answer can be found.  */
  initial_value = 0;
  increment = 0;

  /* The iteration variable can be either a giv or a biv.  Check to see
     which it is, and compute the variable's initial value, and increment
     value if possible.  */

  /* If this is a new register, can't handle it since we don't have any
     reg_iv_type entry for it.  */
  if ((unsigned) REGNO (iteration_var) >= ivs->n_regs)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: No reg_iv_type entry for iteration var.\n");
      return 0;
    }

  /* Reject iteration variables larger than the host wide int size, since they
     could result in a number of iterations greater than the range of our
     `unsigned HOST_WIDE_INT' variable loop_info->n_iterations.  */
  else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var))
            > HOST_BITS_PER_WIDE_INT))
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: Iteration var rejected because mode too large.\n");
      return 0;
    }
  else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: Iteration var not an integer.\n");
      return 0;
    }

  /* Try swapping the comparison to identify a suitable iv.  */
  if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT
      && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT
      && REG_P (comparison_value)
      && REGNO (comparison_value) < ivs->n_regs)
    {
      rtx temp = comparison_value;
      comparison_code = swap_condition (comparison_code);
      comparison_value = iteration_var;
      iteration_var = temp;
    }

  if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT)
    {
      gcc_assert (REGNO (iteration_var) < ivs->n_regs);

      /* Grab initial value, only useful if it is a constant.  */
      bl = REG_IV_CLASS (ivs, REGNO (iteration_var));
      initial_value = bl->initial_value;
      if (!bl->biv->always_executed || bl->biv->maybe_multiple)
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                     "Loop iterations: Basic induction var not set once in each iteration.\n");
          return 0;
        }

      increment = biv_total_increment (bl);
    }
  else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT)
    {
      HOST_WIDE_INT offset = 0;
      struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var));
      rtx biv_initial_value;

      gcc_assert (REGNO (v->src_reg) < ivs->n_regs);

      if (!v->always_executed || v->maybe_multiple)
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                     "Loop iterations: General induction var not set once in each iteration.\n");
          return 0;
        }

      bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));

      /* Increment value is mult_val times the increment value of the biv.  */

      increment = biv_total_increment (bl);
      if (increment)
        {
          struct induction *biv_inc;

          increment = fold_rtx_mult_add (v->mult_val,
                                         extend_value_for_giv (v, increment),
                                         const0_rtx, v->mode);
          /* The caller assumes that one full increment has occurred at the
             first loop test.  But that's not true when the biv is incremented
             after the giv is set (which is the usual case), e.g.:
             i = 6; do {;} while (i++ < 9) .
             Therefore, we bias the initial value by subtracting the amount of
             the increment that occurs between the giv set and the giv test.  */
          for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv)
            {
              if (loop_insn_first_p (v->insn, biv_inc->insn))
                {
                  if (REG_P (biv_inc->add_val))
                    {
                      if (loop_dump_stream)
                        fprintf (loop_dump_stream,
                                 "Loop iterations: Basic induction var add_val is REG %d.\n",
                                 REGNO (biv_inc->add_val));
                      return 0;
                    }

                  /* If we have already counted it, skip it.  */
                  if (biv_inc->same)
                    continue;

                  offset -= INTVAL (biv_inc->add_val);
                }
            }
        }
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: Giv iterator, initial value bias %ld.\n",
                 (long) offset);

      /* Initial value is mult_val times the biv's initial value plus
         add_val.  Only useful if it is a constant.  */
      biv_initial_value = extend_value_for_giv (v, bl->initial_value);
      initial_value
        = fold_rtx_mult_add (v->mult_val,
                             plus_constant (biv_initial_value, offset),
                             v->add_val, v->mode);
    }
  else
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: Not basic or general induction var.\n");
      return 0;
    }

  if (initial_value == 0)
    return 0;

  unsigned_p = 0;
  off_by_one = 0;
  switch (comparison_code)
    {
    case LEU:
      unsigned_p = 1;
    case LE:
      compare_dir = 1;
      off_by_one = 1;
      break;
    case GEU:
      unsigned_p = 1;
    case GE:
      compare_dir = -1;
      off_by_one = -1;
      break;
    case EQ:
      /* Cannot determine loop iterations with this case.  */
      compare_dir = 0;
      break;
    case LTU:
      unsigned_p = 1;
    case LT:
      compare_dir = 1;
      break;
    case GTU:
      unsigned_p = 1;
    case GT:
      compare_dir = -1;
      break;
    case NE:
      compare_dir = 0;
      break;
    default:
      gcc_unreachable ();
    }

  /* If the comparison value is an invariant register, then try to find
     its value from the insns before the start of the loop.  */

  final_value = comparison_value;
  if (REG_P (comparison_value)
      && loop_invariant_p (loop, comparison_value))
    {
      final_value = loop_find_equiv_value (loop, comparison_value);

      /* If we don't get an invariant final value, we are better
         off with the original register.  */
      if (! loop_invariant_p (loop, final_value))
        final_value = comparison_value;
    }

  /* Calculate the approximate final value of the induction variable
     (on the last successful iteration).  The exact final value
     depends on the branch operator, and increment sign.  It will be
     wrong if the iteration variable is not incremented by one each
     time through the loop and (comparison_value + off_by_one -
     initial_value) % increment != 0.
     ??? Note that the final_value may overflow and thus final_larger
     will be bogus.  A potentially infinite loop will be classified
     as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++)  */
  if (off_by_one)
    final_value = plus_constant (final_value, off_by_one);

  /* Save the calculated values describing this loop's bounds, in case
     precondition_loop_p will need them later.  These values can not be
     recalculated inside precondition_loop_p because strength reduction
     optimizations may obscure the loop's structure.

     These values are only required by precondition_loop_p and insert_bct
     whenever the number of iterations cannot be computed at compile time.
     Only the difference between final_value and initial_value is
     important.  Note that final_value is only approximate.  */
  loop_info->initial_value = initial_value;
  loop_info->comparison_value = comparison_value;
  loop_info->final_value = plus_constant (comparison_value, off_by_one);
  loop_info->increment = increment;
  loop_info->iteration_var = iteration_var;
  loop_info->comparison_code = comparison_code;
  loop_info->iv = bl;

  /* Try to determine the iteration count for loops such
     as (for i = init; i < init + const; i++).  When running the
     loop optimization twice, the first pass often converts simple
     loops into this form.  */

  if (REG_P (initial_value))
    {
      rtx reg1;
      rtx reg2;
      rtx const2;

      reg1 = initial_value;
      if (GET_CODE (final_value) == PLUS)
        reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1);
      else
        reg2 = final_value, const2 = const0_rtx;

      /* Check for initial_value = reg1, final_value = reg2 + const2,
         where reg1 != reg2.  */
      if (REG_P (reg2) && reg2 != reg1)
        {
          rtx temp;

          /* Find what reg1 is equivalent to.  Hopefully it will
             either be reg2 or reg2 plus a constant.  */
          temp = loop_find_equiv_value (loop, reg1);

          if (find_common_reg_term (temp, reg2))
            initial_value = temp;
          else if (loop_invariant_p (loop, reg2))
            {
              /* Find what reg2 is equivalent to.  Hopefully it will
                 either be reg1 or reg1 plus a constant.  Let's ignore
                 the latter case for now since it is not so common.  */
              temp = loop_find_equiv_value (loop, reg2);

              if (temp == loop_info->iteration_var)
                temp = initial_value;
              if (temp == reg1)
                final_value = (const2 == const0_rtx)
                  ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2);
            }
        }
    }

  loop_info->initial_equiv_value = initial_value;
  loop_info->final_equiv_value = final_value;

  /* For EQ comparison loops, we don't have a valid final value.
     Check this now so that we won't leave an invalid value if we
     return early for any other reason.  */
  if (comparison_code == EQ)
    loop_info->final_equiv_value = loop_info->final_value = 0;

  if (increment == 0)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Loop iterations: Increment value can't be calculated.\n");
      return 0;
    }

  if (GET_CODE (increment) != CONST_INT)
) != CONST_INT
)
6143 /* If we have a REG, check to see if REG holds a constant value. */
6144 /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't
6145 clear if it is worthwhile to try to handle such RTL. */
6146 if (REG_P (increment
) || GET_CODE (increment
) == SUBREG
)
6147 increment
= loop_find_equiv_value (loop
, increment
);
6149 if (GET_CODE (increment
) != CONST_INT
)
6151 if (loop_dump_stream
)
6153 fprintf (loop_dump_stream
,
6154 "Loop iterations: Increment value not constant ");
6155 print_simple_rtl (loop_dump_stream
, increment
);
6156 fprintf (loop_dump_stream
, ".\n");
6160 loop_info
->increment
= increment
;
6163 if (GET_CODE (initial_value
) != CONST_INT
)
6165 if (loop_dump_stream
)
6167 fprintf (loop_dump_stream
,
6168 "Loop iterations: Initial value not constant ");
6169 print_simple_rtl (loop_dump_stream
, initial_value
);
6170 fprintf (loop_dump_stream
, ".\n");
6174 else if (GET_CODE (final_value
) != CONST_INT
)
6176 if (loop_dump_stream
)
6178 fprintf (loop_dump_stream
,
6179 "Loop iterations: Final value not constant ");
6180 print_simple_rtl (loop_dump_stream
, final_value
);
6181 fprintf (loop_dump_stream
, ".\n");
6185 else if (comparison_code
== EQ
)
6189 if (loop_dump_stream
)
6190 fprintf (loop_dump_stream
, "Loop iterations: EQ comparison loop.\n");
6192 inc_once
= gen_int_mode (INTVAL (initial_value
) + INTVAL (increment
),
6193 GET_MODE (iteration_var
));
6195 if (inc_once
== final_value
)
6197 /* The iterator value once through the loop is equal to the
6198 comparison value. Either we have an infinite loop, or
6199 we'll loop twice. */
6200 if (increment
== const0_rtx
)
6202 loop_info
->n_iterations
= 2;
6205 loop_info
->n_iterations
= 1;
6207 if (GET_CODE (loop_info
->initial_value
) == CONST_INT
)
6208 loop_info
->final_value
6209 = gen_int_mode ((INTVAL (loop_info
->initial_value
)
6210 + loop_info
->n_iterations
* INTVAL (increment
)),
6211 GET_MODE (iteration_var
));
6213 loop_info
->final_value
6214 = plus_constant (loop_info
->initial_value
,
6215 loop_info
->n_iterations
* INTVAL (increment
));
6216 loop_info
->final_equiv_value
6217 = gen_int_mode ((INTVAL (initial_value
)
6218 + loop_info
->n_iterations
* INTVAL (increment
)),
6219 GET_MODE (iteration_var
));
6220 return loop_info
->n_iterations
;
6223 /* Final_larger is 1 if final larger, 0 if they are equal, otherwise -1. */
6226 = ((unsigned HOST_WIDE_INT
) INTVAL (final_value
)
6227 > (unsigned HOST_WIDE_INT
) INTVAL (initial_value
))
6228 - ((unsigned HOST_WIDE_INT
) INTVAL (final_value
)
6229 < (unsigned HOST_WIDE_INT
) INTVAL (initial_value
));
6231 final_larger
= (INTVAL (final_value
) > INTVAL (initial_value
))
6232 - (INTVAL (final_value
) < INTVAL (initial_value
));
6234 if (INTVAL (increment
) > 0)
6236 else if (INTVAL (increment
) == 0)
6241 /* There are 27 different cases: compare_dir = -1, 0, 1;
6242 final_larger = -1, 0, 1; increment_dir = -1, 0, 1.
6243 There are 4 normal cases, 4 reverse cases (where the iteration variable
6244 will overflow before the loop exits), 4 infinite loop cases, and 15
6245 immediate exit (0 or 1 iteration depending on loop type) cases.
6246 Only try to optimize the normal cases. */
6248 /* (compare_dir/final_larger/increment_dir)
6249 Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1)
6250 Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1)
6251 Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0)
6252 Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */
6254 /* ?? If the meaning of reverse loops (where the iteration variable
6255 will overflow before the loop exits) is undefined, then could
6256 eliminate all of these special checks, and just always assume
6257 the loops are normal/immediate/infinite. Note that this means
6258 the sign of increment_dir does not have to be known. Also,
6259 since it does not really hurt if immediate exit loops or infinite loops
6260 are optimized, then that case could be ignored also, and hence all
6261 loops can be optimized.
6263 According to ANSI Spec, the reverse loop case result is undefined,
6264 because the action on overflow is undefined.
6266 See also the special test for NE loops below. */
6268 if (final_larger
== increment_dir
&& final_larger
!= 0
6269 && (final_larger
== compare_dir
|| compare_dir
== 0))
6274 if (loop_dump_stream
)
6275 fprintf (loop_dump_stream
, "Loop iterations: Not normal loop.\n");
6279 /* Calculate the number of iterations, final_value is only an approximation,
6280 so correct for that. Note that abs_diff and n_iterations are
6281 unsigned, because they can be as large as 2^n - 1. */
6283 inc
= INTVAL (increment
);
6287 abs_diff
= INTVAL (final_value
) - INTVAL (initial_value
);
6292 abs_diff
= INTVAL (initial_value
) - INTVAL (final_value
);
6296 /* Given that iteration_var is going to iterate over its own mode,
6297 not HOST_WIDE_INT, disregard higher bits that might have come
6298 into the picture due to sign extension of initial and final
6300 abs_diff
&= ((unsigned HOST_WIDE_INT
) 1
6301 << (GET_MODE_BITSIZE (GET_MODE (iteration_var
)) - 1)
6304 /* For NE tests, make sure that the iteration variable won't miss
6305 the final value. If abs_diff mod abs_incr is not zero, then the
6306 iteration variable will overflow before the loop exits, and we
6307 can not calculate the number of iterations. */
6308 if (compare_dir
== 0 && (abs_diff
% abs_inc
) != 0)
6311 /* Note that the number of iterations could be calculated using
6312 (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to
6313 handle potential overflow of the summation. */
6314 loop_info
->n_iterations
= abs_diff
/ abs_inc
+ ((abs_diff
% abs_inc
) != 0);
6315 return loop_info
->n_iterations
;
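
/* Worked example (an illustrative sketch, not part of the original file):
   for a source loop such as

     for (i = 10; i < 100; i += 3) ...

   we get initial_value = 10, comparison_value = 100, increment = 3,
   compare_dir = 1 and off_by_one = 0, so abs_diff = 90 and
   n_iterations = 90 / 3 + (90 % 3 != 0) = 30.  With i <= 100 instead,
   off_by_one = 1 biases final_value to 101, giving abs_diff = 91 and
   n_iterations = 91 / 3 + (91 % 3 != 0) = 31.  */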
/* Perform strength reduction and induction variable elimination.

   Pseudo registers created during this function will be beyond the
   last valid index in several tables including
   REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID.  This does not cause a
   problem here, because the added registers cannot be givs outside of
   their loop, and hence will never be reconsidered.  But scan_loop
   must check regnos to make sure they are in bounds.  */

static void
strength_reduce (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  struct loop_ivs *ivs = LOOP_IVS (loop);
  rtx p;
  /* Temporary list pointer for traversing ivs->list.  */
  struct iv_class *bl;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  /* ??? could set this to last value of threshold in move_movables */
  int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
  /* Map of pseudo-register replacements.  */
  rtx *reg_map = NULL;
  int reg_map_size;
  rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
  int insn_count = count_insns_in_loop (loop);

  addr_placeholder = gen_reg_rtx (Pmode);

  ivs->n_regs = max_reg_before_loop;
  ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));

  /* Find all BIVs in loop.  */
  loop_bivs_find (loop);

  /* Exit if there are no bivs.  */
  if (! ivs->list)
    {
      loop_ivs_free (loop);
      return;
    }

  /* Determine how BIVS are initialized by looking through pre-header
     extended basic block.  */
  loop_bivs_init_find (loop);

  /* Look at each biv and see if we can say anything better about its
     initial value from any initializing insns set up above.  */
  loop_bivs_check (loop);

  /* Search the loop for general induction variables.  */
  loop_givs_find (loop);

  /* Try to calculate and save the number of loop iterations.  This is
     set to zero if the actual number can not be calculated.  This must
     be called after all giv's have been identified, since otherwise it may
     fail if the iteration variable is a giv.  */
  loop_iterations (loop);

#ifdef HAVE_prefetch
  if (flags & LOOP_PREFETCH)
    emit_prefetch_instructions (loop);
#endif

  /* Now for each giv for which we still don't know whether or not it is
     replaceable, check to see if it is replaceable because its final value
     can be calculated.  This must be done after loop_iterations is called,
     so that final_giv_value will work correctly.  */
  loop_givs_check (loop);

  /* Try to prove that the loop counter variable (if any) is always
     nonnegative; if so, record that fact with a REG_NONNEG note
     so that "decrement and branch until zero" insn can be used.  */
  check_dbra_loop (loop, insn_count);

  /* Create reg_map to hold substitutions for replaceable giv regs.
     Some givs might have been made from biv increments, so look at
     ivs->reg_iv_type for a suitable size.  */
  reg_map_size = ivs->n_regs;
  reg_map = xcalloc (reg_map_size, sizeof (rtx));

  /* Examine each iv class for feasibility of strength reduction/induction
     variable elimination.  */

  for (bl = ivs->list; bl; bl = bl->next)
    {
      struct induction *v;
      int benefit;

      /* Test whether it will be possible to eliminate this biv
	 provided all givs are reduced.  */
      bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);

      /* This will be true at the end, if all givs which depend on this
	 biv have been strength reduced.
	 We can't (currently) eliminate the biv unless this is so.  */
      bl->all_reduced = 1;

      /* Check each extension dependent giv in this class to see if its
	 root biv is safe from wrapping in the interior mode.  */
      check_ext_dependent_givs (loop, bl);

      /* Combine all giv's for this iv_class.  */
      combine_givs (regs, bl);

      for (v = bl->giv; v; v = v->next_iv)
	{
	  struct induction *tv;

	  if (v->ignore || v->same)
	    continue;

	  benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);

	  /* If an insn is not to be strength reduced, then set its ignore
	     flag, and clear bl->all_reduced.  */

	  /* A giv that depends on a reversed biv must be reduced if it is
	     used after the loop exit, otherwise, it would have the wrong
	     value after the loop exit.  To make it simple, just reduce all
	     such givs whether or not we know they are used after the loop
	     exit.  */

	  if (v->lifetime * threshold * benefit < insn_count
	      && ! bl->reversed)
	    {
	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "giv of insn %d not worth while, %d vs %d.\n",
			 INSN_UID (v->insn),
			 v->lifetime * threshold * benefit, insn_count);
	      v->ignore = 1;
	      bl->all_reduced = 0;
	    }
	  else
	    {
	      /* Check that we can increment the reduced giv without a
		 multiply insn.  If not, reject it.  */

	      for (tv = bl->biv; tv; tv = tv->next_iv)
		if (tv->mult_val == const1_rtx
		    && ! product_cheap_p (tv->add_val, v->mult_val))
		  {
		    if (loop_dump_stream)
		      fprintf (loop_dump_stream,
			       "giv of insn %d: would need a multiply.\n",
			       INSN_UID (v->insn));
		    v->ignore = 1;
		    bl->all_reduced = 0;
		    break;
		  }
	    }
	}

      /* Check for givs whose first use is their definition and whose
	 last use is the definition of another giv.  If so, it is likely
	 dead and should not be used to derive another giv nor to
	 eliminate a biv.  */
      loop_givs_dead_check (loop, bl);

      /* Reduce each giv that we decided to reduce.  */
      loop_givs_reduce (loop, bl);

      /* Rescan all givs.  If a giv is the same as a giv not reduced, mark it
	 as not reduced.

	 For each giv register that can be reduced now: if replaceable,
	 substitute reduced reg wherever the old giv occurs;
	 else add new move insn "giv_reg = reduced_reg".  */
      loop_givs_rescan (loop, bl, reg_map);

      /* All the givs based on the biv bl have been reduced if they
	 merit it.  */

      /* For each giv not marked as maybe dead that has been combined with a
	 second giv, clear any "maybe dead" mark on that second giv.
	 v->new_reg will either be or refer to the register of the giv it
	 combined with.

	 Doing this clearing avoids problems in biv elimination where
	 a giv's new_reg is a complex value that can't be put in the
	 insn but the giv combined with (with a reg as new_reg) is
	 marked maybe_dead.  Since the register will be used in either
	 case, we'd prefer it be used from the simpler giv.  */

      for (v = bl->giv; v; v = v->next_iv)
	if (! v->maybe_dead && v->same)
	  v->same->maybe_dead = 0;

      /* Try to eliminate the biv, if it is a candidate.
	 This won't work if ! bl->all_reduced,
	 since the givs we planned to use might not have been reduced.

	 We have to be careful that we didn't initially think we could
	 eliminate this biv because of a giv that we now think may be
	 dead and shouldn't be used as a biv replacement.

	 Also, there is the possibility that we may have a giv that looks
	 like it can be used to eliminate a biv, but the resulting insn
	 isn't valid.  This can happen, for example, on the 88k, where a
	 JUMP_INSN can compare a register only with zero.  Attempts to
	 replace it with a compare with a constant will fail.

	 Note that in cases where this call fails, we may have replaced some
	 of the occurrences of the biv with a giv, but no harm was done in
	 doing so in the rare cases where it can occur.  */

      if (bl->all_reduced == 1 && bl->eliminable
	  && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
	{
	  /* ?? If we created a new test to bypass the loop entirely,
	     or otherwise drop straight in, based on this test, then
	     we might want to rewrite it also.  This way some later
	     pass has more hope of removing the initialization of this
	     biv entirely.  */

	  /* If final_value != 0, then the biv may be used after loop end
	     and we must emit an insn to set it just in case.

	     Reversed bivs already have an insn after the loop setting their
	     value, so we don't need another one.  We can't calculate the
	     proper final value for such a biv here anyways.  */
	  if (bl->final_value && ! bl->reversed)
	    loop_insn_sink_or_swim (loop,
				    gen_load_of_final_value (bl->biv->dest_reg,
							     bl->final_value));

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
		     bl->regno);
	}
      /* See above note wrt final_value.  But since we couldn't eliminate
	 the biv, we must set the value after the loop instead of before.  */
      else if (bl->final_value && ! bl->reversed)
	loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
						       bl->final_value));
    }

  /* Go through all the instructions in the loop, making all the
     register substitutions scheduled in REG_MAP.  */

  for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
    if (INSN_P (p))
      {
	replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
	replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
	INSN_CODE (p) = -1;
      }

  if (loop_dump_stream)
    fprintf (loop_dump_stream, "\n");

  loop_ivs_free (loop);
  free (reg_map);
}
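
/* Illustrative example (a sketch, not part of the original file; the
   array `a', counter `i' and pseudo `p' are hypothetical).  Given

     for (i = 0; i < n; i++)
       a[i] = 0;

   the address a + i*4 is a giv of the biv i.  Strength reduction
   replaces the multiply with a new induction register that is stepped
   by 4 on every iteration:

     for (i = 0, p = a; i < n; i++, p += 4)
       *p = 0;

   and if i then has no other uses, biv elimination rewrites the exit
   test in terms of p (p < a + n*4) so that i can be deleted entirely.  */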
/* Record all basic induction variables calculated in the insn.  */

static rtx
check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
		     int maybe_multiple)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  rtx set;
  rtx dest_reg;
  rtx inc_val;
  rtx mult_val;
  rtx *location;

  if (NONJUMP_INSN_P (p)
      && (set = single_set (p))
      && REG_P (SET_DEST (set)))
    {
      dest_reg = SET_DEST (set);
      if (REGNO (dest_reg) < max_reg_before_loop
	  && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
	  && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
	{
	  if (basic_induction_var (loop, SET_SRC (set),
				   GET_MODE (SET_SRC (set)),
				   dest_reg, p, &inc_val, &mult_val,
				   &location))
	    {
	      /* It is a possible basic induction variable.
		 Create and initialize an induction structure for it.  */

	      struct induction *v = xmalloc (sizeof (struct induction));

	      record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
			  not_every_iteration, maybe_multiple);
	      REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
	    }
	  else if (REGNO (dest_reg) < ivs->n_regs)
	    REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
	}
    }
  return p;
}
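
/* Illustrative example (a sketch, not part of the original file; reg
   numbers are hypothetical).  An insn in the loop body such as

     (set (reg 60) (plus:SI (reg 60) (const_int 1)))	i = i + 1

   passes basic_induction_var, so reg 60 is recorded as a biv with
   mult_val == const1_rtx and inc_val == (const_int 1).  */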
/* Record all givs calculated in the insn.
   A register is a giv if: it is only set once, it is a function of a
   biv and a constant (or invariant), and it is not a biv.  */

static rtx
check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
		     int maybe_multiple)
{
  struct loop_regs *regs = LOOP_REGS (loop);
  rtx set;

  /* Look for a general induction variable in a register.  */
  if (NONJUMP_INSN_P (p)
      && (set = single_set (p))
      && REG_P (SET_DEST (set))
      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
    {
      rtx src_reg;
      rtx dest_reg;
      rtx add_val;
      rtx mult_val;
      rtx ext_val;
      int benefit;
      rtx regnote = 0;
      rtx last_consec_insn;

      dest_reg = SET_DEST (set);
      if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
	return p;

      if (/* SET_SRC is a giv.  */
	  (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
				  &mult_val, &ext_val, 0, &benefit, VOIDmode)
	   /* Equivalent expression is a giv.  */
	   || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
	       && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
					 &add_val, &mult_val, &ext_val, 0,
					 &benefit, VOIDmode)))
	  /* Don't try to handle any regs made by loop optimization.
	     We have nothing on them in regno_first_uid, etc.  */
	  && REGNO (dest_reg) < max_reg_before_loop
	  /* Don't recognize a BASIC_INDUCT_VAR here.  */
	  && dest_reg != src_reg
	  /* This must be the only place where the register is set.  */
	  && (regs->array[REGNO (dest_reg)].n_times_set == 1
	      /* or all sets must be consecutive and make a giv.  */
	      || (benefit = consec_sets_giv (loop, benefit, p,
					     src_reg, dest_reg,
					     &add_val, &mult_val, &ext_val,
					     &last_consec_insn))))
	{
	  struct induction *v = xmalloc (sizeof (struct induction));

	  /* If this is a library call, increase benefit.  */
	  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
	    benefit += libcall_benefit (p);

	  /* Skip the consecutive insns, if there are any.  */
	  if (regs->array[REGNO (dest_reg)].n_times_set != 1)
	    p = last_consec_insn;

	  record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
		      ext_val, benefit, DEST_REG, not_every_iteration,
		      maybe_multiple, (rtx *) 0);
	}
    }

  /* Look for givs which are memory addresses.  */
  if (NONJUMP_INSN_P (p))
    find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
		   maybe_multiple);

  /* Update the status of whether giv can derive other givs.  This can
     change when we pass a label or an insn that updates a biv.  */
  if (INSN_P (p) || LABEL_P (p))
    update_giv_derive (loop, p);
  return p;
}
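
/* Illustrative example (a sketch, not part of the original file; reg
   numbers are hypothetical).  With reg 60 a biv and reg 58 loop
   invariant, the single set

     (set (reg 61) (plus:SI (mult:SI (reg 60) (const_int 4)) (reg 58)))

   is recognized by general_induction_var with src_reg = (reg 60),
   mult_val = (const_int 4) and add_val = (reg 58), and reg 61 is
   recorded as a DEST_REG giv.  */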
/* Return 1 if X is a valid source for an initial value (or as value being
   compared against in an initial test).

   X must be either a register or constant and must not be clobbered between
   the current insn and the start of the loop.

   INSN is the insn containing X.  */

static int
valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
{
  if (CONSTANT_P (x))
    return 1;

  /* Only consider pseudos we know about initialized in insns whose luids
     we know.  */
  if (!REG_P (x)
      || REGNO (x) >= max_reg_before_loop)
    return 0;

  /* Don't use call-clobbered registers across a call which clobbers it.  On
     some machines, don't use any hard registers at all.  */
  if (REGNO (x) < FIRST_PSEUDO_REGISTER
      && (SMALL_REGISTER_CLASSES
	  || (call_seen && call_used_regs[REGNO (x)])))
    return 0;

  /* Don't use registers that have been clobbered before the start of the
     loop.  */
  if (reg_set_between_p (x, insn, loop_start))
    return 0;

  return 1;
}
/* Scan X for memory refs and check each memory address
   as a possible giv.  INSN is the insn whose pattern X comes from.
   NOT_EVERY_ITERATION is 1 if the insn might not be executed during
   every loop iteration.  MAYBE_MULTIPLE is 1 if the insn might be executed
   more than once in each loop iteration.  */

static void
find_mem_givs (const struct loop *loop, rtx x, rtx insn,
	       int not_every_iteration, int maybe_multiple)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case LABEL_REF:
    case PC:
    case CC0:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case USE:
    case CLOBBER:
      return;

    case MEM:
      {
	rtx src_reg;
	rtx add_val;
	rtx mult_val;
	rtx ext_val;
	int benefit;

	/* This code used to disable creating GIVs with mult_val == 1 and
	   add_val == 0.  However, this leads to lost optimizations when
	   it comes time to combine a set of related DEST_ADDR GIVs, since
	   this one would not be seen.  */

	if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
				   &mult_val, &ext_val, 1, &benefit,
				   GET_MODE (x)))
	  {
	    /* Found one; record it.  */
	    struct induction *v = xmalloc (sizeof (struct induction));

	    record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
			add_val, ext_val, benefit, DEST_ADDR,
			not_every_iteration, maybe_multiple, &XEXP (x, 0));
	  }
      }
      return;

    default:
      break;
    }

  /* Recursively scan the subexpressions for other mem refs.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
		     maybe_multiple);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
		       maybe_multiple);
}
/* Fill in the data about one biv update.
   V is the `struct induction' in which we record the biv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   DEST_REG is the biv's reg.

   MULT_VAL is const1_rtx if the biv is being incremented here, in which case
   INC_VAL is the increment.  Otherwise, MULT_VAL is const0_rtx and the biv is
   being set to INC_VAL.

   NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
   executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
   can be executed more than once per iteration.  If MAYBE_MULTIPLE
   and NOT_EVERY_ITERATION are both zero, we know that the biv update is
   executed exactly once per iteration.  */

static void
record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
	    rtx inc_val, rtx mult_val, rtx *location,
	    int not_every_iteration, int maybe_multiple)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;

  v->insn = insn;
  v->src_reg = dest_reg;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = inc_val;
  v->ext_dependent = NULL_RTX;
  v->location = location;
  v->mode = GET_MODE (dest_reg);
  v->always_computable = ! not_every_iteration;
  v->always_executed = ! not_every_iteration;
  v->maybe_multiple = maybe_multiple;
  v->same = 0;

  /* Add this to the reg's iv_class, creating a class
     if this is the first incrementation of the reg.  */

  bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
  if (bl == 0)
    {
      /* Create and initialize new iv_class.  */

      bl = xmalloc (sizeof (struct iv_class));

      bl->regno = REGNO (dest_reg);
      bl->biv = 0;
      bl->giv = 0;
      bl->biv_count = 0;
      bl->giv_count = 0;

      /* Set initial value to the reg itself.  */
      bl->initial_value = dest_reg;
      bl->final_value = 0;
      /* We haven't seen the initializing insn yet.  */
      bl->init_insn = 0;
      bl->init_set = 0;
      bl->initial_test = 0;
      bl->incremented = 0;
      bl->eliminable = 0;
      bl->nonneg = 0;
      bl->reversed = 0;
      bl->total_benefit = 0;

      /* Add this class to ivs->list.  */
      bl->next = ivs->list;
      ivs->list = bl;

      /* Put it in the array of biv register classes.  */
      REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
    }
  else
    {
      /* Check if location is the same as a previous one.  */
      struct induction *induction;
      for (induction = bl->biv; induction; induction = induction->next_iv)
	if (location == induction->location)
	  {
	    v->same = induction;
	    break;
	  }
    }

  /* Update IV_CLASS entry for this biv.  */
  v->next_iv = bl->biv;
  bl->biv = v;
  bl->biv_count++;
  if (mult_val == const1_rtx)
    bl->incremented = 1;

  if (loop_dump_stream)
    loop_biv_dump (v, loop_dump_stream, 0);
}
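
/* Illustrative example (a sketch, not part of the original file): a loop
   body that updates the same register twice,

     i = i + 1;  ...  i = i + 2;

   yields a single iv_class for i whose bl->biv chain carries two
   `struct induction' entries (bl->biv_count == 2); biv_total_increment
   later combines the two add_vals into a total increment of 3.  */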
/* Fill in the data about one giv.
   V is the `struct induction' in which we record the giv.  (It is
   allocated by the caller, with alloca.)
   INSN is the insn that sets it.
   BENEFIT estimates the savings from deleting this insn.
   TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
   into a register or is used as a memory address.

   SRC_REG is the biv reg which the giv is computed from.
   DEST_REG is the giv's reg (if the giv is stored in a reg).
   MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
   LOCATION points to the place where this giv's value appears in INSN.  */

static void
record_giv (const struct loop *loop, struct induction *v, rtx insn,
	    rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
	    rtx ext_val, int benefit, enum g_types type,
	    int not_every_iteration, int maybe_multiple, rtx *location)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct induction *b;
  struct iv_class *bl;
  rtx set = single_set (insn);
  rtx temp;

  /* Attempt to prove constantness of the values.  Don't let simplify_rtx
     undo the MULT canonicalization that we performed earlier.  */
  temp = simplify_rtx (add_val);
  if (temp
      && ! (GET_CODE (add_val) == MULT
	    && GET_CODE (temp) == ASHIFT))
    add_val = temp;

  v->insn = insn;
  v->src_reg = src_reg;
  v->giv_type = type;
  v->dest_reg = dest_reg;
  v->mult_val = mult_val;
  v->add_val = add_val;
  v->ext_dependent = ext_val;
  v->benefit = benefit;
  v->location = location;
  v->cant_derive = 0;
  v->combined_with = 0;
  v->maybe_multiple = maybe_multiple;
  v->maybe_dead = 0;
  v->derive_adjustment = 0;
  v->same = 0;
  v->ignore = 0;
  v->new_reg = 0;
  v->final_value = 0;
  v->same_insn = 0;
  v->auto_inc_opt = 0;

  /* The v->always_computable field is used in update_giv_derive, to
     determine whether a giv can be used to derive another giv.  For a
     DEST_REG giv, INSN computes a new value for the giv, so its value
     isn't computable if INSN isn't executed every iteration.
     However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
     it does not compute a new value.  Hence the value is always computable
     regardless of whether INSN is executed each iteration.  */

  if (type == DEST_ADDR)
    v->always_computable = 1;
  else
    v->always_computable = ! not_every_iteration;

  v->always_executed = ! not_every_iteration;

  if (type == DEST_ADDR)
    {
      v->mode = GET_MODE (*location);
      v->lifetime = 1;
    }
  else /* type == DEST_REG */
    {
      v->mode = GET_MODE (SET_DEST (set));

      v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));

      /* If the lifetime is zero, it means that this register is
	 really a dead store.  So mark this as a giv that can be
	 ignored.  This will not prevent the biv from being eliminated.  */
      if (v->lifetime == 0)
	v->ignore = 1;

      REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
      REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
    }

  /* Add the giv to the class of givs computed from one biv.  */

  bl = REG_IV_CLASS (ivs, REGNO (src_reg));
  gcc_assert (bl);
  v->next_iv = bl->giv;
  bl->giv = v;

  /* Don't count DEST_ADDR.  This is supposed to count the number of
     insns that calculate givs.  */
  if (type == DEST_REG)
    bl->giv_count++;
  bl->total_benefit += benefit;

  if (type == DEST_ADDR)
    {
      v->replaceable = 1;
      v->not_replaceable = 0;
    }
  else
    {
      /* The giv can be replaced outright by the reduced register only if all
	 of the following conditions are true:
	 - the insn that sets the giv is always executed on any iteration
	   on which the giv is used at all
	   (there are two ways to deduce this:
	    either the insn is executed on every iteration,
	    or all uses follow that insn in the same basic block),
	 - the giv is not used outside the loop
	 - no assignments to the biv occur during the giv's lifetime.  */

      if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
	  /* Previous line always fails if INSN was moved by loop opt.  */
	  && REGNO_LAST_LUID (REGNO (dest_reg))
	     < INSN_LUID (loop->end)
	  && (! not_every_iteration
	      || last_use_this_basic_block (dest_reg, insn)))
	{
	  /* Now check that there are no assignments to the biv within the
	     giv's lifetime.  This requires two separate checks.  */

	  /* Check each biv update, and fail if any are between the first
	     and last use of the giv.

	     If this loop contains an inner loop that was unrolled, then
	     the insn modifying the biv may have been emitted by the loop
	     unrolling code, and hence does not have a valid luid.  Just
	     mark the biv as not replaceable in this case.  It is not very
	     useful as a biv, because it is used in two different loops.
	     It is very unlikely that we would be able to optimize the giv
	     using this biv anyways.  */

	  v->replaceable = 1;
	  v->not_replaceable = 0;
	  for (b = bl->biv; b; b = b->next_iv)
	    {
	      if (INSN_UID (b->insn) >= max_uid_for_loop
		  || ((INSN_LUID (b->insn)
		       >= REGNO_FIRST_LUID (REGNO (dest_reg)))
		      && (INSN_LUID (b->insn)
			  <= REGNO_LAST_LUID (REGNO (dest_reg)))))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;
		  break;
		}
	    }

	  /* If there are any backwards branches that go from after the
	     biv update to before it, then this giv is not replaceable.  */
	  if (v->replaceable)
	    for (b = bl->biv; b; b = b->next_iv)
	      if (back_branch_in_range_p (loop, b->insn))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;
		  break;
		}
	}
      else
	{
	  /* May still be replaceable, we don't have enough info here to
	     decide.  */
	  v->replaceable = 0;
	  v->not_replaceable = 0;
	}
    }

  /* Record whether the add_val contains a const_int, for later use by
     combine_givs.  */
  {
    rtx tem = add_val;

    v->no_const_addval = 1;
    if (tem == const0_rtx)
      ;
    else if (CONSTANT_P (add_val))
      v->no_const_addval = 0;
    if (GET_CODE (tem) == PLUS)
      {
	while (1)
	  {
	    if (GET_CODE (XEXP (tem, 0)) == PLUS)
	      tem = XEXP (tem, 0);
	    else if (GET_CODE (XEXP (tem, 1)) == PLUS)
	      tem = XEXP (tem, 1);
	    else
	      break;
	  }
	if (CONSTANT_P (XEXP (tem, 1)))
	  v->no_const_addval = 0;
      }
  }

  if (loop_dump_stream)
    loop_giv_dump (v, loop_dump_stream, 0);
}
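
/* Illustrative example (a sketch, not part of the original file): in

     g = i * 4;		<- giv set
     ... use of g ...	<- last use, same basic block
     i = i + 1;		<- biv update outside g's lifetime

   g is marked replaceable, so the reduced register can be substituted
   for g outright.  Had the biv update fallen between g's set and its
   last use, the biv scan above would set not_replaceable and g could
   only be reduced via an extra move insn.  */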
/* Try to calculate the final value of the giv, the value it will have at
   the end of the loop.  If we can do it, return that value.  */

static rtx
final_giv_value (const struct loop *loop, struct induction *v)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;
  rtx insn;
  rtx increment, tem;
  rtx seq;
  rtx loop_end = loop->end;
  unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations;

  bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));

  /* The final value for givs which depend on reversed bivs must be calculated
     differently than for ordinary givs.  In this case, there is already an
     insn after the loop which sets this giv's final value (if necessary),
     and there are no other loop exits, so we can return any value.  */
  if (bl->reversed)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream,
		 "Final giv value for %d, depends on reversed biv\n",
		 REGNO (v->dest_reg));
      return const0_rtx;
    }

  /* Try to calculate the final value as a function of the biv it depends
     upon.  The only exit from the loop must be the fall through at the bottom
     and the insn that sets the giv must be executed on every iteration
     (otherwise the giv may not have its final value when the loop exits).  */

  /* ??? Can calculate the final giv value by subtracting off the
     extra biv increments times the giv's mult_val.  The loop must have
     only one exit for this to work, but the number of loop iterations does
     not need to be known.  */

  if (n_iterations != 0
      && ! loop->exit_count
      && v->always_executed)
    {
      /* ?? It is tempting to use the biv's value here since these insns will
	 be put after the loop, and hence the biv will have its final value
	 then.  However, this fails if the biv is subsequently eliminated.
	 Perhaps determine whether biv's are eliminable before trying to
	 determine whether giv's are replaceable so that we can use the
	 biv value here if it is not eliminable.  */

      /* We are emitting code after the end of the loop, so we must make
	 sure that bl->initial_value is still valid then.  It will still
	 be valid if it is invariant.  */

      increment = biv_total_increment (bl);

      if (increment && loop_invariant_p (loop, increment)
	  && loop_invariant_p (loop, bl->initial_value))
	{
	  /* Can calculate the loop exit value of its biv as
	     (n_iterations * increment) + initial_value.  */

	  /* The loop exit value of the giv is then
	     (final_biv_value - extra increments) * mult_val + add_val.
	     The extra increments are any increments to the biv which
	     occur in the loop after the giv's value is calculated.
	     We must search from the insn that sets the giv to the end
	     of the loop to calculate this value.  */

	  /* Put the final biv value in tem.  */
	  tem = gen_reg_rtx (v->mode);
	  record_base_value (REGNO (tem), bl->biv->add_val, 0);
	  loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment),
				 GEN_INT (n_iterations),
				 extend_value_for_giv (v, bl->initial_value),
				 tem);

	  /* Subtract off extra increments as we find them.  */
	  for (insn = NEXT_INSN (v->insn); insn != loop_end;
	       insn = NEXT_INSN (insn))
	    {
	      struct induction *biv;

	      for (biv = bl->biv; biv; biv = biv->next_iv)
		if (biv->insn == insn)
		  {
		    start_sequence ();
		    tem = expand_simple_binop (GET_MODE (tem), MINUS, tem,
					       biv->add_val, NULL_RTX, 0,
					       OPTAB_LIB_WIDEN);
		    seq = get_insns ();
		    end_sequence ();
		    loop_insn_sink (loop, seq);
		  }
	    }

	  /* Now calculate the giv's final value.  */
	  loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem);

	  if (loop_dump_stream)
	    fprintf (loop_dump_stream,
		     "Final giv value for %d, calc from biv's value.\n",
		     REGNO (v->dest_reg));

	  return tem;
	}
    }

  /* Replaceable giv's should never reach here.  */
  gcc_assert (!v->replaceable);

  /* Check to see if the biv is dead at all loop exits.  */
  if (reg_dead_after_loop (loop, v->dest_reg))
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream,
		 "Final giv value for %d, giv dead after loop exit.\n",
		 REGNO (v->dest_reg));

      return const0_rtx;
    }

  return 0;
}
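
/* Illustrative example (a sketch, not part of the original file): take a
   biv i with initial value 0, increment 1 and n_iterations == 10, and a
   giv g = i*4 + 8 set before the biv update.  The final biv value is
   0 + 10*1 = 10; one biv increment follows the giv's insn, so the code
   above forms tem = 10 - 1 = 9 and the giv's final value 9*4 + 8 = 44,
   which is indeed g's value on the last iteration.  */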
/* All this does is determine whether a giv can be made replaceable because
   its final value can be calculated.  This code can not be part of record_giv
   above, because final_giv_value requires that the number of loop iterations
   be known, and that can not be accurately calculated until after all givs
   have been identified.  */

static void
check_final_value (const struct loop *loop, struct induction *v)
{
  rtx final_value = 0;

  /* DEST_ADDR givs will never reach here, because they are always marked
     replaceable above in record_giv.  */

  /* The giv can be replaced outright by the reduced register only if all
     of the following conditions are true:
     - the insn that sets the giv is always executed on any iteration
       on which the giv is used at all
       (there are two ways to deduce this:
	either the insn is executed on every iteration,
	or all uses follow that insn in the same basic block),
     - its final value can be calculated (this condition is different
       than the one above in record_giv)
     - it is not used before it is set
     - no assignments to the biv occur during the giv's lifetime.  */

  /* This is only called now when replaceable is known to be false.  */
  /* Clear replaceable, so that it won't confuse final_giv_value.  */
  v->replaceable = 0;

  if ((final_value = final_giv_value (loop, v))
      && (v->always_executed
	  || last_use_this_basic_block (v->dest_reg, v->insn)))
    {
      int biv_increment_seen = 0, before_giv_insn = 0;
      rtx p = v->insn;
      rtx last_giv_use;

      v->replaceable = 1;
      v->not_replaceable = 0;

      /* When trying to determine whether or not a biv increment occurs
	 during the lifetime of the giv, we can ignore uses of the variable
	 outside the loop because final_value is true.  Hence we can not
	 use regno_last_uid and regno_first_uid as above in record_giv.  */

      /* Search the loop to determine whether any assignments to the
	 biv occur during the giv's lifetime.  Start with the insn
	 that sets the giv, and search around the loop until we come
	 back to that insn again.

	 Also fail if there is a jump within the giv's lifetime that jumps
	 to somewhere outside the lifetime but still within the loop.  This
	 catches spaghetti code where the execution order is not linear, and
	 hence the above test fails.  Here we assume that the giv lifetime
	 does not extend from one iteration of the loop to the next, so as
	 to make the test easier.  Since the lifetime isn't known yet,
	 this requires two loops.  See also record_giv above.  */

      last_giv_use = v->insn;

      while (1)
	{
	  p = NEXT_INSN (p);
	  if (p == loop->end)
	    {
	      before_giv_insn = 1;
	      p = NEXT_INSN (loop->start);
	    }
	  if (p == v->insn)
	    break;

	  if (INSN_P (p))
	    {
	      /* It is possible for the BIV increment to use the GIV if we
		 have a cycle.  Thus we must be sure to check each insn for
		 both BIV and GIV uses, and we must check for BIV uses
		 first.  */

	      if (! biv_increment_seen
		  && reg_set_p (v->src_reg, PATTERN (p)))
		biv_increment_seen = 1;

	      if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
		{
		  if (biv_increment_seen || before_giv_insn)
		    {
		      v->replaceable = 0;
		      v->not_replaceable = 1;
		      break;
		    }
		  last_giv_use = p;
		}
	    }
	}

      /* Now that the lifetime of the giv is known, check for branches
	 from within the lifetime to outside the lifetime if it is still
	 replaceable.  */

      if (v->replaceable)
	{
	  p = v->insn;
	  while (1)
	    {
	      p = NEXT_INSN (p);
	      if (p == loop->end)
		p = NEXT_INSN (loop->start);
	      if (p == last_giv_use)
		break;

	      if (JUMP_P (p) && JUMP_LABEL (p)
		  && LABEL_NAME (JUMP_LABEL (p))
		  && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
		       && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
		      || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
			  && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
		{
		  v->replaceable = 0;
		  v->not_replaceable = 1;

		  if (loop_dump_stream)
		    fprintf (loop_dump_stream,
			     "Found branch outside giv lifetime.\n");

		  break;
		}
	    }
	}

      /* If it is replaceable, then save the final value.  */
      if (v->replaceable)
	v->final_value = final_value;
    }

  if (loop_dump_stream && v->replaceable)
    fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
	     INSN_UID (v->insn), REGNO (v->dest_reg));
}
/* Update the status of whether a giv can derive other givs.

   We need to do something special if there is or may be an update to the biv
   between the time the giv is defined and the time it is used to derive
   another giv.

   In addition, a giv that is only conditionally set is not allowed to
   derive another giv once a label has been passed.

   The cases we look at are when a label or an update to a biv is passed.  */

static void
update_giv_derive (const struct loop *loop, rtx p)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;
  struct induction *biv, *giv;
  rtx tem;
  int dummy;

  /* Search all IV classes, then all bivs, and finally all givs.

     There are three cases we are concerned with.  First we have the situation
     of a giv that is only updated conditionally.  In that case, it may not
     derive any givs after a label is passed.

     The second case is when a biv update occurs, or may occur, after the
     definition of a giv.  For certain biv updates (see below) that are
     known to occur between the giv definition and use, we can adjust the
     giv definition.  For others, or when the biv update is conditional,
     we must prevent the giv from deriving any other givs.  There are two
     sub-cases within this case.

     If this is a label, we are concerned with any biv update that is done
     conditionally, since it may be done after the giv is defined followed by
     a branch here (actually, we need to pass both a jump and a label, but
     this extra tracking doesn't seem worth it).

     If this is a jump, we are concerned about any biv update that may be
     executed multiple times.  We are actually only concerned about
     backward jumps, but it is probably not worth performing the test
     on the jump again here.

     If this is a biv update, we must adjust the giv status to show that a
     subsequent biv update was performed.  If this adjustment cannot be done,
     the giv cannot derive further givs.  */

  for (bl = ivs->list; bl; bl = bl->next)
    for (biv = bl->biv; biv; biv = biv->next_iv)
      if (LABEL_P (p) || JUMP_P (p)
	  || biv->insn == p)
	{
	  /* Skip if location is the same as a previous one.  */
	  if (biv->same)
	    continue;

	  for (giv = bl->giv; giv; giv = giv->next_iv)
	    {
	      /* If cant_derive is already true, there is no point in
		 checking all of these conditions again.  */
	      if (giv->cant_derive)
		continue;

	      /* If this giv is conditionally set and we have passed a label,
		 it cannot derive anything.  */
	      if (LABEL_P (p) && ! giv->always_computable)
		giv->cant_derive = 1;

	      /* Skip givs that have mult_val == 0, since
		 they are really invariants.  Also skip those that are
		 replaceable, since we know their lifetime doesn't contain
		 any biv update.  */
	      else if (giv->mult_val == const0_rtx || giv->replaceable)
		continue;

	      /* The only way we can allow this giv to derive another
		 is if this is a biv increment and we can form the product
		 of biv->add_val and giv->mult_val.  In this case, we will
		 be able to compute a compensation.  */
	      else if (biv->insn == p)
		{
		  rtx ext_val_dummy;

		  tem = 0;
		  if (biv->mult_val == const1_rtx)
		    tem = simplify_giv_expr (loop,
					     gen_rtx_MULT (giv->mode,
							   biv->add_val,
							   giv->mult_val),
					     &ext_val_dummy, &dummy);

		  if (tem && giv->derive_adjustment)
		    tem = simplify_giv_expr
		      (loop,
		       gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
		       &ext_val_dummy, &dummy);

		  if (tem)
		    giv->derive_adjustment = tem;
		  else
		    giv->cant_derive = 1;
		}
	      else if ((LABEL_P (p) && ! biv->always_computable)
		       || (JUMP_P (p) && biv->maybe_multiple))
		giv->cant_derive = 1;
	    }
	}
}
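
/* Illustrative example (a sketch, not part of the original file): with
   biv i (add_val 1) and giv g = i * 4, in

     g = i * 4;   i = i + 1;   h = g + 8;

   the biv update between g's definition and its use means h sees a stale
   g.  When update_giv_derive processes the increment insn it records
   derive_adjustment = biv->add_val * giv->mult_val = 4 on g, so that a
   giv later derived from g (here h = 4*i + 4) can be compensated to be
   expressed in terms of the updated i.  */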
/* Check whether an insn is an increment legitimate for a basic induction var.
   X is the source of insn P, or a part of it.
   MODE is the mode in which X should be interpreted.

   DEST_REG is the putative biv, also the destination of the insn.
   We accept patterns of these forms:
     REG = REG + INVARIANT (includes REG = REG - CONSTANT)
     REG = INVARIANT + REG

   If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
   store the additive term into *INC_VAL, and store the place where
   we found the additive term into *LOCATION.

   If X is an assignment of an invariant into DEST_REG, we set
   *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.

   We also want to detect a BIV when it corresponds to a variable
   whose mode was promoted.  In that case, an increment
   of the variable may be a PLUS that adds a SUBREG of that variable to
   an invariant and then sign- or zero-extends the result of the PLUS
   into the variable.

   Most GIVs in such cases will be in the promoted mode, since that is
   probably the natural computation mode (and almost certainly the mode
   used for addresses) on the machine.  So we view the pseudo-reg containing
   the variable as the BIV, as if it were simply incremented.

   Note that treating the entire pseudo as a BIV will result in making
   simple increments to any GIVs based on it.  However, if the variable
   overflows in its declared mode but not its promoted mode, the result will
   be incorrect.  This is acceptable if the variable is signed, since
   overflows in such cases are undefined, but not if it is unsigned, since
   those overflows are defined.  So we only check for SIGN_EXTEND and
   not ZERO_EXTEND.

   If we cannot find a biv, we return 0.  */

static int
basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
		     rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
		     rtx **location)
{
  enum rtx_code code;
  rtx *argp, arg;
  rtx insn, set = 0, last, inc;

  code = GET_CODE (x);
  *location = NULL;

  switch (code)
    {
    case PLUS:
      if (rtx_equal_p (XEXP (x, 0), dest_reg)
	  || (GET_CODE (XEXP (x, 0)) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
	      && SUBREG_REG (XEXP (x, 0)) == dest_reg))
	argp = &XEXP (x, 1);
      else if (rtx_equal_p (XEXP (x, 1), dest_reg)
	       || (GET_CODE (XEXP (x, 1)) == SUBREG
		   && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
		   && SUBREG_REG (XEXP (x, 1)) == dest_reg))
	argp = &XEXP (x, 0);
      else
	return 0;

      arg = *argp;
      if (loop_invariant_p (loop, arg) != 1)
	return 0;

      /* convert_modes can emit new instructions, e.g. when arg is a loop
	 invariant MEM and dest_reg has a different mode.
	 These instructions would be emitted after the end of the function
	 and then *inc_val would be an uninitialized pseudo.
	 Detect this and bail in this case.
	 Other alternatives to solve this can be introducing a convert_modes
	 variant which is allowed to fail but not allowed to emit new
	 instructions, emit these instructions before loop start and let
	 it be garbage collected if *inc_val is never used or saving the
	 *inc_val initialization sequence generated here and when *inc_val
	 is going to be actually used, emit it at some suitable place.  */
      last = get_last_insn ();
      inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
      if (get_last_insn () != last)
	{
	  delete_insns_since (last);
	  return 0;
	}

      *inc_val = inc;
      *mult_val = const1_rtx;
      *location = argp;
      return 1;

    case SUBREG:
      /* If what's inside the SUBREG is a BIV, then the SUBREG.  This will
	 handle addition of promoted variables.
	 ??? The comment at the start of this function is wrong: promoted
	 variable increments don't look like it says they do.  */
      return basic_induction_var (loop, SUBREG_REG (x),
				  GET_MODE (SUBREG_REG (x)),
				  dest_reg, p, inc_val, mult_val, location);

    case REG:
      /* If this register is assigned in a previous insn, look at its
	 source, but don't go outside the loop or past a label.  */

      /* If this sets a register to itself, we would repeat any previous
	 biv increment if we applied this strategy blindly.  */
      if (rtx_equal_p (dest_reg, x))
	return 0;

      insn = p;
      while (1)
	{
	  rtx dest;
	  do
	    {
	      insn = PREV_INSN (insn);
	    }
	  while (insn && NOTE_P (insn)
		 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);

	  if (!insn)
	    break;
	  set = single_set (insn);
	  if (set == 0)
	    break;
	  dest = SET_DEST (set);
	  if (dest == x
	      || (GET_CODE (dest) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
		  && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
		  && SUBREG_REG (dest) == x))
	    return basic_induction_var (loop, SET_SRC (set),
					(GET_MODE (SET_SRC (set)) == VOIDmode
					 ? GET_MODE (x)
					 : GET_MODE (SET_SRC (set))),
					dest_reg, insn,
					inc_val, mult_val, location);

	  while (GET_CODE (dest) == SUBREG
		 || GET_CODE (dest) == ZERO_EXTRACT
		 || GET_CODE (dest) == STRICT_LOW_PART)
	    dest = XEXP (dest, 0);
	  if (dest == x)
	    break;
	}
      /* Fall through.  */

      /* Can accept constant setting of biv only when inside inner most loop.
	 Otherwise, a biv of an inner loop may be incorrectly recognized
	 as a biv of the outer loop,
	 causing code to be moved INTO the inner loop.  */
    case MEM:
      if (loop_invariant_p (loop, x) != 1)
	return 0;
    case CONST_INT:
    case SYMBOL_REF:
    case CONST:
      /* convert_modes dies if we try to convert to or from CCmode, so just
	 exclude that case.  It is very unlikely that a condition code value
	 would be a useful iterator anyways.  convert_modes dies if we try to
	 convert a float mode to non-float or vice versa too.  */
      if (loop->level == 1
	  && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
	  && GET_MODE_CLASS (mode) != MODE_CC)
	{
	  /* Possible bug here?  Perhaps we don't know the mode of X.  */
	  last = get_last_insn ();
	  inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
	  if (get_last_insn () != last)
	    {
	      delete_insns_since (last);
	      return 0;
	    }

	  *inc_val = inc;
	  *mult_val = const0_rtx;
	  return 1;
	}
      else
	return 0;

    case SIGN_EXTEND:
      /* Ignore this BIV if signed arithmetic overflow is defined.  */
      if (flag_wrapv)
	return 0;
      return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
				  dest_reg, p, inc_val, mult_val, location);

    case ASHIFTRT:
      /* Similar, since this can be a sign extension.  */
      for (insn = PREV_INSN (p);
	   (insn && NOTE_P (insn)
	    && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
	   insn = PREV_INSN (insn))
	;

      if (insn)
	set = single_set (insn);

      if (! rtx_equal_p (dest_reg, XEXP (x, 0))
	  && set && SET_DEST (set) == XEXP (x, 0)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) >= 0
	  && GET_CODE (SET_SRC (set)) == ASHIFT
	  && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
	return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
				    GET_MODE (XEXP (x, 0)),
				    dest_reg, insn, inc_val, mult_val,
				    location);
      return 0;

    default:
      return 0;
    }
}
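
/* Illustrative example (a sketch, not part of the original file; reg
   numbers are hypothetical).  Both of these sources for a set of reg 60
   are accepted, with reg 59 loop invariant:

     (plus:SI (reg 60) (const_int -1))		i = i - 1
     (plus:SI (reg 59) (reg 60))		i = invariant + i

   each yields *mult_val = const1_rtx and *inc_val = the invariant term,
   while an invariant assignment such as (set (reg 60) (const_int 0)) in
   the innermost loop yields *mult_val = const0_rtx and
   *inc_val = (const_int 0).  */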
/* A general induction variable (giv) is any quantity that is a linear
   function of a basic induction variable,
   i.e. giv = biv * mult_val + add_val.
   The coefficients can be any loop invariant quantity.
   A giv need not be computed directly from the biv;
   it can be computed by way of other givs.  */

/* Determine whether X computes a giv.
   If it does, return a nonzero value
     which is the benefit from eliminating the computation of X;
   set *SRC_REG to the register of the biv that it is computed from;
   set *ADD_VAL and *MULT_VAL to the coefficients,
     such that the value of X is biv * mult + add;  */

static int
general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
		       rtx *add_val, rtx *mult_val, rtx *ext_val,
		       int is_addr, int *pbenefit,
		       enum machine_mode addr_mode)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  rtx orig_x = x;

  /* If this is an invariant, forget it, it isn't a giv.  */
  if (loop_invariant_p (loop, x) == 1)
    return 0;

  *pbenefit = 0;
  *ext_val = NULL_RTX;
  x = simplify_giv_expr (loop, x, ext_val, pbenefit);
  if (x == 0)
    return 0;

  switch (GET_CODE (x))
    {
    case USE:
    case CONST_INT:
      /* Since this is now an invariant and wasn't before, it must be a giv
	 with MULT_VAL == 0.  It doesn't matter which BIV we associate this
	 with.  */
      *src_reg = ivs->list->biv->dest_reg;
      *mult_val = const0_rtx;
      *add_val = x;
      break;

    case REG:
      /* This is equivalent to a BIV.  */
      *src_reg = x;
      *mult_val = const1_rtx;
      *add_val = const0_rtx;
      break;

    case PLUS:
      /* Either (plus (biv) (invar)) or
	 (plus (mult (biv) (invar_1)) (invar_2)).  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  *src_reg = XEXP (XEXP (x, 0), 0);
	  *mult_val = XEXP (XEXP (x, 0), 1);
	}
      else
	{
	  *src_reg = XEXP (x, 0);
	  *mult_val = const1_rtx;
	}
      *add_val = XEXP (x, 1);
      break;

    case MULT:
      /* ADD_VAL is zero.  */
      *src_reg = XEXP (x, 0);
      *mult_val = XEXP (x, 1);
      *add_val = const0_rtx;
      break;

    default:
      gcc_unreachable ();
    }

  /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
     one unless they are CONST_INT).  */
  if (GET_CODE (*add_val) == USE)
    *add_val = XEXP (*add_val, 0);
  if (GET_CODE (*mult_val) == USE)
    *mult_val = XEXP (*mult_val, 0);

  if (is_addr)
    *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
  else
    *pbenefit += rtx_cost (orig_x, SET);

  /* Always return true if this is a giv so it will be detected as such,
     even if the benefit is zero or negative.  This allows elimination
     of bivs that might otherwise not be eliminated.  */
  return 1;
}
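
/* Illustrative example (a sketch, not part of the original file; reg
   number hypothetical).  With biv reg 60, simplify_giv_expr rewrites

     (plus (reg 60) (plus (reg 60) (const_int 4)))

   into the canonical (plus (mult (reg 60) (const_int 2)) (const_int 4)),
   from which the switch above reads off src_reg = (reg 60),
   mult_val = (const_int 2) and add_val = (const_int 4).  */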
7812 /* Given an expression, X, try to form it as a linear function of a biv.
7813 We will canonicalize it to be of the form
7814 (plus (mult (BIV) (invar_1))
7816 with possible degeneracies.
7818 The invariant expressions must each be of a form that can be used as a
7819 machine operand. We surround then with a USE rtx (a hack, but localized
7820 and certainly unambiguous!) if not a CONST_INT for simplicity in this
7821 routine; it is the caller's responsibility to strip them.
7823 If no such canonicalization is possible (i.e., two biv's are used or an
7824 expression that is neither invariant nor a biv or giv), this routine
7827 For a nonzero return, the result will have a code of CONST_INT, USE,
7828 REG (for a BIV), PLUS, or MULT. No other codes will occur.
7830 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
static rtx sge_plus (enum machine_mode, rtx, rtx);
static rtx sge_plus_constant (rtx, rtx);

static rtx
simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  enum machine_mode mode = GET_MODE (x);
  rtx arg0, arg1;
  rtx tem;

  /* If this is not an integer mode, or if we cannot do arithmetic in this
     mode, this can't be a giv.  */
  if (mode != VOIDmode
      && (GET_MODE_CLASS (mode) != MODE_INT
          || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
    return NULL_RTX;

  switch (GET_CODE (x))
    {
    case PLUS:
      arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
      arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
      if (arg0 == 0 || arg1 == 0)
        return NULL_RTX;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE
           || GET_CODE (arg0) == CONST_INT)
          && ! ((GET_CODE (arg0) == USE
                 && GET_CODE (arg1) == USE)
                || GET_CODE (arg1) == CONST_INT))
        tem = arg0, arg0 = arg1, arg1 = tem;

      /* Handle addition of zero, then addition of an invariant.  */

      if (arg1 == const0_rtx)
        return arg0;
      else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
        switch (GET_CODE (arg0))
          {
          case CONST_INT:
          case USE:
            /* Adding two invariants must result in an invariant, so enclose
               addition operation inside a USE and return it.  */
            if (GET_CODE (arg0) == USE)
              arg0 = XEXP (arg0, 0);
            if (GET_CODE (arg1) == USE)
              arg1 = XEXP (arg1, 0);

            if (GET_CODE (arg0) == CONST_INT)
              tem = arg0, arg0 = arg1, arg1 = tem;
            if (GET_CODE (arg1) == CONST_INT)
              tem = sge_plus_constant (arg0, arg1);
            else
              tem = sge_plus (mode, arg0, arg1);

            if (GET_CODE (tem) != CONST_INT)
              tem = gen_rtx_USE (mode, tem);
            return tem;

          case REG:
          case MULT:
            /* biv + invar or mult + invar.  Return sum.  */
            return gen_rtx_PLUS (mode, arg0, arg1);

          case PLUS:
            /* (a + invar_1) + invar_2.  Associate.  */
            return
              simplify_giv_expr (loop,
                                 gen_rtx_PLUS (mode,
                                               XEXP (arg0, 0),
                                               gen_rtx_PLUS (mode,
                                                             XEXP (arg0, 1),
                                                             arg1)),
                                 ext_val, benefit);

          default:
            gcc_unreachable ();
          }

      /* Each argument must be either REG, PLUS, or MULT.  Convert REG to
         MULT to reduce cases.  */
      if (REG_P (arg0))
        arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
      if (REG_P (arg1))
        arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);

      /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
         Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
         Recurse to associate the second PLUS.  */
      if (GET_CODE (arg1) == MULT)
        tem = arg0, arg0 = arg1, arg1 = tem;

      if (GET_CODE (arg1) == PLUS)
        return
          simplify_giv_expr (loop,
                             gen_rtx_PLUS (mode,
                                           gen_rtx_PLUS (mode, arg0,
                                                         XEXP (arg1, 0)),
                                           XEXP (arg1, 1)),
                             ext_val, benefit);

      /* Now must have MULT + MULT.  Distribute if same biv, else not giv.  */
      if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
        return NULL_RTX;

      if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
        return NULL_RTX;

      return simplify_giv_expr (loop,
                                gen_rtx_MULT (mode,
                                              XEXP (arg0, 0),
                                              gen_rtx_PLUS (mode,
                                                            XEXP (arg0, 1),
                                                            XEXP (arg1, 1))),
                                ext_val, benefit);

    case MINUS:
      /* Handle "a - b" as "a + b * (-1)".  */
      return simplify_giv_expr (loop,
                                gen_rtx_PLUS (mode,
                                              XEXP (x, 0),
                                              gen_rtx_MULT (mode,
                                                            XEXP (x, 1),
                                                            constm1_rtx)),
                                ext_val, benefit);

    case MULT:
      arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
      arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
      if (arg0 == 0 || arg1 == 0)
        return NULL_RTX;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
          && GET_CODE (arg1) != CONST_INT)
        tem = arg0, arg0 = arg1, arg1 = tem;

      /* If second argument is not now constant, not giv.  */
      if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
        return NULL_RTX;

      /* Handle multiply by 0 or 1.  */
      if (arg1 == const0_rtx)
        return const0_rtx;

      else if (arg1 == const1_rtx)
        return arg0;

      switch (GET_CODE (arg0))
        {
        case REG:
          /* biv * invar.  Done.  */
          return gen_rtx_MULT (mode, arg0, arg1);

        case CONST_INT:
          /* Product of two constants.  */
          return GEN_INT (INTVAL (arg0) * INTVAL (arg1));

        case USE:
          /* invar * invar is a giv, but attempt to simplify it somehow.  */
          if (GET_CODE (arg1) != CONST_INT)
            return NULL_RTX;

          arg0 = XEXP (arg0, 0);
          if (GET_CODE (arg0) == MULT)
            {
              /* (invar_0 * invar_1) * invar_2.  Associate.  */
              return simplify_giv_expr (loop,
                                        gen_rtx_MULT (mode,
                                                      XEXP (arg0, 0),
                                                      gen_rtx_MULT (mode,
                                                                    XEXP (arg0, 1),
                                                                    arg1)),
                                        ext_val, benefit);
            }
          /* Propagate the MULT expressions to the innermost nodes.  */
          else if (GET_CODE (arg0) == PLUS)
            {
              /* (invar_0 + invar_1) * invar_2.  Distribute.  */
              return simplify_giv_expr (loop,
                                        gen_rtx_PLUS (mode,
                                                      gen_rtx_MULT (mode,
                                                                    XEXP (arg0, 0),
                                                                    arg1),
                                                      gen_rtx_MULT (mode,
                                                                    XEXP (arg0, 1),
                                                                    arg1)),
                                        ext_val, benefit);
            }
          return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));

        case MULT:
          /* (a * invar_1) * invar_2.  Associate.  */
          return simplify_giv_expr (loop,
                                    gen_rtx_MULT (mode,
                                                  XEXP (arg0, 0),
                                                  gen_rtx_MULT (mode,
                                                                XEXP (arg0, 1),
                                                                arg1)),
                                    ext_val, benefit);

        case PLUS:
          /* (a + invar_1) * invar_2.  Distribute.  */
          return simplify_giv_expr (loop,
                                    gen_rtx_PLUS (mode,
                                                  gen_rtx_MULT (mode,
                                                                XEXP (arg0, 0),
                                                                arg1),
                                                  gen_rtx_MULT (mode,
                                                                XEXP (arg0, 1),
                                                                arg1)),
                                    ext_val, benefit);

        default:
          gcc_unreachable ();
        }

    case ASHIFT:
      /* Shift by constant is multiply by power of two.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
        return 0;

      return
        simplify_giv_expr (loop,
                           gen_rtx_MULT (mode,
                                         XEXP (x, 0),
                                         GEN_INT ((HOST_WIDE_INT) 1
                                                  << INTVAL (XEXP (x, 1)))),
                           ext_val, benefit);

    case NEG:
      /* "-a" is "a * (-1)" */
      return simplify_giv_expr (loop,
                                gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
                                ext_val, benefit);

    case NOT:
      /* "~a" is "-a - 1".  Silly, but easy.  */
      return simplify_giv_expr (loop,
                                gen_rtx_MINUS (mode,
                                               gen_rtx_NEG (mode, XEXP (x, 0)),
                                               const1_rtx),
                                ext_val, benefit);

    case USE:
      /* Already in proper form for invariant.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case TRUNCATE:
      /* Conditionally recognize extensions of simple IVs.  After we've
         computed loop traversal counts and verified the range of the
         source IV, we'll reevaluate this as a GIV.  */
      if (*ext_val == NULL_RTX)
        {
          arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
          if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
            {
              *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
              return arg0;
            }
        }
      return NULL_RTX;

    case REG:
      /* If this is a new register, we can't deal with it.  */
      if (REGNO (x) >= max_reg_before_loop)
        return NULL_RTX;

      /* Check for biv or giv.  */
      switch (REG_IV_TYPE (ivs, REGNO (x)))
        {
        case BASIC_INDUCT:
          return x;
        case GENERAL_INDUCT:
          {
            struct induction *v = REG_IV_INFO (ivs, REGNO (x));

            /* Form expression from giv and add benefit.  Ensure this giv
               can derive another and subtract any needed adjustment if so.  */

            /* Increasing the benefit here is risky.  The only case in which it
               is arguably correct is if this is the only use of V.  In other
               cases, this will artificially inflate the benefit of the current
               giv, and lead to suboptimal code.  Thus, it is disabled, since
               potentially not reducing an only marginally beneficial giv is
               less harmful than reducing many givs that are not really
               beneficial.  */
            {
              rtx single_use = regs->array[REGNO (x)].single_usage;
              if (single_use && single_use != const0_rtx)
                *benefit += v->benefit;
            }

            if (v->cant_derive)
              return 0;

            tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
                                                    v->src_reg, v->mult_val),
                                v->add_val);

            if (v->derive_adjustment)
              tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
            arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
            if (*ext_val)
              {
                if (!v->ext_dependent)
                  return arg0;
              }
            else
              {
                *ext_val = v->ext_dependent;
                return arg0;
              }
            return 0;
          }

        default:
          /* If it isn't an induction variable, and it is invariant, we
             may be able to simplify things further by looking through
             the bits we just moved outside the loop.  */
          if (loop_invariant_p (loop, x) == 1)
            {
              struct movable *m;
              struct loop_movables *movables = LOOP_MOVABLES (loop);

              for (m = movables->head; m; m = m->next)
                if (rtx_equal_p (x, m->set_dest))
                  {
                    /* Ok, we found a match.  Substitute and simplify.  */

                    /* If we match another movable, we must use that, as
                       this one is going away.  */
                    if (m->match)
                      return simplify_giv_expr (loop, m->match->set_dest,
                                                ext_val, benefit);

                    /* If consec is nonzero, this is a member of a group of
                       instructions that were moved together.  We handle this
                       case only to the point of seeking to the last insn and
                       looking for a REG_EQUAL.  Fail if we don't find one.  */
                    if (m->consec != 0)
                      {
                        int i = m->consec;
                        tem = m->insn;
                        do
                          {
                            tem = NEXT_INSN (tem);
                          }
                        while (--i > 0);

                        tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
                        if (tem)
                          tem = XEXP (tem, 0);
                      }
                    else
                      {
                        tem = single_set (m->insn);
                        if (tem)
                          tem = SET_SRC (tem);
                      }

                    if (tem)
                      {
                        /* What we are most interested in is pointer
                           arithmetic on invariants -- only take
                           patterns we may be able to do something with.  */
                        if (GET_CODE (tem) == PLUS
                            || GET_CODE (tem) == MULT
                            || GET_CODE (tem) == ASHIFT
                            || GET_CODE (tem) == CONST_INT
                            || GET_CODE (tem) == SYMBOL_REF)
                          {
                            tem = simplify_giv_expr (loop, tem, ext_val,
                                                     benefit);
                            if (tem)
                              return tem;
                          }
                        else if (GET_CODE (tem) == CONST
                                 && GET_CODE (XEXP (tem, 0)) == PLUS
                                 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
                                 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
                          {
                            tem = simplify_giv_expr (loop, XEXP (tem, 0),
                                                     ext_val, benefit);
                            if (tem)
                              return tem;
                          }
                      }
                    break;
                  }
            }
          break;
        }

      /* Fall through to general case.  */
    default:
      /* If invariant, return as USE (unless CONST_INT).
         Otherwise, not giv.  */
      if (GET_CODE (x) == USE)
        x = XEXP (x, 0);

      if (loop_invariant_p (loop, x) == 1)
        {
          if (GET_CODE (x) == CONST_INT)
            return x;
          if (GET_CODE (x) == CONST
              && GET_CODE (XEXP (x, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
            x = XEXP (x, 0);
          return gen_rtx_USE (mode, x);
        }
      else
        return 0;
    }
}

/* This routine folds invariants such that there is only ever one
   CONST_INT in the summation.  It is only used by simplify_giv_expr.  */

static rtx
sge_plus_constant (rtx x, rtx c)
{
  if (GET_CODE (x) == CONST_INT)
    return GEN_INT (INTVAL (x) + INTVAL (c));
  else if (GET_CODE (x) != PLUS)
    return gen_rtx_PLUS (GET_MODE (x), x, c);
  else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                           GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
    }
  else if (GET_CODE (XEXP (x, 0)) == PLUS
           || GET_CODE (XEXP (x, 1)) != PLUS)
    {
      return gen_rtx_PLUS (GET_MODE (x),
                           sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
    }
  else
    {
      return gen_rtx_PLUS (GET_MODE (x),
                           sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
    }
}
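/* E.g. folding the constant 3 into (x + (y + 5)) yields (x + (y + 8)):
   the CONST_INT terms are merged so that at most one constant remains
   in the summation.  (Illustrative; X and Y stand for invariant
   non-constant terms.)  */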
static rtx
sge_plus (enum machine_mode mode, rtx x, rtx y)
{
  while (GET_CODE (y) == PLUS)
    {
      rtx a = XEXP (y, 0);
      if (GET_CODE (a) == CONST_INT)
        x = sge_plus_constant (x, a);
      else
        x = gen_rtx_PLUS (mode, x, a);
      y = XEXP (y, 1);
    }
  if (GET_CODE (y) == CONST_INT)
    x = sge_plus_constant (x, y);
  else
    x = gen_rtx_PLUS (mode, x, y);
  return x;
}
/* Help detect a giv that is calculated by several consecutive insns;
   for example,
	i = biv * M
	i = i + A
   The caller has already identified the first insn P as having a giv as dest;
   we check that all other insns that set the same register follow
   immediately after P, that they alter nothing else,
   and that the result of the last is still a giv.

   The value is 0 if the reg set in P is not really a giv.
   Otherwise, the value is the amount gained by eliminating
   all the consecutive insns that compute the value.

   FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
   SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.

   The coefficients of the ultimate giv value are stored in
   *MULT_VAL and *ADD_VAL.  */
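/* A minimal sketch of the case handled here, with a biv B and a made-up
   register R:

	R = B * 4	<- P, whose benefit the caller has counted
	R = R + 16	<- must immediately follow, setting only R

   If the trailing insns check out, the final coefficients reported are
   *MULT_VAL = 4 and *ADD_VAL = 16.  (Illustrative values.)  */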
static int
consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
                 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
                 rtx *ext_val, rtx *last_consec_insn)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int count;
  enum rtx_code code;
  int benefit;
  rtx temp;
  rtx set;

  /* Indicate that this is a giv so that we can update the value produced in
     each insn of the multi-insn sequence.

     This induction structure will be used only by the call to
     general_induction_var below, so we can allocate it on our stack.
     If this is a giv, our caller will replace the induct var entry with
     a new induction structure.  */
  struct induction *v;

  if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
    return 0;

  v = alloca (sizeof (struct induction));
  v->src_reg = src_reg;
  v->mult_val = *mult_val;
  v->add_val = *add_val;
  v->benefit = first_benefit;
  v->cant_derive = 0;
  v->derive_adjustment = 0;
  v->ext_dependent = NULL_RTX;

  REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
  REG_IV_INFO (ivs, REGNO (dest_reg)) = v;

  count = regs->array[REGNO (dest_reg)].n_times_set - 1;

  while (count > 0)
    {
      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If libcall, skip to end of call sequence.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
        p = XEXP (temp, 0);

      if (code == INSN
          && (set = single_set (p))
          && REG_P (SET_DEST (set))
          && SET_DEST (set) == dest_reg
          && (general_induction_var (loop, SET_SRC (set), &src_reg,
                                     add_val, mult_val, ext_val, 0,
                                     &benefit, VOIDmode)
              /* Giv created by equivalent expression.  */
              || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
                  && general_induction_var (loop, XEXP (temp, 0), &src_reg,
                                            add_val, mult_val, ext_val, 0,
                                            &benefit, VOIDmode)))
          && src_reg == v->src_reg)
        {
          if (find_reg_note (p, REG_RETVAL, NULL_RTX))
            benefit += libcall_benefit (p);

          count--;
          v->mult_val = *mult_val;
          v->add_val = *add_val;
          v->benefit += benefit;
        }
      else if (code != NOTE)
        {
          /* Allow insns that set something other than this giv to a
             constant.  Such insns are needed on machines which cannot
             include long constants and should not disqualify a giv.  */
          if (code == INSN
              && (set = single_set (p))
              && SET_DEST (set) != dest_reg
              && CONSTANT_P (SET_SRC (set)))
            continue;

          REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
          return 0;
        }
    }

  REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
  *last_consec_insn = p;
  return v->benefit;
}
/* Return an rtx, if any, that expresses giv G2 as a function of the register
   represented by G1.  If no such expression can be found, or it is clear that
   it cannot possibly be a valid address, 0 is returned.

   To perform the computation, we note that

	G1 = x * v + a		and
	G2 = y * v + b

   where `v' is the biv.

   So G2 = (y/x) * G1 + (b - a*y/x).

   Note that MULT = y/x.

   Update: A and B are now allowed to be additive expressions such that
   B contains all variables in A.  That is, computing B-A will not require
   subtracting variables.  */
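/* Worked instance (illustrative numbers): with G1 = 2*v + 3 and
   G2 = 6*v + 13 we have MULT = y/x = 3, so

	G2 = 3 * G1 + (13 - 3*3) = 3 * G1 + 4,

   and the result is returned as (plus (mult G1 3) 4) in rtl form.  */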
static rtx
express_from_1 (rtx a, rtx b, rtx mult)
{
  /* If MULT is zero, then A*MULT is zero, and our expression is B.  */

  if (mult == const0_rtx)
    return b;

  /* If MULT is not 1, we cannot handle A with non-constants, since we
     would then be required to subtract multiples of the registers in A.
     This is theoretically possible, and may even apply to some Fortran
     constructs, but it is a lot of work and we do not attempt it here.  */

  if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
    return NULL_RTX;

  /* In general these structures are sorted top to bottom (down the PLUS
     chain), but not left to right across the PLUS.  If B is a higher
     order giv than A, we can strip one level and recurse.  If A is higher
     order, we'll eventually bail out, but won't know that until the end.
     If they are the same, we'll strip one level around this loop.  */

  while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
    {
      rtx ra, rb, oa, ob, tmp;

      ra = XEXP (a, 0), oa = XEXP (a, 1);
      if (GET_CODE (ra) == PLUS)
        tmp = ra, ra = oa, oa = tmp;

      rb = XEXP (b, 0), ob = XEXP (b, 1);
      if (GET_CODE (rb) == PLUS)
        tmp = rb, rb = ob, ob = tmp;

      if (rtx_equal_p (ra, rb))
        /* We matched: remove one reg completely.  */
        a = oa, b = ob;
      else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
        /* An alternate match.  */
        a = oa, b = rb;
      else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
        /* An alternate match.  */
        a = ra, b = ob;
      else
        {
          /* Indicates an extra register in B.  Strip one level from B and
             recurse, hoping B was the higher order expression.  */
          ob = express_from_1 (a, ob, mult);
          if (ob == NULL_RTX)
            return NULL_RTX;
          return gen_rtx_PLUS (GET_MODE (b), rb, ob);
        }
    }

  /* Here we are at the last level of A, go through the cases hoping to
     get rid of everything but a constant.  */

  if (GET_CODE (a) == PLUS)
    {
      rtx ra, oa;

      ra = XEXP (a, 0), oa = XEXP (a, 1);
      if (rtx_equal_p (oa, b))
        oa = ra;
      else if (!rtx_equal_p (ra, b))
        return NULL_RTX;

      if (GET_CODE (oa) != CONST_INT)
        return NULL_RTX;

      return GEN_INT (-INTVAL (oa) * INTVAL (mult));
    }
  else if (GET_CODE (a) == CONST_INT)
    {
      return plus_constant (b, -INTVAL (a) * INTVAL (mult));
    }
  else if (CONSTANT_P (a))
    {
      enum machine_mode mode_a = GET_MODE (a);
      enum machine_mode mode_b = GET_MODE (b);
      enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
      return simplify_gen_binary (MINUS, mode, b, a);
    }
  else if (GET_CODE (b) == PLUS)
    {
      if (rtx_equal_p (a, XEXP (b, 0)))
        return XEXP (b, 1);
      else if (rtx_equal_p (a, XEXP (b, 1)))
        return XEXP (b, 0);
      else
        return NULL_RTX;
    }
  else if (rtx_equal_p (a, b))
    return const0_rtx;

  return NULL_RTX;
}

static rtx
express_from (struct induction *g1, struct induction *g2)
{
  rtx mult, add;

  /* The value that G1 will be multiplied by must be a constant integer.  Also,
     the only chance we have of getting a valid address is if b*c/a (see above
     for notation) is also an integer.  */
  if (GET_CODE (g1->mult_val) == CONST_INT
      && GET_CODE (g2->mult_val) == CONST_INT)
    {
      if (g1->mult_val == const0_rtx
          || (g1->mult_val == constm1_rtx
              && INTVAL (g2->mult_val)
                 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
          || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
        return NULL_RTX;
      mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
    }
  else if (rtx_equal_p (g1->mult_val, g2->mult_val))
    mult = const1_rtx;
  else
    {
      /* ??? Find out if the one is a multiple of the other?  */
      return NULL_RTX;
    }

  add = express_from_1 (g1->add_val, g2->add_val, mult);
  if (add == NULL_RTX)
    {
      /* Failed.  If we've got a multiplication factor between G1 and G2,
         scale G1's addend and try again.  */
      if (INTVAL (mult) > 1)
        {
          rtx g1_add_val = g1->add_val;
          if (GET_CODE (g1_add_val) == MULT
              && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
            {
              HOST_WIDE_INT m;
              m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
              g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
                                         XEXP (g1_add_val, 0), GEN_INT (m));
            }
          else
            {
              g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
                                         mult);
            }

          add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
        }
    }
  if (add == NULL_RTX)
    return NULL_RTX;

  /* Form simplified final result.  */
  if (mult == const0_rtx)
    return add;
  else if (mult == const1_rtx)
    mult = g1->dest_reg;
  else
    mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);

  if (add == const0_rtx)
    return mult;
  else
    {
      if (GET_CODE (add) == PLUS
          && CONSTANT_P (XEXP (add, 1)))
        {
          rtx tem = XEXP (add, 1);
          mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
          add = tem;
        }

      return gen_rtx_PLUS (g2->mode, mult, add);
    }
}
/* Return an rtx, if any, that expresses giv G2 as a function of the register
   represented by G1.  This indicates that G2 should be combined with G1 and
   that G2 can use (either directly or via an address expression) a register
   used to represent G1.  */

static rtx
combine_givs_p (struct induction *g1, struct induction *g2)
{
  rtx comb, ret;

  /* With the introduction of ext dependent givs, we must care for modes.
     G2 must not use a wider mode than G1.  */
  if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
    return NULL_RTX;

  ret = comb = express_from (g1, g2);
  if (comb == NULL_RTX)
    return NULL_RTX;
  if (g1->mode != g2->mode)
    ret = gen_lowpart (g2->mode, comb);

  /* If these givs are identical, they can be combined.  We use the results
     of express_from because the addends are not in a canonical form, so
     rtx_equal_p is a weaker test.  */
  /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
     combination to be the other way round.  */
  if (comb == g1->dest_reg
      && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
    return ret;

  /* If G2 can be expressed as a function of G1 and that function is valid
     as an address and no more expensive than using a register for G2,
     the expression of G2 in terms of G1 can be used.  */
  if (ret != NULL_RTX
      && g2->giv_type == DEST_ADDR
      && memory_address_p (GET_MODE (g2->mem), ret))
    return ret;

  return NULL_RTX;
}
/* See if BL is monotonic and has a constant per-iteration increment.
   Return the increment if so, otherwise return 0.  */

static HOST_WIDE_INT
get_monotonic_increment (struct iv_class *bl)
{
  struct induction *v;
  rtx incr;

  /* Get the total increment and check that it is constant.  */
  incr = biv_total_increment (bl);
  if (incr == 0 || GET_CODE (incr) != CONST_INT)
    return 0;

  for (v = bl->biv; v != 0; v = v->next_iv)
    {
      if (GET_CODE (v->add_val) != CONST_INT)
        return 0;

      if (INTVAL (v->add_val) < 0 && INTVAL (incr) >= 0)
        return 0;

      if (INTVAL (v->add_val) > 0 && INTVAL (incr) <= 0)
        return 0;
    }
  return INTVAL (incr);
}
/* Subroutine of biv_fits_mode_p.  Return true if biv BL, when biased by
   BIAS, will never exceed the unsigned range of MODE.  LOOP is the loop
   to which the biv belongs and INCR is its per-iteration increment.  */

static bool
biased_biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
                        HOST_WIDE_INT incr, enum machine_mode mode,
                        unsigned HOST_WIDE_INT bias)
{
  unsigned HOST_WIDE_INT initial, maximum, span, delta;

  /* We need to be able to manipulate MODE-size constants.  */
  if (HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode))
    return false;

  /* The number of loop iterations must be constant.  */
  if (LOOP_INFO (loop)->n_iterations == 0)
    return false;

  /* So must the biv's initial value.  */
  if (bl->initial_value == 0 || GET_CODE (bl->initial_value) != CONST_INT)
    return false;

  initial = bias + INTVAL (bl->initial_value);
  maximum = GET_MODE_MASK (mode);

  /* Make sure that the initial value is within range.  */
  if (initial > maximum)
    return false;

  /* Set up DELTA and SPAN such that the number of iterations * DELTA
     (calculated to arbitrary precision) must be <= SPAN.  */
  if (incr < 0)
    {
      delta = -incr;
      span = initial;
    }
  else
    {
      delta = incr;
      /* Handle the special case in which MAXIMUM is the largest
         unsigned HOST_WIDE_INT and INITIAL is 0.  */
      if (maximum + 1 == initial)
        span = LOOP_INFO (loop)->n_iterations * delta;
      else
        span = maximum + 1 - initial;
    }
  return (span / LOOP_INFO (loop)->n_iterations >= delta);
}
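/* For example (illustrative numbers): a biv in QImode starting at 10
   with INCR = 4, BIAS = 0 and n_iterations = 50 gives INITIAL = 10,
   MAXIMUM = 255, DELTA = 4 and SPAN = 246; since 246 / 50 = 4 >= DELTA,
   the biased biv provably stays within the unsigned range of QImode.  */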
/* Return true if biv BL will never exceed the bounds of MODE.  LOOP is
   the loop to which BL belongs and INCR is its per-iteration increment.
   UNSIGNEDP is true if the biv should be treated as unsigned.  */

static bool
biv_fits_mode_p (const struct loop *loop, struct iv_class *bl,
                 HOST_WIDE_INT incr, enum machine_mode mode, bool unsignedp)
{
  struct loop_info *loop_info;
  unsigned HOST_WIDE_INT bias;

  /* A biv's value will always be limited to its natural mode.
     Larger modes will observe the same wrap-around.  */
  if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (bl->biv->src_reg)))
    mode = GET_MODE (bl->biv->src_reg);

  loop_info = LOOP_INFO (loop);

  bias = (unsignedp ? 0 : (GET_MODE_MASK (mode) >> 1) + 1);
  if (biased_biv_fits_mode_p (loop, bl, incr, mode, bias))
    return true;

  if (mode == GET_MODE (bl->biv->src_reg)
      && bl->biv->src_reg == loop_info->iteration_var
      && loop_info->comparison_value
      && loop_invariant_p (loop, loop_info->comparison_value))
    {
      /* If the increment is +1, and the exit test is a <, the BIV
         cannot overflow.  (For <=, we have the problematic case that
         the comparison value might be the maximum value of the range.)  */
      if (incr == 1)
        {
          if (loop_info->comparison_code == LT)
            return true;
          if (loop_info->comparison_code == LTU && unsignedp)
            return true;
        }

      /* Likewise for increment -1 and exit test >.  */
      if (incr == -1)
        {
          if (loop_info->comparison_code == GT)
            return true;
          if (loop_info->comparison_code == GTU && unsignedp)
            return true;
        }
    }

  return false;
}
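/* E.g. a biv I with INCR = +1 that is also the loop's iteration
   variable, exiting on `I < N' with N loop-invariant: the test above
   accepts this even without constant bounds, since the biv is never
   incremented past N and therefore cannot wrap.  (Illustrative case.)  */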
/* Given that X is an extension or truncation of BL, return true
   if it is unaffected by overflow.  LOOP is the loop to which
   BL belongs and INCR is its per-iteration increment.  */

static bool
extension_within_bounds_p (const struct loop *loop, struct iv_class *bl,
                           HOST_WIDE_INT incr, rtx x)
{
  enum machine_mode mode;
  bool signedp, unsignedp;

  switch (GET_CODE (x))
    {
    case SIGN_EXTEND:
    case ZERO_EXTEND:
      mode = GET_MODE (XEXP (x, 0));
      signedp = (GET_CODE (x) == SIGN_EXTEND);
      unsignedp = (GET_CODE (x) == ZERO_EXTEND);
      break;

    case TRUNCATE:
      /* We don't know whether this value is being used as signed
         or unsigned, so check the conditions for both.  */
      mode = GET_MODE (x);
      signedp = unsignedp = true;
      break;

    default:
      gcc_unreachable ();
    }

  return ((!signedp || biv_fits_mode_p (loop, bl, incr, mode, false))
          && (!unsignedp || biv_fits_mode_p (loop, bl, incr, mode, true)));
}
/* Check each extension dependent giv in this class to see if its
   root biv is safe from wrapping in the interior mode, which would
   make the giv illegal.  */

static void
check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
{
  struct induction *v;
  HOST_WIDE_INT incr;

  incr = get_monotonic_increment (bl);

  /* Invalidate givs that fail the tests.  */
  for (v = bl->giv; v; v = v->next_iv)
    if (v->ext_dependent)
      {
        if (incr != 0
            && extension_within_bounds_p (loop, bl, incr, v->ext_dependent))
          {
            if (loop_dump_stream)
              fprintf (loop_dump_stream,
                       "Verified ext dependent giv at %d of reg %d\n",
                       INSN_UID (v->insn), bl->regno);
          }
        else
          {
            if (loop_dump_stream)
              fprintf (loop_dump_stream,
                       "Failed ext dependent giv at %d\n",
                       INSN_UID (v->insn));

            v->ignore = 1;
            bl->all_reduced = 0;
          }
      }
}
/* Generate a version of VALUE in a mode appropriate for initializing V.  */

static rtx
extend_value_for_giv (struct induction *v, rtx value)
{
  rtx ext_dep = v->ext_dependent;

  if (ext_dep == 0)
    return value;

  /* Recall that check_ext_dependent_givs verified that the known bounds
     of a biv did not overflow or wrap with respect to the extension for
     the giv.  Therefore, constants need no additional adjustment.  */
  if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
    return value;

  /* Otherwise, we must adjust the value to compensate for the
     differing modes of the biv and the giv.  */
  return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
}
struct combine_givs_stats
{
  int giv_number;
  int total_benefit;
};

static int
cmp_combine_givs_stats (const void *xp, const void *yp)
{
  const struct combine_givs_stats * const x =
    (const struct combine_givs_stats *) xp;
  const struct combine_givs_stats * const y =
    (const struct combine_givs_stats *) yp;
  int d;
  d = y->total_benefit - x->total_benefit;
  /* Stabilize the sort.  */
  if (!d)
    d = x->giv_number - y->giv_number;
  return d;
}
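/* E.g. given stats entries with total_benefit {4, 9, 9}, qsort with
   this comparator yields the two benefit-9 entries first (the one with
   the smaller giv_number leading) and the benefit-4 entry last -- a
   deterministic order regardless of how the qsort implementation
   handles equal keys.  (Illustrative values.)  */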
/* Check all pairs of givs for iv_class BL and see if any can be combined with
   any other.  If so, point SAME to the giv combined with and set NEW_REG to
   be an expression (in terms of the other giv's DEST_REG) equivalent to the
   giv.  Also, update BENEFIT and related fields for cost/benefit analysis.  */

static void
combine_givs (struct loop_regs *regs, struct iv_class *bl)
{
  /* Additional benefit to add for being combined multiple times.  */
  const int extra_benefit = 3;

  struct induction *g1, *g2, **giv_array;
  int i, j, k, giv_count;
  struct combine_givs_stats *stats;
  rtx *can_combine;

  /* Count givs, because bl->giv_count is incorrect here.  */
  giv_count = 0;
  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    if (!g1->ignore)
      giv_count++;

  giv_array = alloca (giv_count * sizeof (struct induction *));
  i = 0;
  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    if (!g1->ignore)
      giv_array[i++] = g1;

  stats = xcalloc (giv_count, sizeof (*stats));
  can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));

  for (i = 0; i < giv_count; i++)
    {
      int this_benefit;
      rtx single_use;

      g1 = giv_array[i];
      stats[i].giv_number = i;

      /* If a DEST_REG GIV is used only once, do not allow it to combine
         with anything, for in doing so we will gain nothing that cannot
         be had by simply letting the GIV with which we would have combined
         to be reduced on its own.  The lossage shows up in particular with
         DEST_ADDR targets on hosts with reg+reg addressing, though it can
         be seen elsewhere as well.  */
      if (g1->giv_type == DEST_REG
          && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
          && single_use != const0_rtx)
        continue;

      this_benefit = g1->benefit;
      /* Add an additional weight for zero addends.  */
      if (g1->no_const_addval)
        this_benefit += 1;

      for (j = 0; j < giv_count; j++)
        {
          rtx this_combine;

          g2 = giv_array[j];
          if (g1 != g2
              && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
            {
              can_combine[i * giv_count + j] = this_combine;
              this_benefit += g2->benefit + extra_benefit;
            }
        }
      stats[i].total_benefit = this_benefit;
    }

  /* Iterate, combining until we can't.  */
restart:
  qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "Sorted combine statistics:\n");
      for (k = 0; k < giv_count; k++)
        {
          g1 = giv_array[stats[k].giv_number];
          if (!g1->combined_with && !g1->same)
            fprintf (loop_dump_stream, " {%d, %d}",
                     INSN_UID (giv_array[stats[k].giv_number]->insn),
                     stats[k].total_benefit);
        }
      putc ('\n', loop_dump_stream);
    }

  for (k = 0; k < giv_count; k++)
    {
      int g1_add_benefit = 0;

      i = stats[k].giv_number;
      g1 = giv_array[i];

      /* If it has already been combined, skip.  */
      if (g1->combined_with || g1->same)
        continue;

      for (j = 0; j < giv_count; j++)
        {
          g2 = giv_array[j];
          if (g1 != g2 && can_combine[i * giv_count + j]
              /* If it has already been combined, skip.  */
              && ! g2->same && ! g2->combined_with)
            {
              int l;

              g2->new_reg = can_combine[i * giv_count + j];
              g2->same = g1;
              /* For destination, we now may replace by mem expression instead
                 of register.  This changes the costs considerably, so add the
                 compensation.  */
              if (g2->giv_type == DEST_ADDR)
                g2->benefit = (g2->benefit + reg_address_cost
                               - address_cost (g2->new_reg,
                                               GET_MODE (g2->mem)));
              g1->combined_with++;
              g1->lifetime += g2->lifetime;

              g1_add_benefit += g2->benefit;

              /* ??? The new final_[bg]iv_value code does a much better job
                 of finding replaceable giv's, and hence this code may no
                 longer be necessary.  */
              if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
                g1_add_benefit -= copy_cost;

              /* To help optimize the next set of combinations, remove
                 this giv from the benefits of other potential mates.  */
              for (l = 0; l < giv_count; ++l)
                {
                  int m = stats[l].giv_number;
                  if (can_combine[m * giv_count + j])
                    stats[l].total_benefit -= g2->benefit + extra_benefit;
                }

              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
                         INSN_UID (g2->insn), INSN_UID (g1->insn),
                         g1->benefit, g1_add_benefit, g1->lifetime);
            }
        }

      /* To help optimize the next set of combinations, remove
         this giv from the benefits of other potential mates.  */
      if (g1->combined_with)
        {
          for (j = 0; j < giv_count; ++j)
            {
              int m = stats[j].giv_number;
              if (can_combine[m * giv_count + i])
                stats[j].total_benefit -= g1->benefit + extra_benefit;
            }

          g1->benefit += g1_add_benefit;

          /* We've finished with this giv, and everything it touched.
             Restart the combination so that proper weights for the
             rest of the givs are properly taken into account.  */
          /* ??? Ideally we would compact the arrays at this point, so
             as to not cover old ground.  But sanely compacting
             can_combine is tricky.  */
          goto restart;
        }
    }

  free (stats);
  free (can_combine);
}
/* Generate sequence for REG = B * M + A.  B is the initial value of
   the basic induction variable, M a multiplicative constant, A an
   additive constant and REG the destination register.  */

static rtx
gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
{
  rtx seq;
  rtx result;

  start_sequence ();
  /* Use unsigned arithmetic.  */
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
  if (reg != result)
    emit_move_insn (reg, result);
  seq = get_insns ();
  end_sequence ();

  return seq;
}
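/* For instance, with B the biv's initial value, M = (const_int 4) and
   A = (const_int 32), the returned sequence computes REG = B * 4 + 32,
   which expand_mult_add will typically emit as a shift and an add
   rather than a multiply.  (Illustrative operands.)  */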
/* Update registers created in insn sequence SEQ.  */

static void
loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
{
  rtx insn;

  insn = seq;

  /* Update register info for alias analysis.  */
  while (insn != NULL_RTX)
    {
      rtx set = single_set (insn);

      if (set && REG_P (SET_DEST (set)))
        record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);

      insn = NEXT_INSN (insn);
    }
}
/* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A.  B
   is the initial value of the basic induction variable, M a
   multiplicative constant, A an additive constant and REG the
   destination register.  */

static void
loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
                              rtx reg, basic_block before_bb, rtx before_insn)
{
  rtx seq;

  if (! before_insn)
    {
      loop_iv_add_mult_hoist (loop, b, m, a, reg);
      return;
    }

  /* Use copy_rtx to prevent unexpected sharing of these rtx.  */
  seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);

  /* Increase the lifetime of any invariants moved further in code.  */
  update_reg_last_use (a, before_insn);
  update_reg_last_use (b, before_insn);
  update_reg_last_use (m, before_insn);

  /* It is possible that the expansion created lots of new registers.
     Iterate over the sequence we just created and record them all.  We
     must do this before inserting the sequence.  */
  loop_regs_update (loop, seq);

  loop_insn_emit_before (loop, before_bb, before_insn, seq);
}
/* Emit insns at the loop exit (the sink) to set REG = B * M + A.  B is
   the initial value of the basic induction variable, M a multiplicative
   constant, A an additive constant and REG the destination
   register.  */

static void
loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
{
  rtx seq;

  /* Use copy_rtx to prevent unexpected sharing of these rtx.  */
  seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);

  /* Increase the lifetime of any invariants moved further in code.
     ???? Is this really necessary?  */
  update_reg_last_use (a, loop->sink);
  update_reg_last_use (b, loop->sink);
  update_reg_last_use (m, loop->sink);

  /* It is possible that the expansion created lots of new registers.
     Iterate over the sequence we just created and record them all.  We
     must do this before inserting the sequence.  */
  loop_regs_update (loop, seq);

  loop_insn_sink (loop, seq);
}
/* Emit insns in the loop pre-header to set REG = B * M + A.  B is the
   initial value of the basic induction variable, M a multiplicative
   constant, A an additive constant and REG the destination register.  */

static void
loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
{
  rtx seq;

  /* Use copy_rtx to prevent unexpected sharing of these rtx.  */
  seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);

  /* It is possible that the expansion created lots of new registers.
     Iterate over the sequence we just created and record them all.  We
     must do this before inserting the sequence.  */
  loop_regs_update (loop, seq);

  loop_insn_hoist (loop, seq);
}
/* Similar to gen_add_mult, but compute cost rather than generating
   sequence.  */

static int
iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
{
  int cost = 0;
  rtx last, result;

  start_sequence ();
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
  if (reg != result)
    emit_move_insn (reg, result);
  last = get_last_insn ();
  while (last)
    {
      rtx t = single_set (last);
      if (t)
        cost += rtx_cost (SET_SRC (t), SET);
      last = PREV_INSN (last);
    }
  end_sequence ();
  return cost;
}
/* Test whether A * B can be computed without
   an actual multiply insn.  Value is 1 if so.

   ??? This function stinks because it generates a ton of wasted RTL
   ??? and as a result fragments GC memory to no end.  There are other
   ??? places in the compiler which are invoked a lot and do the same
   ??? thing, generate wasted RTL just to see if something is possible.  */

static int
product_cheap_p (rtx a, rtx b)
{
  rtx tmp;
  int win, n_insns;

  /* If only one is constant, make it B.  */
  if (GET_CODE (a) == CONST_INT)
    tmp = a, a = b, b = tmp;

  /* If first constant, both constant, so don't need multiply.  */
  if (GET_CODE (a) == CONST_INT)
    return 1;

  /* If second not constant, neither is constant, so would need multiply.  */
  if (GET_CODE (b) != CONST_INT)
    return 0;

  /* One operand is constant, so might not need multiply insn.  Generate the
     code for the multiply and see if a call or multiply, or long sequence
     of insns is generated.  */

  start_sequence ();
  expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
  tmp = get_insns ();
  end_sequence ();

  win = 1;
  if (tmp == NULL_RTX)
    win = 0;
  else if (INSN_P (tmp))
    {
      n_insns = 0;
      while (tmp != NULL_RTX)
        {
          rtx next = NEXT_INSN (tmp);

          if (++n_insns > 3
              || !NONJUMP_INSN_P (tmp)
              || (GET_CODE (PATTERN (tmp)) == SET
                  && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
              || (GET_CODE (PATTERN (tmp)) == PARALLEL
                  && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
                  && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
            {
              win = 0;
              break;
            }

          tmp = next;
        }
    }
  else if (GET_CODE (tmp) == SET
           && GET_CODE (SET_SRC (tmp)) == MULT)
    win = 0;
  else if (GET_CODE (tmp) == PARALLEL
           && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
           && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
    win = 0;

  return win;
}
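/* E.g. multiplying by (const_int 4) usually expands to a single shift
   insn, so the product is considered cheap and 1 is returned; a
   constant such as (const_int 10007) normally requires a real multiply
   insn (or a long shift/add sequence) and yields 0.  (Illustrative
   constants.)  */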
/* Check to see if loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
   Also try reversing an increment loop to a decrement loop
   to see if the optimization can be performed.
   Value is nonzero if optimization was performed.  */

/* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n to a loop
   which decrements from n to 0.  A loop that decrements to zero is usually
   faster than one that increments from zero.  */

/* ??? This could be rewritten to use some of the loop unrolling procedures,
   such as approx_final_value, biv_total_increment, loop_iterations, and
   final_[bg]iv_value.  */
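/* For example, a counting loop equivalent to

	for (i = 0; i < n; i++)
	  body;

   where I is used only as a counter can be rewritten to run as

	for (i = n; i > 0; i--)
	  body;

   so the exit test compares against zero and a single
   decrement-and-branch instruction can close the loop.  (C-level
   sketch of the rtl transformation.)  */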
static int
check_dbra_loop (struct loop *loop, int insn_count)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;
  rtx reg;
  enum machine_mode mode;
  rtx jump_label;
  rtx final_value;
  rtx start_value;
  rtx new_add_val;
  rtx comparison;
  rtx before_comparison;
  rtx p;
  rtx jump;
  rtx first_compare;
  int compare_and_branch;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;

  /* If last insn is a conditional branch, and the insn before tests a
     register value, try to optimize it.  Otherwise, we can't do anything.  */

  jump = PREV_INSN (loop_end);
  comparison = get_condition_for_loop (loop, jump);
  if (comparison == 0)
    return 0;
  if (!onlyjump_p (jump))
    return 0;

  /* Try to compute whether the compare/branch at the loop end is one or
     two instructions.  */
  get_condition (jump, &first_compare, false, true);
  if (first_compare == jump)
    compare_and_branch = 1;
  else if (first_compare == prev_nonnote_insn (jump))
    compare_and_branch = 2;
  else
    return 0;

  {
    /* If more than one condition is present to control the loop, then
       do not proceed, as this function does not know how to rewrite
       loop tests with more than one condition.

       Look backwards from the first insn in the last comparison
       sequence and see if we've got another comparison sequence.  */

    rtx jump1;
    if ((jump1 = prev_nonnote_insn (first_compare))
        && JUMP_P (jump1))
      return 0;
  }

  /* Check all of the bivs to see if the compare uses one of them.
     Skip biv's set more than once because we can't guarantee that
     it will be zero on the last iteration.  Also skip if the biv is
     used between its update and the test insn.  */

  for (bl = ivs->list; bl; bl = bl->next)
    {
      if (bl->biv_count == 1
          && ! bl->biv->maybe_multiple
          && bl->biv->dest_reg == XEXP (comparison, 0)
          && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
                                   first_compare))
        break;
    }

  /* Try swapping the comparison to identify a suitable biv.  */
  if (!bl)
    for (bl = ivs->list; bl; bl = bl->next)
      if (bl->biv_count == 1
          && ! bl->biv->maybe_multiple
          && bl->biv->dest_reg == XEXP (comparison, 1)
          && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
                                   first_compare))
        {
          comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
                                       VOIDmode,
                                       XEXP (comparison, 1),
                                       XEXP (comparison, 0));
          break;
        }

  if (! bl)
    return 0;

  /* Look for the case where the basic induction variable is always
     nonnegative, and equals zero on the last iteration.
     In this case, add a reg_note REG_NONNEG, which allows the
     m68k DBRA instruction to be used.  */

  if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
       || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
      && GET_CODE (bl->biv->add_val) == CONST_INT
      && INTVAL (bl->biv->add_val) < 0)
    {
      /* Initial value must be greater than 0,
         init_val % -dec_value == 0 to ensure that it equals zero on
         the last iteration */

      if (GET_CODE (bl->initial_value) == CONST_INT
          && INTVAL (bl->initial_value) > 0
          && (INTVAL (bl->initial_value)
              % (-INTVAL (bl->biv->add_val))) == 0)
        {
          /* Register always nonnegative, add REG_NOTE to branch.  */
          if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
            REG_NOTES (jump)
              = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
                                   REG_NOTES (jump));
          bl->nonneg = 1;

          return 1;
        }

      /* If the decrement is 1 and the value was tested as >= 0 before
         the loop, then we can safely optimize.  */
      for (p = loop_start; p; p = PREV_INSN (p))
        {
          if (LABEL_P (p))
            break;
          if (!JUMP_P (p))
            continue;

          before_comparison = get_condition_for_loop (loop, p);
          if (before_comparison
              && XEXP (before_comparison, 0) == bl->biv->dest_reg
              && (GET_CODE (before_comparison) == LT
                  || GET_CODE (before_comparison) == LTU)
              && XEXP (before_comparison, 1) == const0_rtx
              && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
              && INTVAL (bl->biv->add_val) == -1)
            {
              if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
                REG_NOTES (jump)
                  = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
                                       REG_NOTES (jump));
              bl->nonneg = 1;

              return 1;
            }
        }
    }
  else if (GET_CODE (bl->biv->add_val) == CONST_INT
           && INTVAL (bl->biv->add_val) > 0)
    {
      /* Try to change inc to dec, so can apply above optimization.  */
      /* Can do this if:
         all registers modified are induction variables or invariant,
         all memory references have non-overlapping addresses
         (obviously true if only one write)
         allow 2 insns for the compare/jump at the end of the loop.  */
      /* Also, we must avoid any instructions which use both the reversed
         biv and another biv.  Such instructions will fail if the loop is
         reversed.  We meet this condition by requiring that either
         no_use_except_counting is true, or else that there is only
         one biv.  */
      int num_nonfixed_reads = 0;
      /* 1 if the iteration var is used only to count iterations.  */
      int no_use_except_counting = 0;
      /* 1 if the loop has no memory store, or it has a single memory store
         which is reversible.  */
      int reversible_mem_store = 1;

      if (bl->giv_count == 0
          && !loop->exit_count
          && !loop_info->has_multiple_exit_targets)
        {
          rtx bivreg = regno_reg_rtx[bl->regno];
          struct iv_class *blt;

          /* If there are no givs for this biv, and the only exit is the
             fall through at the end of the loop, then
             see if perhaps there are no uses except to count.  */
          no_use_except_counting = 1;
          for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
            if (INSN_P (p))
              {
                rtx set = single_set (p);

                if (set && REG_P (SET_DEST (set))
                    && REGNO (SET_DEST (set)) == bl->regno)
                  /* An insn that sets the biv is okay.  */
                  ;
                else if (!reg_mentioned_p (bivreg, PATTERN (p)))
                  /* An insn that doesn't mention the biv is okay.  */
                  ;
                else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
                         || p == prev_nonnote_insn (loop_end))
                  {
                    /* If either of these insns uses the biv and sets a pseudo
                       that has more than one usage, then the biv has uses
                       other than counting since it's used to derive a value
                       that is used more than one time.  */
                    note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
                                 regs);
                    if (regs->multiple_uses)
                      {
                        no_use_except_counting = 0;
                        break;
                      }
                  }
                else
                  {
                    no_use_except_counting = 0;
                    break;
                  }
              }

          /* A biv has uses besides counting if it is used to set
             another biv.  */
          for (blt = ivs->list; blt; blt = blt->next)
            if (blt->init_set
                && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
              {
                no_use_except_counting = 0;
                break;
              }
        }

      if (no_use_except_counting)
        /* No need to worry about MEMs.  */
        ;
      else if (loop_info->num_mem_sets <= 1)
        {
          for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
            if (INSN_P (p))
              num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));

          /* If the loop has a single store, and the destination address is
             invariant, then we can't reverse the loop, because this address
             might then have the wrong value at loop exit.
             This would work if the source was invariant also, however, in that
             case, the insn should have been moved out of the loop.  */

          if (loop_info->num_mem_sets == 1)
            {
              struct induction *v;

              /* If we could prove that each of the memory locations
                 written to was different, then we could reverse the
                 store -- but we don't presently have any way of
                 knowing that.  */
              reversible_mem_store = 0;

              /* If the store depends on a register that is set after the
                 store, it depends on the initial value, and is thus not
                 reversible.  */
              for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
                {
                  if (v->giv_type == DEST_REG
                      && reg_mentioned_p (v->dest_reg,
                                          PATTERN (loop_info->first_loop_store_insn))
                      && loop_insn_first_p (loop_info->first_loop_store_insn,
                                            v->insn))
                    reversible_mem_store = 0;
                }
            }
        }
      else
        return 0;

      /* This code only acts for innermost loops.  Also it simplifies
         the memory address check by only reversing loops with
         zero or one memory access.
         Two memory accesses could involve parts of the same array,
         and that can't be reversed.
         If the biv is used only for counting, then we don't need to worry
         about all these things.  */

      if ((num_nonfixed_reads <= 1
           && ! loop_info->has_nonconst_call
           && ! loop_info->has_prefetch
           && ! loop_info->has_volatile
           && reversible_mem_store
           && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
               + num_unmoved_movables (loop) + compare_and_branch == insn_count)
           && (bl == ivs->list && bl->next == 0))
          || (no_use_except_counting && ! loop_info->has_prefetch))
        {
          rtx tem;

          /* Loop can be reversed.  */
          if (loop_dump_stream)
            fprintf (loop_dump_stream, "Can reverse loop\n");

          /* Now check other conditions:

             The increment must be a constant, as must the initial value,
             and the comparison code must be LT.

             This test can probably be improved since +/- 1 in the constant
             can be obtained by changing LT to LE and vice versa; this is
             confusing.  */

          if (comparison
              /* for constants, LE gets turned into LT */
              && (GET_CODE (comparison) == LT
                  || (GET_CODE (comparison) == LE
                      && no_use_except_counting)
                  || GET_CODE (comparison) == LTU))
            {
              HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
              rtx initial_value, comparison_value;
              int nonneg = 0;
              enum rtx_code cmp_code;
              int comparison_const_width;
              unsigned HOST_WIDE_INT comparison_sign_mask;
              bool keep_first_compare;

              add_val = INTVAL (bl->biv->add_val);
              comparison_value = XEXP (comparison, 1);
              if (GET_MODE (comparison_value) == VOIDmode)
                comparison_const_width
                  = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
              else
                comparison_const_width
                  = GET_MODE_BITSIZE (GET_MODE (comparison_value));
              if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
                comparison_const_width = HOST_BITS_PER_WIDE_INT;
              comparison_sign_mask
                = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);

              /* If the comparison value is not a loop invariant, then we
                 can not reverse this loop.

                 ??? If the insns which initialize the comparison value as
                 a whole compute an invariant result, then we could move
                 them out of the loop and proceed with loop reversal.  */
              if (! loop_invariant_p (loop, comparison_value))
                return 0;

              if (GET_CODE (comparison_value) == CONST_INT)
                comparison_val = INTVAL (comparison_value);
              initial_value = bl->initial_value;

              /* Normalize the initial value if it is an integer and
                 has no other use except as a counter.  This will allow
                 a few more loops to be reversed.  */
              if (no_use_except_counting
                  && GET_CODE (comparison_value) == CONST_INT
                  && GET_CODE (initial_value) == CONST_INT)
                {
                  comparison_val = comparison_val - INTVAL (bl->initial_value);
                  /* The code below requires comparison_val to be a multiple
                     of add_val in order to do the loop reversal, so
                     round up comparison_val to a multiple of add_val.
                     Since comparison_value is constant, we know that the
                     current comparison code is LT.  */
                  comparison_val = comparison_val + add_val - 1;
                  comparison_val
                    -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
                  /* We postpone overflow checks for COMPARISON_VAL here;
                     even if there is an overflow, we might still be able to
                     reverse the loop, if converting the loop exit test to
                     NE is possible.  */
                  initial_value = const0_rtx;
                }

              /* First check if we can do a vanilla loop reversal.  */
              if (initial_value == const0_rtx
                  && GET_CODE (comparison_value) == CONST_INT
                  /* Now do postponed overflow checks on COMPARISON_VAL.  */
                  && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
                        & comparison_sign_mask))
                {
                  /* Register will always be nonnegative, with value
                     0 on last iteration */
                  add_adjust = add_val;
                  nonneg = 1;
                  cmp_code = GE;
                }
              else
                return 0;

              if (GET_CODE (comparison) == LE)
                add_adjust -= add_val;

              /* If the initial value is not zero, or if the comparison
                 value is not an exact multiple of the increment, then we
                 can not reverse this loop.  */
              if (initial_value == const0_rtx
                  && GET_CODE (comparison_value) == CONST_INT)
                {
                  if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
                    return 0;
                }
              else
                {
                  if (! no_use_except_counting || add_val != 1)
                    return 0;
                }

              final_value = comparison_value;

              /* Reset these in case we normalized the initial value
                 and comparison value above.  */
              if (GET_CODE (comparison_value) == CONST_INT
                  && GET_CODE (initial_value) == CONST_INT)
                {
                  comparison_value = GEN_INT (comparison_val);
                  final_value
                    = GEN_INT (comparison_val + INTVAL (bl->initial_value));
                }
              bl->initial_value = initial_value;

              /* Save some info needed to produce the new insns.  */
              reg = bl->biv->dest_reg;
              mode = GET_MODE (reg);
              jump_label = condjump_label (PREV_INSN (loop_end));
              new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));

              /* Set start_value; if this is not a CONST_INT, we need
                 to generate a SUB.
                 Initialize biv to start_value before loop start.
                 The old initializing insn will be deleted as a
                 dead store by flow.c.  */
              if (initial_value == const0_rtx
                  && GET_CODE (comparison_value) == CONST_INT)
                {
                  start_value
                    = gen_int_mode (comparison_val - add_adjust, mode);
                  loop_insn_hoist (loop, gen_move_insn (reg, start_value));
                }
              else if (GET_CODE (initial_value) == CONST_INT)
                {
                  rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
                  rtx add_insn = gen_add3_insn (reg, comparison_value, offset);

                  if (add_insn == 0)
                    return 0;

                  start_value
                    = gen_rtx_PLUS (mode, comparison_value, offset);
                  loop_insn_hoist (loop, add_insn);
                  if (GET_CODE (comparison) == LE)
                    final_value = gen_rtx_PLUS (mode, comparison_value,
                                                GEN_INT (add_val));
                }
              else if (! add_adjust)
                {
                  rtx sub_insn = gen_sub3_insn (reg, comparison_value,
                                                initial_value);

                  if (sub_insn == 0)
                    return 0;
                  start_value
                    = gen_rtx_MINUS (mode, comparison_value, initial_value);
                  loop_insn_hoist (loop, sub_insn);
                }
              else
                /* We could handle the other cases too, but it'll be
                   better to have a testcase first.  */
                return 0;

              /* We may not have a single insn which can increment a reg, so
                 create a sequence to hold all the insns from expand_inc.  */
              start_sequence ();
              expand_inc (reg, new_add_val);
              tem = get_insns ();
              end_sequence ();

              p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
              delete_insn (bl->biv->insn);

              /* Update biv info to reflect its new status.  */
              bl->biv->insn = p;
              bl->initial_value = start_value;
              bl->biv->add_val = new_add_val;

              /* Update loop info.  */
              loop_info->initial_value = reg;
              loop_info->initial_equiv_value = reg;
              loop_info->final_value = const0_rtx;
              loop_info->final_equiv_value = const0_rtx;
              loop_info->comparison_value = const0_rtx;
              loop_info->comparison_code = cmp_code;
              loop_info->increment = new_add_val;

              /* Inc LABEL_NUSES so that delete_insn will
                 not delete the label.  */
              LABEL_NUSES (XEXP (jump_label, 0))++;

              /* If we have a separate comparison insn that does more
                 than just set cc0, the result of the comparison might
                 be used outside the loop.  */
              keep_first_compare = (compare_and_branch == 2
#ifdef HAVE_cc0
                                    && sets_cc0_p (first_compare) <= 0
#endif
                                    );

              /* Emit an insn after the end of the loop to set the biv's
                 proper exit value if it is used anywhere outside the loop.  */
              if (keep_first_compare
                  || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
                  || ! bl->init_insn
                  || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
                loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));

              if (keep_first_compare)
                loop_insn_sink (loop, PATTERN (first_compare));

              /* Delete compare/branch at end of loop.  */
              delete_related_insns (PREV_INSN (loop_end));
              if (compare_and_branch == 2)
                delete_related_insns (first_compare);

              /* Add new compare/branch insn at end of loop.  */
              start_sequence ();
              emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
                                       mode, 0,
                                       XEXP (jump_label, 0));
              tem = get_insns ();
              end_sequence ();
              emit_jump_insn_before (tem, loop_end);

              for (tem = PREV_INSN (loop_end);
                   tem && !JUMP_P (tem);
                   tem = PREV_INSN (tem))
                ;

              if (tem)
                {
                  JUMP_LABEL (tem) = XEXP (jump_label, 0);

                  if (nonneg)
                    {
                      /* Increment of LABEL_NUSES done above.  */
                      /* Register is now always nonnegative,
                         so add REG_NONNEG note to the branch.  */
                      REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
                                                           REG_NOTES (tem));
                    }
                  bl->nonneg = 1;
                }

              /* No insn may reference both the reversed and another biv or it
                 will fail (see comment near the top of the loop reversal
                 code).
                 Earlier on, we have verified that the biv has no use except
                 counting, or it is the only biv in this function.
                 However, the code that computes no_use_except_counting does
                 not verify reg notes.  It's possible to have an insn that
                 references another biv, and has a REG_EQUAL note with an
                 expression based on the reversed biv.  To avoid this case,
                 remove all REG_EQUAL notes based on the reversed biv
                 here.  */
              for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
                if (INSN_P (p))
                  {
                    rtx *pnote;
                    rtx set = single_set (p);
                    /* If this is a set of a GIV based on the reversed biv, any
                       REG_EQUAL notes should still be correct.  */
                    if (! set
                        || !REG_P (SET_DEST (set))
                        || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
                        || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
                        || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
                      for (pnote = &REG_NOTES (p); *pnote;)
                        {
                          if (REG_NOTE_KIND (*pnote) == REG_EQUAL
                              && reg_mentioned_p (regno_reg_rtx[bl->regno],
                                                  XEXP (*pnote, 0)))
                            *pnote = XEXP (*pnote, 1);
                          else
                            pnote = &XEXP (*pnote, 1);
                        }
                  }

              /* Mark that this biv has been reversed.  Each giv which depends
                 on this biv, and which is also live past the end of the loop
                 will have to be fixed up.  */

              bl->reversed = 1;

              if (loop_dump_stream)
                {
                  fprintf (loop_dump_stream, "Reversed loop");
                  if (bl->nonneg)
                    fprintf (loop_dump_stream, " and added reg_nonneg\n");
                  else
                    fprintf (loop_dump_stream, "\n");
                }

              return 1;
            }
        }
    }

  return 0;
}
/* Verify whether the biv BL appears to be eliminable,
   based on the insns in the loop that refer to it.

   If ELIMINATE_P is nonzero, actually do the elimination.

   THRESHOLD and INSN_COUNT are from loop_optimize and are used to
   determine whether invariant insns should be placed inside or at the
   start of the loop.  */
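/* For example, if a loop maintains both a counter I and a reduced
   address giv P = BASE + I*4, the exit test `I < N' can be rewritten
   as `P < BASE + N*4'; afterwards I has no remaining uses and can be
   eliminated.  (Illustrative source-level sketch of the rtl
   transformation.)  */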
static int
maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
                     int eliminate_p, int threshold, int insn_count)
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  rtx reg = bl->biv->dest_reg;
  rtx p;

  /* Scan all insns in the loop, stopping if we find one that uses the
     biv in a way that we cannot eliminate.  */

  for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
    {
      enum rtx_code code = GET_CODE (p);
      basic_block where_bb = 0;
      rtx where_insn = threshold >= insn_count ? 0 : p;
      rtx note;

      /* If this is a libcall that sets a giv, skip ahead to its end.  */
      if (INSN_P (p))
        {
          note = find_reg_note (p, REG_LIBCALL, NULL_RTX);

          if (note)
            {
              rtx last = XEXP (note, 0);
              rtx set = single_set (last);

              if (set && REG_P (SET_DEST (set)))
                {
                  unsigned int regno = REGNO (SET_DEST (set));

                  if (regno < ivs->n_regs
                      && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
                      && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
                    p = last;
                }
            }
        }

      /* Closely examine the insn if the biv is mentioned.  */
      if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
          && reg_mentioned_p (reg, PATTERN (p))
          && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
                                      eliminate_p, where_bb, where_insn))
        {
          if (loop_dump_stream)
            fprintf (loop_dump_stream,
                     "Cannot eliminate biv %d: biv used in insn %d.\n",
                     bl->regno, INSN_UID (p));
          break;
        }

      /* If we are eliminating, kill REG_EQUAL notes mentioning the biv.  */
      if (eliminate_p
          && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
          && reg_mentioned_p (reg, XEXP (note, 0)))
        remove_note (p, note);
    }

  if (p == loop->end)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
                 bl->regno, eliminate_p ? "was" : "can be");
      return 1;
    }

  return 0;
}
/* INSN and REFERENCE are instructions in the same insn chain.
   Return nonzero if INSN is first.  */

static int
loop_insn_first_p (rtx insn, rtx reference)
{
  rtx p, q;

  for (p = insn, q = reference;;)
    {
      /* Start with test for not first so that INSN == REFERENCE yields not
         first.  */
      if (q == insn || ! p)
        return 0;
      if (p == reference || ! q)
        return 1;

      /* Either of P or Q might be a NOTE.  Notes have the same LUID as the
         previous insn, hence the <= comparison below does not work if
         P is a note.  */
      if (INSN_UID (p) < max_uid_for_loop
          && INSN_UID (q) < max_uid_for_loop
          && !NOTE_P (p))
        return INSN_LUID (p) <= INSN_LUID (q);

      if (INSN_UID (p) >= max_uid_for_loop
          || NOTE_P (p))
        p = NEXT_INSN (p);
      if (INSN_UID (q) >= max_uid_for_loop)
        q = NEXT_INSN (q);
    }
}
/* We are trying to eliminate BIV in INSN using GIV.  Return nonzero if
   the offset that we have to take into account due to auto-increment /
   div derivation is zero.  */
static int
biv_elimination_giv_has_0_offset (struct induction *biv,
                                  struct induction *giv, rtx insn)
{
  /* If the giv V had the auto-inc address optimization applied
     to it, and INSN occurs between the giv insn and the biv
     insn, then we'd have to adjust the value used here.
     This is rare, so we don't bother to make this possible.  */
  if (giv->auto_inc_opt
      && ((loop_insn_first_p (giv->insn, insn)
           && loop_insn_first_p (insn, biv->insn))
          || (loop_insn_first_p (biv->insn, insn)
              && loop_insn_first_p (insn, giv->insn))))
    return 0;

  return 1;
}
/* If BL appears in X (part of the pattern of INSN), see if we can
   eliminate its use.  If so, return 1.  If not, return 0.

   If BIV does not appear in X, return 1.

   If ELIMINATE_P is nonzero, actually do the elimination.
   WHERE_INSN/WHERE_BB indicate where extra insns should be added.
   Depending on how many items have been moved out of the loop, it
   will either be before INSN (when WHERE_INSN is nonzero) or at the
   start of the loop (when WHERE_INSN is zero).  */

static int
maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
                       struct iv_class *bl, int eliminate_p,
                       basic_block where_bb, rtx where_insn)
{
  enum rtx_code code = GET_CODE (x);
  rtx reg = bl->biv->dest_reg;
  enum machine_mode mode = GET_MODE (reg);
  struct induction *v;
  rtx arg, tem;
#ifdef HAVE_cc0
  rtx new;
#endif
  int arg_operand;
  const char *fmt;
  int i, j;

  switch (code)
    {
    case REG:
      /* If we haven't already been able to do something with this BIV,
         we can't eliminate it.  */
      if (x == reg)
        return 0;
      return 1;

    case SET:
      /* If this sets the BIV, it is not a problem.  */
      if (SET_DEST (x) == reg)
        return 1;

      /* If this is an insn that defines a giv, it is also ok because
         it will go away when the giv is reduced.  */
      for (v = bl->giv; v; v = v->next_iv)
        if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
          return 1;

#ifdef HAVE_cc0
      if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
        {
          /* Can replace with any giv that was reduced and
             that has (MULT_VAL != 0) and (ADD_VAL == 0).
             Require a constant for MULT_VAL, so we know it's nonzero.
             ??? We disable this optimization to avoid potential
             overflows.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
                && v->add_val == const0_rtx
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode
                && 0)
              {
                if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
                  continue;

                if (! eliminate_p)
                  return 1;

                /* If the giv has the opposite direction of change,
                   then reverse the comparison.  */
                if (INTVAL (v->mult_val) < 0)
                  new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
                                         const0_rtx, v->new_reg);
                else
                  new = v->new_reg;

                /* We can probably test that giv's reduced reg.  */
                if (validate_change (insn, &SET_SRC (x), new, 0))
                  return 1;
              }

          /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
             replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
             Require a constant for MULT_VAL, so we know it's nonzero.
             ??? Do this only if ADD_VAL is a pointer to avoid a potential
             overflow problem.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (GET_CODE (v->mult_val) == CONST_INT
                && v->mult_val != const0_rtx
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode
                && (GET_CODE (v->add_val) == SYMBOL_REF
                    || GET_CODE (v->add_val) == LABEL_REF
                    || GET_CODE (v->add_val) == CONST
                    || (REG_P (v->add_val)
                        && REG_POINTER (v->add_val))))
              {
                if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
                  continue;

                if (! eliminate_p)
                  return 1;

                /* If the giv has the opposite direction of change,
                   then reverse the comparison.  */
                if (INTVAL (v->mult_val) < 0)
                  new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
                                         v->new_reg);
                else
                  new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
                                         copy_rtx (v->add_val));

                /* Replace biv with the giv's reduced register.  */
                update_reg_last_use (v->add_val, insn);
                if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
                  return 1;

                /* Insn doesn't support that constant or invariant.  Copy it
                   into a register (it will be a loop invariant.)  */
                tem = gen_reg_rtx (GET_MODE (v->new_reg));

                loop_insn_emit_before (loop, 0, where_insn,
                                       gen_move_insn (tem,
                                                      copy_rtx (v->add_val)));

                /* Substitute the new register for its invariant value in
                   the compare expression.  */
                XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
                if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
                  return 1;
              }
        }
#endif
      break;

    case COMPARE:
    case EQ:  case NE:
    case GT:  case GE:  case GTU:  case GEU:
    case LT:  case LE:  case LTU:  case LEU:
      /* See if either argument is the biv.  */
      if (XEXP (x, 0) == reg)
        arg = XEXP (x, 1), arg_operand = 1;
      else if (XEXP (x, 1) == reg)
        arg = XEXP (x, 0), arg_operand = 0;
      else
        break;

      if (CONSTANT_P (arg))
        {
          /* First try to replace with any giv that has constant positive
             mult_val and constant add_val.  We might be able to support
             negative mult_val, but it seems complex to do it in general.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (GET_CODE (v->mult_val) == CONST_INT
                && INTVAL (v->mult_val) > 0
                && (GET_CODE (v->add_val) == SYMBOL_REF
                    || GET_CODE (v->add_val) == LABEL_REF
                    || GET_CODE (v->add_val) == CONST
                    || (REG_P (v->add_val)
                        && REG_POINTER (v->add_val)))
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode)
              {
                if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
                  continue;

                /* Don't eliminate if the linear combination that makes up
                   the giv overflows when it is applied to ARG.  */
                if (GET_CODE (arg) == CONST_INT)
                  {
                    rtx add_val;

                    if (GET_CODE (v->add_val) == CONST_INT)
                      add_val = v->add_val;
                    else
                      add_val = const0_rtx;

                    if (const_mult_add_overflow_p (arg, v->mult_val,
                                                   add_val, mode, 1))
                      continue;
                  }

                if (! eliminate_p)
                  return 1;

                /* Replace biv with the giv's reduced reg.  */
                validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);

                /* If all constants are actually constant integers and
                   the derived constant can be directly placed in the COMPARE,
                   do so.  */
                if (GET_CODE (arg) == CONST_INT
                    && GET_CODE (v->add_val) == CONST_INT)
                  tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
                                         v->add_val, mode, 1);
                else
                  {
                    /* Otherwise, load it into a register.  */
                    tem = gen_reg_rtx (mode);
                    loop_iv_add_mult_emit_before (loop, arg,
                                                  v->mult_val, v->add_val,
                                                  tem, where_bb, where_insn);
                  }

                validate_change (insn, &XEXP (x, arg_operand), tem, 1);

                if (apply_change_group ())
                  return 1;
              }

          /* Look for giv with positive constant mult_val and nonconst add_val.
             Insert insns to calculate new compare value.
             ??? Turn this off due to possible overflow.  */

          for (v = bl->giv; v; v = v->next_iv)
            if (GET_CODE (v->mult_val) == CONST_INT
                && INTVAL (v->mult_val) > 0
                && ! v->ignore && ! v->maybe_dead && v->always_computable
                && v->mode == mode
                && 0)
              {
                rtx tem;

                if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
                  continue;

                if (! eliminate_p)
                  return 1;

                tem = gen_reg_rtx (mode);

                /* Replace biv with giv's reduced register.  */
                validate_change (insn, &XEXP (x, 1 - arg_operand),
                                 v->new_reg, 1);

                /* Compute value to compare against.  */
                loop_iv_add_mult_emit_before (loop, arg,
                                              v->mult_val, v->add_val,
                                              tem, where_bb, where_insn);
                /* Use it in this insn.  */
                validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                if (apply_change_group ())
                  return 1;
              }
        }
      else if (REG_P (arg) || MEM_P (arg))
        {
          if (loop_invariant_p (loop, arg) == 1)
            {
              /* Look for giv with constant positive mult_val and nonconst
                 add_val. Insert insns to compute new compare value.
                 ??? Turn this off due to possible overflow.  */

              for (v = bl->giv; v; v = v->next_iv)
                if (GET_CODE (v->mult_val) == CONST_INT
                    && INTVAL (v->mult_val) > 0
                    && ! v->ignore && ! v->maybe_dead && v->always_computable
                    && v->mode == mode
                    && 0)
                  {
                    rtx tem;

                    if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
                      continue;

                    if (! eliminate_p)
                      return 1;

                    tem = gen_reg_rtx (mode);

                    /* Replace biv with giv's reduced register.  */
                    validate_change (insn, &XEXP (x, 1 - arg_operand),
                                     v->new_reg, 1);

                    /* Compute value to compare against.  */
                    loop_iv_add_mult_emit_before (loop, arg,
                                                  v->mult_val, v->add_val,
                                                  tem, where_bb, where_insn);
                    validate_change (insn, &XEXP (x, arg_operand), tem, 1);
                    if (apply_change_group ())
                      return 1;
                  }
            }

          /* This code has problems.  Basically, you can't know when
             seeing if we will eliminate BL, whether a particular giv
             of ARG will be reduced.  If it isn't going to be reduced,
             we can't eliminate BL.  We can try forcing it to be reduced,
             but that can generate poor code.

             The problem is that the benefit of reducing TV, below should
             be increased if BL can actually be eliminated, but this means
             we might have to do a topological sort of the order in which
             we try to process biv.  It doesn't seem worthwhile to do
             this sort of thing now.  */

#if 0
          /* Otherwise the reg compared with had better be a biv.  */
          if (!REG_P (arg)
              || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
            return 0;

          /* Look for a pair of givs, one for each biv,
             with identical coefficients.  */
          for (v = bl->giv; v; v = v->next_iv)
            {
              struct induction *tv;

              if (v->ignore || v->maybe_dead || v->mode != mode)
                continue;

              for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
                   tv = tv->next_iv)
                if (! tv->ignore && ! tv->maybe_dead
                    && rtx_equal_p (tv->mult_val, v->mult_val)
                    && rtx_equal_p (tv->add_val, v->add_val)
                    && tv->mode == mode)
                  {
                    if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
                      continue;

                    if (! eliminate_p)
                      return 1;

                    /* Replace biv with its giv's reduced reg.  */
                    XEXP (x, 1 - arg_operand) = v->new_reg;
                    /* Replace other operand with the other giv's
                       reduced reg.  */
                    XEXP (x, arg_operand) = tv->new_reg;
                    return 1;
                  }
            }
#endif
        }

      /* If we get here, the biv can't be eliminated.  */
      return 0;

    case MEM:
      /* If this address is a DEST_ADDR giv, it doesn't matter if the
         biv is used in it, since it will be replaced.  */
      for (v = bl->giv; v; v = v->next_iv)
        if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
          return 1;
      break;

    default:
      break;
    }

  /* See if any subexpression fails elimination.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'e':
          if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
                                       eliminate_p, where_bb, where_insn))
            return 0;
          break;

        case 'E':
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
                                         eliminate_p, where_bb, where_insn))
              return 0;
          break;
        }
    }

  return 1;
}
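
/* Illustrative sketch (hypothetical source loop): once the address giv
   P = A + 4*I has been strength-reduced, eliminating the biv I amounts
   to rewriting

       for (i = 0; i < n; i++)           for (p = a; p < a + n; p++)
         a[i] = 0;                =>        *p = 0;

   The exit test on I becomes a test on the reduced giv against the
   loop bound transformed by the same linear function (mult_val,
   add_val), after which I itself is dead and its increment can be
   deleted.  */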
/* Return nonzero if the last use of REG
   is in an insn following INSN in the same basic block.  */

static int
last_use_this_basic_block (rtx reg, rtx insn)
{
  rtx n;
  for (n = insn;
       n && !LABEL_P (n) && !JUMP_P (n);
       n = NEXT_INSN (n))
    {
      if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
        return 1;
    }
  return 0;
}
/* Called via `note_stores' to record the initial value of a biv.  Here we
   just record the location of the set and process it later.  */

static void
record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
{
  struct loop_ivs *ivs = (struct loop_ivs *) data;
  struct iv_class *bl;

  if (!REG_P (dest)
      || REGNO (dest) >= ivs->n_regs
      || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
    return;

  bl = REG_IV_CLASS (ivs, REGNO (dest));

  /* If this is the first set found, record it.  */
  if (bl->init_insn == 0)
    {
      bl->init_insn = note_insn;
      bl->init_set = set;
    }
}
/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid so we can't
   use it.  X must be a source expression only.  */

static void
update_reg_last_use (rtx x, rtx insn)
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify the regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of x.
     ???? This comment is not correct.  See for example loop_givs_reduce.
     This may insert an insn before another new insn.  */
  if (REG_P (x) && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
    {
      REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
    }
  else
    {
      int i, j;
      const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            update_reg_last_use (XEXP (x, i), insn);
          else if (fmt[i] == 'E')
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
              update_reg_last_use (XVECEXP (x, i, j), insn);
        }
    }
}
/* Similar to rtlanal.c:get_condition, except that we also put an
   invariant last unless both operands are invariants.  */

static rtx
get_condition_for_loop (const struct loop *loop, rtx x)
{
  rtx comparison = get_condition (x, (rtx *) 0, false, true);

  if (comparison == 0
      || ! loop_invariant_p (loop, XEXP (comparison, 0))
      || loop_invariant_p (loop, XEXP (comparison, 1)))
    return comparison;

  return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
                         XEXP (comparison, 1), XEXP (comparison, 0));
}
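
/* Illustrative sketch (hypothetical rtxes): a condition written with the
   invariant operand first, e.g.

       (lt (reg 70) (reg 71))    ;; reg 70 loop-invariant, reg 71 a biv

   is returned as the equivalent swapped condition

       (gt (reg 71) (reg 70))

   so that callers can rely on any invariant operand being last.  */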
/* Scan the function and determine whether it has indirect (computed) jumps.

   This is taken mostly from flow.c; similar code exists elsewhere
   in the compiler.  It may be useful to put this into rtlanal.c.  */
static int
indirect_jump_in_function_p (rtx start)
{
  rtx insn;

  for (insn = start; insn; insn = NEXT_INSN (insn))
    if (computed_jump_p (insn))
      return 1;

  return 0;
}
/* Add MEM to the LOOP_MEMS array, if appropriate.  See the
   documentation for LOOP_MEMS for the definition of `appropriate'.
   This function is called from prescan_loop via for_each_rtx.  */

static int
insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
{
  struct loop_info *loop_info = data;
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CLOBBER:
      /* We're not interested in MEMs that are only clobbered.  */
      return -1;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
         CONST_DOUBLE, so there's no need to traverse into this.  */
      return -1;

    case EXPR_LIST:
      /* We're not interested in any MEMs that only appear in notes.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  /* See if we've already seen this MEM.  */
  for (i = 0; i < loop_info->mems_idx; ++i)
    if (rtx_equal_p (m, loop_info->mems[i].mem))
      {
        if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
          loop_info->mems[i].mem = m;
        if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
          /* The modes of the two memory accesses are different.  If
             this happens, something tricky is going on, and we just
             don't optimize accesses to this MEM.  */
          loop_info->mems[i].optimize = 0;

        return 0;
      }

  /* Resize the array, if necessary.  */
  if (loop_info->mems_idx == loop_info->mems_allocated)
    {
      if (loop_info->mems_allocated != 0)
        loop_info->mems_allocated *= 2;
      else
        loop_info->mems_allocated = 32;

      loop_info->mems = xrealloc (loop_info->mems,
                                  loop_info->mems_allocated * sizeof (loop_mem_info));
    }

  /* Actually insert the MEM.  */
  loop_info->mems[loop_info->mems_idx].mem = m;
  /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
     because we can't put it in a register.  We still store it in the
     table, though, so that if we see the same address later, but in a
     non-BLK mode, we'll not think we can optimize it at that point.  */
  loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
  loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
  ++loop_info->mems_idx;

  return 0;
}
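
/* Illustrative sketch (hypothetical entry): a BLKmode reference such as
   a structure copy is recorded but marked non-optimizable,

       mems[k].mem      = (mem:BLK (reg 66))
       mems[k].optimize = 0      ;; GET_MODE (m) == BLKmode
       mems[k].reg      = NULL_RTX

   so that a later sighting of the same address in a non-BLK mode is not
   mistaken for a hoistable access.  */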
/* Allocate REGS->ARRAY or reallocate it if it is too small.

   Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
   register that is modified by an insn between FROM and TO.  If the
   value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
   more, stop incrementing it, to avoid overflow.

   Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
   register I is used, if it is only used once.  Otherwise, it is set
   to 0 (for no uses) or const0_rtx for more than one use.  This
   parameter may be zero, in which case this processing is not done.

   Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
   optimize register I.  */

static void
loop_regs_scan (const struct loop *loop, int extra_size)
{
  struct loop_regs *regs = LOOP_REGS (loop);
  int old_nregs;
  /* last_set[n] is nonzero iff reg n has been set in the current
     basic block.  In that case, it is the insn that last set reg n.  */
  rtx *last_set;
  rtx insn;
  int i;

  old_nregs = regs->num;
  regs->num = max_reg_num ();

  /* Grow the regs array if not allocated or too small.  */
  if (regs->num >= regs->size)
    {
      regs->size = regs->num + extra_size;

      regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));

      /* Zero the new elements.  */
      memset (regs->array + old_nregs, 0,
              (regs->size - old_nregs) * sizeof (*regs->array));
    }

  /* Clear previously scanned fields but do not clear n_times_set.  */
  for (i = 0; i < old_nregs; i++)
    {
      regs->array[i].set_in_loop = 0;
      regs->array[i].may_not_optimize = 0;
      regs->array[i].single_usage = NULL_RTX;
    }

  last_set = xcalloc (regs->num, sizeof (rtx));

  /* Scan the loop, recording register usage.  */
  for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          /* Record registers that have exactly one use.  */
          find_single_use_in_loop (regs, insn, PATTERN (insn));

          /* Include uses in REG_EQUAL notes.  */
          if (REG_NOTES (insn))
            find_single_use_in_loop (regs, insn, REG_NOTES (insn));

          if (GET_CODE (PATTERN (insn)) == SET
              || GET_CODE (PATTERN (insn)) == CLOBBER)
            count_one_set (regs, insn, PATTERN (insn), last_set);
          else if (GET_CODE (PATTERN (insn)) == PARALLEL)
            {
              int i;
              for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
                count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
                               last_set);
            }
        }

      if (LABEL_P (insn) || JUMP_P (insn))
        memset (last_set, 0, regs->num * sizeof (rtx));

      /* Invalidate all registers used for function argument passing.
         We check rtx_varies_p for the same reason as below, to allow
         optimizing PIC calculations.  */
      if (CALL_P (insn))
        {
          rtx link;
          for (link = CALL_INSN_FUNCTION_USAGE (insn);
               link;
               link = XEXP (link, 1))
            {
              rtx op, reg;

              if (GET_CODE (op = XEXP (link, 0)) == USE
                  && REG_P (reg = XEXP (op, 0))
                  && rtx_varies_p (reg, 1))
                regs->array[REGNO (reg)].may_not_optimize = 1;
            }
        }
    }

  /* Invalidate all hard registers clobbered by calls.  With one exception:
     a call-clobbered PIC register is still function-invariant for our
     purposes, since we can hoist any PIC calculations out of the loop.
     Thus the call to rtx_varies_p.  */
  if (LOOP_INFO (loop)->has_call)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
          && rtx_varies_p (regno_reg_rtx[i], 1))
        {
          regs->array[i].may_not_optimize = 1;
          regs->array[i].set_in_loop = 1;
        }

#ifdef AVOID_CCMODE_COPIES
  /* Don't try to move insns which set CC registers if we should not
     create CCmode register copies.  */
  for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
      regs->array[i].may_not_optimize = 1;
#endif

  /* Set regs->array[I].n_times_set for the new registers.  */
  for (i = old_nregs; i < regs->num; i++)
    regs->array[i].n_times_set = regs->array[i].set_in_loop;

  free (last_set);
}
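
/* Illustrative sketch (hypothetical results): after the scan, usage of
   register R is encoded as

       regs->array[R].single_usage == 0           ;; no use seen
       regs->array[R].single_usage == some insn   ;; exactly one use
       regs->array[R].single_usage == const0_rtx  ;; more than one use

   while set_in_loop counts sets, saturating at 127 so that the narrow
   counter cannot overflow.  */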
/* Returns the number of real INSNs in the LOOP.  */

static int
count_insns_in_loop (const struct loop *loop)
{
  int count = 0;
  rtx insn;

  for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      ++count;

  return count;
}
/* Move MEMs into registers for the duration of the loop.  */

static void
load_mems (const struct loop *loop)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int maybe_never = 0;
  int i;
  rtx p, prev_ebb_head;
  rtx label = NULL_RTX;
  rtx end_label;
  /* Nonzero if the next instruction may never be executed.  */
  int next_maybe_never = 0;
  unsigned int last_max_reg = max_reg_num ();

  if (loop_info->mems_idx == 0)
    return;

  /* We cannot use next_label here because it skips over normal insns.  */
  end_label = next_nonnote_insn (loop->end);
  if (end_label && !LABEL_P (end_label))
    end_label = NULL_RTX;

  /* Check to see if it's possible that some instructions in the loop are
     never executed.  Also check if there is a goto out of the loop other
     than right after the end of the loop.  */
  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (LABEL_P (p))
        maybe_never = 1;
      else if (JUMP_P (p)
               /* If we enter the loop in the middle, and scan
                  around to the beginning, don't set maybe_never
                  for that.  This must be an unconditional jump,
                  otherwise the code at the top of the loop might
                  never be executed.  Unconditional jumps are
                  followed a by barrier then loop end.  */
               && ! (JUMP_LABEL (p) == loop->top
                     && NEXT_INSN (NEXT_INSN (p)) == loop->end
                     && any_uncondjump_p (p)))
        {
          /* If this is a jump outside of the loop but not right
             after the end of the loop, we would have to emit new fixup
             sequences for each such label.  */
          if (/* If we can't tell where control might go when this
                 JUMP_INSN is executed, we must be conservative.  */
              !JUMP_LABEL (p)
              || (JUMP_LABEL (p) != end_label
                  && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
                      || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
                      || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
            return;

          if (!any_condjump_p (p))
            /* Something complicated.  */
            maybe_never = 1;
          else
            /* If there are any more instructions in the loop, they
               might not be reached.  */
            next_maybe_never = 1;
        }
      else if (next_maybe_never)
        maybe_never = 1;
    }

  /* Find start of the extended basic block that enters the loop.  */
  for (p = loop->start;
       PREV_INSN (p) && !LABEL_P (p);
       p = PREV_INSN (p))
    ;
  prev_ebb_head = p;

  cselib_init (true);

  /* Build table of mems that get set to constant values before the
     loop.  */
  for (; p != loop->start; p = NEXT_INSN (p))
    cselib_process_insn (p);

  /* Actually move the MEMs.  */
  for (i = 0; i < loop_info->mems_idx; ++i)
    {
      regset_head load_copies;
      regset_head store_copies;
      int written = 0;
      rtx reg;
      rtx mem = loop_info->mems[i].mem;
      rtx mem_list_entry;

      if (MEM_VOLATILE_P (mem)
          || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
        /* There's no telling whether or not MEM is modified.  */
        loop_info->mems[i].optimize = 0;

      /* Go through the MEMs written to in the loop to see if this
         one is aliased by one of them.  */
      mem_list_entry = loop_info->store_mems;
      while (mem_list_entry)
        {
          if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
            written = 1;
          else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
                                    mem, rtx_varies_p))
            {
              /* MEM is indeed aliased by this store.  */
              loop_info->mems[i].optimize = 0;
              break;
            }
          mem_list_entry = XEXP (mem_list_entry, 1);
        }

      if (flag_float_store && written
          && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
        loop_info->mems[i].optimize = 0;

      /* If this MEM is written to, we must be sure that there
         are no reads from another MEM that aliases this one.  */
      if (loop_info->mems[i].optimize && written)
        {
          int j;

          for (j = 0; j < loop_info->mems_idx; ++j)
            {
              if (j == i)
                continue;
              else if (true_dependence (mem,
                                        VOIDmode,
                                        loop_info->mems[j].mem,
                                        rtx_varies_p))
                {
                  /* It's not safe to hoist loop_info->mems[i] out of
                     the loop because writes to it might not be
                     seen by reads from loop_info->mems[j].  */
                  loop_info->mems[i].optimize = 0;
                  break;
                }
            }
        }

      if (maybe_never && may_trap_p (mem))
        /* We can't access the MEM outside the loop; it might
           cause a trap that wouldn't have happened otherwise.  */
        loop_info->mems[i].optimize = 0;

      if (!loop_info->mems[i].optimize)
        /* We thought we were going to lift this MEM out of the
           loop, but later discovered that we could not.  */
        continue;

      INIT_REG_SET (&load_copies);
      INIT_REG_SET (&store_copies);

      /* Allocate a pseudo for this MEM.  We set REG_USERVAR_P in
         order to keep scan_loop from moving stores to this MEM
         out of the loop just because this REG is neither a
         user-variable nor used in the loop test.  */
      reg = gen_reg_rtx (GET_MODE (mem));
      REG_USERVAR_P (reg) = 1;
      loop_info->mems[i].reg = reg;

      /* Now, replace all references to the MEM with the
         corresponding pseudos.  */
      for (p = next_insn_in_loop (loop, loop->scan_start);
           p != NULL_RTX;
           p = next_insn_in_loop (loop, p))
        {
          rtx set;

          if (! INSN_P (p))
            continue;

          set = single_set (p);

          /* See if this copies the mem into a register that isn't
             modified afterwards.  We'll try to do copy propagation
             a little further on.  */
          if (set
              /* @@@ This test is _way_ too conservative.  */
              && ! maybe_never
              && REG_P (SET_DEST (set))
              && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
              && REGNO (SET_DEST (set)) < last_max_reg
              && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
              && rtx_equal_p (SET_SRC (set), mem))
            SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));

          /* See if this copies the mem from a register that isn't
             modified afterwards.  We'll try to remove the
             redundant copy later on by doing a little register
             renaming and copy propagation.  This will help
             to untangle things for the BIV detection code.  */
          if (set
              && ! maybe_never
              && REG_P (SET_SRC (set))
              && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
              && REGNO (SET_SRC (set)) < last_max_reg
              && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
              && rtx_equal_p (SET_DEST (set), mem))
            SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));

          /* If this is a call which uses / clobbers this memory
             location, we must not change the interface here.  */
          if (CALL_P (p)
              && reg_mentioned_p (loop_info->mems[i].mem,
                                  CALL_INSN_FUNCTION_USAGE (p)))
            {
              cancel_changes (0);
              loop_info->mems[i].optimize = 0;
              break;
            }
          else
            /* Replace the memory reference with the shadow register.  */
            replace_loop_mems (p, loop_info->mems[i].mem,
                               loop_info->mems[i].reg, written);
        }

      if (! loop_info->mems[i].optimize)
        ; /* We found we couldn't do the replacement, so do nothing.  */
      else if (! apply_change_group ())
        /* We couldn't replace all occurrences of the MEM.  */
        loop_info->mems[i].optimize = 0;
      else
        {
          /* Load the memory immediately before LOOP->START, which is
             the NOTE_LOOP_BEG.  */
          cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
          rtx set;
          rtx best = mem;
          unsigned j;
          struct elt_loc_list *const_equiv = 0;
          reg_set_iterator rsi;

          if (e)
            {
              struct elt_loc_list *equiv;
              struct elt_loc_list *best_equiv = 0;
              for (equiv = e->locs; equiv; equiv = equiv->next)
                {
                  if (CONSTANT_P (equiv->loc))
                    const_equiv = equiv;
                  else if (REG_P (equiv->loc)
                           /* Extending hard register lifetimes causes crash
                              on SRC targets.  Doing so on non-SRC is
                              probably also not good idea, since we most
                              probably have pseudoregister equivalence as
                              well.  */
                           && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
                    best_equiv = equiv;
                }
              /* Use the constant equivalence if that is cheap enough.  */
              if (! best_equiv)
                best_equiv = const_equiv;
              else if (const_equiv
                       && (rtx_cost (const_equiv->loc, SET)
                           <= rtx_cost (best_equiv->loc, SET)))
                {
                  best_equiv = const_equiv;
                  const_equiv = 0;
                }

              /* If best_equiv is nonzero, we know that MEM is set to a
                 constant or register before the loop.  We will use this
                 knowledge to initialize the shadow register with that
                 constant or reg rather than by loading from MEM.  */
              if (best_equiv)
                best = copy_rtx (best_equiv->loc);
            }

          set = gen_move_insn (reg, best);
          set = loop_insn_hoist (loop, set);
          if (REG_P (best))
            {
              for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
                if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
                  {
                    REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
                    break;
                  }
            }

          if (const_equiv)
            set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));

          if (written)
            {
              if (label == NULL_RTX)
                {
                  label = gen_label_rtx ();
                  emit_label_after (label, loop->end);
                }

              /* Store the memory immediately after END, which is
                 the NOTE_LOOP_END.  */
              set = gen_move_insn (copy_rtx (mem), reg);
              loop_insn_emit_after (loop, 0, label, set);
            }

          if (loop_dump_stream)
            {
              fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
                       REGNO (reg), (written ? "r/w" : "r/o"));
              print_rtl (loop_dump_stream, mem);
              fputc ('\n', loop_dump_stream);
            }

          /* Attempt a bit of copy propagation.  This helps untangle the
             data flow, and enables {basic,general}_induction_var to find
             more bivs/givs.  */
          EXECUTE_IF_SET_IN_REG_SET
            (&load_copies, FIRST_PSEUDO_REGISTER, j, rsi)
            {
              try_copy_prop (loop, reg, j);
            }
          CLEAR_REG_SET (&load_copies);

          EXECUTE_IF_SET_IN_REG_SET
            (&store_copies, FIRST_PSEUDO_REGISTER, j, rsi)
            {
              try_swap_copy_prop (loop, reg, j);
            }
          CLEAR_REG_SET (&store_copies);
        }
    }

  /* Now, we need to replace all references to the previous exit
     label with the new one.  */
  if (label != NULL_RTX && end_label != NULL_RTX)
    for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
      if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
        redirect_jump (p, label, false);

  cselib_finish ();
}
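
/* Illustrative sketch (hypothetical source loop): for a read/write MEM
   whose address is loop-invariant,

       while (--n)                shadow = *p;     ;; before NOTE_LOOP_BEG
         *p += n;         =>      while (--n)
                                    shadow += n;
                                  *p = shadow;     ;; after NOTE_LOOP_END

   only register arithmetic remains inside the loop; the single load and
   the single store are placed around it (the store only for "r/w" mems,
   i.e. when WRITTEN is set).  */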
/* For communication between note_reg_stored and its caller.  */
struct note_reg_stored_arg
{
  int set_seen;
  rtx reg;
};

/* Called via note_stores, record in SET_SEEN whether X, which is written,
   is equal to ARG.  */
static void
note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
{
  struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
  if (t->reg == x)
    t->set_seen = 1;
}
/* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
   There must be exactly one insn that sets this pseudo; it will be
   deleted if all replacements succeed and we can prove that the register
   is not used after the loop.  */

static void
try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
{
  /* This is the reg that we are copying from.  */
  rtx reg_rtx = regno_reg_rtx[regno];
  rtx init_insn = 0;
  rtx insn;
  /* These help keep track of whether we replaced all uses of the reg.  */
  int replaced_last = 0;
  int store_is_first = 0;

  for (insn = next_insn_in_loop (loop, loop->scan_start);
       insn != NULL_RTX;
       insn = next_insn_in_loop (loop, insn))
    {
      rtx set;

      /* Only substitute within one extended basic block from the initializing
         insn.  */
      if (LABEL_P (insn) && init_insn)
        break;

      if (! INSN_P (insn))
        continue;

      /* Is this the initializing insn?  */
      set = single_set (insn);
      if (set
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) == regno)
        {
          gcc_assert (!init_insn);

          init_insn = insn;
          if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
            store_is_first = 1;
        }

      /* Only substitute after seeing the initializing insn.  */
      if (init_insn && insn != init_insn)
        {
          struct note_reg_stored_arg arg;

          replace_loop_regs (insn, reg_rtx, replacement);
          if (REGNO_LAST_UID (regno) == INSN_UID (insn))
            replaced_last = 1;

          /* Stop replacing when REPLACEMENT is modified.  */
          arg.reg = replacement;
          arg.set_seen = 0;
          note_stores (PATTERN (insn), note_reg_stored, &arg);
          if (arg.set_seen)
            {
              rtx note = find_reg_note (insn, REG_EQUAL, NULL);

              /* It is possible that we've turned previously valid REG_EQUAL to
                 invalid, as we change the REGNO to REPLACEMENT and unlike REGNO,
                 REPLACEMENT is modified, we get different meaning.  */
              if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
                remove_note (insn, note);
              break;
            }
        }
    }
  gcc_assert (init_insn);
  if (apply_change_group ())
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, " Replaced reg %d", regno);
      if (store_is_first && replaced_last)
        {
          rtx first;
          rtx retval_note;

          /* Assume we're just deleting INIT_INSN.  */
          first = init_insn;
          /* Look for REG_RETVAL note.  If we're deleting the end of
             the libcall sequence, the whole sequence can go.  */
          retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
          /* If we found a REG_RETVAL note, find the first instruction
             in the sequence.  */
          if (retval_note)
            first = XEXP (retval_note, 0);

          /* Delete the instructions.  */
          loop_delete_insns (first, init_insn);
        }
      if (loop_dump_stream)
        fprintf (loop_dump_stream, ".\n");
    }
}
/* Replace all the instructions from FIRST up to and including LAST
   with NOTE_INSN_DELETED notes.  */

static void
loop_delete_insns (rtx first, rtx last)
{
  while (1)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, ", deleting init_insn (%d)",
                 INSN_UID (first));
      delete_insn (first);

      /* If this was the LAST instructions we're supposed to delete,
         we're done.  */
      if (first == last)
        break;

      first = NEXT_INSN (first);
    }
}
/* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
   loop LOOP if the order of the sets of these registers can be
   swapped.  There must be exactly one insn within the loop that sets
   this pseudo followed immediately by a move insn that sets
   REPLACEMENT with REGNO.  */
static void
try_swap_copy_prop (const struct loop *loop, rtx replacement,
                    unsigned int regno)
{
  rtx insn;
  rtx set = NULL_RTX;
  unsigned int new_regno;

  new_regno = REGNO (replacement);

  for (insn = next_insn_in_loop (loop, loop->scan_start);
       insn != NULL_RTX;
       insn = next_insn_in_loop (loop, insn))
    {
      /* Search for the insn that copies REGNO to NEW_REGNO?  */
      if (INSN_P (insn)
          && (set = single_set (insn))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) == new_regno
          && REG_P (SET_SRC (set))
          && REGNO (SET_SRC (set)) == regno)
        break;
    }

  if (insn != NULL_RTX)
    {
      rtx prev_insn;
      rtx prev_set;

      /* Some DEF-USE info would come in handy here to make this
         function more general.  For now, just check the previous insn
         which is the most likely candidate for setting REGNO.  */

      prev_insn = PREV_INSN (insn);

      if (prev_insn
          && (prev_set = single_set (prev_insn))
          && REG_P (SET_DEST (prev_set))
          && REGNO (SET_DEST (prev_set)) == regno)
        {
          /* We have:
             (set (reg regno) (expr))
             (set (reg new_regno) (reg regno))

             so try converting this to:
             (set (reg new_regno) (expr))
             (set (reg regno) (reg new_regno))

             The former construct is often generated when a global
             variable used for an induction variable is shadowed by a
             register (NEW_REGNO).  The latter construct improves the
             chances of GIV replacement and BIV elimination.  */

          validate_change (prev_insn, &SET_DEST (prev_set),
                           replacement, 1);
          validate_change (insn, &SET_DEST (set),
                           SET_SRC (set), 1);
          validate_change (insn, &SET_SRC (set),
                           replacement, 1);

          if (apply_change_group ())
            {
              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "  Swapped set of reg %d at %d with reg %d at %d.\n",
                         regno, INSN_UID (insn),
                         new_regno, INSN_UID (prev_insn));

              /* Update first use of REGNO.  */
              if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
                REGNO_FIRST_UID (regno) = INSN_UID (insn);

              /* Now perform copy propagation to hopefully
                 remove all uses of REGNO within the loop.  */
              try_copy_prop (loop, replacement, regno);
            }
        }
    }
}
/* Worker function for find_mem_in_note, called via for_each_rtx.  */

static int
find_mem_in_note_1 (rtx *x, void *data)
{
  if (*x != NULL_RTX && MEM_P (*x))
    {
      rtx *res = (rtx *) data;
      *res = *x;
      return 1;
    }
  return 0;
}

/* Returns the first MEM found in NOTE by depth-first search.  */

static rtx
find_mem_in_note (rtx note)
{
  if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
    return note;
  return NULL_RTX;
}
/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually a pointer
   to a structure describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (rtx *mem, void *data)
{
  loop_replace_args *args = (loop_replace_args *) data;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
         CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  if (!rtx_equal_p (args->match, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  /* Actually replace the MEM.  */
  validate_change (args->insn, mem, args->replacement, 1);

  return 0;
}

static void
replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
{
  loop_replace_args args;

  args.insn = insn;
  args.match = mem;
  args.replacement = reg;

  for_each_rtx (&insn, replace_loop_mem, &args);

  /* If we hoist a mem write out of the loop, then REG_EQUAL
     notes referring to the mem are no longer valid.  */
  if (written)
    {
      rtx note, sub;
      rtx *link;

      for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
        {
          if (REG_NOTE_KIND (note) == REG_EQUAL
              && (sub = find_mem_in_note (note))
              && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
            {
              /* Remove the note.  */
              validate_change (NULL_RTX, link, XEXP (note, 1), 1);
              break;
            }
        }
    }
}
/* Replace one register with another.  Called through for_each_rtx; PX points
   to the rtx being scanned.  DATA is actually a pointer to
   a structure of arguments.  */

static int
replace_loop_reg (rtx *px, void *data)
{
  rtx x = *px;
  loop_replace_args *args = (loop_replace_args *) data;

  if (x == NULL_RTX)
    return 0;

  if (x == args->match)
    validate_change (args->insn, px, args->replacement, 1);

  return 0;
}

static void
replace_loop_regs (rtx insn, rtx reg, rtx replacement)
{
  loop_replace_args args;

  args.insn = insn;
  args.match = reg;
  args.replacement = replacement;

  for_each_rtx (&insn, replace_loop_reg, &args);
}
/* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
   (ignored in the interim).  */

static rtx
loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
                      basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
                      rtx pattern)
{
  return emit_insn_after (pattern, where_insn);
}


/* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
   in basic block WHERE_BB (ignored in the interim) within the loop
   otherwise hoist PATTERN into the loop pre-header.  */

rtx
loop_insn_emit_before (const struct loop *loop,
                       basic_block where_bb ATTRIBUTE_UNUSED,
                       rtx where_insn, rtx pattern)
{
  if (! where_insn)
    return loop_insn_hoist (loop, pattern);
  return emit_insn_before (pattern, where_insn);
}


/* Emit call insn for PATTERN before WHERE_INSN in basic block
   WHERE_BB (ignored in the interim) within the loop.  */

static rtx
loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
                            basic_block where_bb ATTRIBUTE_UNUSED,
                            rtx where_insn, rtx pattern)
{
  return emit_call_insn_before (pattern, where_insn);
}


/* Hoist insn for PATTERN into the loop pre-header.  */

rtx
loop_insn_hoist (const struct loop *loop, rtx pattern)
{
  return loop_insn_emit_before (loop, 0, loop->start, pattern);
}


/* Hoist call insn for PATTERN into the loop pre-header.  */

static rtx
loop_call_insn_hoist (const struct loop *loop, rtx pattern)
{
  return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
}


/* Sink insn for PATTERN after the loop end.  */

rtx
loop_insn_sink (const struct loop *loop, rtx pattern)
{
  return loop_insn_emit_before (loop, 0, loop->sink, pattern);
}
/* bl->final_value can be either general_operand or PLUS of general_operand
   and constant.  Emit sequence of instructions to load it into REG.  */
static rtx
gen_load_of_final_value (rtx reg, rtx final_value)
{
  rtx seq;

  start_sequence ();
  final_value = force_operand (final_value, reg);
  if (final_value != reg)
    emit_move_insn (reg, final_value);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* If the loop has multiple exits, emit insn for PATTERN before the
   loop to ensure that it will always be executed no matter how the
   loop exits.  Otherwise, emit the insn for PATTERN after the loop,
   since this is slightly more efficient.  */

static rtx
loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
{
  if (loop->exit_count)
    return loop_insn_hoist (loop, pattern);
  else
    return loop_insn_sink (loop, pattern);
}
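
/* Illustrative sketch (hypothetical placement) of the two cases:

       single exit:     [pre-header] [loop body] [pattern]  ;; sunk
       multiple exits:  [pre-header + pattern]   [loop body] ;; hoisted

   Hoisting is the conservative choice with multiple exits, since every
   way out of the loop then passes the emitted computation.  */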
static void
loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
{
  struct iv_class *bl;
  int iv_num = 0;

  if (! loop || ! file)
    return;

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    iv_num++;

  fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    {
      loop_iv_class_dump (bl, file, verbose);
      fputc ('\n', file);
    }
}
static void
loop_iv_class_dump (const struct iv_class *bl, FILE *file,
                    int verbose ATTRIBUTE_UNUSED)
{
  struct induction *v;
  rtx incr;
  int i;

  if (! bl || ! file)
    return;

  fprintf (file, "IV class for reg %d, benefit %d\n",
           bl->regno, bl->total_benefit);

  fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
  if (bl->initial_value)
    {
      fprintf (file, ", init val: ");
      print_simple_rtl (file, bl->initial_value);
    }
  if (bl->initial_test)
    {
      fprintf (file, ", init test: ");
      print_simple_rtl (file, bl->initial_test);
    }
  fputc ('\n', file);

  if (bl->final_value)
    {
      fprintf (file, " Final val: ");
      print_simple_rtl (file, bl->final_value);
      fputc ('\n', file);
    }

  if ((incr = biv_total_increment (bl)))
    {
      fprintf (file, " Total increment: ");
      print_simple_rtl (file, incr);
      fputc ('\n', file);
    }

  /* List the increments.  */
  for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
      print_simple_rtl (file, v->add_val);
      fputc ('\n', file);
    }

  /* List the givs.  */
  for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Giv%d: insn %d, benefit %d, ",
               i, INSN_UID (v->insn), v->benefit);
      if (v->giv_type == DEST_ADDR)
        print_simple_rtl (file, v->mem);
      else
        print_simple_rtl (file, single_set (v->insn));
      fputc ('\n', file);
    }
}
static void
loop_biv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  fprintf (file, "Biv %d: insn %d",
           REGNO (v->dest_reg), INSN_UID (v->insn));
  fprintf (file, " const ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}
static void
loop_giv_dump (const struct induction *v, FILE *file, int verbose)
{
  if (! v || ! file)
    return;

  if (v->giv_type == DEST_REG)
    fprintf (file, "Giv %d: insn %d",
             REGNO (v->dest_reg), INSN_UID (v->insn));
  else
    fprintf (file, "Dest address: insn %d",
             INSN_UID (v->insn));

  fprintf (file, " src reg %d benefit %d",
           REGNO (v->src_reg), v->benefit);
  fprintf (file, " lifetime %d",
           v->lifetime);

  if (v->replaceable)
    fprintf (file, " replaceable");

  if (v->no_const_addval)
    fprintf (file, " ncav");

  if (v->ext_dependent)
    {
      switch (GET_CODE (v->ext_dependent))
        {
        case SIGN_EXTEND:
          fprintf (file, " ext se");
          break;
        case ZERO_EXTEND:
          fprintf (file, " ext ze");
          break;
        case TRUNCATE:
          fprintf (file, " ext tr");
          break;
        default:
          gcc_unreachable ();
        }
    }

  fputc ('\n', file);
  fprintf (file, " mult ");
  print_simple_rtl (file, v->mult_val);

  fputc ('\n', file);
  fprintf (file, " add ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}
void
debug_ivs (const struct loop *loop)
{
  loop_ivs_dump (loop, stderr, 1);
}


void
debug_iv_class (const struct iv_class *bl)
{
  loop_iv_class_dump (bl, stderr, 1);
}


void
debug_biv (const struct induction *v)
{
  loop_biv_dump (v, stderr, 1);
}


void
debug_giv (const struct induction *v)
{
  loop_giv_dump (v, stderr, 1);
}
#define LOOP_BLOCK_NUM_1(INSN) \
((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)

/* The notes do not have an assigned block, so look at the next insn.  */
#define LOOP_BLOCK_NUM(INSN) \
((INSN) ? (NOTE_P (INSN) \
           ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
           : LOOP_BLOCK_NUM_1 (INSN)) \
 : -1)

#define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)

static void
loop_dump_aux (const struct loop *loop, FILE *file,
               int verbose ATTRIBUTE_UNUSED)
{
  rtx label;

  if (! loop || ! file || !BB_HEAD (loop->first))
    return;

  /* Print diagnostics to compare our concept of a loop with
     what the loop notes say.  */
  if (! PREV_INSN (BB_HEAD (loop->first))
      || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
      || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
      != NOTE_INSN_LOOP_BEG)
    fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
             INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
  if (! NEXT_INSN (BB_END (loop->last))
      || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
      || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
      != NOTE_INSN_LOOP_END)
    fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
             INSN_UID (NEXT_INSN (BB_END (loop->last))));

  if (loop->start)
    {
      fprintf (file,
               ";; start %d (%d), end %d (%d)\n",
               LOOP_BLOCK_NUM (loop->start),
               LOOP_INSN_UID (loop->start),
               LOOP_BLOCK_NUM (loop->end),
               LOOP_INSN_UID (loop->end));
      fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
               LOOP_BLOCK_NUM (loop->top),
               LOOP_INSN_UID (loop->top),
               LOOP_BLOCK_NUM (loop->scan_start),
               LOOP_INSN_UID (loop->scan_start));
      fprintf (file, ";; exit_count %d", loop->exit_count);
      if (loop->exit_count)
        {
          fputs (", labels:", file);
          for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
            {
              fprintf (file, " %d ",
                       LOOP_INSN_UID (XEXP (label, 0)));
            }
        }
      fputs ("\n", file);
    }
}
/* Call this function from the debugger to dump LOOP.  */

void
debug_loop (const struct loop *loop)
{
  flow_loop_dump (loop, stderr, loop_dump_aux, 1);
}

/* Call this function from the debugger to dump LOOPS.  */

void
debug_loops (const struct loops *loops)
{
  flow_loops_dump (loops, stderr, loop_dump_aux, 1);
}