/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */
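
/* A minimal illustration of the strength-reduction transformation described
   above.  This block is not part of the pass itself: the function names are
   hypothetical and the code is compiled out; it only documents the idea at
   the source level.  */
#if 0
static void
example_before (int *a, int n, int k)
{
  int i;
  for (i = 0; i < n; i++)
    a[i * k] = 0;		/* i * k is a general induction variable */
}

static void
example_after (int *a, int n, int k)
{
  int i, t;
  for (i = 0, t = 0; i < n; i++, t += k)
    a[t] = 0;			/* the multiply is replaced by an addition */
}
#endif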
#include "coretypes.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-flags.h"
/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif

#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif

#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif
/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100

/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6

/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum percentage of memory references that are worth prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif
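
/* For instance, with the default value of 220 the density threshold works
   out to 220/256, i.e. roughly 86%.  */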
/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif
#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
  ((REGNO) < FIRST_PSEUDO_REGISTER \
   ? (int) HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;   /* Nonzero means it is a mode for
				   a low part that we should avoid changing when
				   clearing the rest of the reg.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
				/* If PARTIAL is 1, GLOBAL means something different:
				   that the reg is live outside the range from where it is set
				   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */
  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1;	/* 1 means we copy to a new pseudo and replace
				   the original insn with a copy from that
				   pseudo, rather than deleting it.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};


FILE *loop_dump_stream;
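
/* Illustrative example, not from the original source: in a loop such as

       for (i = 0; i < n; i++)
	 a[i] = x * 10;

   the insn computing the invariant value x * 10 into a pseudo would get one
   struct movable entry; its INSN, SET_DEST/REGNO, CONSEC, LIFETIME and
   SAVINGS fields are what the heuristics below weigh against the size of
   the loop when deciding whether hoisting it pays off.  */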
/* Forward declarations.  */

static void invalidate_loops_containing_label PARAMS ((rtx));
static void find_and_verify_loops PARAMS ((rtx, struct loops *));
static void mark_loop_jump PARAMS ((rtx, struct loop *));
static void prescan_loop PARAMS ((struct loop *));
static int reg_in_basic_block_p PARAMS ((rtx, rtx));
static int consec_sets_invariant_p PARAMS ((const struct loop *, rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static void scan_loop PARAMS ((struct loop *, int));
static void replace_call_address PARAMS ((rtx, rtx, rtx));
static rtx skip_consec_insns PARAMS ((rtx, int));
static int libcall_benefit PARAMS ((rtx));
static void ignore_some_movables PARAMS ((struct loop_movables *));
static void force_movables PARAMS ((struct loop_movables *));
static void combine_movables PARAMS ((struct loop_movables *, struct loop_regs *));
static int num_unmoved_movables PARAMS ((const struct loop *));
static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *, struct loop_regs *));
static void add_label_notes PARAMS ((rtx, rtx));
static void move_movables PARAMS ((struct loop *loop, struct loop_movables *, int, int));
static void loop_movables_add PARAMS((struct loop_movables *, struct movable *));
static void loop_movables_free PARAMS((struct loop_movables *));
static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
static void loop_bivs_find PARAMS((struct loop *));
static void loop_bivs_init_find PARAMS((struct loop *));
static void loop_bivs_check PARAMS((struct loop *));
static void loop_givs_find PARAMS((struct loop *));
static void loop_givs_check PARAMS((struct loop *));
static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *, struct induction *, rtx));
static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
static void loop_ivs_free PARAMS((struct loop *));
static void strength_reduce PARAMS ((struct loop *, int));
static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
static void record_biv PARAMS ((struct loop *, struct induction *, rtx, rtx, rtx, rtx, rtx *,
static void check_final_value PARAMS ((const struct loop *, struct induction *));
static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
static void record_giv PARAMS ((const struct loop *, struct induction *, rtx, rtx, rtx, rtx, rtx,
				rtx, int, enum g_types, int, int, rtx *));
static void update_giv_derive PARAMS ((const struct loop *, rtx));
static void check_ext_dependent_givs PARAMS ((struct iv_class *, struct loop_info *));
static int basic_induction_var PARAMS ((const struct loop *, rtx, enum machine_mode, rtx, rtx,
					rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *, rtx *, rtx *, rtx *, int, int *,
static int consec_sets_giv PARAMS ((const struct loop *, int, rtx, rtx, rtx, rtx *, rtx *, rtx *, rtx *));
static int check_dbra_loop PARAMS ((struct loop *, int));
static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
static int product_cheap_p PARAMS ((rtx, rtx));
static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx, struct iv_class *, int,
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
static void loop_regs_scan PARAMS ((const struct loop *, int));
static int count_insns_in_loop PARAMS ((const struct loop *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
static int replace_loop_reg PARAMS ((rtx *, void *));
static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
static void note_reg_stored PARAMS ((rtx, rtx, void *));
static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
static void loop_regs_update PARAMS ((const struct loop *, rtx));
static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
static rtx loop_call_insn_emit_before PARAMS((const struct loop *, basic_block, rtx, rtx));
static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
static void loop_delete_insns PARAMS ((rtx, rtx));
static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
void debug_ivs PARAMS ((const struct loop *));
void debug_iv_class PARAMS ((const struct iv_class *));
void debug_biv PARAMS ((const struct induction *));
void debug_giv PARAMS ((const struct induction *));
void debug_loop PARAMS ((const struct loop *));
void debug_loops PARAMS ((const struct loops *));
typedef struct loop_replace_args
/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END) \
  (INSN_UID (INSN) < max_uid_for_loop \
   && INSN_LUID (INSN) >= INSN_LUID (START) \
   && INSN_LUID (INSN) <= INSN_LUID (END))
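
/* Usage sketch (hypothetical variable names): given a jump insn JUMP and
   the loop boundary insns LOOP_START and LOOP_END, the test

       if (INSN_IN_RANGE_P (JUMP_LABEL (jump), loop_start, loop_end))
	 ...

   asks whether the jump target lies inside the loop using only luid
   comparisons, which is valid because luids increase monotonically through
   the insn stream; the uid bound check guards against insns created after
   the luid table was built.  */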
/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;

static int indirect_jump_in_function_p PARAMS ((rtx));

static int compute_luids PARAMS ((rtx, rtx, int));
static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
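
/* For example, if the insns between START and END have uids 7, 42 and 9, in
   that order, they receive the consecutive luids PREV_LUID + 1, + 2 and + 3
   (a line-number note instead reuses the luid of the preceding insn), so
   luid comparisons answer "does A come before B" questions that raw uids
   cannot.  */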
compute_luids (start, end, prev_luid)

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

loop_optimize (f, dumpfile, flags)
     /* f is the first instruction of a chain of insns for one function */
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;
  /* Count the number of loops.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == NOTE
	&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
      max_loop_num++;

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
				       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);
  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();
  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();
  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);
522 /* Don't leave gaps in uid_luid for insns that have been
523 deleted. It is possible that the first or last insn
524 using some register has been deleted by cross-jumping.
525 Make sure that uid_luid for that former insn's uid
526 points to the general area where that insn used to be. */
527 for (i
= 0; i
< max_uid_for_loop
; i
++)
529 uid_luid
[0] = uid_luid
[i
];
530 if (uid_luid
[0] != 0)
533 for (i
= 0; i
< max_uid_for_loop
; i
++)
534 if (uid_luid
[i
] == 0)
535 uid_luid
[i
] = uid_luid
[i
- 1];
537 /* Determine if the function has indirect jump. On some systems
538 this prevents low overhead loop instructions from being used. */
539 indirect_jump_in_function
= indirect_jump_in_function_p (f
);
541 /* Now scan the loops, last ones first, since this means inner ones are done
542 before outer ones. */
543 for (i
= max_loop_num
- 1; i
>= 0; i
--)
545 struct loop
*loop
= &loops
->array
[i
];
547 if (! loop
->invalid
&& loop
->end
)
548 scan_loop (loop
, flags
);
551 end_alias_analysis ();
560 /* Returns the next insn, in execution order, after INSN. START and
561 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
562 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
563 insn-stream; it is used with loops that are entered near the
567 next_insn_in_loop (loop
, insn
)
568 const struct loop
*loop
;
571 insn
= NEXT_INSN (insn
);
573 if (insn
== loop
->end
)
576 /* Go to the top of the loop, and continue there. */
583 if (insn
== loop
->scan_start
)
590 /* Optimize one loop described by LOOP. */
592 /* ??? Could also move memory writes out of loops if the destination address
593 is invariant, the source is invariant, the memory write is not volatile,
594 and if we can prove that no read inside the loop can read this address
595 before the write occurs. If there is a read of this address after the
596 write, then we can also mark the memory read as invariant. */
599 scan_loop (loop
, flags
)
603 struct loop_info
*loop_info
= LOOP_INFO (loop
);
604 struct loop_regs
*regs
= LOOP_REGS (loop
);
606 rtx loop_start
= loop
->start
;
607 rtx loop_end
= loop
->end
;
609 /* 1 if we are scanning insns that could be executed zero times. */
611 /* 1 if we are scanning insns that might never be executed
612 due to a subroutine call which might exit before they are reached. */
614 /* Number of insns in the loop. */
617 rtx temp
, update_start
, update_end
;
618 /* The SET from an insn, if it is the only SET in the insn. */
620 /* Chain describing insns movable in current loop. */
621 struct loop_movables
*movables
= LOOP_MOVABLES (loop
);
622 /* Ratio of extra register life span we can justify
623 for saving an instruction. More if loop doesn't call subroutines
624 since in that case saving an insn makes more difference
625 and more registers are available. */
627 /* Nonzero if we are scanning instructions in a sub-loop. */
  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
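
  /* For illustration (not from the original source), a loop such as

	 while (cond) body;

     is often emitted as

	   jump to TEST
     TOP:  body
     TEST: if (cond) jump to TOP

     i.e. it is entered at the exit test.  In that case the jump's target
     becomes LOOP->SCAN_START, and the label the end test branches back to
     is recorded in LOOP->TOP, as the code below does.  */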
652 for (p
= NEXT_INSN (loop_start
);
654 && GET_CODE (p
) != CODE_LABEL
&& ! INSN_P (p
)
655 && (GET_CODE (p
) != NOTE
656 || (NOTE_LINE_NUMBER (p
) != NOTE_INSN_LOOP_BEG
657 && NOTE_LINE_NUMBER (p
) != NOTE_INSN_LOOP_END
));
661 loop
->scan_start
= p
;
663 /* If loop end is the end of the current function, then emit a
664 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
665 note insn. This is the position we use when sinking insns out of
667 if (NEXT_INSN (loop
->end
) != 0)
668 loop
->sink
= NEXT_INSN (loop
->end
);
670 loop
->sink
= emit_note_after (NOTE_INSN_DELETED
, loop
->end
);
672 /* Set up variables describing this loop. */
674 threshold
= (loop_info
->has_call
? 1 : 2) * (1 + n_non_fixed_regs
);
676 /* If loop has a jump before the first label,
677 the true entry is the target of that jump.
678 Start scan from there.
679 But record in LOOP->TOP the place where the end-test jumps
680 back to so we can scan that after the end of the loop. */
681 if (GET_CODE (p
) == JUMP_INSN
682 /* Loop entry must be unconditional jump (and not a RETURN) */
683 && any_uncondjump_p (p
)
684 && JUMP_LABEL (p
) != 0
685 /* Check to see whether the jump actually
686 jumps out of the loop (meaning it's no loop).
687 This case can happen for things like
688 do {..} while (0). If this label was generated previously
689 by loop, we can't tell anything about it and have to reject
691 && INSN_IN_RANGE_P (JUMP_LABEL (p
), loop_start
, loop_end
))
693 loop
->top
= next_label (loop
->scan_start
);
694 loop
->scan_start
= JUMP_LABEL (p
);
697 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
698 as required by loop_reg_used_before_p. So skip such loops. (This
699 test may never be true, but it's best to play it safe.)
701 Also, skip loops where we do not start scanning at a label. This
702 test also rejects loops starting with a JUMP_INSN that failed the
705 if (INSN_UID (loop
->scan_start
) >= max_uid_for_loop
706 || GET_CODE (loop
->scan_start
) != CODE_LABEL
)
708 if (loop_dump_stream
)
709 fprintf (loop_dump_stream
, "\nLoop from %d to %d is phony.\n\n",
710 INSN_UID (loop_start
), INSN_UID (loop_end
));
714 /* Allocate extra space for REGs that might be created by load_mems.
715 We allocate a little extra slop as well, in the hopes that we
716 won't have to reallocate the regs array. */
717 loop_regs_scan (loop
, loop_info
->mems_idx
+ 16);
718 insn_count
= count_insns_in_loop (loop
);
720 if (loop_dump_stream
)
722 fprintf (loop_dump_stream
, "\nLoop from %d to %d: %d real insns.\n",
723 INSN_UID (loop_start
), INSN_UID (loop_end
), insn_count
);
725 fprintf (loop_dump_stream
, "Continue at insn %d.\n",
726 INSN_UID (loop
->cont
));
729 /* Scan through the loop finding insns that are safe to move.
730 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
731 this reg will be considered invariant for subsequent insns.
732 We consider whether subsequent insns use the reg
733 in deciding whether it is worth actually moving.
735 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
736 and therefore it is possible that the insns we are scanning
737 would never be executed. At such times, we must make sure
738 that it is safe to execute the insn once instead of zero times.
739 When MAYBE_NEVER is 0, all insns will be executed at least once
740 so that is not a problem. */
742 for (in_libcall
= 0, p
= next_insn_in_loop (loop
, loop
->scan_start
);
744 p
= next_insn_in_loop (loop
, p
))
746 if (in_libcall
&& INSN_P (p
) && find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
748 if (GET_CODE (p
) == INSN
)
750 temp
= find_reg_note (p
, REG_LIBCALL
, NULL_RTX
);
754 && (set
= single_set (p
))
755 && GET_CODE (SET_DEST (set
)) == REG
756 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
757 && SET_DEST (set
) != pic_offset_table_rtx
759 && ! regs
->array
[REGNO (SET_DEST (set
))].may_not_optimize
)
765 rtx src
= SET_SRC (set
);
766 rtx dependencies
= 0;
768 /* Figure out what to use as a source of this insn. If a
769 REG_EQUIV note is given or if a REG_EQUAL note with a
770 constant operand is specified, use it as the source and
771 mark that we should move this insn by calling
772 emit_move_insn rather that duplicating the insn.
774 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
776 temp
= find_reg_note (p
, REG_EQUIV
, NULL_RTX
);
778 src
= XEXP (temp
, 0), move_insn
= 1;
781 temp
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
);
782 if (temp
&& CONSTANT_P (XEXP (temp
, 0)))
783 src
= XEXP (temp
, 0), move_insn
= 1;
784 if (temp
&& find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
786 src
= XEXP (temp
, 0);
787 /* A libcall block can use regs that don't appear in
788 the equivalent expression. To move the libcall,
789 we must move those regs too. */
790 dependencies
= libcall_other_reg (p
, src
);
794 /* For parallels, add any possible uses to the dependencies, as
795 we can't move the insn without resolving them first. */
796 if (GET_CODE (PATTERN (p
)) == PARALLEL
)
798 for (i
= 0; i
< XVECLEN (PATTERN (p
), 0); i
++)
800 rtx x
= XVECEXP (PATTERN (p
), 0, i
);
801 if (GET_CODE (x
) == USE
)
803 = gen_rtx_EXPR_LIST (VOIDmode
, XEXP (x
, 0),
808 if (/* The register is used in basic blocks other
809 than the one where it is set (meaning that
810 something after this point in the loop might
811 depend on its value before the set). */
812 ! reg_in_basic_block_p (p
, SET_DEST (set
))
813 /* And the set is not guaranteed to be executed once
814 the loop starts, or the value before the set is
815 needed before the set occurs...
817 ??? Note we have quadratic behavior here, mitigated
818 by the fact that the previous test will often fail for
819 large loops. Rather than re-scanning the entire loop
820 each time for register usage, we should build tables
821 of the register usage and use them here instead. */
823 || loop_reg_used_before_p (loop
, set
, p
)))
	    /* It is unsafe to move the set.  However, it may be OK to
	       move the source into a new pseudo, and substitute a
	       reg-to-reg copy for the original insn.

	       This code used to consider it OK to move a set of a variable
	       which was not created by the user and not used in an exit
	       test.  That behavior is incorrect and was removed.  */
834 /* Don't try to optimize a register that was made
835 by loop-optimization for an inner loop.
836 We don't know its life-span, so we can't compute
838 if (REGNO (SET_DEST (set
)) >= max_reg_before_loop
)
840 /* Don't move the source and add a reg-to-reg copy with -Os
841 (this certainly increases size) or if the source is
842 already a reg (the motion will gain nothing). */
844 && (optimize_size
|| GET_CODE (SET_SRC (set
)) == REG
845 || (CONSTANT_P (SET_SRC (set
))
846 && LEGITIMATE_CONSTANT_P (SET_SRC (set
)))))
848 else if ((tem
= loop_invariant_p (loop
, src
))
849 && (dependencies
== 0
851 = loop_invariant_p (loop
, dependencies
)) != 0)
852 && (regs
->array
[REGNO (SET_DEST (set
))].set_in_loop
== 1
854 = consec_sets_invariant_p
855 (loop
, SET_DEST (set
),
856 regs
->array
[REGNO (SET_DEST (set
))].set_in_loop
,
858 /* If the insn can cause a trap (such as divide by zero),
859 can't move it unless it's guaranteed to be executed
860 once loop is entered. Even a function call might
861 prevent the trap insn from being reached
862 (since it might exit!) */
863 && ! ((maybe_never
|| call_passed
)
864 && may_trap_p (src
)))
867 int regno
= REGNO (SET_DEST (set
));
869 /* A potential lossage is where we have a case where two insns
870 can be combined as long as they are both in the loop, but
871 we move one of them outside the loop. For large loops,
872 this can lose. The most common case of this is the address
873 of a function being called.
875 Therefore, if this register is marked as being used
876 exactly once if we are in a loop with calls
877 (a "large loop"), see if we can replace the usage of
878 this register with the source of this SET. If we can,
881 Don't do this if P has a REG_RETVAL note or if we have
882 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
884 if (loop_info
->has_call
885 && regs
->array
[regno
].single_usage
!= 0
886 && regs
->array
[regno
].single_usage
!= const0_rtx
887 && REGNO_FIRST_UID (regno
) == INSN_UID (p
)
888 && (REGNO_LAST_UID (regno
)
889 == INSN_UID (regs
->array
[regno
].single_usage
))
890 && regs
->array
[regno
].set_in_loop
== 1
891 && GET_CODE (SET_SRC (set
)) != ASM_OPERANDS
892 && ! side_effects_p (SET_SRC (set
))
893 && ! find_reg_note (p
, REG_RETVAL
, NULL_RTX
)
894 && (! SMALL_REGISTER_CLASSES
895 || (! (GET_CODE (SET_SRC (set
)) == REG
896 && (REGNO (SET_SRC (set
))
897 < FIRST_PSEUDO_REGISTER
))))
898 /* This test is not redundant; SET_SRC (set) might be
899 a call-clobbered register and the life of REGNO
900 might span a call. */
901 && ! modified_between_p (SET_SRC (set
), p
,
902 regs
->array
[regno
].single_usage
)
903 && no_labels_between_p (p
,
904 regs
->array
[regno
].single_usage
)
905 && validate_replace_rtx (SET_DEST (set
), SET_SRC (set
),
906 regs
->array
[regno
].single_usage
))
908 /* Replace any usage in a REG_EQUAL note. Must copy
909 the new source, so that we don't get rtx sharing
910 between the SET_SOURCE and REG_NOTES of insn p. */
911 REG_NOTES (regs
->array
[regno
].single_usage
)
913 (REG_NOTES (regs
->array
[regno
].single_usage
),
914 SET_DEST (set
), copy_rtx (SET_SRC (set
))));
917 for (i
= 0; i
< LOOP_REGNO_NREGS (regno
, SET_DEST (set
));
919 regs
->array
[regno
+i
].set_in_loop
= 0;
923 m
= (struct movable
*) xmalloc (sizeof (struct movable
));
927 m
->dependencies
= dependencies
;
928 m
->set_dest
= SET_DEST (set
);
931 = regs
->array
[REGNO (SET_DEST (set
))].set_in_loop
- 1;
935 m
->move_insn
= move_insn
;
936 m
->move_insn_first
= 0;
937 m
->insert_temp
= insert_temp
;
938 m
->is_equiv
= (find_reg_note (p
, REG_EQUIV
, NULL_RTX
) != 0);
939 m
->savemode
= VOIDmode
;
941 /* Set M->cond if either loop_invariant_p
942 or consec_sets_invariant_p returned 2
943 (only conditionally invariant). */
944 m
->cond
= ((tem
| tem1
| tem2
) > 1);
945 m
->global
= LOOP_REG_GLOBAL_P (loop
, regno
);
947 m
->lifetime
= LOOP_REG_LIFETIME (loop
, regno
);
948 m
->savings
= regs
->array
[regno
].n_times_set
;
949 if (find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
950 m
->savings
+= libcall_benefit (p
);
951 for (i
= 0; i
< LOOP_REGNO_NREGS (regno
, SET_DEST (set
)); i
++)
952 regs
->array
[regno
+i
].set_in_loop
= move_insn
? -2 : -1;
953 /* Add M to the end of the chain MOVABLES. */
954 loop_movables_add (movables
, m
);
958 /* It is possible for the first instruction to have a
959 REG_EQUAL note but a non-invariant SET_SRC, so we must
960 remember the status of the first instruction in case
961 the last instruction doesn't have a REG_EQUAL note. */
962 m
->move_insn_first
= m
->move_insn
;
964 /* Skip this insn, not checking REG_LIBCALL notes. */
965 p
= next_nonnote_insn (p
);
966 /* Skip the consecutive insns, if there are any. */
967 p
= skip_consec_insns (p
, m
->consec
);
968 /* Back up to the last insn of the consecutive group. */
969 p
= prev_nonnote_insn (p
);
971 /* We must now reset m->move_insn, m->is_equiv, and
972 possibly m->set_src to correspond to the effects of
974 temp
= find_reg_note (p
, REG_EQUIV
, NULL_RTX
);
976 m
->set_src
= XEXP (temp
, 0), m
->move_insn
= 1;
979 temp
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
);
980 if (temp
&& CONSTANT_P (XEXP (temp
, 0)))
981 m
->set_src
= XEXP (temp
, 0), m
->move_insn
= 1;
987 = (find_reg_note (p
, REG_EQUIV
, NULL_RTX
) != 0);
990 /* If this register is always set within a STRICT_LOW_PART
991 or set to zero, then its high bytes are constant.
992 So clear them outside the loop and within the loop
993 just load the low bytes.
994 We must check that the machine has an instruction to do so.
995 Also, if the value loaded into the register
996 depends on the same register, this cannot be done. */
997 else if (SET_SRC (set
) == const0_rtx
998 && GET_CODE (NEXT_INSN (p
)) == INSN
999 && (set1
= single_set (NEXT_INSN (p
)))
1000 && GET_CODE (set1
) == SET
1001 && (GET_CODE (SET_DEST (set1
)) == STRICT_LOW_PART
)
1002 && (GET_CODE (XEXP (SET_DEST (set1
), 0)) == SUBREG
)
1003 && (SUBREG_REG (XEXP (SET_DEST (set1
), 0))
1005 && !reg_mentioned_p (SET_DEST (set
), SET_SRC (set1
)))
1007 int regno
= REGNO (SET_DEST (set
));
1008 if (regs
->array
[regno
].set_in_loop
== 2)
1011 m
= (struct movable
*) xmalloc (sizeof (struct movable
));
1014 m
->set_dest
= SET_DEST (set
);
1015 m
->dependencies
= 0;
1021 m
->move_insn_first
= 0;
1022 m
->insert_temp
= insert_temp
;
1024 /* If the insn may not be executed on some cycles,
1025 we can't clear the whole reg; clear just high part.
1026 Not even if the reg is used only within this loop.
1033 Clearing x before the inner loop could clobber a value
1034 being saved from the last time around the outer loop.
1035 However, if the reg is not used outside this loop
1036 and all uses of the register are in the same
1037 basic block as the store, there is no problem.
1039 If this insn was made by loop, we don't know its
1040 INSN_LUID and hence must make a conservative
1042 m
->global
= (INSN_UID (p
) >= max_uid_for_loop
1043 || LOOP_REG_GLOBAL_P (loop
, regno
)
1044 || (labels_in_range_p
1045 (p
, REGNO_FIRST_LUID (regno
))));
1046 if (maybe_never
&& m
->global
)
1047 m
->savemode
= GET_MODE (SET_SRC (set1
));
1049 m
->savemode
= VOIDmode
;
1053 m
->lifetime
= LOOP_REG_LIFETIME (loop
, regno
);
1056 i
< LOOP_REGNO_NREGS (regno
, SET_DEST (set
));
1058 regs
->array
[regno
+i
].set_in_loop
= -1;
1059 /* Add M to the end of the chain MOVABLES. */
1060 loop_movables_add (movables
, m
);
1065 /* Past a call insn, we get to insns which might not be executed
1066 because the call might exit. This matters for insns that trap.
1067 Constant and pure call insns always return, so they don't count. */
1068 else if (GET_CODE (p
) == CALL_INSN
&& ! CONST_OR_PURE_CALL_P (p
))
1070 /* Past a label or a jump, we get to insns for which we
1071 can't count on whether or how many times they will be
1072 executed during each iteration. Therefore, we can
1073 only move out sets of trivial variables
1074 (those not used after the loop). */
1075 /* Similar code appears twice in strength_reduce. */
1076 else if ((GET_CODE (p
) == CODE_LABEL
|| GET_CODE (p
) == JUMP_INSN
)
1077 /* If we enter the loop in the middle, and scan around to the
1078 beginning, don't set maybe_never for that. This must be an
1079 unconditional jump, otherwise the code at the top of the
1080 loop might never be executed. Unconditional jumps are
1081 followed by a barrier then the loop_end. */
1082 && ! (GET_CODE (p
) == JUMP_INSN
&& JUMP_LABEL (p
) == loop
->top
1083 && NEXT_INSN (NEXT_INSN (p
)) == loop_end
1084 && any_uncondjump_p (p
)))
1086 else if (GET_CODE (p
) == NOTE
)
1088 /* At the virtual top of a converted loop, insns are again known to
1089 be executed: logically, the loop begins here even though the exit
1090 code has been duplicated. */
1091 if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_VTOP
&& loop_depth
== 0)
1092 maybe_never
= call_passed
= 0;
1093 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_BEG
)
1095 else if (NOTE_LINE_NUMBER (p
) == NOTE_INSN_LOOP_END
)
1100 /* If one movable subsumes another, ignore that other. */
1102 ignore_some_movables (movables
);
1104 /* For each movable insn, see if the reg that it loads
1105 leads when it dies right into another conditionally movable insn.
1106 If so, record that the second insn "forces" the first one,
1107 since the second can be moved only if the first is. */
1109 force_movables (movables
);
1111 /* See if there are multiple movable insns that load the same value.
1112 If there are, make all but the first point at the first one
1113 through the `match' field, and add the priorities of them
1114 all together as the priority of the first. */
1116 combine_movables (movables
, regs
);
1118 /* Now consider each movable insn to decide whether it is worth moving.
1119 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1121 For machines with few registers this increases code size, so do not
     move movables when optimizing for code size on such machines.
     (The 18 below is the value for i386.)  */
1126 || (reg_class_size
[GENERAL_REGS
] > 18 && !loop_info
->has_call
))
1128 move_movables (loop
, movables
, threshold
, insn_count
);
1130 /* Recalculate regs->array if move_movables has created new
1132 if (max_reg_num () > regs
->num
)
1134 loop_regs_scan (loop
, 0);
1135 for (update_start
= loop_start
;
1136 PREV_INSN (update_start
)
1137 && GET_CODE (PREV_INSN (update_start
)) != CODE_LABEL
;
1138 update_start
= PREV_INSN (update_start
))
1140 update_end
= NEXT_INSN (loop_end
);
1142 reg_scan_update (update_start
, update_end
, loop_max_reg
);
1143 loop_max_reg
= max_reg_num ();
1147 /* Now candidates that still are negative are those not moved.
1148 Change regs->array[I].set_in_loop to indicate that those are not actually
1150 for (i
= 0; i
< regs
->num
; i
++)
1151 if (regs
->array
[i
].set_in_loop
< 0)
1152 regs
->array
[i
].set_in_loop
= regs
->array
[i
].n_times_set
;
1154 /* Now that we've moved some things out of the loop, we might be able to
1155 hoist even more memory references. */
1158 /* Recalculate regs->array if load_mems has created new registers. */
1159 if (max_reg_num () > regs
->num
)
1160 loop_regs_scan (loop
, 0);
1162 for (update_start
= loop_start
;
1163 PREV_INSN (update_start
)
1164 && GET_CODE (PREV_INSN (update_start
)) != CODE_LABEL
;
1165 update_start
= PREV_INSN (update_start
))
1167 update_end
= NEXT_INSN (loop_end
);
1169 reg_scan_update (update_start
, update_end
, loop_max_reg
);
1170 loop_max_reg
= max_reg_num ();
1172 if (flag_strength_reduce
)
1174 if (update_end
&& GET_CODE (update_end
) == CODE_LABEL
)
1175 /* Ensure our label doesn't go away. */
1176 LABEL_NUSES (update_end
)++;
1178 strength_reduce (loop
, flags
);
1180 reg_scan_update (update_start
, update_end
, loop_max_reg
);
1181 loop_max_reg
= max_reg_num ();
1183 if (update_end
&& GET_CODE (update_end
) == CODE_LABEL
1184 && --LABEL_NUSES (update_end
) == 0)
1185 delete_related_insns (update_end
);
1189 /* The movable information is required for strength reduction. */
1190 loop_movables_free (movables
);
1197 /* Add elements to *OUTPUT to record all the pseudo-regs
1198 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1201 record_excess_regs (in_this
, not_in_this
, output
)
1202 rtx in_this
, not_in_this
;
1209 code
= GET_CODE (in_this
);
1223 if (REGNO (in_this
) >= FIRST_PSEUDO_REGISTER
1224 && ! reg_mentioned_p (in_this
, not_in_this
))
1225 *output
= gen_rtx_EXPR_LIST (VOIDmode
, in_this
, *output
);
1232 fmt
= GET_RTX_FORMAT (code
);
1233 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1240 for (j
= 0; j
< XVECLEN (in_this
, i
); j
++)
1241 record_excess_regs (XVECEXP (in_this
, i
, j
), not_in_this
, output
);
1245 record_excess_regs (XEXP (in_this
, i
), not_in_this
, output
);
1251 /* Check what regs are referred to in the libcall block ending with INSN,
1252 aside from those mentioned in the equivalent value.
1253 If there are none, return 0.
1254 If there are one or more, return an EXPR_LIST containing all of them. */
1257 libcall_other_reg (insn
, equiv
)
1260 rtx note
= find_reg_note (insn
, REG_RETVAL
, NULL_RTX
);
1261 rtx p
= XEXP (note
, 0);
1264 /* First, find all the regs used in the libcall block
1265 that are not mentioned as inputs to the result. */
1269 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
1270 || GET_CODE (p
) == CALL_INSN
)
1271 record_excess_regs (PATTERN (p
), equiv
, &output
);
1278 /* Return 1 if all uses of REG
1279 are between INSN and the end of the basic block. */
1282 reg_in_basic_block_p (insn
, reg
)
1285 int regno
= REGNO (reg
);
1288 if (REGNO_FIRST_UID (regno
) != INSN_UID (insn
))
1291 /* Search this basic block for the already recorded last use of the reg. */
1292 for (p
= insn
; p
; p
= NEXT_INSN (p
))
1294 switch (GET_CODE (p
))
1301 /* Ordinary insn: if this is the last use, we win. */
1302 if (REGNO_LAST_UID (regno
) == INSN_UID (p
))
1307 /* Jump insn: if this is the last use, we win. */
1308 if (REGNO_LAST_UID (regno
) == INSN_UID (p
))
1310 /* Otherwise, it's the end of the basic block, so we lose. */
1315 /* It's the end of the basic block, so we lose. */
1323 /* The "last use" that was recorded can't be found after the first
1324 use. This can happen when the last use was deleted while
1325 processing an inner loop, this inner loop was then completely
1326 unrolled, and the outer loop is always exited after the inner loop,
1327 so that everything after the first use becomes a single basic block. */
1331 /* Compute the benefit of eliminating the insns in the block whose
1332 last insn is LAST. This may be a group of insns used to compute a
1333 value directly or can contain a library call. */
1336 libcall_benefit (last
)
1342 for (insn
= XEXP (find_reg_note (last
, REG_RETVAL
, NULL_RTX
), 0);
1343 insn
!= last
; insn
= NEXT_INSN (insn
))
1345 if (GET_CODE (insn
) == CALL_INSN
)
1346 benefit
+= 10; /* Assume at least this many insns in a library
1348 else if (GET_CODE (insn
) == INSN
1349 && GET_CODE (PATTERN (insn
)) != USE
1350 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
1357 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1360 skip_consec_insns (insn
, count
)
1364 for (; count
> 0; count
--)
1368 /* If first insn of libcall sequence, skip to end. */
1369 /* Do this at start of loop, since INSN is guaranteed to
1371 if (GET_CODE (insn
) != NOTE
1372 && (temp
= find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
)))
1373 insn
= XEXP (temp
, 0);
1376 insn
= NEXT_INSN (insn
);
1377 while (GET_CODE (insn
) == NOTE
);
1383 /* Ignore any movable whose insn falls within a libcall
1384 which is part of another movable.
1385 We make use of the fact that the movable for the libcall value
1386 was made later and so appears later on the chain. */
1389 ignore_some_movables (movables
)
1390 struct loop_movables
*movables
;
1392 struct movable
*m
, *m1
;
1394 for (m
= movables
->head
; m
; m
= m
->next
)
1396 /* Is this a movable for the value of a libcall? */
1397 rtx note
= find_reg_note (m
->insn
, REG_RETVAL
, NULL_RTX
);
1401 /* Check for earlier movables inside that range,
1402 and mark them invalid. We cannot use LUIDs here because
1403 insns created by loop.c for prior loops don't have LUIDs.
1404 Rather than reject all such insns from movables, we just
1405 explicitly check each insn in the libcall (since invariant
1406 libcalls aren't that common). */
1407 for (insn
= XEXP (note
, 0); insn
!= m
->insn
; insn
= NEXT_INSN (insn
))
1408 for (m1
= movables
->head
; m1
!= m
; m1
= m1
->next
)
1409 if (m1
->insn
== insn
)
1415 /* For each movable insn, see if the reg that it loads
1416 leads when it dies right into another conditionally movable insn.
1417 If so, record that the second insn "forces" the first one,
1418 since the second can be moved only if the first is. */
1421 force_movables (movables
)
1422 struct loop_movables
*movables
;
1424 struct movable
*m
, *m1
;
1426 for (m1
= movables
->head
; m1
; m1
= m1
->next
)
1427 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1428 if (!m1
->partial
&& !m1
->done
)
1430 int regno
= m1
->regno
;
1431 for (m
= m1
->next
; m
; m
= m
->next
)
1432 /* ??? Could this be a bug? What if CSE caused the
1433 register of M1 to be used after this insn?
1434 Since CSE does not update regno_last_uid,
1435 this insn M->insn might not be where it dies.
1436 But very likely this doesn't matter; what matters is
1437 that M's reg is computed from M1's reg. */
1438 if (INSN_UID (m
->insn
) == REGNO_LAST_UID (regno
)
1441 if (m
!= 0 && m
->set_src
== m1
->set_dest
1442 /* If m->consec, m->set_src isn't valid. */
1446 /* Increase the priority of the moving the first insn
1447 since it permits the second to be moved as well. */
1451 m1
->lifetime
+= m
->lifetime
;
1452 m1
->savings
+= m
->savings
;
1457 /* Find invariant expressions that are equal and can be combined into
1461 combine_movables (movables
, regs
)
1462 struct loop_movables
*movables
;
1463 struct loop_regs
*regs
;
1466 char *matched_regs
= (char *) xmalloc (regs
->num
);
1467 enum machine_mode mode
;
1469 /* Regs that are set more than once are not allowed to match
1470 or be matched. I'm no longer sure why not. */
1471 /* Only pseudo registers are allowed to match or be matched,
1472 since move_movables does not validate the change. */
1473 /* Perhaps testing m->consec_sets would be more appropriate here? */
1475 for (m
= movables
->head
; m
; m
= m
->next
)
1476 if (m
->match
== 0 && regs
->array
[m
->regno
].n_times_set
== 1
1477 && m
->regno
>= FIRST_PSEUDO_REGISTER
1482 int regno
= m
->regno
;
1484 memset (matched_regs
, 0, regs
->num
);
1485 matched_regs
[regno
] = 1;
1487 /* We want later insns to match the first one. Don't make the first
1488 one match any later ones. So start this loop at m->next. */
1489 for (m1
= m
->next
; m1
; m1
= m1
->next
)
1490 if (m
!= m1
&& m1
->match
== 0
1492 && regs
->array
[m1
->regno
].n_times_set
== 1
1493 && m1
->regno
>= FIRST_PSEUDO_REGISTER
1494 /* A reg used outside the loop mustn't be eliminated. */
1496 /* A reg used for zero-extending mustn't be eliminated. */
1498 && (matched_regs
[m1
->regno
]
1501 /* Can combine regs with different modes loaded from the
1502 same constant only if the modes are the same or
1503 if both are integer modes with M wider or the same
1504 width as M1. The check for integer is redundant, but
1505 safe, since the only case of differing destination
1506 modes with equal sources is when both sources are
1507 VOIDmode, i.e., CONST_INT. */
1508 (GET_MODE (m
->set_dest
) == GET_MODE (m1
->set_dest
)
1509 || (GET_MODE_CLASS (GET_MODE (m
->set_dest
)) == MODE_INT
1510 && GET_MODE_CLASS (GET_MODE (m1
->set_dest
)) == MODE_INT
1511 && (GET_MODE_BITSIZE (GET_MODE (m
->set_dest
))
1512 >= GET_MODE_BITSIZE (GET_MODE (m1
->set_dest
)))))
1513 /* See if the source of M1 says it matches M. */
1514 && ((GET_CODE (m1
->set_src
) == REG
1515 && matched_regs
[REGNO (m1
->set_src
)])
1516 || rtx_equal_for_loop_p (m
->set_src
, m1
->set_src
,
1518 && ((m
->dependencies
== m1
->dependencies
)
1519 || rtx_equal_p (m
->dependencies
, m1
->dependencies
)))
1521 m
->lifetime
+= m1
->lifetime
;
1522 m
->savings
+= m1
->savings
;
1525 matched_regs
[m1
->regno
] = 1;
1529 /* Now combine the regs used for zero-extension.
1530 This can be done for those not marked `global'
1531 provided their lives don't overlap. */
1533 for (mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
); mode
!= VOIDmode
;
1534 mode
= GET_MODE_WIDER_MODE (mode
))
1536 struct movable
*m0
= 0;
1538 /* Combine all the registers for extension from mode MODE.
1539 Don't combine any that are used outside this loop. */
1540 for (m
= movables
->head
; m
; m
= m
->next
)
1541 if (m
->partial
&& ! m
->global
1542 && mode
== GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m
->insn
)))))
1546 int first
= REGNO_FIRST_LUID (m
->regno
);
1547 int last
= REGNO_LAST_LUID (m
->regno
);
1551 /* First one: don't check for overlap, just record it. */
1556 /* Make sure they extend to the same mode.
1557 (Almost always true.) */
1558 if (GET_MODE (m
->set_dest
) != GET_MODE (m0
->set_dest
))
1561 /* We already have one: check for overlap with those
1562 already combined together. */
1563 for (m1
= movables
->head
; m1
!= m
; m1
= m1
->next
)
1564 if (m1
== m0
|| (m1
->partial
&& m1
->match
== m0
))
1565 if (! (REGNO_FIRST_LUID (m1
->regno
) > last
1566 || REGNO_LAST_LUID (m1
->regno
) < first
))
1569 /* No overlap: we can combine this with the others. */
1570 m0
->lifetime
+= m
->lifetime
;
1571 m0
->savings
+= m
->savings
;
1581 free (matched_regs
);
1584 /* Returns the number of movable instructions in LOOP that were not
1585 moved outside the loop. */
1588 num_unmoved_movables (loop
)
1589 const struct loop
*loop
;
1594 for (m
= LOOP_MOVABLES (loop
)->head
; m
; m
= m
->next
)
1602 /* Return 1 if regs X and Y will become the same if moved. */
1605 regs_match_p (x
, y
, movables
)
1607 struct loop_movables
*movables
;
1609 unsigned int xn
= REGNO (x
);
1610 unsigned int yn
= REGNO (y
);
1611 struct movable
*mx
, *my
;
1613 for (mx
= movables
->head
; mx
; mx
= mx
->next
)
1614 if (mx
->regno
== xn
)
1617 for (my
= movables
->head
; my
; my
= my
->next
)
1618 if (my
->regno
== yn
)
1622 && ((mx
->match
== my
->match
&& mx
->match
!= 0)
1624 || mx
== my
->match
));
1627 /* Return 1 if X and Y are identical-looking rtx's.
1628 This is the Lisp function EQUAL for rtx arguments.
1630 If two registers are matching movables or a movable register and an
1631 equivalent constant, consider them equal. */
1634 rtx_equal_for_loop_p (x
, y
, movables
, regs
)
1636 struct loop_movables
*movables
;
1637 struct loop_regs
*regs
;
1647 if (x
== 0 || y
== 0)
1650 code
= GET_CODE (x
);
1652 /* If we have a register and a constant, they may sometimes be
1654 if (GET_CODE (x
) == REG
&& regs
->array
[REGNO (x
)].set_in_loop
== -2
1657 for (m
= movables
->head
; m
; m
= m
->next
)
1658 if (m
->move_insn
&& m
->regno
== REGNO (x
)
1659 && rtx_equal_p (m
->set_src
, y
))
1662 else if (GET_CODE (y
) == REG
&& regs
->array
[REGNO (y
)].set_in_loop
== -2
1665 for (m
= movables
->head
; m
; m
= m
->next
)
1666 if (m
->move_insn
&& m
->regno
== REGNO (y
)
1667 && rtx_equal_p (m
->set_src
, x
))
1671 /* Otherwise, rtx's of different codes cannot be equal. */
1672 if (code
!= GET_CODE (y
))
1675 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1676 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1678 if (GET_MODE (x
) != GET_MODE (y
))
1681 /* These three types of rtx's can be compared nonrecursively. */
1683 return (REGNO (x
) == REGNO (y
) || regs_match_p (x
, y
, movables
));
1685 if (code
== LABEL_REF
)
1686 return XEXP (x
, 0) == XEXP (y
, 0);
1687 if (code
== SYMBOL_REF
)
1688 return XSTR (x
, 0) == XSTR (y
, 0);
  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.  */
1693 fmt
= GET_RTX_FORMAT (code
);
1694 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1699 if (XWINT (x
, i
) != XWINT (y
, i
))
1704 if (XINT (x
, i
) != XINT (y
, i
))
1709 /* Two vectors must have the same length. */
1710 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
1713 /* And the corresponding elements must match. */
1714 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1715 if (rtx_equal_for_loop_p (XVECEXP (x
, i
, j
), XVECEXP (y
, i
, j
),
1716 movables
, regs
) == 0)
1721 if (rtx_equal_for_loop_p (XEXP (x
, i
), XEXP (y
, i
), movables
, regs
)
1727 if (strcmp (XSTR (x
, i
), XSTR (y
, i
)))
1732 /* These are just backpointers, so they don't matter. */
1738 /* It is believed that rtx's at this level will never
1739 contain anything but integers and other rtx's,
1740 except for within LABEL_REFs and SYMBOL_REFs. */
1748 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1749 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1750 references is incremented once for each added note. */
1753 add_label_notes (x
, insns
)
1757 enum rtx_code code
= GET_CODE (x
);
1762 if (code
== LABEL_REF
&& !LABEL_REF_NONLOCAL_P (x
))
      /* This code used to ignore labels that referred to dispatch tables to
	 avoid flow generating (slightly) worse code.

	 We no longer ignore such label references (see LABEL_REF handling in
	 mark_jump_label for additional information).  */
1769 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
1770 if (reg_mentioned_p (XEXP (x
, 0), insn
))
1772 REG_NOTES (insn
) = gen_rtx_INSN_LIST (REG_LABEL
, XEXP (x
, 0),
1774 if (LABEL_P (XEXP (x
, 0)))
1775 LABEL_NUSES (XEXP (x
, 0))++;
1779 fmt
= GET_RTX_FORMAT (code
);
1780 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1783 add_label_notes (XEXP (x
, i
), insns
);
1784 else if (fmt
[i
] == 'E')
1785 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1786 add_label_notes (XVECEXP (x
, i
, j
), insns
);
/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */

move_movables (loop, movables, threshold, insn_count)
     struct loop_movables *movables;

  struct loop_regs *regs = LOOP_REGS (loop);
  int nregs = regs->num;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  /* Map of pseudo-register replacements to handle combining
     when we move several insns that load the same value
     into different pseudo-registers.  */
  rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
  char *already_moved = (char *) xcalloc (nregs, sizeof (char));

  for (m = movables->head; m; m = m->next)
      /* Describe this movable insn.  */
      if (loop_dump_stream)
          fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
                   INSN_UID (m->insn), m->regno, m->lifetime);
          fprintf (loop_dump_stream, "consec %d, ", m->consec);
          fprintf (loop_dump_stream, "cond ");
          fprintf (loop_dump_stream, "force ");
          fprintf (loop_dump_stream, "global ");
          fprintf (loop_dump_stream, "done ");
          fprintf (loop_dump_stream, "move-insn ");
          fprintf (loop_dump_stream, "matches %d ",
                   INSN_UID (m->match->insn));
          fprintf (loop_dump_stream, "forces %d ",
                   INSN_UID (m->forces->insn));

      /* Ignore the insn if it's already done (it matched something else).
         Otherwise, see if it is now safe to move.  */
          || (1 == loop_invariant_p (loop, m->set_src)
              && (m->dependencies == 0
                  || 1 == loop_invariant_p (loop, m->dependencies))
              || 1 == consec_sets_invariant_p (loop, m->set_dest,
          && (! m->forces || m->forces->done))
          int savings = m->savings;

          /* We have an insn that is safe to move.
             Compute its desirability.  */

          if (loop_dump_stream)
            fprintf (loop_dump_stream, "savings %d ", savings);

          if (regs->array[regno].moved_once && loop_dump_stream)
            fprintf (loop_dump_stream, "halved since already moved ");

          /* An insn MUST be moved if we already moved something else
             which is safe only if this one is moved too: that is,
             if already_moved[REGNO] is nonzero.  */

          /* An insn is desirable to move if the new lifetime of the
             register is no more than THRESHOLD times the old lifetime.
             If it's not desirable, it means the loop is so big
             that moving won't speed things up much,
             and it is liable to make register usage worse.  */

          /* It is also desirable to move if it can be moved at no
             extra cost because something else was already moved.  */

          if (already_moved[regno]
              || flag_move_all_movables
              || (threshold * savings * m->lifetime) >=
                 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
              || (m->forces && m->forces->done
                  && regs->array[m->forces->regno].n_times_set == 1))
              rtx first = NULL_RTX;
              rtx newreg = NULL_RTX;

                newreg = gen_reg_rtx (GET_MODE (m->set_dest));

              /* Now move the insns that set the reg.  */

              if (m->partial && m->match)
                  /* Find the end of this chain of matching regs.
                     Thus, we load each reg in the chain from that one reg.
                     And that reg is loaded with 0 directly,
                     since it has ->match == 0.  */
                  for (m1 = m; m1->match; m1 = m1->match);
                  newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
                                          SET_DEST (PATTERN (m1->insn)));
                  i1 = loop_insn_hoist (loop, newpat);

                  /* Mark the moved, invariant reg as being allowed to
                     share a hard reg with the other matching invariant.  */
                  REG_NOTES (i1) = REG_NOTES (m->insn);
                  r1 = SET_DEST (PATTERN (m->insn));
                  r2 = SET_DEST (PATTERN (m1->insn));
                    = gen_rtx_EXPR_LIST (VOIDmode, r1,
                        gen_rtx_EXPR_LIST (VOIDmode, r2,
                  delete_insn (m->insn);

                  if (loop_dump_stream)
                    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));

              /* If we are to re-generate the item being moved with a
                 new move insn, first delete what we have and then emit
                 the move insn before the loop.  */
              else if (m->move_insn)
                  for (count = m->consec; count >= 0; count--)
                      /* If this is the first insn of a library call sequence,
                         something is very wrong.  */
                      if (GET_CODE (p) != NOTE
                          && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))

                      /* If this is the last insn of a libcall sequence, then
                         delete every insn in the sequence except the last.
                         The last insn is handled in the normal manner.  */
                      if (GET_CODE (p) != NOTE
                          && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
                          temp = XEXP (temp, 0);
                          temp = delete_insn (temp);

                      p = delete_insn (p);

                  /* simplify_giv_expr expects that it can walk the insns
                     at m->insn forwards and see this old sequence we are
                     tossing here.  delete_insn does preserve the next
                     pointers, but when we skip over a NOTE we must fix
                     it up.  Otherwise that code walks into the non-deleted
                     insn stream.  */
                  while (p && GET_CODE (p) == NOTE)
                    p = NEXT_INSN (temp) = NEXT_INSN (p);

                      /* Replace the original insn with a move from
                         our newly created temp.  */
                      emit_move_insn (m->set_dest, newreg);
                      emit_insn_before (seq, p);

                  emit_move_insn (m->insert_temp ? newreg : m->set_dest,

                  add_label_notes (m->set_src, seq);

                  i1 = loop_insn_hoist (loop, seq);
                  if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
                    set_unique_reg_note (i1,
                                         m->is_equiv ? REG_EQUIV : REG_EQUAL,

                  if (loop_dump_stream)
                    fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));

              /* The more regs we move, the less we like moving them.  */
                  for (count = m->consec; count >= 0; count--)
                      /* If first insn of libcall sequence, skip to end.  */
                      /* Do this at start of loop, since p is guaranteed to
                         be an insn here.  */
                      if (GET_CODE (p) != NOTE
                          && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))

                      /* If last insn of libcall sequence, move all
                         insns except the last before the loop.  The last
                         insn is handled in the normal manner.  */
                      if (GET_CODE (p) != NOTE
                          && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
                          rtx fn_address_insn = 0;

                          for (temp = XEXP (temp, 0); temp != p;
                               temp = NEXT_INSN (temp))
                              if (GET_CODE (temp) == NOTE)

                              body = PATTERN (temp);

                              /* Find the next insn after TEMP,
                                 not counting USE or NOTE insns.  */
                              for (next = NEXT_INSN (temp); next != p;
                                   next = NEXT_INSN (next))
                                if (! (GET_CODE (next) == INSN
                                       && GET_CODE (PATTERN (next)) == USE)
                                    && GET_CODE (next) != NOTE)

                              /* If that is the call, this may be the insn
                                 that loads the function address.

                                 Extract the function address from the insn
                                 that loads it into a register.
                                 If this insn was cse'd, we get incorrect code.

                                 So emit a new move insn that copies the
                                 function address into the register that the
                                 call insn will use.  flow.c will delete any
                                 redundant stores that we have created.  */
                              if (GET_CODE (next) == CALL_INSN
                                  && GET_CODE (body) == SET
                                  && GET_CODE (SET_DEST (body)) == REG
                                  && (n = find_reg_note (temp, REG_EQUAL,
                                  fn_reg = SET_SRC (body);
                                  if (GET_CODE (fn_reg) != REG)
                                    fn_reg = SET_DEST (body);
                                  fn_address = XEXP (n, 0);
                                  fn_address_insn = temp;

                              /* We have the call insn.
                                 If it uses the register we suspect it might,
                                 load it with the correct address directly.  */
                              if (GET_CODE (temp) == CALL_INSN
                                  && reg_referenced_p (fn_reg, body))
                                loop_insn_emit_after (loop, 0, fn_address_insn,
                                                      (fn_reg, fn_address));

                              if (GET_CODE (temp) == CALL_INSN)
                                  i1 = loop_call_insn_hoist (loop, body);
                                  /* Because the USAGE information potentially
                                     contains objects other than hard registers
                                     we need to copy it.  */
                                  if (CALL_INSN_FUNCTION_USAGE (temp))
                                    CALL_INSN_FUNCTION_USAGE (i1)
                                      = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
                                i1 = loop_insn_hoist (loop, body);

                              if (temp == fn_address_insn)
                                fn_address_insn = i1;
                              REG_NOTES (i1) = REG_NOTES (temp);
                              REG_NOTES (temp) = NULL;

                      if (m->savemode != VOIDmode)
                          /* P sets REG to zero; but we should clear only
                             the bits that are not covered by the mode
                             m->savemode.  */
                          rtx reg = m->set_dest;

                          tem = expand_simple_binop
                            (GET_MODE (reg), AND, reg,
                             GEN_INT ((((HOST_WIDE_INT) 1
                                        << GET_MODE_BITSIZE (m->savemode)))
                             reg, 1, OPTAB_LIB_WIDEN);
                          emit_move_insn (reg, tem);
                          sequence = get_insns ();
                          i1 = loop_insn_hoist (loop, sequence);
                      else if (GET_CODE (p) == CALL_INSN)
                          i1 = loop_call_insn_hoist (loop, PATTERN (p));
                          /* Because the USAGE information potentially
                             contains objects other than hard registers
                             we need to copy it.  */
                          if (CALL_INSN_FUNCTION_USAGE (p))
                            CALL_INSN_FUNCTION_USAGE (i1)
                              = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
                      else if (count == m->consec && m->move_insn_first)
                          /* The SET_SRC might not be invariant, so we must
                             use the REG_EQUAL note.  */
                          emit_move_insn (m->set_dest, m->set_src);

                          add_label_notes (m->set_src, seq);

                          i1 = loop_insn_hoist (loop, seq);
                          if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
                            set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
                                                 : REG_EQUAL, m->set_src);
                      else if (m->insert_temp)
                          rtx *reg_map2 = (rtx *) xcalloc (REGNO (newreg),
                          reg_map2[m->regno] = newreg;

                          i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
                          replace_regs (i1, reg_map2, REGNO (newreg), 1);
                        i1 = loop_insn_hoist (loop, PATTERN (p));

                      if (REG_NOTES (i1) == 0)
                          REG_NOTES (i1) = REG_NOTES (p);
                          REG_NOTES (p) = NULL;

                          /* If there is a REG_EQUAL note present whose value
                             is not loop invariant, then delete it, since it
                             may cause problems with later optimization passes.
                             It is possible for cse to create such notes
                             like this as a result of record_jump_cond.  */
                          if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
                              && ! loop_invariant_p (loop, XEXP (temp, 0)))
                            remove_note (i1, temp);

                      if (loop_dump_stream)
                        fprintf (loop_dump_stream, " moved to %d",

                      /* If library call, now fix the REG_NOTES that contain
                         insn pointers, namely REG_LIBCALL on FIRST
                         and REG_RETVAL on I1.  */
                      if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
                          XEXP (temp, 0) = first;
                          temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
                          XEXP (temp, 0) = i1;

                  /* simplify_giv_expr expects that it can walk the insns
                     at m->insn forwards and see this old sequence we are
                     tossing here.  delete_insn does preserve the next
                     pointers, but when we skip over a NOTE we must fix
                     it up.  Otherwise that code walks into the non-deleted
                     insn stream.  */
                  while (p && GET_CODE (p) == NOTE)
                    p = NEXT_INSN (temp) = NEXT_INSN (p);

                      /* Replace the original insn with a move from
                         our newly created temp.  */
                      emit_move_insn (m->set_dest, newreg);
                      emit_insn_before (seq, p);

              /* The more regs we move, the less we like moving them.  */

              if (!m->insert_temp)
                  /* Any other movable that loads the same register
                     MUST be moved.  */
                  already_moved[regno] = 1;

                  /* This reg has been moved out of one loop.  */
                  regs->array[regno].moved_once = 1;

                  /* The reg set here is now invariant.  */
                  for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
                    regs->array[regno+i].set_in_loop = 0;

                  /* Change the length-of-life info for the register
                     to say it lives at least the full length of this loop.
                     This will help guide optimizations in outer loops.  */

                  if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
                    /* This is the old insn before all the moved insns.
                       We can't use the moved insn because it is out of range
                       in uid_luid.  Only the old insns have luids.  */
                    REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
                  if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
                    REGNO_LAST_UID (regno) = INSN_UID (loop_end);

              /* Combine with this moved insn any other matching movables.  */
                for (m1 = movables->head; m1; m1 = m1->next)
                      /* Schedule the reg loaded by M1
                         for replacement so that it shares the reg of M.
                         If the modes differ (only possible in restricted
                         circumstances), make a SUBREG.

                         Note this assumes that the target dependent files
                         treat REG and SUBREG equally, including within
                         GO_IF_LEGITIMATE_ADDRESS and in all the
                         predicates since we never verify that replacing the
                         original register with a SUBREG results in a
                         recognizable insn.  */
                      if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
                        reg_map[m1->regno] = m->set_dest;
                          = gen_lowpart_common (GET_MODE (m1->set_dest),

                      /* Get rid of the matching insn
                         and prevent further processing of it.  */

                      /* If library call, delete all insns.  */
                      if ((temp = find_reg_note (m1->insn, REG_RETVAL,
                        delete_insn_chain (XEXP (temp, 0), m1->insn);
                        delete_insn (m1->insn);

                      /* Any other movable that loads the same register
                         MUST be moved.  */
                      already_moved[m1->regno] = 1;

                      /* The reg merged here is now invariant,
                         if the reg it matches is invariant.  */
                           i < LOOP_REGNO_NREGS (regno, m1->set_dest);
                        regs->array[m1->regno+i].set_in_loop = 0;
          else if (loop_dump_stream)
            fprintf (loop_dump_stream, "not desirable");
      else if (loop_dump_stream && !m->match)
        fprintf (loop_dump_stream, "not safe");

      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\n");

    new_start = loop_start;

  /* Go through all the instructions in the loop, making
     all the register substitutions scheduled in REG_MAP.  */
  for (p = new_start; p != loop_end; p = NEXT_INSN (p))
    if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
        || GET_CODE (p) == CALL_INSN)
        replace_regs (PATTERN (p), reg_map, nregs, 0);
        replace_regs (REG_NOTES (p), reg_map, nregs, 0);

  free (already_moved);
loop_movables_add (movables, m)
     struct loop_movables *movables;

  if (movables->head == 0)
    movables->last->next = m;

loop_movables_free (movables)
     struct loop_movables *movables;

  struct movable *m_next;

  for (m = movables->head; m; m = m_next)
/* Scan X and replace the address of any MEM in it with ADDR.
   REG is the address that MEM should have before the replacement.  */

replace_call_address (x, reg, addr)

  code = GET_CODE (x);

      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 1), reg, addr);

      /* Short cut for very common case.  */
      replace_call_address (XEXP (x, 0), reg, addr);

      /* If this MEM uses a reg other than the one we expected,
         something is wrong.  */
      if (XEXP (x, 0) != reg)

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        replace_call_address (XEXP (x, i), reg, addr);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          replace_call_address (XVECEXP (x, i, j), reg, addr);
/* Return the number of memory refs to addresses that vary
   in the rtx X.  */

count_nonfixed_reads (loop, x)
     const struct loop *loop;

  code = GET_CODE (x);

      return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
              + count_nonfixed_reads (loop, XEXP (x, 0)));

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        value += count_nonfixed_reads (loop, XEXP (x, i));
        for (j = 0; j < XVECLEN (x, i); j++)
          value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
/* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
   `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
   `unknown_address_altered', `unknown_constant_address_altered', and
   `num_mem_sets' in LOOP.  Also, fill in the array `mems' and the
   list `store_mems' in LOOP.  */

  struct loop_info *loop_info = LOOP_INFO (loop);
  rtx start = loop->start;
  rtx end = loop->end;
  /* The label after END.  Jumping here is just like falling off the
     end of the loop.  We use next_nonnote_insn instead of next_label
     as a hedge against the (pathological) case where some actual insn
     might end up between the two.  */
  rtx exit_target = next_nonnote_insn (end);

  loop_info->has_indirect_jump = indirect_jump_in_function;
  loop_info->pre_header_has_call = 0;
  loop_info->has_call = 0;
  loop_info->has_nonconst_call = 0;
  loop_info->has_prefetch = 0;
  loop_info->has_volatile = 0;
  loop_info->has_tablejump = 0;
  loop_info->has_multiple_exit_targets = 0;

  loop_info->unknown_address_altered = 0;
  loop_info->unknown_constant_address_altered = 0;
  loop_info->store_mems = NULL_RTX;
  loop_info->first_loop_store_insn = NULL_RTX;
  loop_info->mems_idx = 0;
  loop_info->num_mem_sets = 0;
  /* If loop opts run twice, this was set on 1st pass for 2nd.  */
  loop_info->preconditioned = NOTE_PRECONDITIONED (end);

  for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
       insn = PREV_INSN (insn))
      if (GET_CODE (insn) == CALL_INSN)
          loop_info->pre_header_has_call = 1;

  for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
       insn = NEXT_INSN (insn))
      switch (GET_CODE (insn))
          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
              /* Count number of loops contained in this one.  */
          else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)

          if (! CONST_OR_PURE_CALL_P (insn))
              loop_info->unknown_address_altered = 1;
              loop_info->has_nonconst_call = 1;
          else if (pure_call_p (insn))
            loop_info->has_nonconst_call = 1;
          loop_info->has_call = 1;
          if (can_throw_internal (insn))
            loop_info->has_multiple_exit_targets = 1;

          if (! loop_info->has_multiple_exit_targets)
              rtx set = pc_set (insn);

                  rtx src = SET_SRC (set);

                  if (GET_CODE (src) == IF_THEN_ELSE)
                      label1 = XEXP (src, 1);
                      label2 = XEXP (src, 2);

                  if (label1 && label1 != pc_rtx)
                      if (GET_CODE (label1) != LABEL_REF)
                          /* Something tricky.  */
                          loop_info->has_multiple_exit_targets = 1;
                      else if (XEXP (label1, 0) != exit_target
                               && LABEL_OUTSIDE_LOOP_P (label1))
                          /* A jump outside the current loop.  */
                          loop_info->has_multiple_exit_targets = 1;

                  /* A return, or something tricky.  */
                  loop_info->has_multiple_exit_targets = 1;

      if (volatile_refs_p (PATTERN (insn)))
        loop_info->has_volatile = 1;

      if (GET_CODE (insn) == JUMP_INSN
          && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
              || GET_CODE (PATTERN (insn)) == ADDR_VEC))
        loop_info->has_tablejump = 1;

      note_stores (PATTERN (insn), note_addr_stored, loop_info);
      if (! loop_info->first_loop_store_insn && loop_info->store_mems)
        loop_info->first_loop_store_insn = insn;

      if (flag_non_call_exceptions && can_throw_internal (insn))
        loop_info->has_multiple_exit_targets = 1;

  /* Now, rescan the loop, setting up the LOOP_MEMS array.  */
  if (/* An exception thrown by a called function might land us
         anywhere.  */
      ! loop_info->has_nonconst_call
      /* We don't want loads for MEMs moved to a location before the
         one at which their stack memory becomes allocated.  (Note
         that this is not a problem for malloc, etc., since those
         require actual function calls.)  */
      && ! current_function_calls_alloca
      /* There are ways to leave the loop other than falling off the
         end.  */
      && ! loop_info->has_multiple_exit_targets)
    for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
         insn = NEXT_INSN (insn))
      for_each_rtx (&insn, insert_loop_mem, loop_info);

  /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
     that loop_invariant_p and load_mems can use true_dependence
     to determine what is really clobbered.  */
  if (loop_info->unknown_address_altered)
      rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);

      loop_info->store_mems
        = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
  if (loop_info->unknown_constant_address_altered)
      rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);

      RTX_UNCHANGING_P (mem) = 1;
      loop_info->store_mems
        = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
/* Invalidate all loops containing LABEL.  */

invalidate_loops_containing_label (label)

  for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)

/* Scan the function looking for loops.  Record the start and end of each loop.
   Also mark as invalid loops any loops that contain a setjmp or are branched
   to from outside the loop.  */

find_and_verify_loops (f, loops)
     struct loops *loops;

  struct loop *current_loop;
  struct loop *next_loop;

  num_loops = loops->num;

  compute_luids (f, NULL_RTX, 0);

  /* If there are jumps to undefined labels,
     treat them as jumps out of any/all loops.
     This also avoids writing past end of tables when there are no loops.  */

  /* Find boundaries of loops, mark which loops are contained within
     loops, and invalidate loops that have setjmp.  */

  current_loop = NULL;
  for (insn = f; insn; insn = NEXT_INSN (insn))
      if (GET_CODE (insn) == NOTE)
        switch (NOTE_LINE_NUMBER (insn))
          case NOTE_INSN_LOOP_BEG:
            next_loop = loops->array + num_loops;
            next_loop->num = num_loops;
            next_loop->start = insn;
            next_loop->outer = current_loop;
            current_loop = next_loop;

          case NOTE_INSN_LOOP_CONT:
            current_loop->cont = insn;

          case NOTE_INSN_LOOP_VTOP:
            current_loop->vtop = insn;

          case NOTE_INSN_LOOP_END:
            current_loop->end = insn;
            current_loop = current_loop->outer;

      if (GET_CODE (insn) == CALL_INSN
          && find_reg_note (insn, REG_SETJMP, NULL))
          /* In this case, we must invalidate our current loop and any
             enclosing loops.  */
          for (loop = current_loop; loop; loop = loop->outer)
              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "\nLoop at %d ignored due to setjmp.\n",
                         INSN_UID (loop->start));

      /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
         enclosing loop, but this doesn't matter.  */
      uid_loop[INSN_UID (insn)] = current_loop;

  /* Any loop containing a label used in an initializer must be invalidated,
     because it can be jumped into from anywhere.  */
  for (label = forced_labels; label; label = XEXP (label, 1))
    invalidate_loops_containing_label (XEXP (label, 0));

  /* Any loop containing a label used for an exception handler must be
     invalidated, because it can be jumped into from anywhere.  */
  for_each_eh_label (invalidate_loops_containing_label);

  /* Now scan all insn's in the function.  If any JUMP_INSN branches into a
     loop that it is not contained within, that loop is marked invalid.
     If any INSN or CALL_INSN uses a label's address, then the loop containing
     that label is marked invalid, because it could be jumped into from
     anywhere.

     Also look for blocks of code ending in an unconditional branch that
     exits the loop.  If such a block is surrounded by a conditional
     branch around the block, move the block elsewhere (see below) and
     invert the jump to point to the code block.  This may eliminate a
     label in our loop and will simplify processing by both us and a
     possible second cse pass.  */

  for (insn = f; insn; insn = NEXT_INSN (insn))
      struct loop *this_loop = uid_loop[INSN_UID (insn)];

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
          rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
            invalidate_loops_containing_label (XEXP (note, 0));

      if (GET_CODE (insn) != JUMP_INSN)

      mark_loop_jump (PATTERN (insn), this_loop);

      /* See if this is an unconditional branch outside the loop.  */
          && (GET_CODE (PATTERN (insn)) == RETURN
              || (any_uncondjump_p (insn)
                  && onlyjump_p (insn)
                  && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
          && get_max_uid () < max_uid_for_loop)
          rtx our_next = next_real_insn (insn);
          rtx last_insn_to_move = NEXT_INSN (insn);
          struct loop *dest_loop;
          struct loop *outer_loop = NULL;

          /* Go backwards until we reach the start of the loop, a label,
             or a JUMP_INSN.  */
          for (p = PREV_INSN (insn);
               GET_CODE (p) != CODE_LABEL
               && ! (GET_CODE (p) == NOTE
                     && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
               && GET_CODE (p) != JUMP_INSN;

          /* Check for the case where we have a jump to an inner nested
             loop, and do not perform the optimization in that case.  */

          if (JUMP_LABEL (insn))
              dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
                  for (outer_loop = dest_loop; outer_loop;
                       outer_loop = outer_loop->outer)
                    if (outer_loop == this_loop)

          /* Make sure that the target of P is within the current loop.  */

          if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
              && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
            outer_loop = this_loop;

          /* If we stopped on a JUMP_INSN to the next insn after INSN,
             we have a block of code to try to move.

             We look backward and then forward from the target of INSN
             to find a BARRIER at the same loop depth as the target.
             If we find such a BARRIER, we make a new label for the start
             of the block, invert the jump in P and point it to that label,
             and move the block of code to the spot we found.  */

              && GET_CODE (p) == JUMP_INSN
              && JUMP_LABEL (p) != 0
              /* Just ignore jumps to labels that were never emitted.
                 These always indicate compilation errors.  */
              && INSN_UID (JUMP_LABEL (p)) != 0
              && any_condjump_p (p) && onlyjump_p (p)
              && next_real_insn (JUMP_LABEL (p)) == our_next
              /* If it's not safe to move the sequence, then we
                 must not do it.  */
              && insns_safe_to_move_p (p, NEXT_INSN (insn),
                                       &last_insn_to_move))
                = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
              struct loop *target_loop = uid_loop[INSN_UID (target)];

              /* Search for possible garbage past the conditional jumps
                 and look for the last barrier.  */
              for (tmp = last_insn_to_move;
                   tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
                if (GET_CODE (tmp) == BARRIER)
                  last_insn_to_move = tmp;

              for (loc = target; loc; loc = PREV_INSN (loc))
                if (GET_CODE (loc) == BARRIER
                    /* Don't move things inside a tablejump.  */
                    && ((loc2 = next_nonnote_insn (loc)) == 0
                        || GET_CODE (loc2) != CODE_LABEL
                        || (loc2 = next_nonnote_insn (loc2)) == 0
                        || GET_CODE (loc2) != JUMP_INSN
                        || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
                            && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
                    && uid_loop[INSN_UID (loc)] == target_loop)

                for (loc = target; loc; loc = NEXT_INSN (loc))
                  if (GET_CODE (loc) == BARRIER
                      /* Don't move things inside a tablejump.  */
                      && ((loc2 = next_nonnote_insn (loc)) == 0
                          || GET_CODE (loc2) != CODE_LABEL
                          || (loc2 = next_nonnote_insn (loc2)) == 0
                          || GET_CODE (loc2) != JUMP_INSN
                          || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
                              && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
                      && uid_loop[INSN_UID (loc)] == target_loop)

                  rtx cond_label = JUMP_LABEL (p);
                  rtx new_label = get_label_after (p);

                  /* Ensure our label doesn't go away.  */
                  LABEL_NUSES (cond_label)++;

                  /* Verify that uid_loop is large enough and that
                     we can invert P.  */
                  if (invert_jump (p, new_label, 1))
                      /* If no suitable BARRIER was found, create a suitable
                         one before TARGET.  Since TARGET is a fall through
                         path, we'll need to insert a jump around our block
                         and add a BARRIER before TARGET.

                         This creates an extra unconditional jump outside
                         the loop.  However, the benefits of removing rarely
                         executed instructions from inside the loop usually
                         outweigh the cost of the extra unconditional jump
                         outside the loop.  */
                          temp = gen_jump (JUMP_LABEL (insn));
                          temp = emit_jump_insn_before (temp, target);
                          JUMP_LABEL (temp) = JUMP_LABEL (insn);
                          LABEL_NUSES (JUMP_LABEL (insn))++;
                          loc = emit_barrier_before (target);

                      /* Include the BARRIER after INSN and copy the
                         block after LOC.  */
                      if (squeeze_notes (&new_label, &last_insn_to_move))
                      reorder_insns (new_label, last_insn_to_move, loc);

                      /* All those insns are now in TARGET_LOOP.  */
                           q != NEXT_INSN (last_insn_to_move);
                        uid_loop[INSN_UID (q)] = target_loop;

                      /* The label jumped to by INSN is no longer a loop
                         exit.  Unless INSN does not have a label (e.g.,
                         it is a RETURN insn), search loop->exit_labels
                         to find its label_ref, and remove it.  Also turn
                         off LABEL_OUTSIDE_LOOP_P bit.  */
                      if (JUMP_LABEL (insn))
                          for (q = 0, r = this_loop->exit_labels;
                               q = r, r = LABEL_NEXTREF (r))
                            if (XEXP (r, 0) == JUMP_LABEL (insn))
                                LABEL_OUTSIDE_LOOP_P (r) = 0;
                                  LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
                                  this_loop->exit_labels = LABEL_NEXTREF (r);

                          for (loop = this_loop; loop && loop != target_loop;

                          /* If we didn't find it, then something is
                             wrong.  */

                      /* P is now a jump outside the loop, so it must be put
                         in loop->exit_labels, and marked as such.
                         The easiest way to do this is to just call
                         mark_loop_jump again for P.  */
                      mark_loop_jump (PATTERN (p), this_loop);

                      /* If INSN now jumps to the insn after it,
                         delete INSN.  */
                      if (JUMP_LABEL (insn) != 0
                          && (next_real_insn (JUMP_LABEL (insn))
                              == next_real_insn (insn)))
                        delete_related_insns (insn);

                  /* Continue the loop after where the conditional
                     branch used to jump, since the only branch insn
                     in the block (if it still remains) is an inter-loop
                     branch and hence needs no processing.  */
                  insn = NEXT_INSN (cond_label);

                  if (--LABEL_NUSES (cond_label) == 0)
                    delete_related_insns (cond_label);

                  /* This loop will be continued with NEXT_INSN (insn).  */
                  insn = PREV_INSN (insn);
/* If any label in X jumps to a loop different from LOOP_NUM and any of the
   loops it is contained in, mark the target loop invalid.

   For speed, we assume that X is part of a pattern of a JUMP_INSN.  */

mark_loop_jump (x, loop)

  struct loop *dest_loop;
  struct loop *outer_loop;

  switch (GET_CODE (x))
      /* There could be a label reference in here.  */
      mark_loop_jump (XEXP (x, 0), loop);

      mark_loop_jump (XEXP (x, 0), loop);
      mark_loop_jump (XEXP (x, 1), loop);

      /* This may refer to a LABEL_REF or SYMBOL_REF.  */
      mark_loop_jump (XEXP (x, 1), loop);

      mark_loop_jump (XEXP (x, 0), loop);

        dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];

      /* Link together all labels that branch outside the loop.  This
         is used by final_[bg]iv_value and the loop unrolling code.  Also
         mark this LABEL_REF so we know that this branch should predict
         false.  */

      /* A check to make sure the label is not in an inner nested loop,
         since this does not count as a loop exit.  */
          for (outer_loop = dest_loop; outer_loop;
               outer_loop = outer_loop->outer)
            if (outer_loop == loop)

      if (loop && ! outer_loop)
          LABEL_OUTSIDE_LOOP_P (x) = 1;
          LABEL_NEXTREF (x) = loop->exit_labels;
          loop->exit_labels = x;

          for (outer_loop = loop;
               outer_loop && outer_loop != dest_loop;
               outer_loop = outer_loop->outer)
            outer_loop->exit_count++;

      /* If this is inside a loop, but not in the current loop or one enclosed
         by it, it invalidates at least one loop.  */

      /* We must invalidate every nested loop containing the target of this
         label, except those that also contain the jump insn.  */

      for (; dest_loop; dest_loop = dest_loop->outer)
          /* Stop when we reach a loop that also contains the jump insn.  */
          for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
            if (dest_loop == outer_loop)

          /* If we get here, we know we need to invalidate a loop.  */
          if (loop_dump_stream && ! dest_loop->invalid)
            fprintf (loop_dump_stream,
                     "\nLoop at %d ignored due to multiple entry points.\n",
                     INSN_UID (dest_loop->start));

          dest_loop->invalid = 1;

      /* If this is not setting pc, ignore.  */
      if (SET_DEST (x) == pc_rtx)
        mark_loop_jump (SET_SRC (x), loop);

      mark_loop_jump (XEXP (x, 1), loop);
      mark_loop_jump (XEXP (x, 2), loop);

      for (i = 0; i < XVECLEN (x, 0); i++)
        mark_loop_jump (XVECEXP (x, 0, i), loop);

      for (i = 0; i < XVECLEN (x, 1); i++)
        mark_loop_jump (XVECEXP (x, 1, i), loop);

      /* Strictly speaking this is not a jump into the loop, only a possible
         jump out of the loop.  However, we have no way to link the destination
         of this jump onto the list of exit labels.  To be safe we mark this
         loop and any containing loops as invalid.  */

      for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
          if (loop_dump_stream && ! outer_loop->invalid)
            fprintf (loop_dump_stream,
                     "\nLoop at %d ignored due to unknown exit jump.\n",
                     INSN_UID (outer_loop->start));
          outer_loop->invalid = 1;
/* Return nonzero if there is a label in the range from
   insn INSN to and including the insn whose luid is END.
   INSN must have an assigned luid (i.e., it must not have
   been previously created by loop.c).  */

labels_in_range_p (insn, end)

  while (insn && INSN_LUID (insn) <= end)
      if (GET_CODE (insn) == CODE_LABEL)
      insn = NEXT_INSN (insn);
/* Record that a memory reference X is being set.  */

note_addr_stored (x, y, data)
     rtx y ATTRIBUTE_UNUSED;
     void *data ATTRIBUTE_UNUSED;

  struct loop_info *loop_info = data;

  if (x == 0 || GET_CODE (x) != MEM)

  /* Count number of memory writes.
     This affects heuristics in strength_reduce.  */
  loop_info->num_mem_sets++;

  /* BLKmode MEM means all memory is clobbered.  */
  if (GET_MODE (x) == BLKmode)
      if (RTX_UNCHANGING_P (x))
        loop_info->unknown_constant_address_altered = 1;
        loop_info->unknown_address_altered = 1;

  loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
                                             loop_info->store_mems);
/* X is a value modified by an INSN that references a biv inside a loop
   exit test (i.e., X is somehow related to the value of the biv).  If X
   is a pseudo that is used more than once, then the biv is (effectively)
   used more than once.  DATA is a pointer to a loop_regs structure.  */

note_set_pseudo_multiple_uses (x, y, data)
     rtx y ATTRIBUTE_UNUSED;

  struct loop_regs *regs = (struct loop_regs *) data;

  while (GET_CODE (x) == STRICT_LOW_PART
         || GET_CODE (x) == SIGN_EXTRACT
         || GET_CODE (x) == ZERO_EXTRACT
         || GET_CODE (x) == SUBREG)

  if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)

  /* If we do not have usage information, or if we know the register
     is used more than once, note that fact for check_dbra_loop.  */
  if (REGNO (x) >= max_reg_before_loop
      || ! regs->array[REGNO (x)].single_usage
      || regs->array[REGNO (x)].single_usage == const0_rtx)
    regs->multiple_uses = 1;
/* Return nonzero if the rtx X is invariant over the current loop.

   The value is 2 if we refer to something only conditionally invariant.

   A memory ref is invariant if it is not volatile and does not conflict
   with anything stored in `loop_info->store_mems'.  */

loop_invariant_p (loop, x)
     const struct loop *loop;

  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int conditional = 0;

  code = GET_CODE (x);

      /* A LABEL_REF is normally invariant, however, if we are unrolling
         loops, and this label is inside the loop, then it isn't invariant.
         This is because each unrolled copy of the loop body will have
         a copy of this label.  If this was invariant, then an insn loading
         the address of this label into a register might get moved outside
         the loop, and then each loop body would end up using the same label.

         We don't know the loop bounds here though, so just fail for all
         labels.  */
      if (flag_old_unroll_loops)

    case UNSPEC_VOLATILE:

      /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
         since the reg might be set by initialization within the loop.  */

      if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
           || x == arg_pointer_rtx || x == pic_offset_table_rtx)
          && ! current_function_has_nonlocal_goto)

      if (LOOP_INFO (loop)->has_call
          && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])

      /* Out-of-range regs can occur when we are called from unrolling.
         These have always been created by the unroller and are set in
         the loop, hence are never invariant.  */

      if (REGNO (x) >= (unsigned) regs->num)

      if (regs->array[REGNO (x)].set_in_loop < 0)

      return regs->array[REGNO (x)].set_in_loop == 0;

      /* Volatile memory references must be rejected.  Do this before
         checking for read-only items, so that volatile read-only items
         will be rejected also.  */
      if (MEM_VOLATILE_P (x))

      /* See if there is any dependence between a store and this load.  */
      mem_list_entry = loop_info->store_mems;
      while (mem_list_entry)
          if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,

          mem_list_entry = XEXP (mem_list_entry, 1);

      /* It's not invalidated by a store in memory
         but we must still verify the address is invariant.  */

      /* Don't mess with insns declared volatile.  */
      if (MEM_VOLATILE_P (x))

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          int tem = loop_invariant_p (loop, XEXP (x, i));
      else if (fmt[i] == 'E')
          for (j = 0; j < XVECLEN (x, i); j++)
              int tem = loop_invariant_p (loop, XVECEXP (x, i, j));

  return 1 + conditional;
/* Return nonzero if all the insns in the loop that set REG
   are INSN and the immediately following insns,
   and if each of those insns sets REG in an invariant way
   (not counting uses of REG in them).

   The value is 2 if some of these insns are only conditionally invariant.

   We assume that INSN itself is the first set of REG
   and that its source is invariant.  */

consec_sets_invariant_p (loop, reg, n_sets, insn)
     const struct loop *loop;

  struct loop_regs *regs = LOOP_REGS (loop);
  unsigned int regno = REGNO (reg);
  /* Number of sets we have to insist on finding after INSN.  */
  int count = n_sets - 1;
  int old = regs->array[regno].set_in_loop;

  /* If N_SETS hit the limit, we can't rely on its value.  */

  regs->array[regno].set_in_loop = 0;

      code = GET_CODE (p);

      /* If library call, skip to end of it.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))

          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && REGNO (SET_DEST (set)) == regno)
          this = loop_invariant_p (loop, SET_SRC (set));
          else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
              /* If this is a libcall, then any invariant REG_EQUAL note is OK.
                 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
                 notes are OK.  */
              this = (CONSTANT_P (XEXP (temp, 0))
                      || (find_reg_note (p, REG_RETVAL, NULL_RTX)
                          && loop_invariant_p (loop, XEXP (temp, 0))));
      else if (code != NOTE)
          regs->array[regno].set_in_loop = old;

  regs->array[regno].set_in_loop = old;
  /* If loop_invariant_p ever returned 2, we return 2.  */
  return 1 + (value & 2);
/* I don't think this condition is sufficient to allow INSN
   to be moved, so we no longer test it.  */

/* Return 1 if all insns in the basic block of INSN and following INSN
   that set REG are invariant according to TABLE.  */

all_sets_invariant_p (reg, insn, table)

  int regno = REGNO (reg);

      code = GET_CODE (p);
      if (code == CODE_LABEL || code == JUMP_INSN)
      if (code == INSN && GET_CODE (PATTERN (p)) == SET
          && GET_CODE (SET_DEST (PATTERN (p))) == REG
          && REGNO (SET_DEST (PATTERN (p))) == regno)
          if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
/* Look at all uses (not sets) of registers in X.  For each, if it is
   the single use, set USAGE[REGNO] to INSN; if there was a previous use in
   a different insn, set USAGE[REGNO] to const0_rtx.  */

find_single_use_in_loop (regs, insn, x)
     struct loop_regs *regs;

  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);

    regs->array[REGNO (x)].single_usage
      = (regs->array[REGNO (x)].single_usage != 0
         && regs->array[REGNO (x)].single_usage != insn)
        ? const0_rtx : insn;
  else if (code == SET)
      /* Don't count SET_DEST if it is a REG; otherwise count things
         in SET_DEST because if a register is partially modified, it won't
         show up as a potential movable so we don't care how USAGE is set
         for it.  */
      if (GET_CODE (SET_DEST (x)) != REG)
        find_single_use_in_loop (regs, insn, SET_DEST (x));
      find_single_use_in_loop (regs, insn, SET_SRC (x));
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        if (fmt[i] == 'e' && XEXP (x, i) != 0)
          find_single_use_in_loop (regs, insn, XEXP (x, i));
        else if (fmt[i] == 'E')
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
/* Count and record any set in X which is contained in INSN.  Update
   REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
   in X.  */

count_one_set (regs, insn, x, last_set)
     struct loop_regs *regs;

  if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
    /* Don't move a reg that has an explicit clobber.
       It's not worth the pain to try to do it correctly.  */
    regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
      rtx dest = SET_DEST (x);
      while (GET_CODE (dest) == SUBREG
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == SIGN_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);
      if (GET_CODE (dest) == REG)
          int regno = REGNO (dest);
          for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
              /* If this is the first setting of this reg
                 in current basic block, and it was set before,
                 it must be set in two basic blocks, so it cannot
                 be moved out of the loop.  */
              if (regs->array[regno].set_in_loop > 0
                regs->array[regno+i].may_not_optimize = 1;
              /* If this is not first setting in current basic block,
                 see if reg was used in between previous one and this.
                 If so, neither one can be moved.  */
              if (last_set[regno] != 0
                  && reg_used_between_p (dest, last_set[regno], insn))
                regs->array[regno+i].may_not_optimize = 1;
              if (regs->array[regno+i].set_in_loop < 127)
                ++regs->array[regno+i].set_in_loop;
              last_set[regno+i] = insn;
/* Given a loop that is bounded by LOOP->START and LOOP->END and that
   is entered at LOOP->SCAN_START, return 1 if the register set in SET
   contained in insn INSN is used by any insn that precedes INSN in
   cyclic order starting from the loop entry point.

   We don't want to use INSN_LUID here because if we restrict INSN to those
   that have a valid INSN_LUID, it means we cannot move an invariant out
   from an inner loop past two loops.  */

loop_reg_used_before_p (loop, set, insn)
     const struct loop *loop;

  rtx reg = SET_DEST (set);

  /* Scan forward checking for register usage.  If we hit INSN, we
     are done.  Otherwise, if we hit LOOP->END, wrap around to LOOP->START.  */
  for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
      if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
/* Information we collect about arrays that we might want to prefetch.  */
struct prefetch_info
  struct iv_class *class;       /* Class this prefetch is based on.  */
  struct induction *giv;        /* GIV this prefetch is based on.  */
  rtx base_address;             /* Start prefetching from this address plus
                                   index.  */
  HOST_WIDE_INT index;
  HOST_WIDE_INT stride;         /* Prefetch stride in bytes in each
                                   iteration.  */
  unsigned int bytes_accessed;  /* Sum of sizes of all accesses to this
                                   prefetch area in one iteration.  */
  unsigned int total_bytes;     /* Total bytes loop will access in this block.
                                   This is set only for loops with known
                                   iteration counts and is 0xffffffff
                                   otherwise.  */
  int prefetch_in_loop;         /* Number of prefetch insns in loop.  */
  int prefetch_before_loop;     /* Number of prefetch insns before loop.  */
  unsigned int write : 1;       /* 1 for read/write prefetches.  */

/* Data used by check_store function.  */
struct check_store_data

static void check_store PARAMS ((rtx, rtx, void *));
static void emit_prefetch_instructions PARAMS ((struct loop *));
static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));

/* Set mem_write when mem_address is found.  Used as callback to
   note_stores.  */
check_store (x, pat, data)
     rtx x, pat ATTRIBUTE_UNUSED;

  struct check_store_data *d = (struct check_store_data *) data;

  if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
/* Like rtx_equal_p, but attempts to swap commutative operands.  This is
   important to get some addresses combined.  Later more sophisticated
   transformations can be added when necessary.

   ??? The same trick with swapping operands is done at several other places.
   It would be nice to develop some common way to handle this.  */
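/* For instance, an address computed as (plus (reg A) (reg B)) compares
   equal here to one computed as (plus (reg B) (reg A)), which lets two
   such accesses share a single prefetch stream.  */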
rtx_equal_for_prefetch_p (x, y)

  enum rtx_code code = GET_CODE (x);

  if (code != GET_CODE (y))

  code = GET_CODE (x);

  if (GET_RTX_CLASS (code) == 'c')
      return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
               && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
              || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
                  && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));

  /* Compare the elements.  If any pair of corresponding elements fails to
     match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          if (XWINT (x, i) != XWINT (y, i))

          if (XINT (x, i) != XINT (y, i))

          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
                                          XVECEXP (y, i, j)) == 0)

          if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)

          if (strcmp (XSTR (x, i), XSTR (y, i)))

          /* These are just backpointers, so they don't matter.  */

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
/* Remove constant addition value from the expression X (when present)
   and return it.  */
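/* For example, when *X is (const (plus (symbol_ref "a") (const_int 12))),
   *X is rewritten to (symbol_ref "a") and 12 is returned; a bare
   (const_int N) simply yields N.  */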
static HOST_WIDE_INT
remove_constant_addition (x)

  HOST_WIDE_INT addval = 0;

  /* Avoid clobbering a shared CONST expression.  */
  if (GET_CODE (exp) == CONST)
      if (GET_CODE (XEXP (exp, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
          && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
          *x = XEXP (XEXP (exp, 0), 0);
          return INTVAL (XEXP (XEXP (exp, 0), 1));

  if (GET_CODE (exp) == CONST_INT)
      addval = INTVAL (exp);

  /* For plus expression recurse on ourself.  */
  else if (GET_CODE (exp) == PLUS)
      addval += remove_constant_addition (&XEXP (exp, 0));
      addval += remove_constant_addition (&XEXP (exp, 1));

      /* In case our parameter was constant, remove extra zero from the
         expression.  */
      if (XEXP (exp, 0) == const0_rtx)
      else if (XEXP (exp, 1) == const0_rtx)
/* Attempt to identify accesses to arrays that are most likely to cause cache
   misses, and emit prefetch instructions a few prefetch blocks forward.

   To detect the arrays we use the GIV information that was collected by the
   strength reduction pass.

   The prefetch instructions are generated after the GIV information is done
   and before the strength reduction process.  The new GIVs are injected into
   the strength reduction tables, so the prefetch addresses are optimized as
   well.

   GIVs are split into base address, stride, and constant addition values.
   GIVs with the same address, stride and close addition values are combined
   into a single prefetch.  Also writes to GIVs are detected, so that prefetch
   for write instructions can be used for the block we write to, on machines
   that support write prefetches.

   Several heuristics are used to determine when to prefetch.  They are
   controlled by defined symbols that can be overridden for each target.  */
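/* Illustrative example of the combining described above: in a loop that
   reads b[i] and b[i + 1] with the same stride, both GIVs share one base
   address and stride and differ only by a small constant index, so they
   are merged into a single prefetch stream; a store to a[i] would form a
   separate stream, marked read/write where the target supports write
   prefetches.  */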
emit_prefetch_instructions (loop)

  int num_prefetches = 0;
  int num_real_prefetches = 0;
  int num_real_write_prefetches = 0;
  int num_prefetches_before = 0;
  int num_write_prefetches_before = 0;
  struct iv_class *bl;
  struct induction *iv;
  struct prefetch_info info[MAX_PREFETCHES];
  struct loop_ivs *ivs = LOOP_IVS (loop);

  /* Consider only loops w/o calls.  When a call is done, the loop is probably
     slow enough to read the memory.  */
  if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");

  /* Don't prefetch in loops known to have few iterations.  */
  if (PREFETCH_NO_LOW_LOOPCNT
      && LOOP_INFO (loop)->n_iterations
      && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
      if (loop_dump_stream)
        fprintf (loop_dump_stream,
                 "Prefetch: ignoring loop: not enough iterations.\n");

  /* Search all induction variables and pick those interesting for the prefetch
     machinery.  */
  for (bl = ivs->list; bl; bl = bl->next)
      struct induction *biv = bl->biv, *biv1;

      /* Expect all BIVs to be executed in each iteration.  This makes our
         analysis more conservative.  */

      /* Discard non-constant additions that we can't handle well yet, and
         BIVs that are executed multiple times; such BIVs ought to be
         handled in the nested loop.  We accept not_every_iteration BIVs,
         since these only result in larger strides and make our
         heuristics more conservative.  */
      if (GET_CODE (biv->add_val) != CONST_INT)
          if (loop_dump_stream)
              fprintf (loop_dump_stream,
                       "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
                       REGNO (biv->src_reg), INSN_UID (biv->insn));
              print_rtl (loop_dump_stream, biv->add_val);
              fprintf (loop_dump_stream, "\n");

      if (biv->maybe_multiple)
          if (loop_dump_stream)
              fprintf (loop_dump_stream,
                       "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
                       REGNO (biv->src_reg), INSN_UID (biv->insn));
              print_rtl (loop_dump_stream, biv->add_val);
              fprintf (loop_dump_stream, "\n");

          basestride += INTVAL (biv1->add_val);
          biv1 = biv1->next_iv;

      if (biv1 || !basestride)

      for (iv = bl->giv; iv; iv = iv->next_iv)
          HOST_WIDE_INT index = 0;
          HOST_WIDE_INT stride = 0;
          int stride_sign = 1;
          struct check_store_data d;
          const char *ignore_reason = NULL;
          int size = GET_MODE_SIZE (GET_MODE (iv));

          /* See whether an induction variable is interesting to us and if
             not, report the reason.  */
          if (iv->giv_type != DEST_ADDR)
            ignore_reason = "giv is not a destination address";

          /* We are interested only in constant stride memory references
             in order to be able to compute density easily.  */
          else if (GET_CODE (iv->mult_val) != CONST_INT)
            ignore_reason = "stride is not constant";

              stride = INTVAL (iv->mult_val) * basestride;

              /* On some targets, reversed order prefetches are not
                 worthwhile.  */
              if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
                ignore_reason = "reversed order stride";

              /* Prefetch of accesses with an extreme stride might not be
                 worthwhile, either.  */
              else if (PREFETCH_NO_EXTREME_STRIDE
                       && stride > PREFETCH_EXTREME_STRIDE)
                ignore_reason = "extreme stride";

              /* Ignore GIVs with varying add values; we can't predict the
                 value for the next iteration.  */
              else if (!loop_invariant_p (loop, iv->add_val))
                ignore_reason = "giv has varying add value";

              /* Ignore GIVs in the nested loops; they ought to have been
                 handled already.  */
              else if (iv->maybe_multiple)
                ignore_reason = "giv is in nested loop";

          if (ignore_reason != NULL)
              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "Prefetch: ignoring giv at %d: %s.\n",
                         INSN_UID (iv->insn), ignore_reason);

          /* Determine the pointer to the basic array we are examining.  It is
             the sum of the BIV's initial value and the GIV's add_val.  */
          address = copy_rtx (iv->add_val);
          temp = copy_rtx (bl->initial_value);

          address = simplify_gen_binary (PLUS, Pmode, temp, address);
          index = remove_constant_addition (&address);

          d.mem_address = *iv->location;

          /* When the GIV is not always executed, we might be better off by
             not dirtying the cache pages.  */
          if (PREFETCH_CONDITIONAL || iv->always_executed)
            note_stores (PATTERN (iv->insn), check_store, &d);
              if (loop_dump_stream)
                fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
                         INSN_UID (iv->insn), "in conditional code.");

          /* Attempt to find another prefetch to the same array and see if we
             can merge this one.  */
          for (i = 0; i < num_prefetches; i++)
            if (rtx_equal_for_prefetch_p (address, info[i].base_address)
                && stride == info[i].stride)
                /* In case both access the same array (same location, just
                   with a small difference in constant indexes), merge the
                   prefetches.  Just do the later one and the earlier one
                   will get prefetched from the previous iteration.
                   The artificial threshold should not be too small, but
                   also not bigger than the small portion of memory usually
                   traversed by a single loop.  */
                if (index >= info[i].index
                    && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
                    info[i].write |= d.mem_write;
                    info[i].bytes_accessed += size;
                    info[i].index = index;
                    info[num_prefetches].base_address = address;

                if (index < info[i].index
                    && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
                    info[i].write |= d.mem_write;
                    info[i].bytes_accessed += size;

          /* Merging failed.  */
          info[num_prefetches].giv = iv;
          info[num_prefetches].class = bl;
          info[num_prefetches].index = index;
          info[num_prefetches].stride = stride;
          info[num_prefetches].base_address = address;
          info[num_prefetches].write = d.mem_write;
          info[num_prefetches].bytes_accessed = size;

          if (num_prefetches >= MAX_PREFETCHES)
              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "Maximal number of prefetches exceeded.\n");

  for (i = 0; i < num_prefetches; i++)
      /* Attempt to calculate the total number of bytes fetched by all
         iterations of the loop.  Avoid overflow.  */
      if (LOOP_INFO (loop)->n_iterations
          && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
              >= LOOP_INFO (loop)->n_iterations))
        info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
        info[i].total_bytes = 0xffffffff;

      density = info[i].bytes_accessed * 100 / info[i].stride;
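      /* The density test below requires that the fraction of each stride
         actually touched, bytes_accessed / stride, exceed
         PREFETCH_DENSE_MEM / 256; DENSITY holds that fraction expressed as
         a percentage.  */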
4079 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4080 if (PREFETCH_ONLY_DENSE_MEM
)
4081 if (density
* 256 > PREFETCH_DENSE_MEM
* 100
4082 && (info
[i
].total_bytes
/ PREFETCH_BLOCK
4083 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN
))
4085 info
[i
].prefetch_before_loop
= 1;
4086 info
[i
].prefetch_in_loop
4087 = (info
[i
].total_bytes
/ PREFETCH_BLOCK
4088 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX
);
4092 info
[i
].prefetch_in_loop
= 0, info
[i
].prefetch_before_loop
= 0;
4093 if (loop_dump_stream
)
4094 fprintf (loop_dump_stream
,
4095 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4096 INSN_UID (info
[i
].giv
->insn
), density
);
4099 info
[i
].prefetch_in_loop
= 1, info
[i
].prefetch_before_loop
= 1;
4101 /* Find how many prefetch instructions we'll use within the loop. */
4102 if (info
[i
].prefetch_in_loop
!= 0)
4104 info
[i
].prefetch_in_loop
= ((info
[i
].stride
+ PREFETCH_BLOCK
- 1)
4106 num_real_prefetches
+= info
[i
].prefetch_in_loop
;
4108 num_real_write_prefetches
+= info
[i
].prefetch_in_loop
;
4112 /* Determine how many iterations ahead to prefetch within the loop, based
4113 on how many prefetches we currently expect to do within the loop. */
4114 if (num_real_prefetches
!= 0)
4116 if ((ahead
= SIMULTANEOUS_PREFETCHES
/ num_real_prefetches
) == 0)
4118 if (loop_dump_stream
)
4119 fprintf (loop_dump_stream
,
4120 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4121 SIMULTANEOUS_PREFETCHES
, num_real_prefetches
);
4122 num_real_prefetches
= 0, num_real_write_prefetches
= 0;
4125 /* We'll also use AHEAD to determine how many prefetch instructions to
4126 emit before a loop, so don't leave it zero. */
4128 ahead
= PREFETCH_BLOCKS_BEFORE_LOOP_MAX
;
4130   for (i = 0; i < num_prefetches; i++)
4132       /* Update if we've decided not to prefetch anything within the loop.  */
4133       if (num_real_prefetches == 0)
4134         info[i].prefetch_in_loop = 0;
4136       /* Find how many prefetch instructions we'll use before the loop.  */
4137       if (info[i].prefetch_before_loop != 0)
4139           int n = info[i].total_bytes / PREFETCH_BLOCK;
4142           info[i].prefetch_before_loop = n;
4143           num_prefetches_before += n;
4145             num_write_prefetches_before += n;
4148       if (loop_dump_stream)
4150           if (info[i].prefetch_in_loop == 0
4151               && info[i].prefetch_before_loop == 0)
4153           fprintf (loop_dump_stream, "Prefetch insn: %d",
4154                    INSN_UID (info[i].giv->insn));
4155           fprintf (loop_dump_stream,
4156                    "; in loop: %d; before: %d; %s\n",
4157                    info[i].prefetch_in_loop,
4158                    info[i].prefetch_before_loop,
4159                    info[i].write ? "read/write" : "read only");
4160           fprintf (loop_dump_stream,
4161                    " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4162                    (int) (info[i].bytes_accessed * 100 / info[i].stride),
4163                    info[i].bytes_accessed, info[i].total_bytes);
4164           fprintf (loop_dump_stream, " index: ");
4165           fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4166           fprintf (loop_dump_stream, "; stride: ");
4167           fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4168           fprintf (loop_dump_stream, "; address: ");
4169           print_rtl (loop_dump_stream, info[i].base_address);
4170           fprintf (loop_dump_stream, "\n");
4174   if (num_real_prefetches + num_prefetches_before > 0)
4176       /* Record that this loop uses prefetch instructions.  */
4177       LOOP_INFO (loop)->has_prefetch = 1;
4179       if (loop_dump_stream)
4181           fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4182                    num_real_prefetches, num_real_write_prefetches);
4183           fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4184                    num_prefetches_before, num_write_prefetches_before);
4188   for (i = 0; i < num_prefetches; i++)
4192       for (y = 0; y < info[i].prefetch_in_loop; y++)
4194           rtx loc = copy_rtx (*info[i].giv->location);
4196           int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4197           rtx before_insn = info[i].giv->insn;
4198           rtx prev_insn = PREV_INSN (info[i].giv->insn);
4201           /* We can save some effort by offsetting the address on
4202              architectures with offsettable memory references.  */
4203           if (offsettable_address_p (0, VOIDmode, loc))
4204             loc = plus_constant (loc, bytes_ahead);
4207               rtx reg = gen_reg_rtx (Pmode);
4208               loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4209                                             GEN_INT (bytes_ahead), reg,
4215           /* Make sure the address operand is valid for prefetch.  */
4216           if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4217                 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4218             loc = force_reg (Pmode, loc);
4219           emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4223           emit_insn_before (seq, before_insn);
4225           /* Check all insns emitted and record the new GIV
4227           insn = NEXT_INSN (prev_insn);
4228           while (insn != before_insn)
4230               insn = check_insn_for_givs (loop, insn,
4231                                           info[i].giv->always_executed,
4232                                           info[i].giv->maybe_multiple);
4233               insn = NEXT_INSN (insn);
4237       if (PREFETCH_BEFORE_LOOP)
4239           /* Emit insns before the loop to fetch the first cache lines or,
4240              if we're not prefetching within the loop, everything we expect
4242           for (y = 0; y < info[i].prefetch_before_loop; y++)
4244               rtx reg = gen_reg_rtx (Pmode);
4245               rtx loop_start = loop->start;
4246               rtx init_val = info[i].class->initial_value;
4247               rtx add_val = simplify_gen_binary (PLUS, Pmode,
4248                                                  info[i].giv->add_val,
4249                                                  GEN_INT (y * PREFETCH_BLOCK));
4251               /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4252                  non-constant INIT_VAL to have the same mode as REG, which
4253                  in this case we know to be Pmode.  */
4254               if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4259                   init_val = convert_to_mode (Pmode, init_val, 0);
4262                   loop_insn_emit_before (loop, 0, loop_start, seq);
4264               loop_iv_add_mult_emit_before (loop, init_val,
4265                                             info[i].giv->mult_val,
4266                                             add_val, reg, 0, loop_start);
4267               emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4277 /* A "basic induction variable" or biv is a pseudo reg that is set
4278 (within this loop) only by incrementing or decrementing it. */
4279 /* A "general induction variable" or giv is a pseudo reg whose
4280 value is a linear function of a biv. */
4282 /* Bivs are recognized by `basic_induction_var';
4283 Givs by `general_induction_var'. */
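/* Illustrative sketch (hypothetical source code, not from this file):
   in a loop such as

     for (i = 0; i < n; i++)
       x[i] = y[4 * i + 8];

   the counter i is a biv (its only sets add a constant to it) and the
   expression 4 * i + 8 is a giv (a linear function of the biv).
   Strength reduction introduces a register that is stepped by the giv's
   stride at each biv increment, giving code equivalent to

     for (i = 0, t = 8; i < n; i++, t += 4)
       x[i] = y[t];

   and if i then survives only in the exit test, biv elimination can
   rewrite that test in terms of t (or of a reduced pointer) so that i
   disappears entirely.  */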
4285 /* Communication with routines called via `note_stores'. */
4287 static rtx note_insn
;
4289 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4291 static rtx addr_placeholder
;
4293 /* ??? Unfinished optimizations, and possible future optimizations,
4294 for the strength reduction code. */
4296 /* ??? The interaction of biv elimination, and recognition of 'constant'
4297 bivs, may cause problems. */
4299 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4300 performance problems.
4302 Perhaps don't eliminate things that can be combined with an addressing
4303 mode. Find all givs that have the same biv, mult_val, and add_val;
4304 then for each giv, check to see if its only use dies in a following
4305 memory address. If so, generate a new memory address and check to see
4306 if it is valid. If it is valid, then store the modified memory address,
4307 otherwise, mark the giv as not done so that it will get its own iv. */
4309 /* ??? Could try to optimize branches when it is known that a biv is always
4312 /* ??? When replacing a biv in a compare insn, we should replace it with the closest
4313 giv so that an optimized branch can still be recognized by the combiner,
4314 e.g. the VAX acb insn. */
4316 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4317 was rerun in loop_optimize whenever a register was added or moved.
4318 Also, some of the optimizations could be a little less conservative. */
4320 /* Scan the loop body and call FNCALL for each insn.  In addition to the
4321    LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4324 NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at
4325 least once for every loop iteration except for the last one.
4327 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
4331 for_each_insn_in_loop (loop, fncall)
4333      loop_insn_callback fncall;
4335   int not_every_iteration = 0;
4336   int maybe_multiple = 0;
4337   int past_loop_latch = 0;
4341   /* If loop_scan_start points to the loop exit test, we have to be wary of
4342      subversive use of gotos inside expression statements.  */
4343   if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4344     maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4346   /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE.  */
4347   for (p = next_insn_in_loop (loop, loop->scan_start);
4349        p = next_insn_in_loop (loop, p))
4351       p = fncall (loop, p, not_every_iteration, maybe_multiple);
4353 /* Past CODE_LABEL, we get to insns that may be executed multiple
4354 times. The only way we can be sure that they can't is if every
4355 jump insn between here and the end of the loop either
4356 returns, exits the loop, is a jump to a location that is still
4357 behind the label, or is a jump to the loop start. */
4359       if (GET_CODE (p) == CODE_LABEL)
4367               insn = NEXT_INSN (insn);
4368               if (insn == loop->scan_start)
4370               if (insn == loop->end)
4376               if (insn == loop->scan_start)
4380               if (GET_CODE (insn) == JUMP_INSN
4381                   && GET_CODE (PATTERN (insn)) != RETURN
4382                   && (!any_condjump_p (insn)
4383                       || (JUMP_LABEL (insn) != 0
4384                           && JUMP_LABEL (insn) != loop->scan_start
4385                           && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4393 /* Past a jump, we get to insns for which we can't count
4394 on whether they will be executed during each iteration. */
4395 /* This code appears twice in strength_reduce. There is also similar
4396 code in scan_loop. */
4397       if (GET_CODE (p) == JUMP_INSN
4398           /* If we enter the loop in the middle, and scan around to the
4399              beginning, don't set not_every_iteration for that.
4400              This can be any kind of jump, since we want to know if insns
4401              will be executed if the loop is executed.  */
4402           && !(JUMP_LABEL (p) == loop->top
4403                && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4404                     && any_uncondjump_p (p))
4405                    || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4409 /* If this is a jump outside the loop, then it also doesn't
4410 matter. Check to see if the target of this branch is on the
4411 loop->exits_labels list. */
4413           for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4414             if (XEXP (label, 0) == JUMP_LABEL (p))
4418             not_every_iteration = 1;
4421       else if (GET_CODE (p) == NOTE)
4423           /* At the virtual top of a converted loop, insns are again known to
4424              be executed each iteration: logically, the loop begins here
4425              even though the exit code has been duplicated.
4427              Insns are also again known to be executed each iteration at
4428              the LOOP_CONT note.  */
4429           if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4430                || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4432             not_every_iteration = 0;
4433           else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4435           else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4439 /* Note if we pass a loop latch. If we do, then we can not clear
4440 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4441 a loop since a jump before the last CODE_LABEL may have started
4442 a new loop iteration.
4444 Note that LOOP_TOP is only set for rotated loops and we need
4445 this check for all loops, so compare against the CODE_LABEL
4446 which immediately follows LOOP_START. */
4447       if (GET_CODE (p) == JUMP_INSN
4448           && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4449         past_loop_latch = 1;
4451 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4452 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4453 or not an insn is known to be executed each iteration of the
4454 loop, whether or not any iterations are known to occur.
4456 Therefore, if we have just passed a label and have no more labels
4457 between here and the test insn of the loop, and we have not passed
4458 a jump to the top of the loop, then we know these insns will be
4459 executed each iteration. */
4461       if (not_every_iteration
4463           && GET_CODE (p) == CODE_LABEL
4464           && no_labels_between_p (p, loop->end)
4465           && loop_insn_first_p (p, loop->cont))
4466         not_every_iteration = 0;
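/* Illustrative sketch (hypothetical source code, not from this file):
   the two flags passed to FNCALL describe situations like

     for (i = 0; i < n; i++)
       {
        retry:
         t = a[i];        MAYBE_MULTIPLE: the backward goto below can
         if (t == 0)       execute the insns after the label more than
           goto retry;     once within a single iteration
         if (t > limit)
           continue;
         b[i] = t;        NOT_EVERY_ITERATION: the continue above may
       }                   skip this store on some iterations

   so callers such as check_insn_for_bivs and check_insn_for_givs can
   record how reliably each candidate induction variable is updated.  */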
4471 loop_bivs_find (loop
)
4474 struct loop_regs
*regs
= LOOP_REGS (loop
);
4475 struct loop_ivs
*ivs
= LOOP_IVS (loop
);
4476 /* Temporary list pointers for traversing ivs->list. */
4477 struct iv_class
*bl
, **backbl
;
4481 for_each_insn_in_loop (loop
, check_insn_for_bivs
);
4483 /* Scan ivs->list to remove all regs that proved not to be bivs.
4484 Make a sanity check against regs->n_times_set. */
4485 for (backbl
= &ivs
->list
, bl
= *backbl
; bl
; bl
= bl
->next
)
4487 if (REG_IV_TYPE (ivs
, bl
->regno
) != BASIC_INDUCT
4488 /* Above happens if register modified by subreg, etc. */
4489 /* Make sure it is not recognized as a basic induction var: */
4490 || regs
->array
[bl
->regno
].n_times_set
!= bl
->biv_count
4491 /* If never incremented, it is invariant that we decided not to
4492 move. So leave it alone. */
4493 || ! bl
->incremented
)
4495 if (loop_dump_stream
)
4496 fprintf (loop_dump_stream
, "Biv %d: discarded, %s\n",
4498 (REG_IV_TYPE (ivs
, bl
->regno
) != BASIC_INDUCT
4499 ? "not induction variable"
4500 : (! bl
->incremented
? "never incremented"
4503 REG_IV_TYPE (ivs
, bl
->regno
) = NOT_BASIC_INDUCT
;
4510 if (loop_dump_stream
)
4511 fprintf (loop_dump_stream
, "Biv %d: verified\n", bl
->regno
);
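/* Illustrative sketch (hypothetical source code, not from this file):
   after the scan above, a register survives as a biv only if every set
   inside the loop adds a constant (or loop invariant) to its previous
   value.  For example, in

     for (i = 0; i < n; i++)
       {
         p += 4;        p is verified as a biv
         q = base;      q is discarded as "never incremented": it is only
       }                 set to an invariant, so it is really loop
                         invariant and is left for the code motion pass

   and a register that is also written through a SUBREG, or whose recorded
   set count disagrees with regs->array[].n_times_set, is discarded as
   "not induction variable".  */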
4517 /* Determine how BIVS are initialized by looking through pre-header
4518 extended basic block. */
4520 loop_bivs_init_find (loop
)
4523 struct loop_ivs
*ivs
= LOOP_IVS (loop
);
4524 /* Temporary list pointers for traversing ivs->list. */
4525 struct iv_class
*bl
;
4529 /* Find initial value for each biv by searching backwards from loop_start,
4530 halting at first label. Also record any test condition. */
4533 for (p
= loop
->start
; p
&& GET_CODE (p
) != CODE_LABEL
; p
= PREV_INSN (p
))
4539 if (GET_CODE (p
) == CALL_INSN
)
4543 note_stores (PATTERN (p
), record_initial
, ivs
);
4545 /* Record any test of a biv that branches around the loop if no store
4546 between it and the start of loop. We only care about tests with
4547 constants and registers and only certain of those. */
4548 if (GET_CODE (p
) == JUMP_INSN
4549 && JUMP_LABEL (p
) != 0
4550 && next_real_insn (JUMP_LABEL (p
)) == next_real_insn (loop
->end
)
4551 && (test
= get_condition_for_loop (loop
, p
)) != 0
4552 && GET_CODE (XEXP (test
, 0)) == REG
4553 && REGNO (XEXP (test
, 0)) < max_reg_before_loop
4554 && (bl
= REG_IV_CLASS (ivs
, REGNO (XEXP (test
, 0)))) != 0
4555 && valid_initial_value_p (XEXP (test
, 1), p
, call_seen
, loop
->start
)
4556 && bl
->init_insn
== 0)
4558 /* If an NE test, we have an initial value! */
4559 if (GET_CODE (test
) == NE
)
4562 bl
->init_set
= gen_rtx_SET (VOIDmode
,
4563 XEXP (test
, 0), XEXP (test
, 1));
4566 bl
->initial_test
= test
;
4572 /* Look at each biv and see if we can say anything better about its
4573 initial value from any initializing insns set up above. (This is done
4574 in two passes to avoid missing SETs in a PARALLEL.) */
4576 loop_bivs_check (loop
)
4579 struct loop_ivs
*ivs
= LOOP_IVS (loop
);
4580 /* Temporary list pointers for traversing ivs->list. */
4581 struct iv_class
*bl
;
4582 struct iv_class
**backbl
;
4584 for (backbl
= &ivs
->list
; (bl
= *backbl
); backbl
= &bl
->next
)
4589 if (! bl
->init_insn
)
4592 /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4593 is a constant, use the value of that. */
4594 if (((note
= find_reg_note (bl
->init_insn
, REG_EQUAL
, 0)) != NULL
4595 && CONSTANT_P (XEXP (note
, 0)))
4596 || ((note
= find_reg_note (bl
->init_insn
, REG_EQUIV
, 0)) != NULL
4597 && CONSTANT_P (XEXP (note
, 0))))
4598 src
= XEXP (note
, 0);
4600 src
= SET_SRC (bl
->init_set
);
4602 if (loop_dump_stream
)
4603 fprintf (loop_dump_stream
,
4604 "Biv %d: initialized at insn %d: initial value ",
4605 bl
->regno
, INSN_UID (bl
->init_insn
));
4607 if ((GET_MODE (src
) == GET_MODE (regno_reg_rtx
[bl
->regno
])
4608 || GET_MODE (src
) == VOIDmode
)
4609 && valid_initial_value_p (src
, bl
->init_insn
,
4610 LOOP_INFO (loop
)->pre_header_has_call
,
4613 bl
->initial_value
= src
;
4615 if (loop_dump_stream
)
4617 print_simple_rtl (loop_dump_stream
, src
);
4618 fputc ('\n', loop_dump_stream
);
4621 /* If we can't make it a giv,
4622 let biv keep initial value of "itself". */
4623 else if (loop_dump_stream
)
4624 fprintf (loop_dump_stream
, "is complex\n");
4629 /* Search the loop for general induction variables. */
4632 loop_givs_find (loop
)
4635 for_each_insn_in_loop (loop
, check_insn_for_givs
);
4639 /* For each giv for which we still don't know whether or not it is
4640 replaceable, check to see if it is replaceable because its final value
4641 can be calculated. */
4644 loop_givs_check (loop
)
4647 struct loop_ivs
*ivs
= LOOP_IVS (loop
);
4648 struct iv_class
*bl
;
4650 for (bl
= ivs
->list
; bl
; bl
= bl
->next
)
4652 struct induction
*v
;
4654 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4655 if (! v
->replaceable
&& ! v
->not_replaceable
)
4656 check_final_value (loop
, v
);
4661 /* Return nonzero if it is possible to eliminate the biv BL provided
4662 all givs are reduced. This is possible if either the reg is not
4663 used outside the loop, or we can compute what its final value will
4667 loop_biv_eliminable_p (loop
, bl
, threshold
, insn_count
)
4669 struct iv_class
*bl
;
4673 /* For architectures with a decrement_and_branch_until_zero insn,
4674 don't do this if we put a REG_NONNEG note on the endtest for this
4677 #ifdef HAVE_decrement_and_branch_until_zero
4680 if (loop_dump_stream
)
4681 fprintf (loop_dump_stream
,
4682 "Cannot eliminate nonneg biv %d.\n", bl
->regno
);
4687 /* Check that biv is used outside loop or if it has a final value.
4688 Compare against bl->init_insn rather than loop->start. We aren't
4689 concerned with any uses of the biv between init_insn and
4690 loop->start since these won't be affected by the value of the biv
4691 elsewhere in the function, so long as init_insn doesn't use the
4694 if ((REGNO_LAST_LUID (bl
->regno
) < INSN_LUID (loop
->end
)
4696 && INSN_UID (bl
->init_insn
) < max_uid_for_loop
4697 && REGNO_FIRST_LUID (bl
->regno
) >= INSN_LUID (bl
->init_insn
)
4698 && ! reg_mentioned_p (bl
->biv
->dest_reg
, SET_SRC (bl
->init_set
)))
4699 || (bl
->final_value
= final_biv_value (loop
, bl
)))
4700 return maybe_eliminate_biv (loop
, bl
, 0, threshold
, insn_count
);
4702 if (loop_dump_stream
)
4704 fprintf (loop_dump_stream
,
4705 "Cannot eliminate biv %d.\n",
4707 fprintf (loop_dump_stream
,
4708 "First use: insn %d, last use: insn %d.\n",
4709 REGNO_FIRST_UID (bl
->regno
),
4710 REGNO_LAST_UID (bl
->regno
));
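/* Illustrative sketch (hypothetical source code, not from this file):
   the typical eliminable case is a counter used only for addressing and
   for the exit test, e.g.

     for (i = 0; i < n; i++)
       p[i] = 0;

   Once the giv for &p[i] is reduced to a pointer bumped each iteration,
   i is not needed outside the loop (or its final value is computable),
   so maybe_eliminate_biv can rewrite the exit test against the reduced
   pointer and the increment of i can be deleted.  */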
4716 /* Reduce each giv of BL that we have decided to reduce. */
4719 loop_givs_reduce (loop
, bl
)
4721 struct iv_class
*bl
;
4723 struct induction
*v
;
4725 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4727 struct induction
*tv
;
4728 if (! v
->ignore
&& v
->same
== 0)
4730 int auto_inc_opt
= 0;
4732 /* If the code for derived givs immediately below has already
4733 allocated a new_reg, we must keep it. */
4735 v
->new_reg
= gen_reg_rtx (v
->mode
);
4738 /* If the target has auto-increment addressing modes, and
4739 this is an address giv, then try to put the increment
4740 immediately after its use, so that flow can create an
4741 auto-increment addressing mode. */
4742 if (v
->giv_type
== DEST_ADDR
&& bl
->biv_count
== 1
4743 && bl
->biv
->always_executed
&& ! bl
->biv
->maybe_multiple
4744 /* We don't handle reversed biv's because bl->biv->insn
4745 does not have a valid INSN_LUID. */
4747 && v
->always_executed
&& ! v
->maybe_multiple
4748 && INSN_UID (v
->insn
) < max_uid_for_loop
)
4750 /* If other giv's have been combined with this one, then
4751 this will work only if all uses of the other giv's occur
4752 before this giv's insn. This is difficult to check.
4754 We simplify this by looking for the common case where
4755 there is one DEST_REG giv, and this giv's insn is the
4756 last use of the dest_reg of that DEST_REG giv. If the
4757 increment occurs after the address giv, then we can
4758 perform the optimization. (Otherwise, the increment
4759 would have to go before other_giv, and we would not be
4760 able to combine it with the address giv to get an
4761 auto-inc address.) */
4762 if (v
->combined_with
)
4764 struct induction
*other_giv
= 0;
4766 for (tv
= bl
->giv
; tv
; tv
= tv
->next_iv
)
4774 if (! tv
&& other_giv
4775 && REGNO (other_giv
->dest_reg
) < max_reg_before_loop
4776 && (REGNO_LAST_UID (REGNO (other_giv
->dest_reg
))
4777 == INSN_UID (v
->insn
))
4778 && INSN_LUID (v
->insn
) < INSN_LUID (bl
->biv
->insn
))
4781 /* Check for case where increment is before the address
4782 giv. Do this test in "loop order". */
4783 else if ((INSN_LUID (v
->insn
) > INSN_LUID (bl
->biv
->insn
)
4784 && (INSN_LUID (v
->insn
) < INSN_LUID (loop
->scan_start
)
4785 || (INSN_LUID (bl
->biv
->insn
)
4786 > INSN_LUID (loop
->scan_start
))))
4787 || (INSN_LUID (v
->insn
) < INSN_LUID (loop
->scan_start
)
4788 && (INSN_LUID (loop
->scan_start
)
4789 < INSN_LUID (bl
->biv
->insn
))))
4798 /* We can't put an insn immediately after one setting
4799 cc0, or immediately before one using cc0. */
4800 if ((auto_inc_opt
== 1 && sets_cc0_p (PATTERN (v
->insn
)))
4801 || (auto_inc_opt
== -1
4802 && (prev
= prev_nonnote_insn (v
->insn
)) != 0
4804 && sets_cc0_p (PATTERN (prev
))))
4810 v
->auto_inc_opt
= 1;
4814 /* For each place where the biv is incremented, add an insn
4815 to increment the new, reduced reg for the giv. */
4816 for (tv
= bl
->biv
; tv
; tv
= tv
->next_iv
)
4821 insert_before
= NEXT_INSN (tv
->insn
);
4822 else if (auto_inc_opt
== 1)
4823 insert_before
= NEXT_INSN (v
->insn
);
4825 insert_before
= v
->insn
;
4827 if (tv
->mult_val
== const1_rtx
)
4828 loop_iv_add_mult_emit_before (loop
, tv
->add_val
, v
->mult_val
,
4829 v
->new_reg
, v
->new_reg
,
4831 else /* tv->mult_val == const0_rtx */
4832 /* A multiply is acceptable here
4833 since this is presumed to be seldom executed. */
4834 loop_iv_add_mult_emit_before (loop
, tv
->add_val
, v
->mult_val
,
4835 v
->add_val
, v
->new_reg
,
4839 /* Add code at loop start to initialize giv's reduced reg. */
4841 loop_iv_add_mult_hoist (loop
,
4842 extend_value_for_giv (v
, bl
->initial_value
),
4843 v
->mult_val
, v
->add_val
, v
->new_reg
);
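/* Illustrative sketch (hypothetical source code, not from this file):
   the auto-increment placement above aims at sequences such as

     *p = x;           placing the "p = p + 4" increment immediately
     p = p + 4;         after the address giv's use lets the flow pass
                        turn the pair into a single post-increment
                        store, roughly *p++ = x, on targets that
                        provide HAVE_POST_INCREMENT.  */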
4849 /* Check for givs whose first use is their definition and whose
4850 last use is the definition of another giv. If so, it is likely
4851 dead and should not be used to derive another giv nor to
4855 loop_givs_dead_check (loop
, bl
)
4856 struct loop
*loop ATTRIBUTE_UNUSED
;
4857 struct iv_class
*bl
;
4859 struct induction
*v
;
4861 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4864 || (v
->same
&& v
->same
->ignore
))
4867 if (v
->giv_type
== DEST_REG
4868 && REGNO_FIRST_UID (REGNO (v
->dest_reg
)) == INSN_UID (v
->insn
))
4870 struct induction
*v1
;
4872 for (v1
= bl
->giv
; v1
; v1
= v1
->next_iv
)
4873 if (REGNO_LAST_UID (REGNO (v
->dest_reg
)) == INSN_UID (v1
->insn
))
4881 loop_givs_rescan (loop
, bl
, reg_map
)
4883 struct iv_class
*bl
;
4886 struct induction
*v
;
4888 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
4890 if (v
->same
&& v
->same
->ignore
)
4896 /* Update expression if this was combined, in case other giv was
4899 v
->new_reg
= replace_rtx (v
->new_reg
,
4900 v
->same
->dest_reg
, v
->same
->new_reg
);
4902 /* See if this register is known to be a pointer to something. If
4903 so, see if we can find the alignment. First see if there is a
4904 destination register that is a pointer. If so, this shares the
4905 alignment too. Next see if we can deduce anything from the
4906 computational information. If not, and this is a DEST_ADDR
4907 giv, at least we know that it's a pointer, though we don't know
4909 if (GET_CODE (v
->new_reg
) == REG
4910 && v
->giv_type
== DEST_REG
4911 && REG_POINTER (v
->dest_reg
))
4912 mark_reg_pointer (v
->new_reg
,
4913 REGNO_POINTER_ALIGN (REGNO (v
->dest_reg
)));
4914 else if (GET_CODE (v
->new_reg
) == REG
4915 && REG_POINTER (v
->src_reg
))
4917 unsigned int align
= REGNO_POINTER_ALIGN (REGNO (v
->src_reg
));
4920 || GET_CODE (v
->add_val
) != CONST_INT
4921 || INTVAL (v
->add_val
) % (align
/ BITS_PER_UNIT
) != 0)
4924 mark_reg_pointer (v
->new_reg
, align
);
4926 else if (GET_CODE (v
->new_reg
) == REG
4927 && GET_CODE (v
->add_val
) == REG
4928 && REG_POINTER (v
->add_val
))
4930 unsigned int align
= REGNO_POINTER_ALIGN (REGNO (v
->add_val
));
4932 if (align
== 0 || GET_CODE (v
->mult_val
) != CONST_INT
4933 || INTVAL (v
->mult_val
) % (align
/ BITS_PER_UNIT
) != 0)
4936 mark_reg_pointer (v
->new_reg
, align
);
4938 else if (GET_CODE (v
->new_reg
) == REG
&& v
->giv_type
== DEST_ADDR
)
4939 mark_reg_pointer (v
->new_reg
, 0);
4941 if (v
->giv_type
== DEST_ADDR
)
4942 /* Store reduced reg as the address in the memref where we found
4944 validate_change (v
->insn
, v
->location
, v
->new_reg
, 0);
4945 else if (v
->replaceable
)
4947 reg_map
[REGNO (v
->dest_reg
)] = v
->new_reg
;
4951 rtx original_insn
= v
->insn
;
4954 /* Not replaceable; emit an insn to set the original giv reg from
4955 the reduced giv, same as above. */
4956 v
->insn
= loop_insn_emit_after (loop
, 0, original_insn
,
4957 gen_move_insn (v
->dest_reg
,
4960 /* The original insn may have a REG_EQUAL note. This note is
4961 now incorrect and may result in invalid substitutions later.
4962 The original insn is dead, but may be part of a libcall
4963 sequence, which doesn't seem worth the bother of handling. */
4964 note
= find_reg_note (original_insn
, REG_EQUAL
, NULL_RTX
);
4966 remove_note (original_insn
, note
);
4969 /* When a loop is reversed, givs which depend on the reversed
4970 biv, and which are live outside the loop, must be set to their
4971 correct final value. This insn is only needed if the giv is
4972 not replaceable. The correct final value is the same as the
4973 value that the giv starts the reversed loop with. */
4974 if (bl
->reversed
&& ! v
->replaceable
)
4975 loop_iv_add_mult_sink (loop
,
4976 extend_value_for_giv (v
, bl
->initial_value
),
4977 v
->mult_val
, v
->add_val
, v
->dest_reg
);
4978 else if (v
->final_value
)
4979 loop_insn_sink_or_swim (loop
,
4980 gen_load_of_final_value (v
->dest_reg
,
4983 if (loop_dump_stream
)
4985 fprintf (loop_dump_stream
, "giv at %d reduced to ",
4986 INSN_UID (v
->insn
));
4987 print_simple_rtl (loop_dump_stream
, v
->new_reg
);
4988 fprintf (loop_dump_stream
, "\n");
4995 loop_giv_reduce_benefit (loop
, bl
, v
, test_reg
)
4996 struct loop
*loop ATTRIBUTE_UNUSED
;
4997 struct iv_class
*bl
;
4998 struct induction
*v
;
5004 benefit
= v
->benefit
;
5005 PUT_MODE (test_reg
, v
->mode
);
5006 add_cost
= iv_add_mult_cost (bl
->biv
->add_val
, v
->mult_val
,
5007 test_reg
, test_reg
);
5009 /* Reduce benefit if not replaceable, since we will insert a
5010 move-insn to replace the insn that calculates this giv. Don't do
5011 this unless the giv is a user variable, since it will often be
5012 marked non-replaceable because of the duplication of the exit
5013 code outside the loop. In such a case, the copies we insert are
5014 dead and will be deleted. So they don't have a cost. Similar
5015 situations exist. */
5016 /* ??? The new final_[bg]iv_value code does a much better job of
5017 finding replaceable giv's, and hence this code may no longer be
5019 if (! v
->replaceable
&& ! bl
->eliminable
5020 && REG_USERVAR_P (v
->dest_reg
))
5021 benefit
-= copy_cost
;
5023 /* Decrease the benefit to count the add-insns that we will insert
5024 to increment the reduced reg for the giv. ??? This can
5025 overestimate the run-time cost of the additional insns, e.g. if
5026 there are multiple basic blocks that increment the biv, but only
5027 one of these blocks is executed during each iteration. There is
5028 no good way to detect cases like this with the current structure
5029 of the loop optimizer. This code is more accurate for
5030 determining code size than run-time benefits. */
5031 benefit
-= add_cost
* bl
->biv_count
;
5033 /* Decide whether to strength-reduce this giv or to leave the code
5034 unchanged (recompute it from the biv each time it is used). This
5035 decision can be made independently for each giv. */
5038 /* Attempt to guess whether autoincrement will handle some of the
5039 new add insns; if so, increase BENEFIT (undo the subtraction of
5040 add_cost that was done above). */
5041 if (v
->giv_type
== DEST_ADDR
5042 /* Increasing the benefit is risky, since this is only a guess.
5043 Avoid increasing register pressure in cases where there would
5044 be no other benefit from reducing this giv. */
5046 && GET_CODE (v
->mult_val
) == CONST_INT
)
5048 int size
= GET_MODE_SIZE (GET_MODE (v
->mem
));
5050 if (HAVE_POST_INCREMENT
5051 && INTVAL (v
->mult_val
) == size
)
5052 benefit
+= add_cost
* bl
->biv_count
;
5053 else if (HAVE_PRE_INCREMENT
5054 && INTVAL (v
->mult_val
) == size
)
5055 benefit
+= add_cost
* bl
->biv_count
;
5056 else if (HAVE_POST_DECREMENT
5057 && -INTVAL (v
->mult_val
) == size
)
5058 benefit
+= add_cost
* bl
->biv_count
;
5059 else if (HAVE_PRE_DECREMENT
5060 && -INTVAL (v
->mult_val
) == size
)
5061 benefit
+= add_cost
* bl
->biv_count
;
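/* Illustrative worked example (made-up numbers, not from this file):
   starting from benefit = 6 with copy_cost = 4, add_cost = 2 and
   biv_count = 1, a non-replaceable user-variable giv ends up at
   6 - 4 - 2 * 1 = 0 and is unlikely to be reduced, whereas a DEST_ADDR
   giv whose mult_val matches the access size on a post-increment target
   gets the 2 * 1 added back and keeps a net benefit of 6 - 2 + 2 = 6.  */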
5069 /* Free IV structures for LOOP.  */
5072 loop_ivs_free (loop)
5075   struct loop_ivs *ivs = LOOP_IVS (loop);
5076   struct iv_class *iv = ivs->list;
5082       struct iv_class *next = iv->next;
5083       struct induction *induction;
5084       struct induction *next_induction;
5086       for (induction = iv->biv; induction; induction = next_induction)
5088           next_induction = induction->next_iv;
5091       for (induction = iv->giv; induction; induction = next_induction)
5093           next_induction = induction->next_iv;
5103 /* Perform strength reduction and induction variable elimination.
5105 Pseudo registers created during this function will be beyond the
5106 last valid index in several tables including
5107 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5108 problem here, because the added registers cannot be givs outside of
5109 their loop, and hence will never be reconsidered. But scan_loop
5110 must check regnos to make sure they are in bounds. */
5113 strength_reduce (loop
, flags
)
5117 struct loop_info
*loop_info
= LOOP_INFO (loop
);
5118 struct loop_regs
*regs
= LOOP_REGS (loop
);
5119 struct loop_ivs
*ivs
= LOOP_IVS (loop
);
5121 /* Temporary list pointer for traversing ivs->list. */
5122 struct iv_class
*bl
;
5123 /* Ratio of extra register life span we can justify
5124 for saving an instruction. More if loop doesn't call subroutines
5125 since in that case saving an insn makes more difference
5126 and more registers are available. */
5127 /* ??? could set this to last value of threshold in move_movables */
5128 int threshold
= (loop_info
->has_call
? 1 : 2) * (3 + n_non_fixed_regs
);
5129 /* Map of pseudo-register replacements. */
5130 rtx
*reg_map
= NULL
;
5132 int unrolled_insn_copies
= 0;
5133 rtx test_reg
= gen_rtx_REG (word_mode
, LAST_VIRTUAL_REGISTER
+ 1);
5134 int insn_count
= count_insns_in_loop (loop
);
5136 addr_placeholder
= gen_reg_rtx (Pmode
);
5138 ivs
->n_regs
= max_reg_before_loop
;
5139 ivs
->regs
= (struct iv
*) xcalloc (ivs
->n_regs
, sizeof (struct iv
));
5141 /* Find all BIVs in loop. */
5142 loop_bivs_find (loop
);
5144 /* Exit if there are no bivs. */
5147 /* Can still unroll the loop anyways, but indicate that there is no
5148 strength reduction info available. */
5149 if (flags
& LOOP_UNROLL
)
5150 unroll_loop (loop
, insn_count
, 0);
5152 loop_ivs_free (loop
);
5156 /* Determine how BIVS are initialized by looking through pre-header
5157 extended basic block. */
5158 loop_bivs_init_find (loop
);
5160 /* Look at each biv and see if we can say anything better about its
5161 initial value from any initializing insns set up above. */
5162 loop_bivs_check (loop
);
5164 /* Search the loop for general induction variables. */
5165 loop_givs_find (loop
);
5167 /* Try to calculate and save the number of loop iterations. This is
5168 set to zero if the actual number can not be calculated. This must
5169 be called after all giv's have been identified, since otherwise it may
5170 fail if the iteration variable is a giv. */
5171 loop_iterations (loop
);
5173 #ifdef HAVE_prefetch
5174 if (flags
& LOOP_PREFETCH
)
5175 emit_prefetch_instructions (loop
);
5178 /* Now for each giv for which we still don't know whether or not it is
5179 replaceable, check to see if it is replaceable because its final value
5180 can be calculated. This must be done after loop_iterations is called,
5181 so that final_giv_value will work correctly. */
5182 loop_givs_check (loop
);
5184 /* Try to prove that the loop counter variable (if any) is always
5185 nonnegative; if so, record that fact with a REG_NONNEG note
5186 so that "decrement and branch until zero" insn can be used. */
5187 check_dbra_loop (loop
, insn_count
);
5189 /* Create reg_map to hold substitutions for replaceable giv regs.
5190 Some givs might have been made from biv increments, so look at
5191 ivs->reg_iv_type for a suitable size. */
5192 reg_map_size
= ivs
->n_regs
;
5193 reg_map
= (rtx
*) xcalloc (reg_map_size
, sizeof (rtx
));
5195 /* Examine each iv class for feasibility of strength reduction/induction
5196 variable elimination. */
5198 for (bl
= ivs
->list
; bl
; bl
= bl
->next
)
5200 struct induction
*v
;
5203 /* Test whether it will be possible to eliminate this biv
5204 provided all givs are reduced. */
5205 bl
->eliminable
= loop_biv_eliminable_p (loop
, bl
, threshold
, insn_count
);
5207 /* This will be true at the end, if all givs which depend on this
5208 biv have been strength reduced.
5209 We can't (currently) eliminate the biv unless this is so. */
5210 bl
->all_reduced
= 1;
5212 /* Check each extension dependent giv in this class to see if its
5213 root biv is safe from wrapping in the interior mode. */
5214 check_ext_dependent_givs (bl
, loop_info
);
5216 /* Combine all giv's for this iv_class. */
5217 combine_givs (regs
, bl
);
5219 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
5221 struct induction
*tv
;
5223 if (v
->ignore
|| v
->same
)
5226 benefit
= loop_giv_reduce_benefit (loop
, bl
, v
, test_reg
);
5228 /* If an insn is not to be strength reduced, then set its ignore
5229 flag, and clear bl->all_reduced. */
5231 /* A giv that depends on a reversed biv must be reduced if it is
5232 used after the loop exit, otherwise, it would have the wrong
5233 value after the loop exit. To make it simple, just reduce all
5234 of such giv's whether or not we know they are used after the loop
5237 if (! flag_reduce_all_givs
5238 && v
->lifetime
* threshold
* benefit
< insn_count
5241 if (loop_dump_stream
)
5242 fprintf (loop_dump_stream
,
5243 "giv of insn %d not worth while, %d vs %d.\n",
5245 v
->lifetime
* threshold
* benefit
, insn_count
);
5247 bl
->all_reduced
= 0;
5251 /* Check that we can increment the reduced giv without a
5252 multiply insn. If not, reject it. */
5254 for (tv
= bl
->biv
; tv
; tv
= tv
->next_iv
)
5255 if (tv
->mult_val
== const1_rtx
5256 && ! product_cheap_p (tv
->add_val
, v
->mult_val
))
5258 if (loop_dump_stream
)
5259 fprintf (loop_dump_stream
,
5260 "giv of insn %d: would need a multiply.\n",
5261 INSN_UID (v
->insn
));
5263 bl
->all_reduced
= 0;
5269 /* Check for givs whose first use is their definition and whose
5270 last use is the definition of another giv. If so, it is likely
5271 dead and should not be used to derive another giv nor to
5273 loop_givs_dead_check (loop
, bl
);
5275 /* Reduce each giv that we decided to reduce. */
5276 loop_givs_reduce (loop
, bl
);
5278 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5281 For each giv register that can be reduced now: if replaceable,
5282 substitute reduced reg wherever the old giv occurs;
5283 else add new move insn "giv_reg = reduced_reg". */
5284 loop_givs_rescan (loop
, bl
, reg_map
);
5286 /* All the givs based on the biv bl have been reduced if they
5289 /* For each giv not marked as maybe dead that has been combined with a
5290 second giv, clear any "maybe dead" mark on that second giv.
5291 v->new_reg will either be or refer to the register of the giv it
5294 Doing this clearing avoids problems in biv elimination where
5295 a giv's new_reg is a complex value that can't be put in the
5296 insn but the giv combined with (with a reg as new_reg) is
5297 marked maybe_dead. Since the register will be used in either
5298 case, we'd prefer it be used from the simpler giv. */
5300 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
5301 if (! v
->maybe_dead
&& v
->same
)
5302 v
->same
->maybe_dead
= 0;
5304 /* Try to eliminate the biv, if it is a candidate.
5305 This won't work if ! bl->all_reduced,
5306 since the givs we planned to use might not have been reduced.
5308 We have to be careful that we didn't initially think we could
5309 eliminate this biv because of a giv that we now think may be
5310 dead and shouldn't be used as a biv replacement.
5312 Also, there is the possibility that we may have a giv that looks
5313 like it can be used to eliminate a biv, but the resulting insn
5314 isn't valid. This can happen, for example, on the 88k, where a
5315 JUMP_INSN can compare a register only with zero. Attempts to
5316 replace it with a compare with a constant will fail.
5318 Note that in cases where this call fails, we may have replaced some
5319 of the occurrences of the biv with a giv, but no harm was done in
5320 doing so in the rare cases where it can occur. */
5322 if (bl
->all_reduced
== 1 && bl
->eliminable
5323 && maybe_eliminate_biv (loop
, bl
, 1, threshold
, insn_count
))
5325 /* ?? If we created a new test to bypass the loop entirely,
5326 or otherwise drop straight in, based on this test, then
5327 we might want to rewrite it also. This way some later
5328 pass has more hope of removing the initialization of this
5331 /* If final_value != 0, then the biv may be used after loop end
5332 and we must emit an insn to set it just in case.
5334 Reversed bivs already have an insn after the loop setting their
5335 value, so we don't need another one. We can't calculate the
5336 proper final value for such a biv here anyways. */
5337 if (bl
->final_value
&& ! bl
->reversed
)
5338 loop_insn_sink_or_swim (loop
,
5339 gen_load_of_final_value (bl
->biv
->dest_reg
,
5342 if (loop_dump_stream
)
5343 fprintf (loop_dump_stream
, "Reg %d: biv eliminated\n",
5346 /* See above note wrt final_value. But since we couldn't eliminate
5347 the biv, we must set the value after the loop instead of before. */
5348 else if (bl
->final_value
&& ! bl
->reversed
)
5349 loop_insn_sink (loop
, gen_load_of_final_value (bl
->biv
->dest_reg
,
5353 /* Go through all the instructions in the loop, making all the
5354 register substitutions scheduled in REG_MAP. */
5356 for (p
= loop
->start
; p
!= loop
->end
; p
= NEXT_INSN (p
))
5357 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
5358 || GET_CODE (p
) == CALL_INSN
)
5360 replace_regs (PATTERN (p
), reg_map
, reg_map_size
, 0);
5361 replace_regs (REG_NOTES (p
), reg_map
, reg_map_size
, 0);
5365 if (loop_info
->n_iterations
> 0)
5367 /* When we completely unroll a loop we will likely not need the increment
5368 of the loop BIV and we will not need the conditional branch at the
5370 unrolled_insn_copies
= insn_count
- 2;
5373 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5374 need the comparison before the conditional branch at the end of the
5376 unrolled_insn_copies
-= 1;
5379 /* We'll need one copy for each loop iteration. */
5380 unrolled_insn_copies
*= loop_info
->n_iterations
;
5382 /* A little slop to account for the ability to remove initialization
5383 code, better CSE, and other secondary benefits of completely
5384 unrolling some loops. */
5385 unrolled_insn_copies
-= 1;
5387 /* Clamp the value. */
5388 if (unrolled_insn_copies
< 0)
5389 unrolled_insn_copies
= 0;
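/* Illustrative worked example (made-up numbers, not from this file):
   for a loop of insn_count = 10, the estimate above gives
   unrolled_insn_copies = (10 - 2) * n_iterations - 1 (slightly less on
   HAVE_cc0 targets, where the separate compare also goes away).  With
   n_iterations = 4 that is 31 > 10, so the loop is not unrolled
   automatically; with n_iterations = 1 it is 7 <= 10 and the loop is
   unrolled completely by the LOOP_AUTO_UNROLL path below.  */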
5392 /* Unroll loops from within strength reduction so that we can use the
5393 induction variable information that strength_reduce has already
5394 collected. Always unroll loops that would be as small or smaller
5395 unrolled than when rolled. */
5396 if ((flags
& LOOP_UNROLL
)
5397 || ((flags
& LOOP_AUTO_UNROLL
)
5398 && loop_info
->n_iterations
> 0
5399 && unrolled_insn_copies
<= insn_count
))
5400 unroll_loop (loop
, insn_count
, 1);
5402 #ifdef HAVE_doloop_end
5403 if (HAVE_doloop_end
&& (flags
& LOOP_BCT
) && flag_branch_on_count_reg
)
5404 doloop_optimize (loop
);
5405 #endif /* HAVE_doloop_end */
5407 /* In case number of iterations is known, drop branch prediction note
5408 in the branch. Do that only in second loop pass, as loop unrolling
5409 may change the number of iterations performed. */
5410 if (flags
& LOOP_BCT
)
5412 unsigned HOST_WIDE_INT n
5413 = loop_info
->n_iterations
/ loop_info
->unroll_number
;
5415 predict_insn (prev_nonnote_insn (loop
->end
), PRED_LOOP_ITERATIONS
,
5416 REG_BR_PROB_BASE
- REG_BR_PROB_BASE
/ n
);
5419 if (loop_dump_stream
)
5420 fprintf (loop_dump_stream
, "\n");
5422 loop_ivs_free (loop
);
5427 /* Record all basic induction variables calculated in the insn.  */
5429 check_insn_for_bivs (loop
, p
, not_every_iteration
, maybe_multiple
)
5432 int not_every_iteration
;
5435 struct loop_ivs
*ivs
= LOOP_IVS (loop
);
5442 if (GET_CODE (p
) == INSN
5443 && (set
= single_set (p
))
5444 && GET_CODE (SET_DEST (set
)) == REG
)
5446 dest_reg
= SET_DEST (set
);
5447 if (REGNO (dest_reg
) < max_reg_before_loop
5448 && REGNO (dest_reg
) >= FIRST_PSEUDO_REGISTER
5449 && REG_IV_TYPE (ivs
, REGNO (dest_reg
)) != NOT_BASIC_INDUCT
)
5451 if (basic_induction_var (loop
, SET_SRC (set
),
5452 GET_MODE (SET_SRC (set
)),
5453 dest_reg
, p
, &inc_val
, &mult_val
,
5456 /* It is a possible basic induction variable.
5457 Create and initialize an induction structure for it. */
5460 = (struct induction
*) xmalloc (sizeof (struct induction
));
5462 record_biv (loop
, v
, p
, dest_reg
, inc_val
, mult_val
, location
,
5463 not_every_iteration
, maybe_multiple
);
5464 REG_IV_TYPE (ivs
, REGNO (dest_reg
)) = BASIC_INDUCT
;
5466 else if (REGNO (dest_reg
) < ivs
->n_regs
)
5467 REG_IV_TYPE (ivs
, REGNO (dest_reg
)) = NOT_BASIC_INDUCT
;
5473 /* Record all givs calculated in the insn.
5474 A register is a giv if: it is only set once, it is a function of a
5475 biv and a constant (or invariant), and it is not a biv. */
5477 check_insn_for_givs (loop
, p
, not_every_iteration
, maybe_multiple
)
5480 int not_every_iteration
;
5483 struct loop_regs
*regs
= LOOP_REGS (loop
);
5486 /* Look for a general induction variable in a register. */
5487 if (GET_CODE (p
) == INSN
5488 && (set
= single_set (p
))
5489 && GET_CODE (SET_DEST (set
)) == REG
5490 && ! regs
->array
[REGNO (SET_DEST (set
))].may_not_optimize
)
5499 rtx last_consec_insn
;
5501 dest_reg
= SET_DEST (set
);
5502 if (REGNO (dest_reg
) < FIRST_PSEUDO_REGISTER
)
5505 if (/* SET_SRC is a giv. */
5506 (general_induction_var (loop
, SET_SRC (set
), &src_reg
, &add_val
,
5507 &mult_val
, &ext_val
, 0, &benefit
, VOIDmode
)
5508 /* Equivalent expression is a giv. */
5509 || ((regnote
= find_reg_note (p
, REG_EQUAL
, NULL_RTX
))
5510 && general_induction_var (loop
, XEXP (regnote
, 0), &src_reg
,
5511 &add_val
, &mult_val
, &ext_val
, 0,
5512 &benefit
, VOIDmode
)))
5513 /* Don't try to handle any regs made by loop optimization.
5514 We have nothing on them in regno_first_uid, etc. */
5515 && REGNO (dest_reg
) < max_reg_before_loop
5516 /* Don't recognize a BASIC_INDUCT_VAR here. */
5517 && dest_reg
!= src_reg
5518 /* This must be the only place where the register is set. */
5519 && (regs
->array
[REGNO (dest_reg
)].n_times_set
== 1
5520 /* or all sets must be consecutive and make a giv. */
5521 || (benefit
= consec_sets_giv (loop
, benefit
, p
,
5523 &add_val
, &mult_val
, &ext_val
,
5524 &last_consec_insn
))))
5527 = (struct induction
*) xmalloc (sizeof (struct induction
));
5529 /* If this is a library call, increase benefit. */
5530 if (find_reg_note (p
, REG_RETVAL
, NULL_RTX
))
5531 benefit
+= libcall_benefit (p
);
5533 /* Skip the consecutive insns, if there are any. */
5534 if (regs
->array
[REGNO (dest_reg
)].n_times_set
!= 1)
5535 p
= last_consec_insn
;
5537 record_giv (loop
, v
, p
, src_reg
, dest_reg
, mult_val
, add_val
,
5538 ext_val
, benefit
, DEST_REG
, not_every_iteration
,
5539 maybe_multiple
, (rtx
*) 0);
5544 #ifndef DONT_REDUCE_ADDR
5545 /* Look for givs which are memory addresses. */
5546 /* This resulted in worse code on a VAX 8600. I wonder if it
5548 if (GET_CODE (p
) == INSN
)
5549 find_mem_givs (loop
, PATTERN (p
), p
, not_every_iteration
,
5553 /* Update the status of whether giv can derive other givs. This can
5554 change when we pass a label or an insn that updates a biv. */
5555 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
5556 || GET_CODE (p
) == CODE_LABEL
)
5557 update_giv_derive (loop
, p
);
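/* Illustrative sketch (hypothetical source code, not from this file):
   in a loop such as

     for (i = 0; i < n; i++)
       {
         x = 4 * i + 20;     x is a DEST_REG giv: set exactly once and a
         a[x] = 0;            linear function of the biv i
         b[i] = 1;            the address of b[i], i.e. b + 4 * i, is a
       }                      DEST_ADDR giv found by find_mem_givs

   while a register that is itself a biv, or that is set more than once
   in a way consec_sets_giv cannot combine, is not recorded as a giv.  */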
5561 /* Return 1 if X is a valid source for an initial value (or as value being
5562 compared against in an initial test).
5564 X must be either a register or constant and must not be clobbered between
5565 the current insn and the start of the loop.
5567 INSN is the insn containing X. */
5570 valid_initial_value_p (x
, insn
, call_seen
, loop_start
)
5579 /* Only consider pseudos we know about initialized in insns whose luids
5581 if (GET_CODE (x
) != REG
5582 || REGNO (x
) >= max_reg_before_loop
)
5585 /* Don't use call-clobbered registers across a call which clobbers it. On
5586 some machines, don't use any hard registers at all. */
5587 if (REGNO (x
) < FIRST_PSEUDO_REGISTER
5588 && (SMALL_REGISTER_CLASSES
5589 || (call_used_regs
[REGNO (x
)] && call_seen
)))
5592 /* Don't use registers that have been clobbered before the start of the
5594 if (reg_set_between_p (x
, insn
, loop_start
))
5600 /* Scan X for memory refs and check each memory address
5601 as a possible giv. INSN is the insn whose pattern X comes from.
5602 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5603 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5604 more than once in each loop iteration. */
5607 find_mem_givs (loop
, x
, insn
, not_every_iteration
, maybe_multiple
)
5608 const struct loop
*loop
;
5611 int not_every_iteration
, maybe_multiple
;
5620 code
= GET_CODE (x
);
5645 /* This code used to disable creating GIVs with mult_val == 1 and
5646 add_val == 0. However, this leads to lost optimizations when
5647 it comes time to combine a set of related DEST_ADDR GIVs, since
5648 this one would not be seen. */
5650 if (general_induction_var (loop
, XEXP (x
, 0), &src_reg
, &add_val
,
5651 &mult_val
, &ext_val
, 1, &benefit
,
5654 /* Found one; record it. */
5656 = (struct induction
*) xmalloc (sizeof (struct induction
));
5658 record_giv (loop
, v
, insn
, src_reg
, addr_placeholder
, mult_val
,
5659 add_val
, ext_val
, benefit
, DEST_ADDR
,
5660 not_every_iteration
, maybe_multiple
, &XEXP (x
, 0));
5671 /* Recursively scan the subexpressions for other mem refs. */
5673 fmt
= GET_RTX_FORMAT (code
);
5674 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
5676 find_mem_givs (loop
, XEXP (x
, i
), insn
, not_every_iteration
,
5678 else if (fmt
[i
] == 'E')
5679 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
5680 find_mem_givs (loop
, XVECEXP (x
, i
, j
), insn
, not_every_iteration
,
5684 /* Fill in the data about one biv update.
5685 V is the `struct induction' in which we record the biv. (It is
5686 allocated by the caller, with alloca.)
5687 INSN is the insn that sets it.
5688 DEST_REG is the biv's reg.
5690 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5691 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5692 being set to INC_VAL.
5694    NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5695 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5696 can be executed more than once per iteration. If MAYBE_MULTIPLE
5697 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5698 executed exactly once per iteration. */
5701 record_biv (loop
, v
, insn
, dest_reg
, inc_val
, mult_val
, location
,
5702 not_every_iteration
, maybe_multiple
)
5704 struct induction
*v
;
5710 int not_every_iteration
;
5713 struct loop_ivs
*ivs
= LOOP_IVS (loop
);
5714 struct iv_class
*bl
;
5717 v
->src_reg
= dest_reg
;
5718 v
->dest_reg
= dest_reg
;
5719 v
->mult_val
= mult_val
;
5720 v
->add_val
= inc_val
;
5721 v
->ext_dependent
= NULL_RTX
;
5722 v
->location
= location
;
5723 v
->mode
= GET_MODE (dest_reg
);
5724 v
->always_computable
= ! not_every_iteration
;
5725 v
->always_executed
= ! not_every_iteration
;
5726 v
->maybe_multiple
= maybe_multiple
;
5728 /* Add this to the reg's iv_class, creating a class
5729 if this is the first incrementation of the reg. */
5731 bl
= REG_IV_CLASS (ivs
, REGNO (dest_reg
));
5734 /* Create and initialize new iv_class. */
5736 bl
= (struct iv_class
*) xmalloc (sizeof (struct iv_class
));
5738 bl
->regno
= REGNO (dest_reg
);
5744 /* Set initial value to the reg itself. */
5745 bl
->initial_value
= dest_reg
;
5746 bl
->final_value
= 0;
5747 /* We haven't seen the initializing insn yet */
5750 bl
->initial_test
= 0;
5751 bl
->incremented
= 0;
5755 bl
->total_benefit
= 0;
5757 /* Add this class to ivs->list. */
5758 bl
->next
= ivs
->list
;
5761 /* Put it in the array of biv register classes. */
5762 REG_IV_CLASS (ivs
, REGNO (dest_reg
)) = bl
;
5765 /* Update IV_CLASS entry for this biv. */
5766 v
->next_iv
= bl
->biv
;
5769 if (mult_val
== const1_rtx
)
5770 bl
->incremented
= 1;
5772 if (loop_dump_stream
)
5773 loop_biv_dump (v
, loop_dump_stream
, 0);
5776 /* Fill in the data about one giv.
5777 V is the `struct induction' in which we record the giv. (It is
5778 allocated by the caller, with alloca.)
5779 INSN is the insn that sets it.
5780 BENEFIT estimates the savings from deleting this insn.
5781 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5782 into a register or is used as a memory address.
5784 SRC_REG is the biv reg which the giv is computed from.
5785 DEST_REG is the giv's reg (if the giv is stored in a reg).
5786 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5787 LOCATION points to the place where this giv's value appears in INSN. */
5790 record_giv (loop
, v
, insn
, src_reg
, dest_reg
, mult_val
, add_val
, ext_val
,
5791 benefit
, type
, not_every_iteration
, maybe_multiple
, location
)
5792 const struct loop
*loop
;
5793 struct induction
*v
;
5797 rtx mult_val
, add_val
, ext_val
;
5800 int not_every_iteration
, maybe_multiple
;
5803 struct loop_ivs
*ivs
= LOOP_IVS (loop
);
5804 struct induction
*b
;
5805 struct iv_class
*bl
;
5806 rtx set
= single_set (insn
);
5809 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5810 undo the MULT canonicalization that we performed earlier. */
5811 temp
= simplify_rtx (add_val
);
5813 && ! (GET_CODE (add_val
) == MULT
5814 && GET_CODE (temp
) == ASHIFT
))
5818 v
->src_reg
= src_reg
;
5820 v
->dest_reg
= dest_reg
;
5821 v
->mult_val
= mult_val
;
5822 v
->add_val
= add_val
;
5823 v
->ext_dependent
= ext_val
;
5824 v
->benefit
= benefit
;
5825 v
->location
= location
;
5827 v
->combined_with
= 0;
5828 v
->maybe_multiple
= maybe_multiple
;
5830 v
->derive_adjustment
= 0;
5836 v
->auto_inc_opt
= 0;
5840 /* The v->always_computable field is used in update_giv_derive, to
5841 determine whether a giv can be used to derive another giv. For a
5842 DEST_REG giv, INSN computes a new value for the giv, so its value
5843    isn't computable if INSN isn't executed every iteration.
5844 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5845 it does not compute a new value. Hence the value is always computable
5846 regardless of whether INSN is executed each iteration. */
5848 if (type
== DEST_ADDR
)
5849 v
->always_computable
= 1;
5851 v
->always_computable
= ! not_every_iteration
;
5853 v
->always_executed
= ! not_every_iteration
;
5855 if (type
== DEST_ADDR
)
5857 v
->mode
= GET_MODE (*location
);
5860 else /* type == DEST_REG */
5862 v
->mode
= GET_MODE (SET_DEST (set
));
5864 v
->lifetime
= LOOP_REG_LIFETIME (loop
, REGNO (dest_reg
));
5866 /* If the lifetime is zero, it means that this register is
5867 really a dead store. So mark this as a giv that can be
5868 ignored. This will not prevent the biv from being eliminated. */
5869 if (v
->lifetime
== 0)
5872 REG_IV_TYPE (ivs
, REGNO (dest_reg
)) = GENERAL_INDUCT
;
5873 REG_IV_INFO (ivs
, REGNO (dest_reg
)) = v
;
5876 /* Add the giv to the class of givs computed from one biv. */
5878 bl
= REG_IV_CLASS (ivs
, REGNO (src_reg
));
5881 v
->next_iv
= bl
->giv
;
5883 /* Don't count DEST_ADDR. This is supposed to count the number of
5884 insns that calculate givs. */
5885 if (type
== DEST_REG
)
5887 bl
->total_benefit
+= benefit
;
5890 /* Fatal error, biv missing for this giv? */
5893 if (type
== DEST_ADDR
)
5896 v
->not_replaceable
= 0;
5900 /* The giv can be replaced outright by the reduced register only if all
5901 of the following conditions are true:
5902 - the insn that sets the giv is always executed on any iteration
5903 on which the giv is used at all
5904 (there are two ways to deduce this:
5905 either the insn is executed on every iteration,
5906 or all uses follow that insn in the same basic block),
5907 - the giv is not used outside the loop
5908 - no assignments to the biv occur during the giv's lifetime. */
5910 if (REGNO_FIRST_UID (REGNO (dest_reg
)) == INSN_UID (insn
)
5911 /* Previous line always fails if INSN was moved by loop opt. */
5912 && REGNO_LAST_LUID (REGNO (dest_reg
))
5913 < INSN_LUID (loop
->end
)
5914 && (! not_every_iteration
5915 || last_use_this_basic_block (dest_reg
, insn
)))
5917 /* Now check that there are no assignments to the biv within the
5918 giv's lifetime. This requires two separate checks. */
5920 /* Check each biv update, and fail if any are between the first
5921 and last use of the giv.
5923 If this loop contains an inner loop that was unrolled, then
5924 the insn modifying the biv may have been emitted by the loop
5925 unrolling code, and hence does not have a valid luid. Just
5926 mark the biv as not replaceable in this case. It is not very
5927 useful as a biv, because it is used in two different loops.
5928 It is very unlikely that we would be able to optimize the giv
5929 using this biv anyways. */
5932 v
->not_replaceable
= 0;
5933 for (b
= bl
->biv
; b
; b
= b
->next_iv
)
5935 if (INSN_UID (b
->insn
) >= max_uid_for_loop
5936 || ((INSN_LUID (b
->insn
)
5937 >= REGNO_FIRST_LUID (REGNO (dest_reg
)))
5938 && (INSN_LUID (b
->insn
)
5939 <= REGNO_LAST_LUID (REGNO (dest_reg
)))))
5942 v
->not_replaceable
= 1;
5947 /* If there are any backwards branches that go from after the
5948 biv update to before it, then this giv is not replaceable. */
5950 for (b
= bl
->biv
; b
; b
= b
->next_iv
)
5951 if (back_branch_in_range_p (loop
, b
->insn
))
5954 v
->not_replaceable
= 1;
5960 /* May still be replaceable, we don't have enough info here to
5963 v
->not_replaceable
= 0;
5967 /* Record whether the add_val contains a const_int, for later use by
5972 v
->no_const_addval
= 1;
5973 if (tem
== const0_rtx
)
5975 else if (CONSTANT_P (add_val
))
5976 v
->no_const_addval
= 0;
5977 if (GET_CODE (tem
) == PLUS
)
5981 if (GET_CODE (XEXP (tem
, 0)) == PLUS
)
5982 tem
= XEXP (tem
, 0);
5983 else if (GET_CODE (XEXP (tem
, 1)) == PLUS
)
5984 tem
= XEXP (tem
, 1);
5988 if (CONSTANT_P (XEXP (tem
, 1)))
5989 v
->no_const_addval
= 0;
5993 if (loop_dump_stream
)
5994 loop_giv_dump (v
, loop_dump_stream
, 0);
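/* Illustrative sketch (hypothetical source code, not from this file):
   the replaceability test above distinguishes cases like

     for (i = 0; i < n; i++)
       {
         x = i * 4;          replaceable: set and used on the same
         a[x] = 0;            iterations, before any further biv update
       }

     for (i = 0; i < n; i++)
       {
         if (c[i])
           x = i * 4;        not replaceable: the set is conditional, so
         a[x] = 0;            a use may see a value from an earlier
       }                      iteration and the original reg must be kept
                              in sync with a move from the reduced reg.  */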
5997 /* All this does is determine whether a giv can be made replaceable because
5998 its final value can be calculated. This code can not be part of record_giv
5999 above, because final_giv_value requires that the number of loop iterations
6000 be known, and that can not be accurately calculated until after all givs
6001 have been identified. */
6004 check_final_value (loop
, v
)
6005 const struct loop
*loop
;
6006 struct induction
*v
;
6008 rtx final_value
= 0;
6010 /* DEST_ADDR givs will never reach here, because they are always marked
6011 replaceable above in record_giv. */
6013 /* The giv can be replaced outright by the reduced register only if all
6014 of the following conditions are true:
6015 - the insn that sets the giv is always executed on any iteration
6016 on which the giv is used at all
6017 (there are two ways to deduce this:
6018 either the insn is executed on every iteration,
6019 or all uses follow that insn in the same basic block),
6020 - its final value can be calculated (this condition is different
6021 than the one above in record_giv)
6022      - it's not used before it's set
6023 - no assignments to the biv occur during the giv's lifetime. */
6026 /* This is only called now when replaceable is known to be false. */
6027 /* Clear replaceable, so that it won't confuse final_giv_value. */
6031 if ((final_value
= final_giv_value (loop
, v
))
6032 && (v
->always_executed
6033 || last_use_this_basic_block (v
->dest_reg
, v
->insn
)))
6035 int biv_increment_seen
= 0, before_giv_insn
= 0;
6040 v
->not_replaceable
= 0;
6042 /* When trying to determine whether or not a biv increment occurs
6043 during the lifetime of the giv, we can ignore uses of the variable
6044 outside the loop because final_value is true. Hence we can not
6045 use regno_last_uid and regno_first_uid as above in record_giv. */
6047 /* Search the loop to determine whether any assignments to the
6048 biv occur during the giv's lifetime. Start with the insn
6049 that sets the giv, and search around the loop until we come
6050 back to that insn again.
6052 Also fail if there is a jump within the giv's lifetime that jumps
6053 to somewhere outside the lifetime but still within the loop. This
6054 catches spaghetti code where the execution order is not linear, and
6055 hence the above test fails. Here we assume that the giv lifetime
6056 does not extend from one iteration of the loop to the next, so as
6057 to make the test easier. Since the lifetime isn't known yet,
6058 this requires two loops. See also record_giv above. */
6060 last_giv_use
= v
->insn
;
6067 before_giv_insn
= 1;
6068 p
= NEXT_INSN (loop
->start
);
6073 if (GET_CODE (p
) == INSN
|| GET_CODE (p
) == JUMP_INSN
6074 || GET_CODE (p
) == CALL_INSN
)
6076 /* It is possible for the BIV increment to use the GIV if we
6077 have a cycle. Thus we must be sure to check each insn for
6078 both BIV and GIV uses, and we must check for BIV uses
6081 if (! biv_increment_seen
6082 && reg_set_p (v
->src_reg
, PATTERN (p
)))
6083 biv_increment_seen
= 1;
6085 if (reg_mentioned_p (v
->dest_reg
, PATTERN (p
)))
6087 if (biv_increment_seen
|| before_giv_insn
)
6090 v
->not_replaceable
= 1;
6098 /* Now that the lifetime of the giv is known, check for branches
6099 from within the lifetime to outside the lifetime if it is still
6109 p
= NEXT_INSN (loop
->start
);
6110 if (p
== last_giv_use
)
6113 if (GET_CODE (p
) == JUMP_INSN
&& JUMP_LABEL (p
)
6114 && LABEL_NAME (JUMP_LABEL (p
))
6115 && ((loop_insn_first_p (JUMP_LABEL (p
), v
->insn
)
6116 && loop_insn_first_p (loop
->start
, JUMP_LABEL (p
)))
6117 || (loop_insn_first_p (last_giv_use
, JUMP_LABEL (p
))
6118 && loop_insn_first_p (JUMP_LABEL (p
), loop
->end
))))
6121 v
->not_replaceable
= 1;
6123 if (loop_dump_stream
)
6124 fprintf (loop_dump_stream
,
6125 "Found branch outside giv lifetime.\n");
6132 /* If it is replaceable, then save the final value. */
6134 v
->final_value
= final_value
;
6137 if (loop_dump_stream
&& v
->replaceable
)
6138 fprintf (loop_dump_stream
, "Insn %d: giv reg %d final_value replaceable\n",
6139 INSN_UID (v
->insn
), REGNO (v
->dest_reg
));
/* Update the status of whether a giv can derive other givs.

   We need to do something special if there is or may be an update to the biv
   between the time the giv is defined and the time it is used to derive
   another giv.

   In addition, a giv that is only conditionally set is not allowed to
   derive another giv once a label has been passed.

   The cases we look at are when a label or an update to a biv is passed.  */
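/* A small worked example of the adjustment described above:  if a giv
   G = B * M + A has already been computed and the biv B is then
   incremented by an invariant I before G is used to derive another giv,
   the derived value must be compensated by the product M * I; that
   product is what accumulates in derive_adjustment below.  With M = 4
   and I = 1, for instance, each intervening biv increment shifts G
   by 4.  */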
update_giv_derive (loop, p)
     const struct loop *loop;
     rtx p;
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;
  struct induction *biv, *giv;

  /* Search all IV classes, then all bivs, and finally all givs.

     There are three cases we are concerned with.  First we have the situation
     of a giv that is only updated conditionally.  In that case, it may not
     derive any givs after a label is passed.

     The second case is when a biv update occurs, or may occur, after the
     definition of a giv.  For certain biv updates (see below) that are
     known to occur between the giv definition and use, we can adjust the
     giv definition.  For others, or when the biv update is conditional,
     we must prevent the giv from deriving any other givs.  There are two
     sub-cases within this case.

     If this is a label, we are concerned with any biv update that is done
     conditionally, since it may be done after the giv is defined followed by
     a branch here (actually, we need to pass both a jump and a label, but
     this extra tracking doesn't seem worth it).

     If this is a jump, we are concerned about any biv update that may be
     executed multiple times.  We are actually only concerned about
     backward jumps, but it is probably not worth performing the test
     on the jump again here.

     If this is a biv update, we must adjust the giv status to show that a
     subsequent biv update was performed.  If this adjustment cannot be done,
     the giv cannot derive further givs.  */

  for (bl = ivs->list; bl; bl = bl->next)
    for (biv = bl->biv; biv; biv = biv->next_iv)
      if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
          || biv->insn == p)
        for (giv = bl->giv; giv; giv = giv->next_iv)
          {
            /* If cant_derive is already true, there is no point in
               checking all of these conditions again.  */
            if (giv->cant_derive)
              continue;

            /* If this giv is conditionally set and we have passed a label,
               it cannot derive anything.  */
            if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
              giv->cant_derive = 1;

            /* Skip givs that have mult_val == 0, since
               they are really invariants.  Also skip those that are
               replaceable, since we know their lifetime doesn't contain
               this biv update.  */
            else if (giv->mult_val == const0_rtx || giv->replaceable)
              continue;

            /* The only way we can allow this giv to derive another
               is if this is a biv increment and we can form the product
               of biv->add_val and giv->mult_val.  In this case, we will
               be able to compute a compensation.  */
            else if (biv->insn == p)
              {
                rtx tem = 0;

                if (biv->mult_val == const1_rtx)
                  tem = simplify_giv_expr (loop,
                                           gen_rtx_MULT (giv->mode,
                                                         biv->add_val,
                                                         giv->mult_val),
                                           &ext_val_dummy, &dummy);

                if (tem && giv->derive_adjustment)
                  tem = simplify_giv_expr
                    (loop,
                     gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
                     &ext_val_dummy, &dummy);

                if (tem)
                  giv->derive_adjustment = tem;
                else
                  giv->cant_derive = 1;
              }
            else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
                     || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
              giv->cant_derive = 1;
          }
}
/* Check whether an insn is an increment legitimate for a basic induction var.
   X is the source of insn P, or a part of it.
   MODE is the mode in which X should be interpreted.

   DEST_REG is the putative biv, also the destination of the insn.
   We accept patterns of these forms:
     REG = REG + INVARIANT (includes REG = REG - CONSTANT)
     REG = INVARIANT + REG

   If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
   store the additive term into *INC_VAL, and store the place where
   we found the additive term into *LOCATION.

   If X is an assignment of an invariant into DEST_REG, we set
   *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.

   We also want to detect a BIV when it corresponds to a variable
   whose mode was promoted via PROMOTED_MODE.  In that case, an increment
   of the variable may be a PLUS that adds a SUBREG of that variable to
   an invariant and then sign- or zero-extends the result of the PLUS
   into the variable.

   Most GIVs in such cases will be in the promoted mode, since that is
   probably the natural computation mode (and almost certainly the mode
   used for addresses) on the machine.  So we view the pseudo-reg containing
   the variable as the BIV, as if it were simply incremented.

   Note that treating the entire pseudo as a BIV will result in making
   simple increments to any GIVs based on it.  However, if the variable
   overflows in its declared mode but not its promoted mode, the result will
   be incorrect.  This is acceptable if the variable is signed, since
   overflows in such cases are undefined, but not if it is unsigned, since
   those overflows are defined.  So we only check for SIGN_EXTEND and
   not ZERO_EXTEND.

   If we cannot find a biv, we return 0.  */
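/* For example, an insn whose single set is
     (set (reg 64) (plus (reg 64) (const_int 4)))
   matches the first pattern above:  we return 1, set *MULT_VAL to
   const1_rtx and *INC_VAL to (const_int 4).  (Register 64 is just an
   illustrative pseudo.)  */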
basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
     const struct loop *loop;
     enum machine_mode mode;
{
  enum rtx_code code;
  rtx *argp, arg;
  rtx insn, set, dest;

  code = GET_CODE (x);

  switch (code)
    {
    case PLUS:
      if (rtx_equal_p (XEXP (x, 0), dest_reg)
          || (GET_CODE (XEXP (x, 0)) == SUBREG
              && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
              && SUBREG_REG (XEXP (x, 0)) == dest_reg))
        argp = &XEXP (x, 1);
      else if (rtx_equal_p (XEXP (x, 1), dest_reg)
               || (GET_CODE (XEXP (x, 1)) == SUBREG
                   && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
                   && SUBREG_REG (XEXP (x, 1)) == dest_reg))
        argp = &XEXP (x, 0);
      else
        return 0;

      arg = *argp;
      if (loop_invariant_p (loop, arg) != 1)
        return 0;

      *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
      *mult_val = const1_rtx;
      return 1;

    case SUBREG:
      /* If what's inside the SUBREG is a BIV, then the SUBREG is one too.
         This will handle addition of promoted variables.
         ??? The comment at the start of this function is wrong: promoted
         variable increments don't look like it says they do.  */
      return basic_induction_var (loop, SUBREG_REG (x),
                                  GET_MODE (SUBREG_REG (x)),
                                  dest_reg, p, inc_val, mult_val, location);

    case REG:
      /* If this register is assigned in a previous insn, look at its
         source, but don't go outside the loop or past a label.  */

      /* If this sets a register to itself, we would repeat any previous
         biv increment if we applied this strategy blindly.  */
      if (rtx_equal_p (dest_reg, x))
        return 0;

      do
        insn = PREV_INSN (insn);
      while (insn && GET_CODE (insn) == NOTE
             && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);

      set = single_set (insn);
      dest = SET_DEST (set);
      if (dest == x
          || (GET_CODE (dest) == SUBREG
              && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
              && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
              && SUBREG_REG (dest) == x))
        return basic_induction_var (loop, SET_SRC (set),
                                    (GET_MODE (SET_SRC (set)) == VOIDmode
                                     ? GET_MODE (x)
                                     : GET_MODE (SET_SRC (set))),
                                    dest_reg, insn,
                                    inc_val, mult_val, location);

      while (GET_CODE (dest) == SIGN_EXTRACT
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == SUBREG
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* Can accept constant setting of biv only when inside innermost loop.
         Otherwise, a biv of an inner loop may be incorrectly recognized
         as a biv of the outer loop,
         causing code to be moved INTO the inner loop.  */
      if (loop_invariant_p (loop, x) != 1)
        return 0;

      /* convert_modes aborts if we try to convert to or from CCmode, so just
         exclude that case.  It is very unlikely that a condition code value
         would be a useful iterator anyway.  convert_modes aborts if we try to
         convert a float mode to non-float or vice versa too.  */
      if (loop->level == 1
          && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
          && GET_MODE_CLASS (mode) != MODE_CC)
        {
          /* Possible bug here?  Perhaps we don't know the mode of X.  */
          *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
          *mult_val = const0_rtx;
          return 1;
        }
      else
        return 0;

    case SIGN_EXTEND:
      return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
                                  dest_reg, p, inc_val, mult_val, location);

    case ASHIFTRT:
      /* Similar, since this can be a sign extension.  */
      for (insn = PREV_INSN (p);
           (insn && GET_CODE (insn) == NOTE
            && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
           insn = PREV_INSN (insn))
        ;

      set = single_set (insn);

      if (! rtx_equal_p (dest_reg, XEXP (x, 0))
          && set && SET_DEST (set) == XEXP (x, 0)
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) >= 0
          && GET_CODE (SET_SRC (set)) == ASHIFT
          && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
        return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
                                    GET_MODE (XEXP (x, 0)),
                                    dest_reg, insn, inc_val, mult_val,
                                    location);
      return 0;
    }
}
/* A general induction variable (giv) is any quantity that is a linear
   function of a basic induction variable,
   i.e. giv = biv * mult_val + add_val.
   The coefficients can be any loop invariant quantity.
   A giv need not be computed directly from the biv;
   it can be computed by way of other givs.  */
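/* For example, if the loop counter I is a biv incremented by 1 on every
   iteration, then an address computation such as P + 4*I is a giv with
   mult_val == 4 and add_val == P:  on any iteration its value is a
   linear function of the biv.  (P and I are only illustrative names.)  */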
/* Determine whether X computes a giv.
   If it does, return a nonzero value
     which is the benefit from eliminating the computation of X;
   set *SRC_REG to the register of the biv that it is computed from;
   set *ADD_VAL and *MULT_VAL to the coefficients,
     such that the value of X is biv * mult + add.  */

general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
                       is_addr, pbenefit, addr_mode)
     const struct loop *loop;
     enum machine_mode addr_mode;
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  rtx orig_x = x;

  /* If this is an invariant, forget it, it isn't a giv.  */
  if (loop_invariant_p (loop, x) == 1)
    return 0;

  *ext_val = NULL_RTX;
  x = simplify_giv_expr (loop, x, ext_val, pbenefit);

  switch (GET_CODE (x))
    {
    case USE:
    case CONST_INT:
      /* Since this is now an invariant and wasn't before, it must be a giv
         with MULT_VAL == 0.  It doesn't matter which BIV we associate this
         with.  */
      *src_reg = ivs->list->biv->dest_reg;
      *mult_val = const0_rtx;
      break;

    case REG:
      /* This is equivalent to a BIV.  */
      *mult_val = const1_rtx;
      *add_val = const0_rtx;
      break;

    case PLUS:
      /* Either (plus (biv) (invar)) or
         (plus (mult (biv) (invar_1)) (invar_2)).  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
        {
          *src_reg = XEXP (XEXP (x, 0), 0);
          *mult_val = XEXP (XEXP (x, 0), 1);
        }
      else
        {
          *src_reg = XEXP (x, 0);
          *mult_val = const1_rtx;
        }
      *add_val = XEXP (x, 1);
      break;

    case MULT:
      /* ADD_VAL is zero.  */
      *src_reg = XEXP (x, 0);
      *mult_val = XEXP (x, 1);
      *add_val = const0_rtx;
      break;
    }

  /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
     unless they are CONST_INT).  */
  if (GET_CODE (*add_val) == USE)
    *add_val = XEXP (*add_val, 0);
  if (GET_CODE (*mult_val) == USE)
    *mult_val = XEXP (*mult_val, 0);

  if (is_addr)
    *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
  else
    *pbenefit += rtx_cost (orig_x, SET);

  /* Always return true if this is a giv so it will be detected as such,
     even if the benefit is zero or negative.  This allows elimination
     of bivs that might otherwise not be eliminated.  */
  return 1;
}
/* Given an expression, X, try to form it as a linear function of a biv.
   We will canonicalize it to be of the form
        (plus (mult (BIV) (invar_1))
              (invar_2))
   with possible degeneracies.

   The invariant expressions must each be of a form that can be used as a
   machine operand.  We surround them with a USE rtx (a hack, but localized
   and certainly unambiguous!) if not a CONST_INT for simplicity in this
   routine; it is the caller's responsibility to strip them.

   If no such canonicalization is possible (i.e., two biv's are used or an
   expression that is neither invariant nor a biv or giv), this routine
   returns 0.

   For a nonzero return, the result will have a code of CONST_INT, USE,
   REG (for a BIV), PLUS, or MULT.  No other codes will occur.

   *BENEFIT will be incremented by the benefit of any sub-giv encountered.  */
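/* As an example of the canonical form:  given a biv B and a
   loop-invariant register X, the expression B*4 + X comes back as
   (plus (mult (reg B) (const_int 4)) (use (reg X))), the invariant
   operand being wrapped in a USE because it is not a CONST_INT.  */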
static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
static rtx sge_plus_constant PARAMS ((rtx, rtx));
simplify_giv_expr (loop, x, ext_val, benefit)
     const struct loop *loop;
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  enum machine_mode mode = GET_MODE (x);
  rtx arg0, arg1, tem;

  /* If this is not an integer mode, or if we cannot do arithmetic in this
     mode, this can't be a giv.  */
  if (mode != VOIDmode
      && (GET_MODE_CLASS (mode) != MODE_INT
          || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
    return NULL_RTX;

  switch (GET_CODE (x))
    {
    case PLUS:
      arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
      arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
      if (arg0 == 0 || arg1 == 0)
        return NULL_RTX;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE
           || GET_CODE (arg0) == CONST_INT)
          && ! ((GET_CODE (arg0) == USE
                 && GET_CODE (arg1) == USE)
                || GET_CODE (arg1) == CONST_INT))
        tem = arg0, arg0 = arg1, arg1 = tem;

      /* Handle addition of zero, then addition of an invariant.  */
      if (arg1 == const0_rtx)
        return arg0;
      else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
        switch (GET_CODE (arg0))
          {
          case CONST_INT:
          case USE:
            /* Adding two invariants must result in an invariant, so enclose
               addition operation inside a USE and return it.  */
            if (GET_CODE (arg0) == USE)
              arg0 = XEXP (arg0, 0);
            if (GET_CODE (arg1) == USE)
              arg1 = XEXP (arg1, 0);

            if (GET_CODE (arg0) == CONST_INT)
              tem = arg0, arg0 = arg1, arg1 = tem;
            if (GET_CODE (arg1) == CONST_INT)
              tem = sge_plus_constant (arg0, arg1);
            else
              tem = sge_plus (mode, arg0, arg1);

            if (GET_CODE (tem) != CONST_INT)
              tem = gen_rtx_USE (mode, tem);
            return tem;

          case REG:
          case MULT:
            /* biv + invar or mult + invar.  Return sum.  */
            return gen_rtx_PLUS (mode, arg0, arg1);

          case PLUS:
            /* (a + invar_1) + invar_2.  Associate.  */
            return
              simplify_giv_expr (loop,
                                 gen_rtx_PLUS (mode,
                                               XEXP (arg0, 0),
                                               gen_rtx_PLUS (mode,
                                                             XEXP (arg0, 1),
                                                             arg1)),
                                 ext_val, benefit);
          }

      /* Each argument must be either REG, PLUS, or MULT.  Convert REG to
         MULT to reduce cases.  */
      if (GET_CODE (arg0) == REG)
        arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
      if (GET_CODE (arg1) == REG)
        arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);

      /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
         Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
         Recurse to associate the second PLUS.  */
      if (GET_CODE (arg1) == MULT)
        tem = arg0, arg0 = arg1, arg1 = tem;

      if (GET_CODE (arg1) == PLUS)
        return
          simplify_giv_expr (loop,
                             gen_rtx_PLUS (mode,
                                           gen_rtx_PLUS (mode, arg0,
                                                         XEXP (arg1, 0)),
                                           XEXP (arg1, 1)),
                             ext_val, benefit);

      /* Now must have MULT + MULT.  Distribute if same biv, else not giv.  */
      if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
        return NULL_RTX;

      if (!rtx_equal_p (arg0, arg1))
        return NULL_RTX;

      return simplify_giv_expr (loop,
                                gen_rtx_MULT (mode,
                                              XEXP (arg0, 0),
                                              gen_rtx_PLUS (mode,
                                                            XEXP (arg0, 1),
                                                            XEXP (arg1, 1))),
                                ext_val, benefit);

    case MINUS:
      /* Handle "a - b" as "a + b * (-1)".  */
      return simplify_giv_expr (loop,
                                gen_rtx_PLUS (mode,
                                              XEXP (x, 0),
                                              gen_rtx_MULT (mode,
                                                            XEXP (x, 1),
                                                            constm1_rtx)),
                                ext_val, benefit);
    case MULT:
      arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
      arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
      if (arg0 == 0 || arg1 == 0)
        return NULL_RTX;

      /* Put constant last, CONST_INT last if both constant.  */
      if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
          && GET_CODE (arg1) != CONST_INT)
        tem = arg0, arg0 = arg1, arg1 = tem;

      /* If second argument is not now constant, not giv.  */
      if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
        return NULL_RTX;

      /* Handle multiply by 0 or 1.  */
      if (arg1 == const0_rtx)
        return const0_rtx;
      else if (arg1 == const1_rtx)
        return arg0;

      switch (GET_CODE (arg0))
        {
        case REG:
          /* biv * invar.  Done.  */
          return gen_rtx_MULT (mode, arg0, arg1);

        case CONST_INT:
          /* Product of two constants.  */
          return GEN_INT (INTVAL (arg0) * INTVAL (arg1));

        case USE:
          /* invar * invar is a giv, but attempt to simplify it somehow.  */
          if (GET_CODE (arg1) != CONST_INT)
            return NULL_RTX;

          arg0 = XEXP (arg0, 0);
          if (GET_CODE (arg0) == MULT)
            {
              /* (invar_0 * invar_1) * invar_2.  Associate.  */
              return simplify_giv_expr (loop,
                                        gen_rtx_MULT (mode, XEXP (arg0, 0),
                                                      gen_rtx_MULT (mode,
                                                                    XEXP (arg0, 1),
                                                                    arg1)),
                                        ext_val, benefit);
            }
          /* Propagate the MULT expressions to the innermost nodes.  */
          else if (GET_CODE (arg0) == PLUS)
            {
              /* (invar_0 + invar_1) * invar_2.  Distribute.  */
              return simplify_giv_expr (loop,
                                        gen_rtx_PLUS (mode,
                                                      gen_rtx_MULT (mode,
                                                                    XEXP (arg0, 0),
                                                                    arg1),
                                                      gen_rtx_MULT (mode,
                                                                    XEXP (arg0, 1),
                                                                    arg1)),
                                        ext_val, benefit);
            }
          return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));

        case MULT:
          /* (a * invar_1) * invar_2.  Associate.  */
          return simplify_giv_expr (loop,
                                    gen_rtx_MULT (mode, XEXP (arg0, 0),
                                                  gen_rtx_MULT (mode,
                                                                XEXP (arg0, 1),
                                                                arg1)),
                                    ext_val, benefit);

        case PLUS:
          /* (a + invar_1) * invar_2.  Distribute.  */
          return simplify_giv_expr (loop,
                                    gen_rtx_PLUS (mode,
                                                  gen_rtx_MULT (mode,
                                                                XEXP (arg0, 0),
                                                                arg1),
                                                  gen_rtx_MULT (mode,
                                                                XEXP (arg0, 1),
                                                                arg1)),
                                    ext_val, benefit);
        }
    case ASHIFT:
      /* Shift by constant is multiply by power of two.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
        return 0;

      return
        simplify_giv_expr (loop,
                           gen_rtx_MULT (mode, XEXP (x, 0),
                                         GEN_INT ((HOST_WIDE_INT) 1
                                                  << INTVAL (XEXP (x, 1)))),
                           ext_val, benefit);

    case NEG:
      /* "-a" is "a * (-1)" */
      return simplify_giv_expr (loop,
                                gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
                                ext_val, benefit);

    case NOT:
      /* "~a" is "-a - 1".  Silly, but easy.  */
      return simplify_giv_expr (loop,
                                gen_rtx_MINUS (mode,
                                               gen_rtx_NEG (mode, XEXP (x, 0)),
                                               const1_rtx),
                                ext_val, benefit);

    case USE:
      /* Already in proper form for invariant.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case TRUNCATE:
      /* Conditionally recognize extensions of simple IVs.  After we've
         computed loop traversal counts and verified the range of the
         source IV, we'll reevaluate this as a GIV.  */
      if (*ext_val == NULL_RTX)
        {
          arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
          if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
            {
              *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
              return arg0;
            }
        }
      return NULL_RTX;
    case REG:
      /* If this is a new register, we can't deal with it.  */
      if (REGNO (x) >= max_reg_before_loop)
        return 0;

      /* Check for biv or giv.  */
      switch (REG_IV_TYPE (ivs, REGNO (x)))
        {
        case GENERAL_INDUCT:
          {
            struct induction *v = REG_IV_INFO (ivs, REGNO (x));

            /* Form expression from giv and add benefit.  Ensure this giv
               can derive another and subtract any needed adjustment if so.  */

            /* Increasing the benefit here is risky.  The only case in which it
               is arguably correct is if this is the only use of V.  In other
               cases, this will artificially inflate the benefit of the current
               giv, and lead to suboptimal code.  Thus, it is disabled, since
               potentially not reducing an only marginally beneficial giv is
               less harmful than reducing many givs that are not really
               beneficial.  */
            {
              rtx single_use = regs->array[REGNO (x)].single_usage;
              if (single_use && single_use != const0_rtx)
                *benefit += v->benefit;
            }

            tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
                                                    v->src_reg, v->mult_val),
                                v->add_val);

            if (v->derive_adjustment)
              tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
            arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
            if (!v->ext_dependent)
              return arg0;
            *ext_val = v->ext_dependent;
            return arg0;
          }

        default:
          /* If it isn't an induction variable, and it is invariant, we
             may be able to simplify things further by looking through
             the bits we just moved outside the loop.  */
          if (loop_invariant_p (loop, x) == 1)
            {
              struct movable *m;
              struct loop_movables *movables = LOOP_MOVABLES (loop);

              for (m = movables->head; m; m = m->next)
                if (rtx_equal_p (x, m->set_dest))
                  {
                    /* Ok, we found a match.  Substitute and simplify.  */

                    /* If we match another movable, we must use that, as
                       this one is going away.  */
                    if (m->match)
                      return simplify_giv_expr (loop, m->match->set_dest,
                                                ext_val, benefit);

                    /* If consec is nonzero, this is a member of a group of
                       instructions that were moved together.  We handle this
                       case only to the point of seeking to the last insn and
                       looking for a REG_EQUAL.  Fail if we don't find one.  */
                    if (m->consec != 0)
                      {
                        tem = NEXT_INSN (tem);
                        tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
                        if (tem)
                          tem = XEXP (tem, 0);
                      }
                    else
                      {
                        tem = single_set (m->insn);
                        if (tem)
                          tem = SET_SRC (tem);
                      }

                    /* What we are most interested in is pointer
                       arithmetic on invariants -- only take
                       patterns we may be able to do something with.  */
                    if (GET_CODE (tem) == PLUS
                        || GET_CODE (tem) == MULT
                        || GET_CODE (tem) == ASHIFT
                        || GET_CODE (tem) == CONST_INT
                        || GET_CODE (tem) == SYMBOL_REF)
                      tem = simplify_giv_expr (loop, tem, ext_val,
                                               benefit);
                    else if (GET_CODE (tem) == CONST
                             && GET_CODE (XEXP (tem, 0)) == PLUS
                             && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
                             && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
                      tem = simplify_giv_expr (loop, XEXP (tem, 0),
                                               ext_val, benefit);
                  }
            }
          break;
        }

      /* Fall through to general case.  */
    default:
      /* If invariant, return as USE (unless CONST_INT).
         Otherwise, not giv.  */
      if (GET_CODE (x) == USE)
        x = XEXP (x, 0);

      if (loop_invariant_p (loop, x) == 1)
        {
          if (GET_CODE (x) == CONST_INT)
            return x;
          if (GET_CODE (x) == CONST
              && GET_CODE (XEXP (x, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
            x = XEXP (x, 0);
          return gen_rtx_USE (mode, x);
        }
      else
        return 0;
    }
}
/* This routine folds invariants such that there is only ever one
   CONST_INT in the summation.  It is only used by simplify_giv_expr.  */
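/* For example, folding the constant 8 into (plus (reg X) (const_int 4))
   yields (plus (reg X) (const_int 12)) rather than a nested sum
   containing two CONST_INTs.  */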
static rtx
sge_plus_constant (x, c)
     rtx x, c;
{
  if (GET_CODE (x) == CONST_INT)
    return GEN_INT (INTVAL (x) + INTVAL (c));
  else if (GET_CODE (x) != PLUS)
    return gen_rtx_PLUS (GET_MODE (x), x, c);
  else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
    return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                         GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
  else if (GET_CODE (XEXP (x, 0)) == PLUS
           || GET_CODE (XEXP (x, 1)) != PLUS)
    return gen_rtx_PLUS (GET_MODE (x),
                         sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
  else
    return gen_rtx_PLUS (GET_MODE (x),
                         sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
}

static rtx
sge_plus (mode, x, y)
     enum machine_mode mode;
     rtx x, y;
{
  while (GET_CODE (y) == PLUS)
    {
      rtx a = XEXP (y, 0);
      if (GET_CODE (a) == CONST_INT)
        x = sge_plus_constant (x, a);
      else
        x = gen_rtx_PLUS (mode, x, a);
      y = XEXP (y, 1);
    }
  if (GET_CODE (y) == CONST_INT)
    x = sge_plus_constant (x, y);
  else
    x = gen_rtx_PLUS (mode, x, y);
  return x;
}
/* Help detect a giv that is calculated by several consecutive insns;
   for example,
        giv = biv * M
        giv = giv + A
   The caller has already identified the first insn P as having a giv as dest;
   we check that all other insns that set the same register follow
   immediately after P, that they alter nothing else,
   and that the result of the last is still a giv.

   The value is 0 if the reg set in P is not really a giv.
   Otherwise, the value is the amount gained by eliminating
   all the consecutive insns that compute the value.

   FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
   SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.

   The coefficients of the ultimate giv value are stored in
   *MULT_VAL and *ADD_VAL.  */

consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
                 add_val, mult_val, ext_val, last_consec_insn)
     const struct loop *loop;
     rtx *last_consec_insn;
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int count;
  enum rtx_code code;
  int benefit;
  rtx temp;
  rtx set;

  /* Indicate that this is a giv so that we can update the value produced in
     each insn of the multi-insn sequence.

     This induction structure will be used only by the call to
     general_induction_var below, so we can allocate it on our stack.
     If this is a giv, our caller will replace the induct var entry with
     a new induction structure.  */
  struct induction *v;

  if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
    return 0;

  v = (struct induction *) alloca (sizeof (struct induction));
  v->src_reg = src_reg;
  v->mult_val = *mult_val;
  v->add_val = *add_val;
  v->benefit = first_benefit;
  v->derive_adjustment = 0;
  v->ext_dependent = NULL_RTX;

  REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
  REG_IV_INFO (ivs, REGNO (dest_reg)) = v;

  count = regs->array[REGNO (dest_reg)].n_times_set - 1;

  while (count > 0)
    {
      p = NEXT_INSN (p);
      code = GET_CODE (p);

      /* If libcall, skip to end of call sequence.  */
      if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
        p = XEXP (temp, 0);

      if (code == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && SET_DEST (set) == dest_reg
          && (general_induction_var (loop, SET_SRC (set), &src_reg,
                                     add_val, mult_val, ext_val, 0,
                                     &benefit, VOIDmode)
              /* Giv created by equivalent expression.  */
              || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
                  && general_induction_var (loop, XEXP (temp, 0), &src_reg,
                                            add_val, mult_val, ext_val, 0,
                                            &benefit, VOIDmode)))
          && src_reg == v->src_reg)
        {
          if (find_reg_note (p, REG_RETVAL, NULL_RTX))
            benefit += libcall_benefit (p);

          count--;
          v->mult_val = *mult_val;
          v->add_val = *add_val;
          v->benefit += benefit;
        }
      else if (code != NOTE)
        {
          /* Allow insns that set something other than this giv to a
             constant.  Such insns are needed on machines which cannot
             include long constants and should not disqualify a giv.  */
          if (code == INSN
              && (set = single_set (p))
              && SET_DEST (set) != dest_reg
              && CONSTANT_P (SET_SRC (set)))
            continue;

          REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
          return 0;
        }
    }

  REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
  *last_consec_insn = p;
  return v->benefit;
}
/* Return an rtx, if any, that expresses giv G2 as a function of the register
   represented by G1.  If no such expression can be found, or it is clear that
   it cannot possibly be a valid address, 0 is returned.

   To perform the computation, we note that

        G1 = x * v + a          and
        G2 = y * v + b

   where `v' is the biv.

   So G2 = (y/x) * G1 + (b - a*y/x).

   Note that MULT = y/x.

   Update: A and B are now allowed to be additive expressions such that
   B contains all variables in A.  That is, computing B-A will not require
   subtracting variables.  */
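/* A small worked example of the identity above:  if G1 = 2*v + 4 and
   G2 = 6*v + 20, then MULT = 6/2 = 3 and G2 = 3*G1 + (20 - 4*3)
   = 3*G1 + 8.  The subtraction B - A*MULT is what express_from_1
   computes below.  */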
express_from_1 (a, b, mult)
     rtx a, b, mult;
{
  /* If MULT is zero, then A*MULT is zero, and our expression is B.  */

  if (mult == const0_rtx)
    return b;

  /* If MULT is not 1, we cannot handle A with non-constants, since we
     would then be required to subtract multiples of the registers in A.
     This is theoretically possible, and may even apply to some Fortran
     constructs, but it is a lot of work and we do not attempt it here.  */

  if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
    return NULL_RTX;

  /* In general these structures are sorted top to bottom (down the PLUS
     chain), but not left to right across the PLUS.  If B is a higher
     order giv than A, we can strip one level and recurse.  If A is higher
     order, we'll eventually bail out, but won't know that until the end.
     If they are the same, we'll strip one level around this loop.  */

  while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
    {
      rtx ra, rb, oa, ob, tmp;

      ra = XEXP (a, 0), oa = XEXP (a, 1);
      if (GET_CODE (ra) == PLUS)
        tmp = ra, ra = oa, oa = tmp;

      rb = XEXP (b, 0), ob = XEXP (b, 1);
      if (GET_CODE (rb) == PLUS)
        tmp = rb, rb = ob, ob = tmp;

      if (rtx_equal_p (ra, rb))
        /* We matched: remove one reg completely.  */
        a = oa, b = ob;
      else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
        /* An alternate match.  */
        a = oa, b = rb;
      else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
        /* An alternate match.  */
        a = ra, b = ob;
      else
        {
          /* Indicates an extra register in B.  Strip one level from B and
             recurse, hoping B was the higher order expression.  */
          ob = express_from_1 (a, ob, mult);
          if (ob == NULL_RTX)
            return NULL_RTX;
          return gen_rtx_PLUS (GET_MODE (b), rb, ob);
        }
    }

  /* Here we are at the last level of A, go through the cases hoping to
     get rid of everything but a constant.  */

  if (GET_CODE (a) == PLUS)
    {
      rtx ra, oa;

      ra = XEXP (a, 0), oa = XEXP (a, 1);
      if (rtx_equal_p (oa, b))
        oa = ra;
      else if (!rtx_equal_p (ra, b))
        return NULL_RTX;

      if (GET_CODE (oa) != CONST_INT)
        return NULL_RTX;

      return GEN_INT (-INTVAL (oa) * INTVAL (mult));
    }
  else if (GET_CODE (a) == CONST_INT)
    {
      return plus_constant (b, -INTVAL (a) * INTVAL (mult));
    }
  else if (CONSTANT_P (a))
    {
      enum machine_mode mode_a = GET_MODE (a);
      enum machine_mode mode_b = GET_MODE (b);
      enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
      return simplify_gen_binary (MINUS, mode, b, a);
    }
  else if (GET_CODE (b) == PLUS)
    {
      if (rtx_equal_p (a, XEXP (b, 0)))
        return XEXP (b, 1);
      else if (rtx_equal_p (a, XEXP (b, 1)))
        return XEXP (b, 0);
      else
        return NULL_RTX;
    }
  else if (rtx_equal_p (a, b))
    return const0_rtx;

  return NULL_RTX;
}

express_from (g1, g2)
     struct induction *g1, *g2;
{
  rtx mult, add;

  /* The value that G1 will be multiplied by must be a constant integer.  Also,
     the only chance we have of getting a valid address is if b*c/a (see above
     for notation) is also an integer.  */
  if (GET_CODE (g1->mult_val) == CONST_INT
      && GET_CODE (g2->mult_val) == CONST_INT)
    {
      if (g1->mult_val == const0_rtx
          || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
        return NULL_RTX;
      mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
    }
  else if (rtx_equal_p (g1->mult_val, g2->mult_val))
    mult = const1_rtx;
  else
    {
      /* ??? Find out if the one is a multiple of the other?  */
      return NULL_RTX;
    }

  add = express_from_1 (g1->add_val, g2->add_val, mult);
  if (add == NULL_RTX)
    {
      /* Failed.  If we've got a multiplication factor between G1 and G2,
         scale G1's addend and try again.  */
      if (INTVAL (mult) > 1)
        {
          rtx g1_add_val = g1->add_val;
          if (GET_CODE (g1_add_val) == MULT
              && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
            {
              HOST_WIDE_INT m;
              m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
              g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
                                         XEXP (g1_add_val, 0), GEN_INT (m));
            }
          else
            g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
                                       mult);

          add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
        }
    }
  if (add == NULL_RTX)
    return NULL_RTX;

  /* Form simplified final result.  */
  if (mult == const0_rtx)
    mult = 0;
  else if (mult == const1_rtx)
    mult = g1->dest_reg;
  else
    mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);

  if (add == const0_rtx)
    add = 0;

  if (GET_CODE (add) == PLUS
      && CONSTANT_P (XEXP (add, 1)))
    {
      rtx tem = XEXP (add, 1);
      mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
      add = tem;
    }

  return gen_rtx_PLUS (g2->mode, mult, add);
}
/* Return an rtx, if any, that expresses giv G2 as a function of the register
   represented by G1.  This indicates that G2 should be combined with G1 and
   that G2 can use (either directly or via an address expression) a register
   used to represent G1.  */

combine_givs_p (g1, g2)
     struct induction *g1, *g2;
{
  rtx comb, ret;

  /* With the introduction of ext dependent givs, we must care for modes.
     G2 must not use a wider mode than G1.  */
  if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
    return NULL_RTX;

  ret = comb = express_from (g1, g2);
  if (comb == NULL_RTX)
    return NULL_RTX;
  if (g1->mode != g2->mode)
    ret = gen_lowpart (g2->mode, comb);

  /* If these givs are identical, they can be combined.  We use the results
     of express_from because the addends are not in a canonical form, so
     rtx_equal_p is a weaker test.  */
  /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
     combination to be the other way round.  */
  if (comb == g1->dest_reg
      && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
    return ret;

  /* If G2 can be expressed as a function of G1 and that function is valid
     as an address and no more expensive than using a register for G2,
     the expression of G2 in terms of G1 can be used.  */
  if (ret != NULL_RTX
      && g2->giv_type == DEST_ADDR
      && memory_address_p (GET_MODE (g2->mem), ret))
    return ret;

  return NULL_RTX;
}
/* Check each extension dependent giv in this class to see if its
   root biv is safe from wrapping in the interior mode, which would
   make the giv illegal.  */
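/* For example, a QImode biv that starts at 0 and reaches 200 over the
   life of the loop can safely feed a zero-extended giv, since every
   value fits in the unsigned range of QImode; it cannot feed a
   sign-extended giv, because values above 127 wrap when read as signed
   QImode.  The range checks below verify exactly this for the biv's
   start and end values.  */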
check_ext_dependent_givs (bl, loop_info)
     struct iv_class *bl;
     struct loop_info *loop_info;
{
  int ze_ok = 0, se_ok = 0, info_ok = 0;
  enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
  HOST_WIDE_INT start_val;
  unsigned HOST_WIDE_INT u_end_val = 0;
  unsigned HOST_WIDE_INT u_start_val = 0;
  rtx incr;
  struct induction *v;

  /* Make sure the iteration data is available.  We must have
     constants in order to be certain of no overflow.  */
  /* ??? An unknown iteration count with an increment of +-1
     combined with friendly exit tests against an invariant
     value is also amenable to optimization.  Not implemented.  */
  if (loop_info->n_iterations > 0
      && bl->initial_value
      && GET_CODE (bl->initial_value) == CONST_INT
      && (incr = biv_total_increment (bl))
      && GET_CODE (incr) == CONST_INT
      /* Make sure the host can represent the arithmetic.  */
      && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
    {
      unsigned HOST_WIDE_INT abs_incr, total_incr;
      HOST_WIDE_INT s_end_val;
      int neg_incr;

      info_ok = 1;
      start_val = INTVAL (bl->initial_value);
      u_start_val = start_val;

      neg_incr = 0, abs_incr = INTVAL (incr);
      if (INTVAL (incr) < 0)
        neg_incr = 1, abs_incr = -abs_incr;
      total_incr = abs_incr * loop_info->n_iterations;

      /* Check for host arithmetic overflow.  */
      if (total_incr / loop_info->n_iterations == abs_incr)
        {
          unsigned HOST_WIDE_INT u_max;
          HOST_WIDE_INT s_max;

          u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
          s_end_val = u_end_val;
          u_max = GET_MODE_MASK (biv_mode);
          s_max = u_max >> 1;

          /* Check zero extension of biv ok.  */
          if (start_val >= 0
              /* Check for host arithmetic overflow.  */
              && (neg_incr
                  ? u_end_val < u_start_val
                  : u_end_val > u_start_val)
              /* Check for target arithmetic overflow.  */
              && (neg_incr
                  ? 1 /* taken care of with host overflow */
                  : u_end_val <= u_max))
            ze_ok = 1;

          /* Check sign extension of biv ok.  */
          /* ??? While it is true that overflow with signed and pointer
             arithmetic is undefined, I fear too many programmers don't
             keep this fact in mind -- myself included on occasion.
             So leave alone with the signed overflow optimizations.  */
          if (start_val >= -s_max - 1
              /* Check for host arithmetic overflow.  */
              && (neg_incr
                  ? s_end_val < start_val
                  : s_end_val > start_val)
              /* Check for target arithmetic overflow.  */
              && (neg_incr
                  ? s_end_val >= -s_max - 1
                  : s_end_val <= s_max))
            se_ok = 1;
        }
    }

  /* Invalidate givs that fail the tests.  */
  for (v = bl->giv; v; v = v->next_iv)
    if (v->ext_dependent)
      {
        enum rtx_code code = GET_CODE (v->ext_dependent);
        int ok = 0;

        if (code == SIGN_EXTEND)
          ok = se_ok;
        else if (code == ZERO_EXTEND)
          ok = ze_ok;
        else
          {
            /* We don't know whether this value is being used as either
               signed or unsigned, so to safely truncate we must satisfy
               both.  The initial check here verifies the BIV itself;
               once that is successful we may check its range wrt the
               derived giv.  */
            if (se_ok && ze_ok)
              {
                enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
                unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;

                /* We know from the above that both endpoints are nonnegative,
                   and that there is no wrapping.  Verify that both endpoints
                   are within the (signed) range of the outer mode.  */
                if (u_start_val <= max && u_end_val <= max)
                  ok = 1;
              }
          }

        if (ok)
          {
            if (loop_dump_stream)
              {
                fprintf (loop_dump_stream,
                         "Verified ext dependent giv at %d of reg %d\n",
                         INSN_UID (v->insn), bl->regno);
              }
          }
        else
          {
            if (loop_dump_stream)
              {
                const char *why;

                if (info_ok)
                  why = "biv iteration values overflowed";
                else
                  {
                    incr = biv_total_increment (bl);
                    if (incr == const1_rtx)
                      why = "biv iteration info incomplete; incr by 1";
                    else
                      why = "biv iteration info incomplete";
                  }

                fprintf (loop_dump_stream,
                         "Failed ext dependent giv at %d, %s\n",
                         INSN_UID (v->insn), why);
              }
            bl->all_reduced = 0;
          }
      }
}
/* Generate a version of VALUE in a mode appropriate for initializing V.  */

extend_value_for_giv (v, value)
     struct induction *v;
{
  rtx ext_dep = v->ext_dependent;

  /* Recall that check_ext_dependent_givs verified that the known bounds
     of a biv did not overflow or wrap with respect to the extension for
     the giv.  Therefore, constants need no additional adjustment.  */
  if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
    return value;

  /* Otherwise, we must adjust the value to compensate for the
     differing modes of the biv and the giv.  */
  return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
}
struct combine_givs_stats
{
  int giv_number;
  int total_benefit;
};

cmp_combine_givs_stats (xp, yp)
{
  const struct combine_givs_stats * const x =
    (const struct combine_givs_stats *) xp;
  const struct combine_givs_stats * const y =
    (const struct combine_givs_stats *) yp;
  int d;

  d = y->total_benefit - x->total_benefit;
  /* Stabilize the sort.  */
  if (d == 0)
    d = x->giv_number - y->giv_number;
  return d;
}
/* Check all pairs of givs for iv_class BL and see if any can be combined with
   any other.  If so, point SAME to the giv combined with and set NEW_REG to
   be an expression (in terms of the other giv's DEST_REG) equivalent to the
   giv.  Also, update BENEFIT and related fields for cost/benefit analysis.  */

combine_givs (regs, bl)
     struct loop_regs *regs;
     struct iv_class *bl;
{
  /* Additional benefit to add for being combined multiple times.  */
  const int extra_benefit = 3;

  struct induction *g1, *g2, **giv_array;
  int i, j, k, giv_count;
  struct combine_givs_stats *stats;
  rtx *can_combine;

  /* Count givs, because bl->giv_count is incorrect here.  */
  giv_count = 0;
  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    giv_count++;

  giv_array
    = (struct induction **) alloca (giv_count * sizeof (struct induction *));
  i = 0;
  for (g1 = bl->giv; g1; g1 = g1->next_iv)
    giv_array[i++] = g1;

  stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
  can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));

  for (i = 0; i < giv_count; i++)
    {
      int this_benefit;
      rtx single_use;

      g1 = giv_array[i];
      stats[i].giv_number = i;

      /* If a DEST_REG GIV is used only once, do not allow it to combine
         with anything, for in doing so we will gain nothing that cannot
         be had by simply letting the GIV with which we would have combined
         to be reduced on its own.  The lossage shows up in particular with
         DEST_ADDR targets on hosts with reg+reg addressing, though it can
         be seen elsewhere as well.  */
      if (g1->giv_type == DEST_REG
          && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
          && single_use != const0_rtx)
        continue;

      this_benefit = g1->benefit;
      /* Add an additional weight for zero addends.  */
      if (g1->no_const_addval)
        this_benefit += 1;

      for (j = 0; j < giv_count; j++)
        {
          rtx this_combine;

          g2 = giv_array[j];
          if (g1 != g2
              && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
            {
              can_combine[i * giv_count + j] = this_combine;
              this_benefit += g2->benefit + extra_benefit;
            }
        }
      stats[i].total_benefit = this_benefit;
    }

  /* Iterate, combining until we can't.  */
  qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "Sorted combine statistics:\n");
      for (k = 0; k < giv_count; k++)
        {
          g1 = giv_array[stats[k].giv_number];
          if (!g1->combined_with && !g1->same)
            fprintf (loop_dump_stream, " {%d, %d}",
                     INSN_UID (giv_array[stats[k].giv_number]->insn),
                     stats[k].total_benefit);
        }
      putc ('\n', loop_dump_stream);
    }

  for (k = 0; k < giv_count; k++)
    {
      int g1_add_benefit = 0;
      int l;

      i = stats[k].giv_number;
      g1 = giv_array[i];

      /* If it has already been combined, skip.  */
      if (g1->combined_with || g1->same)
        continue;

      for (j = 0; j < giv_count; j++)
        {
          g2 = giv_array[j];
          if (g1 != g2 && can_combine[i * giv_count + j]
              /* If it has already been combined, skip.  */
              && ! g2->same && ! g2->combined_with)
            {
              g2->new_reg = can_combine[i * giv_count + j];

              /* For destination, we now may replace by mem expression instead
                 of register.  This changes the costs considerably, so add the
                 compensation.  */
              if (g2->giv_type == DEST_ADDR)
                g2->benefit = (g2->benefit + reg_address_cost
                               - address_cost (g2->new_reg,
                                               GET_MODE (g2->mem)));
              g1->combined_with++;
              g1->lifetime += g2->lifetime;

              g1_add_benefit += g2->benefit;

              /* ??? The new final_[bg]iv_value code does a much better job
                 of finding replaceable giv's, and hence this code may no
                 longer be necessary.  */
              if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
                g1_add_benefit -= copy_cost;

              /* To help optimize the next set of combinations, remove
                 this giv from the benefits of other potential mates.  */
              for (l = 0; l < giv_count; ++l)
                {
                  int m = stats[l].giv_number;
                  if (can_combine[m * giv_count + j])
                    stats[l].total_benefit -= g2->benefit + extra_benefit;
                }

              if (loop_dump_stream)
                fprintf (loop_dump_stream,
                         "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
                         INSN_UID (g2->insn), INSN_UID (g1->insn),
                         g1->benefit, g1_add_benefit, g1->lifetime);
            }
        }

      /* To help optimize the next set of combinations, remove
         this giv from the benefits of other potential mates.  */
      if (g1->combined_with)
        {
          for (j = 0; j < giv_count; ++j)
            {
              int m = stats[j].giv_number;
              if (can_combine[m * giv_count + i])
                stats[j].total_benefit -= g1->benefit + extra_benefit;
            }

          g1->benefit += g1_add_benefit;

          /* We've finished with this giv, and everything it touched.
             Restart the combination so that proper weights for the
             rest of the givs are properly taken into account.  */
          /* ??? Ideally we would compact the arrays at this point, so
             as to not cover old ground.  But sanely compacting
             can_combine is tricky.  */
        }
    }
}
/* Generate sequence for REG = B * M + A.  */

gen_add_mult (b, m, a, reg)
     rtx b;          /* initial value of basic induction variable */
     rtx m;          /* multiplicative constant */
     rtx a;          /* additive constant */
     rtx reg;        /* destination register */
{
  rtx seq, result;

  start_sequence ();
  /* Use unsigned arithmetic.  */
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
  if (reg != result)
    emit_move_insn (reg, result);
  seq = get_insns ();
  end_sequence ();

  return seq;
}
/* Update registers created in insn sequence SEQ.  */

loop_regs_update (loop, seq)
     const struct loop *loop ATTRIBUTE_UNUSED;
{
  rtx insn;

  /* Update register info for alias analysis.  */

  if (seq == NULL_RTX)
    return;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
        {
          rtx set = single_set (insn);

          if (set && GET_CODE (SET_DEST (set)) == REG)
            record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);

          insn = NEXT_INSN (insn);
        }
    }
  else if (GET_CODE (seq) == SET
           && GET_CODE (SET_DEST (seq)) == REG)
    record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
}
/* Emit insns before BEFORE_BB/BEFORE_INSN to set REG = B * M + A.  */

loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
     const struct loop *loop;
     rtx b;          /* initial value of basic induction variable */
     rtx m;          /* multiplicative constant */
     rtx a;          /* additive constant */
     rtx reg;        /* destination register */
     basic_block before_bb;
{
  rtx seq;

  if (! before_insn)
    {
      loop_iv_add_mult_hoist (loop, b, m, a, reg);
      return;
    }

  /* Use copy_rtx to prevent unexpected sharing of these rtx.  */
  seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);

  /* Increase the lifetime of any invariants moved further in code.  */
  update_reg_last_use (a, before_insn);
  update_reg_last_use (b, before_insn);
  update_reg_last_use (m, before_insn);

  loop_insn_emit_before (loop, before_bb, before_insn, seq);

  /* It is possible that the expansion created lots of new registers.
     Iterate over the sequence we just created and record them all.  */
  loop_regs_update (loop, seq);
}
/* Emit insns in loop pre-header to set REG = B * M + A.  */

loop_iv_add_mult_sink (loop, b, m, a, reg)
     const struct loop *loop;
     rtx b;          /* initial value of basic induction variable */
     rtx m;          /* multiplicative constant */
     rtx a;          /* additive constant */
     rtx reg;        /* destination register */
{
  rtx seq;

  /* Use copy_rtx to prevent unexpected sharing of these rtx.  */
  seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);

  /* Increase the lifetime of any invariants moved further in code.
     ???? Is this really necessary?  */
  update_reg_last_use (a, loop->sink);
  update_reg_last_use (b, loop->sink);
  update_reg_last_use (m, loop->sink);

  loop_insn_sink (loop, seq);

  /* It is possible that the expansion created lots of new registers.
     Iterate over the sequence we just created and record them all.  */
  loop_regs_update (loop, seq);
}
/* Emit insns after loop to set REG = B * M + A.  */

loop_iv_add_mult_hoist (loop, b, m, a, reg)
     const struct loop *loop;
     rtx b;          /* initial value of basic induction variable */
     rtx m;          /* multiplicative constant */
     rtx a;          /* additive constant */
     rtx reg;        /* destination register */
{
  rtx seq;

  /* Use copy_rtx to prevent unexpected sharing of these rtx.  */
  seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);

  loop_insn_hoist (loop, seq);

  /* It is possible that the expansion created lots of new registers.
     Iterate over the sequence we just created and record them all.  */
  loop_regs_update (loop, seq);
}
/* Similar to gen_add_mult, but compute cost rather than generating
   sequence.  */

iv_add_mult_cost (b, m, a, reg)
     rtx b;          /* initial value of basic induction variable */
     rtx m;          /* multiplicative constant */
     rtx a;          /* additive constant */
     rtx reg;        /* destination register */
{
  int cost = 0;
  rtx last, result;

  start_sequence ();
  result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
  if (reg != result)
    emit_move_insn (reg, result);
  last = get_last_insn ();
  while (last)
    {
      rtx t = single_set (last);
      if (t)
        cost += rtx_cost (SET_SRC (t), SET);
      last = PREV_INSN (last);
    }
  end_sequence ();
  return cost;
}
/* Test whether A * B can be computed without
   an actual multiply insn.  Value is 1 if so.

  ??? This function stinks because it generates a ton of wasted RTL
  ??? and as a result fragments GC memory to no end.  There are other
  ??? places in the compiler which are invoked a lot and do the same
  ??? thing, generate wasted RTL just to see if something is possible.  */
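/* For example, a multiplication by the constant 8 is normally
   synthesized as a single shift insn, so it is considered cheap here,
   whereas a constant whose expansion needs a real multiply insn, a
   library call, or a long sequence of insns is not.  */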
product_cheap_p (a, b)
     rtx a;
     rtx b;
{
  rtx tmp;
  int win;

  /* If only one is constant, make it B.  */
  if (GET_CODE (a) == CONST_INT)
    tmp = a, a = b, b = tmp;

  /* If first constant, both constant, so don't need multiply.  */
  if (GET_CODE (a) == CONST_INT)
    return 1;

  /* If second not constant, neither is constant, so would need multiply.  */
  if (GET_CODE (b) != CONST_INT)
    return 0;

  /* One operand is constant, so might not need multiply insn.  Generate the
     code for the multiply and see if a call or multiply, or long sequence
     of insns is generated.  */

  start_sequence ();
  expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
  tmp = get_insns ();
  end_sequence ();

  win = 1;
  if (INSN_P (tmp))
    {
      while (tmp != NULL_RTX)
        {
          rtx next = NEXT_INSN (tmp);

          if (GET_CODE (tmp) != INSN
              || (GET_CODE (PATTERN (tmp)) == SET
                  && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
              || (GET_CODE (PATTERN (tmp)) == PARALLEL
                  && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
                  && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
            {
              win = 0;
              break;
            }

          tmp = next;
        }
    }
  else if (GET_CODE (tmp) == SET
           && GET_CODE (SET_SRC (tmp)) == MULT)
    win = 0;
  else if (GET_CODE (tmp) == PARALLEL
           && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
           && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
    win = 0;

  return win;
}
/* Check to see if loop can be terminated by a "decrement and branch until
   zero" instruction.  If so, add a REG_NONNEG note to the branch insn.
   Also try reversing an increment loop to a decrement loop
   to see if the optimization can be performed.
   Value is nonzero if optimization was performed.  */

/* This is useful even if the architecture doesn't have such an insn,
   because it might change a loop which increments from 0 to n to a loop
   which decrements from n to 0.  A loop that decrements to zero is usually
   faster than one that increments from zero.  */

/* ??? This could be rewritten to use some of the loop unrolling procedures,
   such as approx_final_value, biv_total_increment, loop_iterations, and
   final_[bg]iv_value.  */
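/* For example, a counting loop of the form

       for (i = 0; i < n; i++)
         *dst++ = *src++;

   whose counter is used only to count iterations can be rewritten so
   that the counter runs from n down to 0, letting the exit test become
   a compare against zero (or a single decrement-and-branch insn on
   targets that have one).  The variable names are only illustrative.  */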
check_dbra_loop (loop, insn_count)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  struct loop_ivs *ivs = LOOP_IVS (loop);
  struct iv_class *bl;
  rtx comparison, first_compare, jump, jump1, p;
  rtx before_comparison;
  int compare_and_branch;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;

  /* If last insn is a conditional branch, and the insn before tests a
     register value, try to optimize it.  Otherwise, we can't do anything.  */

  jump = PREV_INSN (loop_end);
  comparison = get_condition_for_loop (loop, jump);
  if (comparison == 0)
    return 0;
  if (!onlyjump_p (jump))
    return 0;

  /* Try to compute whether the compare/branch at the loop end is one or
     two instructions.  */
  get_condition (jump, &first_compare);
  if (first_compare == jump)
    compare_and_branch = 1;
  else if (first_compare == prev_nonnote_insn (jump))
    compare_and_branch = 2;
  else
    return 0;

  /* If more than one condition is present to control the loop, then
     do not proceed, as this function does not know how to rewrite
     loop tests with more than one condition.

     Look backwards from the first insn in the last comparison
     sequence and see if we've got another comparison sequence.  */

  if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
    if (GET_CODE (jump1) == JUMP_INSN)
      return 0;

  /* Check all of the bivs to see if the compare uses one of them.
     Skip biv's set more than once because we can't guarantee that
     it will be zero on the last iteration.  Also skip if the biv is
     used between its update and the test insn.  */
  for (bl = ivs->list; bl; bl = bl->next)
    if (bl->biv_count == 1
        && ! bl->biv->maybe_multiple
        && bl->biv->dest_reg == XEXP (comparison, 0)
        && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
                                 first_compare))
      break;

  /* Look for the case where the basic induction variable is always
     nonnegative, and equals zero on the last iteration.
     In this case, add a reg_note REG_NONNEG, which allows the
     m68k DBRA instruction to be used.  */

  if (((GET_CODE (comparison) == GT
        && GET_CODE (XEXP (comparison, 1)) == CONST_INT
        && INTVAL (XEXP (comparison, 1)) == -1)
       || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
      && GET_CODE (bl->biv->add_val) == CONST_INT
      && INTVAL (bl->biv->add_val) < 0)
    {
      /* Initial value must be greater than 0,
         init_val % -dec_value == 0 to ensure that it equals zero on
         the last iteration.  */

      if (GET_CODE (bl->initial_value) == CONST_INT
          && INTVAL (bl->initial_value) > 0
          && (INTVAL (bl->initial_value)
              % (-INTVAL (bl->biv->add_val))) == 0)
        {
          /* Register always nonnegative, add REG_NOTE to branch.  */
          if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
            REG_NOTES (jump)
              = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
                                   REG_NOTES (jump));
        }

      /* If the decrement is 1 and the value was tested as >= 0 before
         the loop, then we can safely optimize.  */
      for (p = loop_start; p; p = PREV_INSN (p))
        {
          if (GET_CODE (p) == CODE_LABEL)
            break;
          if (GET_CODE (p) != JUMP_INSN)
            continue;

          before_comparison = get_condition_for_loop (loop, p);
          if (before_comparison
              && XEXP (before_comparison, 0) == bl->biv->dest_reg
              && GET_CODE (before_comparison) == LT
              && XEXP (before_comparison, 1) == const0_rtx
              && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
              && INTVAL (bl->biv->add_val) == -1)
            {
              if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
                REG_NOTES (jump)
                  = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
                                       REG_NOTES (jump));
            }
        }
    }
  else if (GET_CODE (bl->biv->add_val) == CONST_INT
           && INTVAL (bl->biv->add_val) > 0)
    {
8188 /* Try to change inc to dec, so can apply above optimization. */
8190 all registers modified are induction variables or invariant,
8191 all memory references have non-overlapping addresses
8192 (obviously true if only one write)
8193 allow 2 insns for the compare/jump at the end of the loop. */
8194 /* Also, we must avoid any instructions which use both the reversed
8195 biv and another biv. Such instructions will fail if the loop is
8196 reversed. We meet this condition by requiring that either
8197 no_use_except_counting is true, or else that there is only
8199 int num_nonfixed_reads
= 0;
8200 /* 1 if the iteration var is used only to count iterations. */
8201 int no_use_except_counting
= 0;
8202 /* 1 if the loop has no memory store, or it has a single memory store
8203 which is reversible. */
8204 int reversible_mem_store
= 1;
8206 if (bl
->giv_count
== 0
8207 && !loop
->exit_count
8208 && !loop_info
->has_multiple_exit_targets
)
8210 rtx bivreg
= regno_reg_rtx
[bl
->regno
];
8211 struct iv_class
*blt
;
8213 /* If there are no givs for this biv, and the only exit is the
8214 fall through at the end of the loop, then
8215 see if perhaps there are no uses except to count. */
8216 no_use_except_counting
= 1;
8217 for (p
= loop_start
; p
!= loop_end
; p
= NEXT_INSN (p
))
8220 rtx set
= single_set (p
);
8222 if (set
&& GET_CODE (SET_DEST (set
)) == REG
8223 && REGNO (SET_DEST (set
)) == bl
->regno
)
8224 /* An insn that sets the biv is okay. */
8226 else if ((p
== prev_nonnote_insn (prev_nonnote_insn (loop_end
))
8227 || p
== prev_nonnote_insn (loop_end
))
8228 && reg_mentioned_p (bivreg
, PATTERN (p
)))
8230 /* If either of these insns uses the biv and sets a pseudo
8231 that has more than one usage, then the biv has uses
8232 other than counting since it's used to derive a value
8233 that is used more than one time. */
8234 note_stores (PATTERN (p
), note_set_pseudo_multiple_uses
,
8236 if (regs
->multiple_uses
)
8238 no_use_except_counting
= 0;
8242 else if (reg_mentioned_p (bivreg
, PATTERN (p
)))
8244 no_use_except_counting
= 0;
8249 /* A biv has uses besides counting if it is used to set
8251 for (blt
= ivs
->list
; blt
; blt
= blt
->next
)
8253 && reg_mentioned_p (bivreg
, SET_SRC (blt
->init_set
)))
8255 no_use_except_counting
= 0;
	  if (no_use_except_counting)
	    /* No need to worry about MEMs.  */
	    ;
	  else if (loop_info->num_mem_sets <= 1)
	    {
	      for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
		if (INSN_P (p))
		  num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));

	      /* If the loop has a single store, and the destination address is
		 invariant, then we can't reverse the loop, because this address
		 might then have the wrong value at loop exit.
		 This would work if the source was invariant also, however, in that
		 case, the insn should have been moved out of the loop.  */

	      if (loop_info->num_mem_sets == 1)
		{
		  struct induction *v;

		  /* If we could prove that each of the memory locations
		     written to was different, then we could reverse the
		     store -- but we don't presently have any way of
		     knowing that.  */
		  reversible_mem_store = 0;

		  /* If the store depends on a register that is set after the
		     store, it depends on the initial value, and is thus not
		     reversible.  */
		  for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
		    if (v->giv_type == DEST_REG
			&& reg_mentioned_p (v->dest_reg,
					    PATTERN (loop_info->first_loop_store_insn))
			&& loop_insn_first_p (loop_info->first_loop_store_insn,
					      v->insn))
		      reversible_mem_store = 0;
		}
	    }
	  else
	    return 0;
	  /* This code only acts for innermost loops.  Also it simplifies
	     the memory address check by only reversing loops with
	     zero or one memory access.
	     Two memory accesses could involve parts of the same array,
	     and that can't be reversed.
	     If the biv is used only for counting, then we don't need to worry
	     about all these things.  */

	  if ((num_nonfixed_reads <= 1
	       && ! loop_info->has_nonconst_call
	       && ! loop_info->has_prefetch
	       && ! loop_info->has_volatile
	       && reversible_mem_store
	       && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
		   + num_unmoved_movables (loop) + compare_and_branch == insn_count)
	       && (bl == ivs->list && bl->next == 0))
	      || (no_use_except_counting && ! loop_info->has_prefetch))
	    {
	      /* Loop can be reversed.  */
	      if (loop_dump_stream)
		fprintf (loop_dump_stream, "Can reverse loop\n");
	      /* Now check other conditions:

		 The increment must be a constant, as must the initial value,
		 and the comparison code must be LT.

		 This test can probably be improved since +/- 1 in the constant
		 can be obtained by changing LT to LE and vice versa; this is
		 confusing.  */

	      if (comparison
		  /* for constants, LE gets turned into LT */
		  && (GET_CODE (comparison) == LT
		      || (GET_CODE (comparison) == LE
			  && no_use_except_counting)))
		{
		  HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
		  rtx initial_value, comparison_value;
		  int nonneg = 0;
		  enum rtx_code cmp_code;
		  int comparison_const_width;
		  unsigned HOST_WIDE_INT comparison_sign_mask;

		  add_val = INTVAL (bl->biv->add_val);
		  comparison_value = XEXP (comparison, 1);
		  if (GET_MODE (comparison_value) == VOIDmode)
		    comparison_const_width
		      = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
		  else
		    comparison_const_width
		      = GET_MODE_BITSIZE (GET_MODE (comparison_value));
		  if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
		    comparison_const_width = HOST_BITS_PER_WIDE_INT;
		  comparison_sign_mask
		    = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
		  /* If the comparison value is not a loop invariant, then we
		     can not reverse this loop.

		     ??? If the insns which initialize the comparison value as
		     a whole compute an invariant result, then we could move
		     them out of the loop and proceed with loop reversal.  */
		  if (! loop_invariant_p (loop, comparison_value))
		    return 0;

		  if (GET_CODE (comparison_value) == CONST_INT)
		    comparison_val = INTVAL (comparison_value);
		  initial_value = bl->initial_value;

		  /* Normalize the initial value if it is an integer and
		     has no other use except as a counter.  This will allow
		     a few more loops to be reversed.  */
		  if (no_use_except_counting
		      && GET_CODE (comparison_value) == CONST_INT
		      && GET_CODE (initial_value) == CONST_INT)
		    {
		      comparison_val = comparison_val - INTVAL (bl->initial_value);
		      /* The code below requires comparison_val to be a multiple
			 of add_val in order to do the loop reversal, so
			 round up comparison_val to a multiple of add_val.
			 Since comparison_value is constant, we know that the
			 current comparison code is LT.  */
		      comparison_val = comparison_val + add_val - 1;
		      comparison_val
			-= (unsigned HOST_WIDE_INT) comparison_val % add_val;
		      /* We postpone overflow checks for COMPARISON_VAL here;
			 even if there is an overflow, we might still be able to
			 reverse the loop, if converting the loop exit test to
			 NE is possible.  */
		      initial_value = const0_rtx;
		    }
		  /* First check if we can do a vanilla loop reversal.  */
		  if (initial_value == const0_rtx
		      /* If we have a decrement_and_branch_on_count,
			 prefer the NE test, since this will allow that
			 instruction to be generated.  Note that we must
			 use a vanilla loop reversal if the biv is used to
			 calculate a giv or has a non-counting use.  */
#if ! defined (HAVE_decrement_and_branch_until_zero) \
&& defined (HAVE_decrement_and_branch_on_count)
		      && (! (add_val == 1 && loop->vtop
			     && (bl->biv_count == 0
				 || no_use_except_counting)))
#endif
		      && GET_CODE (comparison_value) == CONST_INT
			 /* Now do postponed overflow checks on COMPARISON_VAL.  */
		      && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
			    & comparison_sign_mask))
		    {
		      /* Register will always be nonnegative, with value
			 0 on last iteration */
		      add_adjust = add_val;
		      nonneg = 1;
		      cmp_code = GE;
		    }
		  else if (add_val == 1 && loop->vtop
			   && (bl->biv_count == 0
			       || no_use_except_counting))
		    {
		      add_adjust = 0;
		      cmp_code = NE;
		    }
		  else
		    return 0;

		  if (GET_CODE (comparison) == LE)
		    add_adjust -= add_val;

		  /* If the initial value is not zero, or if the comparison
		     value is not an exact multiple of the increment, then we
		     can not reverse this loop.  */
		  if (initial_value == const0_rtx
		      && GET_CODE (comparison_value) == CONST_INT)
		    {
		      if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
			return 0;
		    }
		  else if (! no_use_except_counting || add_val != 1)
		    return 0;
		  final_value = comparison_value;

		  /* Reset these in case we normalized the initial value
		     and comparison value above.  */
		  if (GET_CODE (comparison_value) == CONST_INT
		      && GET_CODE (initial_value) == CONST_INT)
		    {
		      comparison_value = GEN_INT (comparison_val);
		      final_value
			= GEN_INT (comparison_val + INTVAL (bl->initial_value));
		    }
		  bl->initial_value = initial_value;

		  /* Save some info needed to produce the new insns.  */
		  reg = bl->biv->dest_reg;
		  jump_label = condjump_label (PREV_INSN (loop_end));
		  new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
		  /* Set start_value; if this is not a CONST_INT, we need
		     to generate a SUB.
		     Initialize biv to start_value before loop start.
		     The old initializing insn will be deleted as a
		     dead store by flow.c.  */
		  if (initial_value == const0_rtx
		      && GET_CODE (comparison_value) == CONST_INT)
		    {
		      start_value = GEN_INT (comparison_val - add_adjust);
		      loop_insn_hoist (loop, gen_move_insn (reg, start_value));
		    }
		  else if (GET_CODE (initial_value) == CONST_INT)
		    {
		      enum machine_mode mode = GET_MODE (reg);
		      rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
		      rtx add_insn = gen_add3_insn (reg, comparison_value, offset);

		      if (add_insn == 0)
			return 0;

		      start_value
			= gen_rtx_PLUS (mode, comparison_value, offset);
		      loop_insn_hoist (loop, add_insn);
		      if (GET_CODE (comparison) == LE)
			final_value = gen_rtx_PLUS (mode, comparison_value,
						    GEN_INT (add_val));
		    }
		  else if (! add_adjust)
		    {
		      enum machine_mode mode = GET_MODE (reg);
		      rtx sub_insn = gen_sub3_insn (reg, comparison_value,
						    initial_value);

		      if (sub_insn == 0)
			return 0;

		      start_value
			= gen_rtx_MINUS (mode, comparison_value, initial_value);
		      loop_insn_hoist (loop, sub_insn);
		    }
		  else
		    /* We could handle the other cases too, but it'll be
		       better to have a testcase first.  */
		    return 0;
		  /* We may not have a single insn which can increment a reg, so
		     create a sequence to hold all the insns from expand_inc.  */
		  start_sequence ();
		  expand_inc (reg, new_add_val);
		  tem = get_insns ();
		  end_sequence ();

		  p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
		  delete_insn (bl->biv->insn);

		  /* Update biv info to reflect its new status.  */
		  bl->biv->insn = p;
		  bl->initial_value = start_value;
		  bl->biv->add_val = new_add_val;

		  /* Update loop info.  */
		  loop_info->initial_value = reg;
		  loop_info->initial_equiv_value = reg;
		  loop_info->final_value = const0_rtx;
		  loop_info->final_equiv_value = const0_rtx;
		  loop_info->comparison_value = const0_rtx;
		  loop_info->comparison_code = cmp_code;
		  loop_info->increment = new_add_val;
		  /* Inc LABEL_NUSES so that delete_insn will
		     not delete the label.  */
		  LABEL_NUSES (XEXP (jump_label, 0))++;

		  /* Emit an insn after the end of the loop to set the biv's
		     proper exit value if it is used anywhere outside the loop.  */
		  if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
		      || ! bl->init_insn
		      || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
		    loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));

		  /* Delete compare/branch at end of loop.  */
		  delete_related_insns (PREV_INSN (loop_end));
		  if (compare_and_branch == 2)
		    delete_related_insns (first_compare);

		  /* Add new compare/branch insn at end of loop.  */
		  start_sequence ();
		  emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
					   GET_MODE (reg), 0,
					   XEXP (jump_label, 0));
		  tem = get_insns ();
		  end_sequence ();
		  emit_jump_insn_before (tem, loop_end);

		  for (tem = PREV_INSN (loop_end);
		       tem && GET_CODE (tem) != JUMP_INSN;
		       tem = PREV_INSN (tem))
		    ;

		  if (tem)
		    JUMP_LABEL (tem) = XEXP (jump_label, 0);

		  if (nonneg)
		    {
		      if (tem)
			{
			  /* Increment of LABEL_NUSES done above.  */
			  /* Register is now always nonnegative,
			     so add REG_NONNEG note to the branch.  */
			  REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
							       REG_NOTES (tem));
			}
		      bl->nonneg = 1;
		    }
		  /* No insn may reference both the reversed and another biv or it
		     will fail (see comment near the top of the loop reversal
		     code).
		     Earlier on, we have verified that the biv has no use except
		     counting, or it is the only biv in this function.
		     However, the code that computes no_use_except_counting does
		     not verify reg notes.  It's possible to have an insn that
		     references another biv, and has a REG_EQUAL note with an
		     expression based on the reversed biv.  To avoid this case,
		     remove all REG_EQUAL notes based on the reversed biv
		     here.  */
		  for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
		    if (INSN_P (p))
		      {
			rtx *pnote;
			rtx set = single_set (p);
			/* If this is a set of a GIV based on the reversed biv, any
			   REG_EQUAL notes should still be correct.  */
			if (! set
			    || GET_CODE (SET_DEST (set)) != REG
			    || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
			    || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
			    || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
			  for (pnote = &REG_NOTES (p); *pnote;)
			    {
			      if (REG_NOTE_KIND (*pnote) == REG_EQUAL
				  && reg_mentioned_p (regno_reg_rtx[bl->regno],
						      XEXP (*pnote, 0)))
				*pnote = XEXP (*pnote, 1);
			      else
				pnote = &XEXP (*pnote, 1);
			    }
		      }

		  /* Mark that this biv has been reversed.  Each giv which depends
		     on this biv, and which is also live past the end of the loop
		     will have to be fixed up.  */

		  bl->reversed = 1;

		  if (loop_dump_stream)
		    {
		      fprintf (loop_dump_stream, "Reversed loop");
		      if (bl->nonneg)
			fprintf (loop_dump_stream, " and added reg_nonneg\n");
		      else
			fprintf (loop_dump_stream, "\n");
		    }

		  return 1;
		}
	    }
	}

  return 0;
}
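/* Illustrative sketch (not part of the pass, kept out of the build): the
   source-level effect of the loop reversal performed above, assuming the
   biv is used only to count iterations and N >= 0.  Counting down lets the
   exit test compare against zero, which many targets implement as a cheap
   decrement-and-branch.  */
#if 0
static int
loop_reversal_sketch (int n)
{
  int body_executions = 0;
  int i;

  /* Before: biv counts up from 0, exit test is I < N.  */
  for (i = 0; i < n; i++)
    body_executions++;

  /* After: biv counts down from N, exit test is I != 0; the loop body
     runs exactly the same number of times.  */
  for (i = n; i != 0; i--)
    body_executions++;

  return body_executions;	/* 2 * N, since each form runs N times.  */
}
#endif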
/* Verify whether the biv BL appears to be eliminable,
   based on the insns in the loop that refer to it.

   If ELIMINATE_P is nonzero, actually do the elimination.

   THRESHOLD and INSN_COUNT are from loop_optimize and are used to
   determine whether invariant insns should be placed inside or at the
   start of the loop.  */

static int
maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
     const struct loop *loop;
     struct iv_class *bl;
     int eliminate_p;
     int threshold, insn_count;
{
  struct loop_ivs *ivs = LOOP_IVS (loop);
  rtx reg = bl->biv->dest_reg;
  rtx p;

  /* Scan all insns in the loop, stopping if we find one that uses the
     biv in a way that we cannot eliminate.  */

  for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
    {
      enum rtx_code code = GET_CODE (p);
      basic_block where_bb = 0;
      rtx where_insn = threshold >= insn_count ? 0 : p;
      rtx note;

      /* If this is a libcall that sets a giv, skip ahead to its end.  */
      if (GET_RTX_CLASS (code) == 'i')
	{
	  note = find_reg_note (p, REG_LIBCALL, NULL_RTX);

	  if (note)
	    {
	      rtx last = XEXP (note, 0);
	      rtx set = single_set (last);

	      if (set && GET_CODE (SET_DEST (set)) == REG)
		{
		  unsigned int regno = REGNO (SET_DEST (set));

		  if (regno < ivs->n_regs
		      && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
		      && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
		    p = last;
		}
	    }
	}

      /* Closely examine the insn if the biv is mentioned.  */
      if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
	  && reg_mentioned_p (reg, PATTERN (p))
	  && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
				      eliminate_p, where_bb, where_insn))
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream,
		     "Cannot eliminate biv %d: biv used in insn %d.\n",
		     bl->regno, INSN_UID (p));
	  break;
	}

      /* If we are eliminating, kill REG_EQUAL notes mentioning the biv.  */
      if (eliminate_p
	  && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
	  && reg_mentioned_p (reg, XEXP (note, 0)))
	remove_note (p, note);
    }

  if (p == loop->end)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
		 bl->regno, eliminate_p ? "was" : "can be");
      return 1;
    }

  return 0;
}
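/* Illustrative sketch (not part of the pass, kept out of the build): the
   source-level effect of eliminating a basic induction variable whose only
   remaining use is the exit test, by expressing that test with a
   strength-reduced giv.  Here `i' plays the role of the biv and `p' the
   derived giv; assumes N >= 0 and that A has at least N elements.  */
#if 0
static int
biv_elimination_sketch (const int *a, int n)
{
  int sum = 0;

  /* Before: both `i' and `p' are maintained; `i' only counts.  */
  {
    const int *p = a;
    int i;
    for (i = 0; i < n; i++, p++)
      sum += *p;
  }

  /* After: the exit test uses the giv, so `i' can be deleted.  */
  {
    const int *p = a;
    const int *end = a + n;
    for (; p < end; p++)
      sum += *p;
  }

  return sum;	/* Twice the sum of A[0..N-1]; the forms are equivalent.  */
}
#endif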
/* INSN and REFERENCE are instructions in the same insn chain.
   Return nonzero if INSN is first.  */

int
loop_insn_first_p (insn, reference)
     rtx insn, reference;
{
  rtx p, q;

  for (p = insn, q = reference;;)
    {
      /* Start with test for not first so that INSN == REFERENCE yields not
	 first.  */
      if (q == insn || ! p)
	return 0;
      if (p == reference || ! q)
	return 1;

      /* Either of P or Q might be a NOTE.  Notes have the same LUID as the
	 previous insn, hence the <= comparison below does not work if
	 P is a note.  */
      if (INSN_UID (p) < max_uid_for_loop
	  && INSN_UID (q) < max_uid_for_loop
	  && GET_CODE (p) != NOTE)
	return INSN_LUID (p) <= INSN_LUID (q);

      if (INSN_UID (p) >= max_uid_for_loop
	  || GET_CODE (p) == NOTE)
	p = NEXT_INSN (p);
      if (INSN_UID (q) >= max_uid_for_loop)
	q = NEXT_INSN (q);
    }
}
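/* Illustrative sketch (not part of the pass, kept out of the build): the
   simultaneous forward walk used by loop_insn_first_p, shown on a plain
   singly linked list.  Advancing both cursors in lock step decides which
   node comes first without any precomputed positions (the LUIDs above).  */
#if 0
struct sketch_node { struct sketch_node *next; };

static int
comes_first_sketch (struct sketch_node *a, struct sketch_node *b)
{
  struct sketch_node *p = a, *q = b;

  for (;;)
    {
      if (q == a || p == 0)
	return 0;		/* B comes first (or A ran off the end).  */
      if (p == b || q == 0)
	return 1;		/* A comes first.  */
      p = p->next;
      q = q->next;
    }
}
#endif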
/* We are trying to eliminate BIV in INSN using GIV.  Return nonzero if
   the offset that we have to take into account due to auto-increment /
   div derivation is zero.  */

static int
biv_elimination_giv_has_0_offset (biv, giv, insn)
     struct induction *biv, *giv;
     rtx insn;
{
  /* If the giv V had the auto-inc address optimization applied
     to it, and INSN occurs between the giv insn and the biv
     insn, then we'd have to adjust the value used here.
     This is rare, so we don't bother to make this possible.  */
  if (giv->auto_inc_opt
      && ((loop_insn_first_p (giv->insn, insn)
	   && loop_insn_first_p (insn, biv->insn))
	  || (loop_insn_first_p (biv->insn, insn)
	      && loop_insn_first_p (insn, giv->insn))))
    return 0;

  return 1;
}
/* If BL appears in X (part of the pattern of INSN), see if we can
   eliminate its use.  If so, return 1.  If not, return 0.

   If BIV does not appear in X, return 1.

   If ELIMINATE_P is nonzero, actually do the elimination.
   WHERE_INSN/WHERE_BB indicate where extra insns should be added.
   Depending on how many items have been moved out of the loop, it
   will either be before INSN (when WHERE_INSN is nonzero) or at the
   start of the loop (when WHERE_INSN is zero).  */

static int
maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
     const struct loop *loop;
     rtx x, insn;
     struct iv_class *bl;
     int eliminate_p;
     basic_block where_bb;
     rtx where_insn;
{
  enum rtx_code code = GET_CODE (x);
  rtx reg = bl->biv->dest_reg;
  enum machine_mode mode = GET_MODE (reg);
  struct induction *v;
8812 /* If we haven't already been able to do something with this BIV,
8813 we can't eliminate it. */
8819 /* If this sets the BIV, it is not a problem. */
8820 if (SET_DEST (x
) == reg
)
8823 /* If this is an insn that defines a giv, it is also ok because
8824 it will go away when the giv is reduced. */
8825 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
8826 if (v
->giv_type
== DEST_REG
&& SET_DEST (x
) == v
->dest_reg
)
8830 if (SET_DEST (x
) == cc0_rtx
&& SET_SRC (x
) == reg
)
8832 /* Can replace with any giv that was reduced and
8833 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8834 Require a constant for MULT_VAL, so we know it's nonzero.
8835 ??? We disable this optimization to avoid potential
8838 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
8839 if (GET_CODE (v
->mult_val
) == CONST_INT
&& v
->mult_val
!= const0_rtx
8840 && v
->add_val
== const0_rtx
8841 && ! v
->ignore
&& ! v
->maybe_dead
&& v
->always_computable
8845 if (! biv_elimination_giv_has_0_offset (bl
->biv
, v
, insn
))
8851 /* If the giv has the opposite direction of change,
8852 then reverse the comparison. */
8853 if (INTVAL (v
->mult_val
) < 0)
8854 new = gen_rtx_COMPARE (GET_MODE (v
->new_reg
),
8855 const0_rtx
, v
->new_reg
);
8859 /* We can probably test that giv's reduced reg. */
8860 if (validate_change (insn
, &SET_SRC (x
), new, 0))
8864 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8865 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8866 Require a constant for MULT_VAL, so we know it's nonzero.
8867 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8868 overflow problem. */
8870 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
8871 if (GET_CODE (v
->mult_val
) == CONST_INT
8872 && v
->mult_val
!= const0_rtx
8873 && ! v
->ignore
&& ! v
->maybe_dead
&& v
->always_computable
8875 && (GET_CODE (v
->add_val
) == SYMBOL_REF
8876 || GET_CODE (v
->add_val
) == LABEL_REF
8877 || GET_CODE (v
->add_val
) == CONST
8878 || (GET_CODE (v
->add_val
) == REG
8879 && REG_POINTER (v
->add_val
))))
8881 if (! biv_elimination_giv_has_0_offset (bl
->biv
, v
, insn
))
8887 /* If the giv has the opposite direction of change,
8888 then reverse the comparison. */
8889 if (INTVAL (v
->mult_val
) < 0)
8890 new = gen_rtx_COMPARE (VOIDmode
, copy_rtx (v
->add_val
),
8893 new = gen_rtx_COMPARE (VOIDmode
, v
->new_reg
,
8894 copy_rtx (v
->add_val
));
8896 /* Replace biv with the giv's reduced register. */
8897 update_reg_last_use (v
->add_val
, insn
);
8898 if (validate_change (insn
, &SET_SRC (PATTERN (insn
)), new, 0))
8901 /* Insn doesn't support that constant or invariant. Copy it
8902 into a register (it will be a loop invariant.) */
8903 tem
= gen_reg_rtx (GET_MODE (v
->new_reg
));
8905 loop_insn_emit_before (loop
, 0, where_insn
,
8907 copy_rtx (v
->add_val
)));
8909 /* Substitute the new register for its invariant value in
8910 the compare expression. */
8911 XEXP (new, (INTVAL (v
->mult_val
) < 0) ? 0 : 1) = tem
;
8912 if (validate_change (insn
, &SET_SRC (PATTERN (insn
)), new, 0))
8921 case GT
: case GE
: case GTU
: case GEU
:
8922 case LT
: case LE
: case LTU
: case LEU
:
8923 /* See if either argument is the biv. */
8924 if (XEXP (x
, 0) == reg
)
8925 arg
= XEXP (x
, 1), arg_operand
= 1;
8926 else if (XEXP (x
, 1) == reg
)
8927 arg
= XEXP (x
, 0), arg_operand
= 0;
8931 if (CONSTANT_P (arg
))
8933 /* First try to replace with any giv that has constant positive
8934 mult_val and constant add_val. We might be able to support
8935 negative mult_val, but it seems complex to do it in general. */
8937 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
8938 if (GET_CODE (v
->mult_val
) == CONST_INT
8939 && INTVAL (v
->mult_val
) > 0
8940 && (GET_CODE (v
->add_val
) == SYMBOL_REF
8941 || GET_CODE (v
->add_val
) == LABEL_REF
8942 || GET_CODE (v
->add_val
) == CONST
8943 || (GET_CODE (v
->add_val
) == REG
8944 && REG_POINTER (v
->add_val
)))
8945 && ! v
->ignore
&& ! v
->maybe_dead
&& v
->always_computable
8948 if (! biv_elimination_giv_has_0_offset (bl
->biv
, v
, insn
))
8951 /* Don't eliminate if the linear combination that makes up
8952 the giv overflows when it is applied to ARG. */
8953 if (GET_CODE (arg
) == CONST_INT
)
8957 if (GET_CODE (v
->add_val
) == CONST_INT
)
8958 add_val
= v
->add_val
;
8960 add_val
= const0_rtx
;
8962 if (const_mult_add_overflow_p (arg
, v
->mult_val
,
8970 /* Replace biv with the giv's reduced reg. */
8971 validate_change (insn
, &XEXP (x
, 1 - arg_operand
), v
->new_reg
, 1);
8973 /* If all constants are actually constant integers and
8974 the derived constant can be directly placed in the COMPARE,
8976 if (GET_CODE (arg
) == CONST_INT
8977 && GET_CODE (v
->add_val
) == CONST_INT
)
8979 tem
= expand_mult_add (arg
, NULL_RTX
, v
->mult_val
,
8980 v
->add_val
, mode
, 1);
8984 /* Otherwise, load it into a register. */
8985 tem
= gen_reg_rtx (mode
);
8986 loop_iv_add_mult_emit_before (loop
, arg
,
8987 v
->mult_val
, v
->add_val
,
8988 tem
, where_bb
, where_insn
);
8991 validate_change (insn
, &XEXP (x
, arg_operand
), tem
, 1);
8993 if (apply_change_group ())
8997 /* Look for giv with positive constant mult_val and nonconst add_val.
8998 Insert insns to calculate new compare value.
8999 ??? Turn this off due to possible overflow. */
9001 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
9002 if (GET_CODE (v
->mult_val
) == CONST_INT
9003 && INTVAL (v
->mult_val
) > 0
9004 && ! v
->ignore
&& ! v
->maybe_dead
&& v
->always_computable
9010 if (! biv_elimination_giv_has_0_offset (bl
->biv
, v
, insn
))
9016 tem
= gen_reg_rtx (mode
);
9018 /* Replace biv with giv's reduced register. */
9019 validate_change (insn
, &XEXP (x
, 1 - arg_operand
),
9022 /* Compute value to compare against. */
9023 loop_iv_add_mult_emit_before (loop
, arg
,
9024 v
->mult_val
, v
->add_val
,
9025 tem
, where_bb
, where_insn
);
9026 /* Use it in this insn. */
9027 validate_change (insn
, &XEXP (x
, arg_operand
), tem
, 1);
9028 if (apply_change_group ())
9032 else if (GET_CODE (arg
) == REG
|| GET_CODE (arg
) == MEM
)
9034 if (loop_invariant_p (loop
, arg
) == 1)
9036 /* Look for giv with constant positive mult_val and nonconst
9037 add_val. Insert insns to compute new compare value.
9038 ??? Turn this off due to possible overflow. */
9040 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
9041 if (GET_CODE (v
->mult_val
) == CONST_INT
&& INTVAL (v
->mult_val
) > 0
9042 && ! v
->ignore
&& ! v
->maybe_dead
&& v
->always_computable
9048 if (! biv_elimination_giv_has_0_offset (bl
->biv
, v
, insn
))
9054 tem
= gen_reg_rtx (mode
);
9056 /* Replace biv with giv's reduced register. */
9057 validate_change (insn
, &XEXP (x
, 1 - arg_operand
),
9060 /* Compute value to compare against. */
9061 loop_iv_add_mult_emit_before (loop
, arg
,
9062 v
->mult_val
, v
->add_val
,
9063 tem
, where_bb
, where_insn
);
9064 validate_change (insn
, &XEXP (x
, arg_operand
), tem
, 1);
9065 if (apply_change_group ())
9070 /* This code has problems. Basically, you can't know when
9071 seeing if we will eliminate BL, whether a particular giv
9072 of ARG will be reduced. If it isn't going to be reduced,
9073 we can't eliminate BL. We can try forcing it to be reduced,
9074 but that can generate poor code.
9076 The problem is that the benefit of reducing TV, below should
9077 be increased if BL can actually be eliminated, but this means
9078 we might have to do a topological sort of the order in which
9079 we try to process biv. It doesn't seem worthwhile to do
9080 this sort of thing now. */
9083 /* Otherwise the reg compared with had better be a biv. */
9084 if (GET_CODE (arg
) != REG
9085 || REG_IV_TYPE (ivs
, REGNO (arg
)) != BASIC_INDUCT
)
9088 /* Look for a pair of givs, one for each biv,
9089 with identical coefficients. */
9090 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
9092 struct induction
*tv
;
9094 if (v
->ignore
|| v
->maybe_dead
|| v
->mode
!= mode
)
9097 for (tv
= REG_IV_CLASS (ivs
, REGNO (arg
))->giv
; tv
;
9099 if (! tv
->ignore
&& ! tv
->maybe_dead
9100 && rtx_equal_p (tv
->mult_val
, v
->mult_val
)
9101 && rtx_equal_p (tv
->add_val
, v
->add_val
)
9102 && tv
->mode
== mode
)
9104 if (! biv_elimination_giv_has_0_offset (bl
->biv
, v
, insn
))
9110 /* Replace biv with its giv's reduced reg. */
9111 XEXP (x
, 1 - arg_operand
) = v
->new_reg
;
9112 /* Replace other operand with the other giv's
9114 XEXP (x
, arg_operand
) = tv
->new_reg
;
9121 /* If we get here, the biv can't be eliminated. */
9125 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9126 biv is used in it, since it will be replaced. */
9127 for (v
= bl
->giv
; v
; v
= v
->next_iv
)
9128 if (v
->giv_type
== DEST_ADDR
&& v
->location
== &XEXP (x
, 0))
9136 /* See if any subexpression fails elimination. */
9137 fmt
= GET_RTX_FORMAT (code
);
9138 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
9143 if (! maybe_eliminate_biv_1 (loop
, XEXP (x
, i
), insn
, bl
,
9144 eliminate_p
, where_bb
, where_insn
))
9149 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
9150 if (! maybe_eliminate_biv_1 (loop
, XVECEXP (x
, i
, j
), insn
, bl
,
9151 eliminate_p
, where_bb
, where_insn
))
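/* Illustrative sketch (not part of the pass, kept out of the build):
   rewriting a comparison against the biv in terms of a giv with known
   coefficients, as maybe_eliminate_biv_1 does for COMPARE and the
   relational codes.  If giv = mult * biv + add with constant mult > 0, the
   test (biv < limit) may be replaced by (giv < mult * limit + add).  The
   MULT and ADD values here are hypothetical.  */
#if 0
static int
giv_compare_sketch (int biv, int limit)
{
  const int mult = 4, add = 8;	/* Hypothetical giv coefficients.  */
  int giv = mult * biv + add;

  /* The two tests agree whenever the arithmetic does not overflow,
     which is exactly the overflow caveat noted in the code above.  */
  return (biv < limit) == (giv < mult * limit + add);
}
#endif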
/* Return nonzero if the last use of REG
   is in an insn following INSN in the same basic block.  */

static int
last_use_this_basic_block (reg, insn)
     rtx reg, insn;
{
  rtx n;
  for (n = insn;
       n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
       n = NEXT_INSN (n))
    {
      if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
	return 1;
    }
  return 0;
}
/* Called via `note_stores' to record the initial value of a biv.  Here we
   just record the location of the set and process it later.  */

static void
record_initial (dest, set, data)
     rtx dest;
     rtx set;
     void *data ATTRIBUTE_UNUSED;
{
  struct loop_ivs *ivs = (struct loop_ivs *) data;
  struct iv_class *bl;

  if (GET_CODE (dest) != REG
      || REGNO (dest) >= ivs->n_regs
      || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
    return;

  bl = REG_IV_CLASS (ivs, REGNO (dest));

  /* If this is the first set found, record it.  */
  if (bl->init_insn == 0)
    {
      bl->init_insn = note_insn;
      bl->init_set = set;
    }
}
/* If any of the registers in X are "old" and currently have a last use earlier
   than INSN, update them to have a last use of INSN.  Their actual last use
   will be the previous insn but it will not have a valid uid_luid so we can't
   use it.  X must be a source expression only.  */

static void
update_reg_last_use (x, insn)
     rtx x;
     rtx insn;
{
  /* Check for the case where INSN does not have a valid luid.  In this case,
     there is no need to modify the regno_last_uid, as this can only happen
     when code is inserted after the loop_end to set a pseudo's final value,
     and hence this insn will never be the last use of x.
     ???? This comment is not correct.  See for example loop_givs_reduce.
     This may insert an insn before another new insn.  */
  if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
      && INSN_UID (insn) < max_uid_for_loop
      && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
    {
      REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
    }
  else
    {
      int i, j;
      const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    update_reg_last_use (XEXP (x, i), insn);
	  else if (fmt[i] == 'E')
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      update_reg_last_use (XVECEXP (x, i, j), insn);
	}
    }
}
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  */

rtx
canonicalize_condition (insn, cond, reverse, earliest, want_reg)
     rtx insn;
     rtx cond;
     int reverse;
     rtx *earliest;
     rtx want_reg;
{
  enum rtx_code code;
  rtx prev = insn;
  rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  enum machine_mode mode;

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;
9295 /* If we are comparing a register with zero, see if the register is set
9296 in the previous insn to a COMPARE or a comparison operation. Perform
9297 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9300 while (GET_RTX_CLASS (code
) == '<'
9301 && op1
== CONST0_RTX (GET_MODE (op0
))
9304 /* Set nonzero when we find something of interest. */
9308 /* If comparison with cc0, import actual comparison from compare
9312 if ((prev
= prev_nonnote_insn (prev
)) == 0
9313 || GET_CODE (prev
) != INSN
9314 || (set
= single_set (prev
)) == 0
9315 || SET_DEST (set
) != cc0_rtx
)
9318 op0
= SET_SRC (set
);
9319 op1
= CONST0_RTX (GET_MODE (op0
));
9325 /* If this is a COMPARE, pick up the two things being compared. */
9326 if (GET_CODE (op0
) == COMPARE
)
9328 op1
= XEXP (op0
, 1);
9329 op0
= XEXP (op0
, 0);
9332 else if (GET_CODE (op0
) != REG
)
9335 /* Go back to the previous insn. Stop if it is not an INSN. We also
9336 stop if it isn't a single set or if it has a REG_INC note because
9337 we don't want to bother dealing with it. */
9339 if ((prev
= prev_nonnote_insn (prev
)) == 0
9340 || GET_CODE (prev
) != INSN
9341 || FIND_REG_INC_NOTE (prev
, NULL_RTX
))
9344 set
= set_of (op0
, prev
);
9347 && (GET_CODE (set
) != SET
9348 || !rtx_equal_p (SET_DEST (set
), op0
)))
9351 /* If this is setting OP0, get what it sets it to if it looks
9355 enum machine_mode inner_mode
= GET_MODE (SET_DEST (set
));
9356 #ifdef FLOAT_STORE_FLAG_VALUE
9357 REAL_VALUE_TYPE fsfv
;
9360 /* ??? We may not combine comparisons done in a CCmode with
9361 comparisons not done in a CCmode. This is to aid targets
9362 like Alpha that have an IEEE compliant EQ instruction, and
9363 a non-IEEE compliant BEQ instruction. The use of CCmode is
9364 actually artificial, simply to prevent the combination, but
9365 should not affect other platforms.
9367 However, we must allow VOIDmode comparisons to match either
9368 CCmode or non-CCmode comparison, because some ports have
9369 modeless comparisons inside branch patterns.
9371 ??? This mode check should perhaps look more like the mode check
9372 in simplify_comparison in combine. */
9374 if ((GET_CODE (SET_SRC (set
)) == COMPARE
9377 && GET_MODE_CLASS (inner_mode
) == MODE_INT
9378 && (GET_MODE_BITSIZE (inner_mode
)
9379 <= HOST_BITS_PER_WIDE_INT
)
9380 && (STORE_FLAG_VALUE
9381 & ((HOST_WIDE_INT
) 1
9382 << (GET_MODE_BITSIZE (inner_mode
) - 1))))
9383 #ifdef FLOAT_STORE_FLAG_VALUE
9385 && GET_MODE_CLASS (inner_mode
) == MODE_FLOAT
9386 && (fsfv
= FLOAT_STORE_FLAG_VALUE (inner_mode
),
9387 REAL_VALUE_NEGATIVE (fsfv
)))
9390 && GET_RTX_CLASS (GET_CODE (SET_SRC (set
))) == '<'))
9391 && (((GET_MODE_CLASS (mode
) == MODE_CC
)
9392 == (GET_MODE_CLASS (inner_mode
) == MODE_CC
))
9393 || mode
== VOIDmode
|| inner_mode
== VOIDmode
))
9395 else if (((code
== EQ
9397 && (GET_MODE_BITSIZE (inner_mode
)
9398 <= HOST_BITS_PER_WIDE_INT
)
9399 && GET_MODE_CLASS (inner_mode
) == MODE_INT
9400 && (STORE_FLAG_VALUE
9401 & ((HOST_WIDE_INT
) 1
9402 << (GET_MODE_BITSIZE (inner_mode
) - 1))))
9403 #ifdef FLOAT_STORE_FLAG_VALUE
9405 && GET_MODE_CLASS (inner_mode
) == MODE_FLOAT
9406 && (fsfv
= FLOAT_STORE_FLAG_VALUE (inner_mode
),
9407 REAL_VALUE_NEGATIVE (fsfv
)))
9410 && GET_RTX_CLASS (GET_CODE (SET_SRC (set
))) == '<'
9411 && (((GET_MODE_CLASS (mode
) == MODE_CC
)
9412 == (GET_MODE_CLASS (inner_mode
) == MODE_CC
))
9413 || mode
== VOIDmode
|| inner_mode
== VOIDmode
))
9423 else if (reg_set_p (op0
, prev
))
9424 /* If this sets OP0, but not directly, we have to give up. */
9429 if (GET_RTX_CLASS (GET_CODE (x
)) == '<')
9430 code
= GET_CODE (x
);
9433 code
= reversed_comparison_code (x
, prev
);
9434 if (code
== UNKNOWN
)
9439 op0
= XEXP (x
, 0), op1
= XEXP (x
, 1);
9445 /* If constant is first, put it last. */
9446 if (CONSTANT_P (op0
))
9447 code
= swap_condition (code
), tem
= op0
, op0
= op1
, op1
= tem
;
9449 /* If OP0 is the result of a comparison, we weren't able to find what
9450 was really being compared, so fail. */
9451 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
)
9454 /* Canonicalize any ordered comparison with integers involving equality
9455 if we can do computations in the relevant mode and we do not
9458 if (GET_CODE (op1
) == CONST_INT
9459 && GET_MODE (op0
) != VOIDmode
9460 && GET_MODE_BITSIZE (GET_MODE (op0
)) <= HOST_BITS_PER_WIDE_INT
)
9462 HOST_WIDE_INT const_val
= INTVAL (op1
);
9463 unsigned HOST_WIDE_INT uconst_val
= const_val
;
9464 unsigned HOST_WIDE_INT max_val
9465 = (unsigned HOST_WIDE_INT
) GET_MODE_MASK (GET_MODE (op0
));
9470 if ((unsigned HOST_WIDE_INT
) const_val
!= max_val
>> 1)
9471 code
= LT
, op1
= gen_int_mode (const_val
+ 1, GET_MODE (op0
));
9474 /* When cross-compiling, const_val might be sign-extended from
9475 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
9477 if ((HOST_WIDE_INT
) (const_val
& max_val
)
9478 != (((HOST_WIDE_INT
) 1
9479 << (GET_MODE_BITSIZE (GET_MODE (op0
)) - 1))))
9480 code
= GT
, op1
= gen_int_mode (const_val
- 1, GET_MODE (op0
));
9484 if (uconst_val
< max_val
)
9485 code
= LTU
, op1
= gen_int_mode (uconst_val
+ 1, GET_MODE (op0
));
9489 if (uconst_val
!= 0)
9490 code
= GTU
, op1
= gen_int_mode (uconst_val
- 1, GET_MODE (op0
));
9498 /* Never return CC0; return zero instead. */
9502 return gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
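/* Illustrative sketch (not part of the pass, kept out of the build): rule
   (4) of the canonicalization above, applied to plain integers.  An ordered
   comparison with a constant is tightened to a strict one by bumping the
   constant, provided the constant is not already at the end of its range
   (the max_val / uconst_val checks above).  */
#if 0
static int
canonicalize_le_sketch (int x, int c)
{
  /* (LE x c) becomes (LT x c+1) whenever c + 1 does not overflow.  */
  if (c < 2147483647)		/* INT_MAX guard, mirroring the range check.  */
    return (x <= c) == (x < c + 1);
  return 1;
}
#endif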
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  */

rtx
get_condition (jump, earliest)
     rtx jump;
     rtx *earliest;
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (GET_CODE (jump) != JUMP_INSN
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
}
/* Similar to above routine, except that we also put an invariant last
   unless both operands are invariants.  */

rtx
get_condition_for_loop (loop, x)
     const struct loop *loop;
     rtx x;
{
  rtx comparison = get_condition (x, (rtx *) 0);

  if (comparison == 0
      || ! loop_invariant_p (loop, XEXP (comparison, 0))
      || loop_invariant_p (loop, XEXP (comparison, 1)))
    return comparison;

  return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
			 XEXP (comparison, 1), XEXP (comparison, 0));
}
/* Scan the function and determine whether it has indirect (computed) jumps.

   This is taken mostly from flow.c; similar code exists elsewhere
   in the compiler.  It may be useful to put this into rtlanal.c.  */

static int
indirect_jump_in_function_p (start)
     rtx start;
{
  rtx insn;

  for (insn = start; insn; insn = NEXT_INSN (insn))
    if (computed_jump_p (insn))
      return 1;

  return 0;
}
/* Add MEM to the LOOP_MEMS array, if appropriate.  See the
   documentation for LOOP_MEMS for the definition of `appropriate'.
   This function is called from prescan_loop via for_each_rtx.  */

static int
insert_loop_mem (mem, data)
     rtx *mem;
     void *data ATTRIBUTE_UNUSED;
{
  struct loop_info *loop_info = data;
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CLOBBER:
      /* We're not interested in MEMs that are only clobbered.  */
      return -1;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into this.  */
      return -1;

    case EXPR_LIST:
      /* We're not interested in any MEMs that only appear in notes.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  /* See if we've already seen this MEM.  */
  for (i = 0; i < loop_info->mems_idx; ++i)
    if (rtx_equal_p (m, loop_info->mems[i].mem))
      {
	if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
	  /* The modes of the two memory accesses are different.  If
	     this happens, something tricky is going on, and we just
	     don't optimize accesses to this MEM.  */
	  loop_info->mems[i].optimize = 0;

	return 0;
      }

  /* Resize the array, if necessary.  */
  if (loop_info->mems_idx == loop_info->mems_allocated)
    {
      if (loop_info->mems_allocated != 0)
	loop_info->mems_allocated *= 2;
      else
	loop_info->mems_allocated = 32;

      loop_info->mems = (loop_mem_info *)
	xrealloc (loop_info->mems,
		  loop_info->mems_allocated * sizeof (loop_mem_info));
    }

  /* Actually insert the MEM.  */
  loop_info->mems[loop_info->mems_idx].mem = m;
  /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
     because we can't put it in a register.  We still store it in the
     table, though, so that if we see the same address later, but in a
     non-BLK mode, we'll not think we can optimize it at that point.  */
  loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
  loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
  ++loop_info->mems_idx;

  return 0;
}
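/* Illustrative sketch (not part of the pass, kept out of the build): the
   growth policy used for the LOOP_MEMS table above -- start with a small
   allocation and double it whenever the table fills, giving amortized O(1)
   insertion.  Uses plain realloc for the sketch; error handling omitted.  */
#if 0
#include <stdlib.h>

struct sketch_table
{
  int *entries;
  int n_entries;
  int n_allocated;
};

static void
sketch_table_push (struct sketch_table *t, int value)
{
  if (t->n_entries == t->n_allocated)
    {
      /* Double the capacity, starting from a small initial size.  */
      t->n_allocated = t->n_allocated ? t->n_allocated * 2 : 32;
      t->entries = (int *) realloc (t->entries,
				    t->n_allocated * sizeof (int));
    }
  t->entries[t->n_entries++] = value;
}
#endif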
9656 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9658 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9659 register that is modified by an insn between FROM and TO. If the
9660 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9661 more, stop incrementing it, to avoid overflow.
9663 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9664 register I is used, if it is only used once. Otherwise, it is set
9665 to 0 (for no uses) or const0_rtx for more than one use. This
9666 parameter may be zero, in which case this processing is not done.
9668 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9669 optimize register I. */
9672 loop_regs_scan (loop
, extra_size
)
9673 const struct loop
*loop
;
9676 struct loop_regs
*regs
= LOOP_REGS (loop
);
9678 /* last_set[n] is nonzero iff reg n has been set in the current
9679 basic block. In that case, it is the insn that last set reg n. */
9684 old_nregs
= regs
->num
;
9685 regs
->num
= max_reg_num ();
9687 /* Grow the regs array if not allocated or too small. */
9688 if (regs
->num
>= regs
->size
)
9690 regs
->size
= regs
->num
+ extra_size
;
9692 regs
->array
= (struct loop_reg
*)
9693 xrealloc (regs
->array
, regs
->size
* sizeof (*regs
->array
));
9695 /* Zero the new elements. */
9696 memset (regs
->array
+ old_nregs
, 0,
9697 (regs
->size
- old_nregs
) * sizeof (*regs
->array
));
9700 /* Clear previously scanned fields but do not clear n_times_set. */
9701 for (i
= 0; i
< old_nregs
; i
++)
9703 regs
->array
[i
].set_in_loop
= 0;
9704 regs
->array
[i
].may_not_optimize
= 0;
9705 regs
->array
[i
].single_usage
= NULL_RTX
;
9708 last_set
= (rtx
*) xcalloc (regs
->num
, sizeof (rtx
));
9710 /* Scan the loop, recording register usage. */
9711 for (insn
= loop
->top
? loop
->top
: loop
->start
; insn
!= loop
->end
;
9712 insn
= NEXT_INSN (insn
))
9716 /* Record registers that have exactly one use. */
9717 find_single_use_in_loop (regs
, insn
, PATTERN (insn
));
9719 /* Include uses in REG_EQUAL notes. */
9720 if (REG_NOTES (insn
))
9721 find_single_use_in_loop (regs
, insn
, REG_NOTES (insn
));
9723 if (GET_CODE (PATTERN (insn
)) == SET
9724 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
9725 count_one_set (regs
, insn
, PATTERN (insn
), last_set
);
9726 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
9729 for (i
= XVECLEN (PATTERN (insn
), 0) - 1; i
>= 0; i
--)
9730 count_one_set (regs
, insn
, XVECEXP (PATTERN (insn
), 0, i
),
9735 if (GET_CODE (insn
) == CODE_LABEL
|| GET_CODE (insn
) == JUMP_INSN
)
9736 memset (last_set
, 0, regs
->num
* sizeof (rtx
));
9738 /* Invalidate all registers used for function argument passing.
9739 We check rtx_varies_p for the same reason as below, to allow
9740 optimizing PIC calculations. */
9741 if (GET_CODE (insn
) == CALL_INSN
)
9744 for (link
= CALL_INSN_FUNCTION_USAGE (insn
);
9746 link
= XEXP (link
, 1))
9750 if (GET_CODE (op
= XEXP (link
, 0)) == USE
9751 && GET_CODE (reg
= XEXP (op
, 0)) == REG
9752 && rtx_varies_p (reg
, 1))
9753 regs
->array
[REGNO (reg
)].may_not_optimize
= 1;
9758 /* Invalidate all hard registers clobbered by calls. With one exception:
9759 a call-clobbered PIC register is still function-invariant for our
9760 purposes, since we can hoist any PIC calculations out of the loop.
9761 Thus the call to rtx_varies_p. */
9762 if (LOOP_INFO (loop
)->has_call
)
9763 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
9764 if (TEST_HARD_REG_BIT (regs_invalidated_by_call
, i
)
9765 && rtx_varies_p (regno_reg_rtx
[i
], 1))
9767 regs
->array
[i
].may_not_optimize
= 1;
9768 regs
->array
[i
].set_in_loop
= 1;
9771 #ifdef AVOID_CCMODE_COPIES
9772 /* Don't try to move insns which set CC registers if we should not
9773 create CCmode register copies. */
9774 for (i
= regs
->num
- 1; i
>= FIRST_PSEUDO_REGISTER
; i
--)
9775 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx
[i
])) == MODE_CC
)
9776 regs
->array
[i
].may_not_optimize
= 1;
9779 /* Set regs->array[I].n_times_set for the new registers. */
9780 for (i
= old_nregs
; i
< regs
->num
; i
++)
9781 regs
->array
[i
].n_times_set
= regs
->array
[i
].set_in_loop
;
/* Returns the number of real INSNs in the LOOP.  */

static int
count_insns_in_loop (loop)
     const struct loop *loop;
{
  int count = 0;
  rtx insn;

  for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      ++count;

  return count;
}
9803 /* Move MEMs into registers for the duration of the loop. */
9807 const struct loop
*loop
;
9809 struct loop_info
*loop_info
= LOOP_INFO (loop
);
9810 struct loop_regs
*regs
= LOOP_REGS (loop
);
9811 int maybe_never
= 0;
9813 rtx p
, prev_ebb_head
;
9814 rtx label
= NULL_RTX
;
9816 /* Nonzero if the next instruction may never be executed. */
9817 int next_maybe_never
= 0;
9818 unsigned int last_max_reg
= max_reg_num ();
9820 if (loop_info
->mems_idx
== 0)
9823 /* We cannot use next_label here because it skips over normal insns. */
9824 end_label
= next_nonnote_insn (loop
->end
);
9825 if (end_label
&& GET_CODE (end_label
) != CODE_LABEL
)
9826 end_label
= NULL_RTX
;
9828 /* Check to see if it's possible that some instructions in the loop are
9829 never executed. Also check if there is a goto out of the loop other
9830 than right after the end of the loop. */
9831 for (p
= next_insn_in_loop (loop
, loop
->scan_start
);
9833 p
= next_insn_in_loop (loop
, p
))
9835 if (GET_CODE (p
) == CODE_LABEL
)
9837 else if (GET_CODE (p
) == JUMP_INSN
9838 /* If we enter the loop in the middle, and scan
9839 around to the beginning, don't set maybe_never
9840 for that. This must be an unconditional jump,
9841 otherwise the code at the top of the loop might
9842 never be executed. Unconditional jumps are
9843 followed a by barrier then loop end. */
9844 && ! (GET_CODE (p
) == JUMP_INSN
9845 && JUMP_LABEL (p
) == loop
->top
9846 && NEXT_INSN (NEXT_INSN (p
)) == loop
->end
9847 && any_uncondjump_p (p
)))
9849 /* If this is a jump outside of the loop but not right
9850 after the end of the loop, we would have to emit new fixup
9851 sequences for each such label. */
9852 if (/* If we can't tell where control might go when this
9853 JUMP_INSN is executed, we must be conservative. */
9855 || (JUMP_LABEL (p
) != end_label
9856 && (INSN_UID (JUMP_LABEL (p
)) >= max_uid_for_loop
9857 || INSN_LUID (JUMP_LABEL (p
)) < INSN_LUID (loop
->start
)
9858 || INSN_LUID (JUMP_LABEL (p
)) > INSN_LUID (loop
->end
))))
9861 if (!any_condjump_p (p
))
9862 /* Something complicated. */
9865 /* If there are any more instructions in the loop, they
9866 might not be reached. */
9867 next_maybe_never
= 1;
9869 else if (next_maybe_never
)
9873 /* Find start of the extended basic block that enters the loop. */
9874 for (p
= loop
->start
;
9875 PREV_INSN (p
) && GET_CODE (p
) != CODE_LABEL
;
9882 /* Build table of mems that get set to constant values before the
9884 for (; p
!= loop
->start
; p
= NEXT_INSN (p
))
9885 cselib_process_insn (p
);
9887 /* Actually move the MEMs. */
9888 for (i
= 0; i
< loop_info
->mems_idx
; ++i
)
9890 regset_head load_copies
;
9891 regset_head store_copies
;
9894 rtx mem
= loop_info
->mems
[i
].mem
;
9897 if (MEM_VOLATILE_P (mem
)
9898 || loop_invariant_p (loop
, XEXP (mem
, 0)) != 1)
9899 /* There's no telling whether or not MEM is modified. */
9900 loop_info
->mems
[i
].optimize
= 0;
9902 /* Go through the MEMs written to in the loop to see if this
9903 one is aliased by one of them. */
9904 mem_list_entry
= loop_info
->store_mems
;
9905 while (mem_list_entry
)
9907 if (rtx_equal_p (mem
, XEXP (mem_list_entry
, 0)))
9909 else if (true_dependence (XEXP (mem_list_entry
, 0), VOIDmode
,
9912 /* MEM is indeed aliased by this store. */
9913 loop_info
->mems
[i
].optimize
= 0;
9916 mem_list_entry
= XEXP (mem_list_entry
, 1);
9919 if (flag_float_store
&& written
9920 && GET_MODE_CLASS (GET_MODE (mem
)) == MODE_FLOAT
)
9921 loop_info
->mems
[i
].optimize
= 0;
9923 /* If this MEM is written to, we must be sure that there
9924 are no reads from another MEM that aliases this one. */
9925 if (loop_info
->mems
[i
].optimize
&& written
)
9929 for (j
= 0; j
< loop_info
->mems_idx
; ++j
)
9933 else if (true_dependence (mem
,
9935 loop_info
->mems
[j
].mem
,
9938 /* It's not safe to hoist loop_info->mems[i] out of
9939 the loop because writes to it might not be
9940 seen by reads from loop_info->mems[j]. */
9941 loop_info
->mems
[i
].optimize
= 0;
9947 if (maybe_never
&& may_trap_p (mem
))
9948 /* We can't access the MEM outside the loop; it might
9949 cause a trap that wouldn't have happened otherwise. */
9950 loop_info
->mems
[i
].optimize
= 0;
9952 if (!loop_info
->mems
[i
].optimize
)
9953 /* We thought we were going to lift this MEM out of the
9954 loop, but later discovered that we could not. */
9957 INIT_REG_SET (&load_copies
);
9958 INIT_REG_SET (&store_copies
);
9960 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9961 order to keep scan_loop from moving stores to this MEM
9962 out of the loop just because this REG is neither a
9963 user-variable nor used in the loop test. */
9964 reg
= gen_reg_rtx (GET_MODE (mem
));
9965 REG_USERVAR_P (reg
) = 1;
9966 loop_info
->mems
[i
].reg
= reg
;
9968 /* Now, replace all references to the MEM with the
9969 corresponding pseudos. */
9971 for (p
= next_insn_in_loop (loop
, loop
->scan_start
);
9973 p
= next_insn_in_loop (loop
, p
))
9979 set
= single_set (p
);
9981 /* See if this copies the mem into a register that isn't
9982 modified afterwards. We'll try to do copy propagation
9983 a little further on. */
9985 /* @@@ This test is _way_ too conservative. */
9987 && GET_CODE (SET_DEST (set
)) == REG
9988 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
9989 && REGNO (SET_DEST (set
)) < last_max_reg
9990 && regs
->array
[REGNO (SET_DEST (set
))].n_times_set
== 1
9991 && rtx_equal_p (SET_SRC (set
), mem
))
9992 SET_REGNO_REG_SET (&load_copies
, REGNO (SET_DEST (set
)));
9994 /* See if this copies the mem from a register that isn't
9995 modified afterwards. We'll try to remove the
9996 redundant copy later on by doing a little register
9997 renaming and copy propagation. This will help
9998 to untangle things for the BIV detection code. */
10001 && GET_CODE (SET_SRC (set
)) == REG
10002 && REGNO (SET_SRC (set
)) >= FIRST_PSEUDO_REGISTER
10003 && REGNO (SET_SRC (set
)) < last_max_reg
10004 && regs
->array
[REGNO (SET_SRC (set
))].n_times_set
== 1
10005 && rtx_equal_p (SET_DEST (set
), mem
))
10006 SET_REGNO_REG_SET (&store_copies
, REGNO (SET_SRC (set
)));
10008 /* If this is a call which uses / clobbers this memory
10009 location, we must not change the interface here. */
10010 if (GET_CODE (p
) == CALL_INSN
10011 && reg_mentioned_p (loop_info
->mems
[i
].mem
,
10012 CALL_INSN_FUNCTION_USAGE (p
)))
10014 cancel_changes (0);
10015 loop_info
->mems
[i
].optimize
= 0;
10019 /* Replace the memory reference with the shadow register. */
10020 replace_loop_mems (p
, loop_info
->mems
[i
].mem
,
10021 loop_info
->mems
[i
].reg
);
10024 if (GET_CODE (p
) == CODE_LABEL
10025 || GET_CODE (p
) == JUMP_INSN
)
10029 if (! loop_info
->mems
[i
].optimize
)
10030 ; /* We found we couldn't do the replacement, so do nothing. */
10031 else if (! apply_change_group ())
10032 /* We couldn't replace all occurrences of the MEM. */
10033 loop_info
->mems
[i
].optimize
= 0;
10036 /* Load the memory immediately before LOOP->START, which is
10037 the NOTE_LOOP_BEG. */
10038 cselib_val
*e
= cselib_lookup (mem
, VOIDmode
, 0);
10042 struct elt_loc_list
*const_equiv
= 0;
10046 struct elt_loc_list
*equiv
;
10047 struct elt_loc_list
*best_equiv
= 0;
10048 for (equiv
= e
->locs
; equiv
; equiv
= equiv
->next
)
10050 if (CONSTANT_P (equiv
->loc
))
10051 const_equiv
= equiv
;
10052 else if (GET_CODE (equiv
->loc
) == REG
10053 /* Extending hard register lifetimes causes crash
10054 on SRC targets. Doing so on non-SRC is
10055 probably also not good idea, since we most
10056 probably have pseudoregister equivalence as
10058 && REGNO (equiv
->loc
) >= FIRST_PSEUDO_REGISTER
)
10059 best_equiv
= equiv
;
10061 /* Use the constant equivalence if that is cheap enough. */
10063 best_equiv
= const_equiv
;
10064 else if (const_equiv
10065 && (rtx_cost (const_equiv
->loc
, SET
)
10066 <= rtx_cost (best_equiv
->loc
, SET
)))
10068 best_equiv
= const_equiv
;
10072 /* If best_equiv is nonzero, we know that MEM is set to a
10073 constant or register before the loop. We will use this
10074 knowledge to initialize the shadow register with that
10075 constant or reg rather than by loading from MEM. */
10077 best
= copy_rtx (best_equiv
->loc
);
10080 set
= gen_move_insn (reg
, best
);
10081 set
= loop_insn_hoist (loop
, set
);
10084 for (p
= prev_ebb_head
; p
!= loop
->start
; p
= NEXT_INSN (p
))
10085 if (REGNO_LAST_UID (REGNO (best
)) == INSN_UID (p
))
10087 REGNO_LAST_UID (REGNO (best
)) = INSN_UID (set
);
10093 set_unique_reg_note (set
, REG_EQUAL
, copy_rtx (const_equiv
->loc
));
10097 if (label
== NULL_RTX
)
10099 label
= gen_label_rtx ();
10100 emit_label_after (label
, loop
->end
);
10103 /* Store the memory immediately after END, which is
10104 the NOTE_LOOP_END. */
10105 set
= gen_move_insn (copy_rtx (mem
), reg
);
10106 loop_insn_emit_after (loop
, 0, label
, set
);
10109 if (loop_dump_stream
)
10111 fprintf (loop_dump_stream
, "Hoisted regno %d %s from ",
10112 REGNO (reg
), (written
? "r/w" : "r/o"));
10113 print_rtl (loop_dump_stream
, mem
);
10114 fputc ('\n', loop_dump_stream
);
10117 /* Attempt a bit of copy propagation. This helps untangle the
10118 data flow, and enables {basic,general}_induction_var to find
10120 EXECUTE_IF_SET_IN_REG_SET
10121 (&load_copies
, FIRST_PSEUDO_REGISTER
, j
,
10123 try_copy_prop (loop
, reg
, j
);
10125 CLEAR_REG_SET (&load_copies
);
10127 EXECUTE_IF_SET_IN_REG_SET
10128 (&store_copies
, FIRST_PSEUDO_REGISTER
, j
,
10130 try_swap_copy_prop (loop
, reg
, j
);
10132 CLEAR_REG_SET (&store_copies
);
10136 if (label
!= NULL_RTX
&& end_label
!= NULL_RTX
)
10138 /* Now, we need to replace all references to the previous exit
10139 label with the new one. */
10140 replace_label_data rr
;
10143 rr
.update_label_nuses
= true;
10145 for (p
= loop
->start
; p
!= loop
->end
; p
= NEXT_INSN (p
))
10147 for_each_rtx (&p
, replace_label
, &rr
);
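/* Illustrative sketch (not part of the pass, kept out of the build): the
   source-level effect of load_mems, assuming a MEM whose address is loop
   invariant, which is not aliased by any store in the loop, and an array A
   of at least N elements.  The memory reference is replaced by a pseudo
   loaded before the loop and, since it is written, stored back after it.
   (The two forms are shown back to back only for comparison.)  */
#if 0
static void
load_mems_sketch (int *counter, const int *a, int n)
{
  int i;

  /* Before: *counter is read and written on every iteration.  */
  for (i = 0; i < n; i++)
    *counter += a[i];

  /* After: the MEM lives in a register for the duration of the loop.  */
  {
    int shadow = *counter;	/* load hoisted before the loop */
    for (i = 0; i < n; i++)
      shadow += a[i];
    *counter = shadow;		/* store sunk after the loop */
  }
}
#endif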
/* For communication between note_reg_stored and its caller.  */
struct note_reg_stored_arg
{
  int set_seen;
  rtx reg;
};

/* Called via note_stores, record in SET_SEEN whether X, which is written,
   is equal to ARG.  */

static void
note_reg_stored (x, setter, arg)
     rtx x, setter ATTRIBUTE_UNUSED;
     void *arg;
{
  struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
  if (t->reg == x)
    t->set_seen = 1;
}
10173 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10174 There must be exactly one insn that sets this pseudo; it will be
10175 deleted if all replacements succeed and we can prove that the register
10176 is not used after the loop. */
10179 try_copy_prop (loop
, replacement
, regno
)
10180 const struct loop
*loop
;
10182 unsigned int regno
;
10184 /* This is the reg that we are copying from. */
10185 rtx reg_rtx
= regno_reg_rtx
[regno
];
10188 /* These help keep track of whether we replaced all uses of the reg. */
10189 int replaced_last
= 0;
10190 int store_is_first
= 0;
10192 for (insn
= next_insn_in_loop (loop
, loop
->scan_start
);
10194 insn
= next_insn_in_loop (loop
, insn
))
10198 /* Only substitute within one extended basic block from the initializing
10200 if (GET_CODE (insn
) == CODE_LABEL
&& init_insn
)
10203 if (! INSN_P (insn
))
10206 /* Is this the initializing insn? */
10207 set
= single_set (insn
);
10209 && GET_CODE (SET_DEST (set
)) == REG
10210 && REGNO (SET_DEST (set
)) == regno
)
10216 if (REGNO_FIRST_UID (regno
) == INSN_UID (insn
))
10217 store_is_first
= 1;
10220 /* Only substitute after seeing the initializing insn. */
10221 if (init_insn
&& insn
!= init_insn
)
10223 struct note_reg_stored_arg arg
;
10225 replace_loop_regs (insn
, reg_rtx
, replacement
);
10226 if (REGNO_LAST_UID (regno
) == INSN_UID (insn
))
10229 /* Stop replacing when REPLACEMENT is modified. */
10230 arg
.reg
= replacement
;
10232 note_stores (PATTERN (insn
), note_reg_stored
, &arg
);
10235 rtx note
= find_reg_note (insn
, REG_EQUAL
, NULL
);
10237 /* It is possible that we've turned previously valid REG_EQUAL to
10238 invalid, as we change the REGNO to REPLACEMENT and unlike REGNO,
10239 REPLACEMENT is modified, we get different meaning. */
10240 if (note
&& reg_mentioned_p (replacement
, XEXP (note
, 0)))
10241 remove_note (insn
, note
);
10248 if (apply_change_group ())
10250 if (loop_dump_stream
)
10251 fprintf (loop_dump_stream
, " Replaced reg %d", regno
);
10252 if (store_is_first
&& replaced_last
)
10257 /* Assume we're just deleting INIT_INSN. */
10259 /* Look for REG_RETVAL note. If we're deleting the end of
10260 the libcall sequence, the whole sequence can go. */
10261 retval_note
= find_reg_note (init_insn
, REG_RETVAL
, NULL_RTX
);
10262 /* If we found a REG_RETVAL note, find the first instruction
10263 in the sequence. */
10265 first
= XEXP (retval_note
, 0);
10267 /* Delete the instructions. */
10268 loop_delete_insns (first
, init_insn
);
10270 if (loop_dump_stream
)
10271 fprintf (loop_dump_stream
, ".\n");
/* Replace all the instructions from FIRST up to and including LAST
   with NOTE_INSN_DELETED notes.  */

static void
loop_delete_insns (first, last)
     rtx first;
     rtx last;
{
  while (1)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, ", deleting init_insn (%d)",
		 INSN_UID (first));
      delete_insn (first);

      /* If this was the LAST instructions we're supposed to delete,
	 we're done.  */
      if (first == last)
	break;

      first = NEXT_INSN (first);
    }
}
10299 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10300 loop LOOP if the order of the sets of these registers can be
10301 swapped. There must be exactly one insn within the loop that sets
10302 this pseudo followed immediately by a move insn that sets
10303 REPLACEMENT with REGNO. */
10305 try_swap_copy_prop (loop
, replacement
, regno
)
10306 const struct loop
*loop
;
10308 unsigned int regno
;
10311 rtx set
= NULL_RTX
;
10312 unsigned int new_regno
;
10314 new_regno
= REGNO (replacement
);
10316 for (insn
= next_insn_in_loop (loop
, loop
->scan_start
);
10318 insn
= next_insn_in_loop (loop
, insn
))
10320 /* Search for the insn that copies REGNO to NEW_REGNO? */
10322 && (set
= single_set (insn
))
10323 && GET_CODE (SET_DEST (set
)) == REG
10324 && REGNO (SET_DEST (set
)) == new_regno
10325 && GET_CODE (SET_SRC (set
)) == REG
10326 && REGNO (SET_SRC (set
)) == regno
)
10330 if (insn
!= NULL_RTX
)
10335 /* Some DEF-USE info would come in handy here to make this
10336 function more general. For now, just check the previous insn
10337 which is the most likely candidate for setting REGNO. */
10339 prev_insn
= PREV_INSN (insn
);
10342 && (prev_set
= single_set (prev_insn
))
10343 && GET_CODE (SET_DEST (prev_set
)) == REG
10344 && REGNO (SET_DEST (prev_set
)) == regno
)
10347 (set (reg regno) (expr))
10348 (set (reg new_regno) (reg regno))
10350 so try converting this to:
10351 (set (reg new_regno) (expr))
10352 (set (reg regno) (reg new_regno))
10354 The former construct is often generated when a global
10355 variable used for an induction variable is shadowed by a
10356 register (NEW_REGNO). The latter construct improves the
10357 chances of GIV replacement and BIV elimination. */
10359 validate_change (prev_insn
, &SET_DEST (prev_set
),
10361 validate_change (insn
, &SET_DEST (set
),
10363 validate_change (insn
, &SET_SRC (set
),
10366 if (apply_change_group ())
10368 if (loop_dump_stream
)
10369 fprintf (loop_dump_stream
,
10370 " Swapped set of reg %d at %d with reg %d at %d.\n",
10371 regno
, INSN_UID (insn
),
10372 new_regno
, INSN_UID (prev_insn
));
10374 /* Update first use of REGNO. */
10375 if (REGNO_FIRST_UID (regno
) == INSN_UID (prev_insn
))
10376 REGNO_FIRST_UID (regno
) = INSN_UID (insn
);
10378 /* Now perform copy propagation to hopefully
10379 remove all uses of REGNO within the loop. */
10380 try_copy_prop (loop
, replacement
, regno
);
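/* Illustrative sketch (not part of the pass, kept out of the build): the
   source-level analogue of the set swapping that try_swap_copy_prop
   performs on the RTL templates shown in the comment above.  After the
   swap, the shadow register holds the computed value directly and the
   original register becomes the copy, so the later try_copy_prop call can
   hope to remove the remaining uses of the original.  */
#if 0
static int
swap_copy_sketch (int x)
{
  int regno_val, new_regno_val;

  /* Before: the original register is computed first, then copied.  */
  regno_val = x * 3;
  new_regno_val = regno_val;

  /* After: the copy direction is reversed; both variables still end up
     holding the same value, 3 * X.  */
  new_regno_val = x * 3;
  regno_val = new_regno_val;

  return regno_val + new_regno_val;	/* 6 * X under either ordering.  */
}
#endif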
/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually a pointer
   to a structure describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  loop_replace_args *args = (loop_replace_args *) data;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  if (!rtx_equal_p (args->match, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  /* Actually replace the MEM.  */
  validate_change (args->insn, mem, args->replacement, 1);

  return 0;
}

static void
replace_loop_mems (insn, mem, reg)
     rtx insn;
     rtx mem;
     rtx reg;
{
  loop_replace_args args;

  args.insn = insn;
  args.match = mem;
  args.replacement = reg;

  for_each_rtx (&insn, replace_loop_mem, &args);
}
/* Replace one register with another.  Called through for_each_rtx; PX
   points to the rtx being scanned.  DATA is actually a pointer to a
   structure of arguments.  */

static int
replace_loop_reg (px, data)
     rtx *px;
     void *data;
{
  rtx x = *px;
  loop_replace_args *args = (loop_replace_args *) data;

  if (x == NULL_RTX)
    return 0;

  if (x == args->match)
    validate_change (args->insn, px, args->replacement, 1);

  return 0;
}

static void
replace_loop_regs (insn, reg, replacement)
     rtx insn;
     rtx reg;
     rtx replacement;
{
  loop_replace_args args;

  args.insn = insn;
  args.match = reg;
  args.replacement = replacement;

  for_each_rtx (&insn, replace_loop_reg, &args);
}
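
/* Usage sketch, assuming the usual recog.c batching semantics: because
   validate_change is called above with a nonzero IN_GROUP argument,
   replace_loop_mems and replace_loop_regs only queue their
   replacements.  Nothing is modified until the caller runs
   apply_change_group (), which commits the queued changes if they are
   all valid and otherwise cancels them all and returns zero, roughly:

       replace_loop_regs (insn, reg, replacement);
       if (! apply_change_group ())
         return;

   with the early return standing in for whatever fallback the caller
   prefers.  */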
/* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
   (ignored in the interim).  */

static rtx
loop_insn_emit_after (loop, where_bb, where_insn, pattern)
     const struct loop *loop ATTRIBUTE_UNUSED;
     basic_block where_bb ATTRIBUTE_UNUSED;
     rtx where_insn;
     rtx pattern;
{
  return emit_insn_after (pattern, where_insn);
}


/* If WHERE_INSN is nonzero, emit insn for PATTERN before WHERE_INSN
   in basic block WHERE_BB (ignored in the interim) within the loop;
   otherwise hoist PATTERN into the loop pre-header.  */

rtx
loop_insn_emit_before (loop, where_bb, where_insn, pattern)
     const struct loop *loop;
     basic_block where_bb ATTRIBUTE_UNUSED;
     rtx where_insn;
     rtx pattern;
{
  if (! where_insn)
    return loop_insn_hoist (loop, pattern);
  return emit_insn_before (pattern, where_insn);
}
/* Emit call insn for PATTERN before WHERE_INSN in basic block
   WHERE_BB (ignored in the interim) within the loop.  */

static rtx
loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
     const struct loop *loop ATTRIBUTE_UNUSED;
     basic_block where_bb ATTRIBUTE_UNUSED;
     rtx where_insn;
     rtx pattern;
{
  return emit_call_insn_before (pattern, where_insn);
}


/* Hoist insn for PATTERN into the loop pre-header.  */

rtx
loop_insn_hoist (loop, pattern)
     const struct loop *loop;
     rtx pattern;
{
  return loop_insn_emit_before (loop, 0, loop->start, pattern);
}


/* Hoist call insn for PATTERN into the loop pre-header.  */

static rtx
loop_call_insn_hoist (loop, pattern)
     const struct loop *loop;
     rtx pattern;
{
  return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
}


/* Sink insn for PATTERN after the loop end.  */

rtx
loop_insn_sink (loop, pattern)
     const struct loop *loop;
     rtx pattern;
{
  return loop_insn_emit_before (loop, 0, loop->sink, pattern);
}
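
/* Usage sketch with illustrative names (TMP is not a variable from
   this file): a caller that wants a pseudo cleared once outside the
   loop can write

       rtx tmp = gen_reg_rtx (Pmode);
       loop_insn_hoist (loop, gen_move_insn (tmp, const0_rtx));

   which places the move just before loop->start; loop_insn_sink
   instead emits its pattern at loop->sink, after the loop body.  */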
/* bl->final_value can be either general_operand or PLUS of
   general_operand and constant.  Emit sequence of instructions to load
   it into REG.  */

static rtx
gen_load_of_final_value (reg, final_value)
     rtx reg, final_value;
{
  rtx seq;

  start_sequence ();
  final_value = force_operand (final_value, reg);
  if (final_value != reg)
    emit_move_insn (reg, final_value);
  seq = get_insns ();
  end_sequence ();

  return seq;
}


/* If the loop has multiple exits, emit insn for PATTERN before the
   loop to ensure that it will always be executed no matter how the
   loop exits.  Otherwise, emit the insn for PATTERN after the loop,
   since this is slightly more efficient.  */

static rtx
loop_insn_sink_or_swim (loop, pattern)
     const struct loop *loop;
     rtx pattern;
{
  if (loop->exit_count)
    return loop_insn_hoist (loop, pattern);
  else
    return loop_insn_sink (loop, pattern);
}
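
/* For example (register number chosen only for illustration), a final
   value recorded as

       (plus (reg 117) (const_int 4))

   is not itself a register, so gen_load_of_final_value relies on
   force_operand to emit the addition, and then copies the result into
   REG if force_operand did not already compute it there.  A plain
   register or constant final value reduces to a single move.  */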
void
loop_ivs_dump (loop, file, verbose)
     const struct loop *loop;
     FILE *file;
     int verbose;
{
  int iv_num = 0;
  struct iv_class *bl;

  if (! loop || ! file)
    return;

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    iv_num++;

  fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    {
      loop_iv_class_dump (bl, file, verbose);
      fputc ('\n', file);
    }
}
void
loop_iv_class_dump (bl, file, verbose)
     const struct iv_class *bl;
     FILE *file;
     int verbose ATTRIBUTE_UNUSED;
{
  struct induction *v;
  rtx incr;
  int i;

  if (! bl || ! file)
    return;

  fprintf (file, "IV class for reg %d, benefit %d\n",
           bl->regno, bl->total_benefit);

  fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
  if (bl->initial_value)
    {
      fprintf (file, ", init val: ");
      print_simple_rtl (file, bl->initial_value);
    }
  if (bl->initial_test)
    {
      fprintf (file, ", init test: ");
      print_simple_rtl (file, bl->initial_test);
    }
  fputc ('\n', file);

  if (bl->final_value)
    {
      fprintf (file, " Final val: ");
      print_simple_rtl (file, bl->final_value);
      fputc ('\n', file);
    }

  if ((incr = biv_total_increment (bl)))
    {
      fprintf (file, " Total increment: ");
      print_simple_rtl (file, incr);
      fputc ('\n', file);
    }

  /* List the increments.  */
  for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
      print_simple_rtl (file, v->add_val);
      fputc ('\n', file);
    }

  /* List the givs.  */
  for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Giv%d: insn %d, benefit %d, ",
               i, INSN_UID (v->insn), v->benefit);
      if (v->giv_type == DEST_ADDR)
        print_simple_rtl (file, v->mem);
      else
        print_simple_rtl (file, single_set (v->insn));
      fputc ('\n', file);
    }
}
void
loop_biv_dump (v, file, verbose)
     const struct induction *v;
     FILE *file;
     int verbose;
{
  if (! v || ! file)
    return;

  fprintf (file,
           "Biv %d: insn %d",
           REGNO (v->dest_reg), INSN_UID (v->insn));
  fprintf (file, " const ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}
void
loop_giv_dump (v, file, verbose)
     const struct induction *v;
     FILE *file;
     int verbose;
{
  if (! v || ! file)
    return;

  if (v->giv_type == DEST_REG)
    fprintf (file, "Giv %d: insn %d",
             REGNO (v->dest_reg), INSN_UID (v->insn));
  else
    fprintf (file, "Dest address: insn %d",
             INSN_UID (v->insn));

  fprintf (file, " src reg %d benefit %d",
           REGNO (v->src_reg), v->benefit);
  fprintf (file, " lifetime %d",
           v->lifetime);

  if (v->replaceable)
    fprintf (file, " replaceable");

  if (v->no_const_addval)
    fprintf (file, " ncav");

  if (v->ext_dependent)
    {
      switch (GET_CODE (v->ext_dependent))
        {
        case SIGN_EXTEND:
          fprintf (file, " ext se");
          break;
        case ZERO_EXTEND:
          fprintf (file, " ext ze");
          break;
        case TRUNCATE:
          fprintf (file, " ext tr");
          break;
        default:
          abort ();
        }
    }

  fputc ('\n', file);
  fprintf (file, " mult ");
  print_simple_rtl (file, v->mult_val);

  fputc ('\n', file);
  fprintf (file, " add ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}
void
debug_ivs (loop)
     const struct loop *loop;
{
  loop_ivs_dump (loop, stderr, 1);
}


void
debug_iv_class (bl)
     const struct iv_class *bl;
{
  loop_iv_class_dump (bl, stderr, 1);
}


void
debug_biv (v)
     const struct induction *v;
{
  loop_biv_dump (v, stderr, 1);
}


void
debug_giv (v)
     const struct induction *v;
{
  loop_giv_dump (v, stderr, 1);
}
#define LOOP_BLOCK_NUM_1(INSN) \
((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)

/* The notes do not have an assigned block, so look at the next insn.  */
#define LOOP_BLOCK_NUM(INSN) \
((INSN) ? (GET_CODE (INSN) == NOTE \
           ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
           : LOOP_BLOCK_NUM_1 (INSN)) \
 : -1)

#define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
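
/* For instance, LOOP_BLOCK_NUM applied to a NOTE_INSN_LOOP_BEG note
   reports the block of the next non-note insn following it, and both
   macros fall back to -1 when the insn is missing or has not been
   assigned to a basic block.  */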
static void
loop_dump_aux (loop, file, verbose)
     const struct loop *loop;
     FILE *file;
     int verbose ATTRIBUTE_UNUSED;
{
  rtx label;

  if (! loop || ! file)
    return;

  /* Print diagnostics to compare our concept of a loop with
     what the loop notes say.  */
  if (! PREV_INSN (loop->first->head)
      || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
      || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
         != NOTE_INSN_LOOP_BEG)
    fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
             INSN_UID (PREV_INSN (loop->first->head)));
  if (! NEXT_INSN (loop->last->end)
      || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
      || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
         != NOTE_INSN_LOOP_END)
    fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
             INSN_UID (NEXT_INSN (loop->last->end)));

  if (loop->start)
    {
      fprintf (file,
               ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
               LOOP_BLOCK_NUM (loop->start),
               LOOP_INSN_UID (loop->start),
               LOOP_BLOCK_NUM (loop->cont),
               LOOP_INSN_UID (loop->cont),
               LOOP_BLOCK_NUM (loop->cont),
               LOOP_INSN_UID (loop->cont),
               LOOP_BLOCK_NUM (loop->vtop),
               LOOP_INSN_UID (loop->vtop),
               LOOP_BLOCK_NUM (loop->end),
               LOOP_INSN_UID (loop->end));
      fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
               LOOP_BLOCK_NUM (loop->top),
               LOOP_INSN_UID (loop->top),
               LOOP_BLOCK_NUM (loop->scan_start),
               LOOP_INSN_UID (loop->scan_start));
      fprintf (file, ";; exit_count %d", loop->exit_count);
      if (loop->exit_count)
        {
          fputs (", labels:", file);
          for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
            fprintf (file, " %d ",
                     LOOP_INSN_UID (XEXP (label, 0)));
        }
      fputs ("\n", file);

      /* This can happen when a marked loop appears as two nested loops,
         say from while (a || b) {}.  The inner loop won't match
         the loop markers but the outer one will.  */
      if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
        fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
    }
}
/* Call this function from the debugger to dump LOOP.  */

void
debug_loop (loop)
     const struct loop *loop;
{
  flow_loop_dump (loop, stderr, loop_dump_aux, 1);
}

/* Call this function from the debugger to dump LOOPS.  */

void
debug_loops (loops)
     const struct loops *loops;
{
  flow_loops_dump (loops, stderr, loop_dump_aux, 1);
}
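
/* Usage sketch: these entry points are meant to be called by hand from
   a debugger, e.g. under gdb

       (gdb) call debug_loop (loop)
       (gdb) call debug_loops (loops)

   assuming `loop' and `loops' are visible in the selected frame; all
   output goes to stderr.  */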