1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables.
27 Basic induction variables (BIVs) are pseudo registers which are set within
28 a loop only by incrementing or decrementing their value. General induction
29 variables (GIVs) are pseudo registers whose value is a linear function
30 of a basic induction variable. BIVs are recognized by `basic_induction_var';
31 GIVs by `general_induction_var'.
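(As a small illustration, in a source loop such as

	for (i = 0; i < n; i++)
	  a[i] = 0;

the counter `i' is a BIV, since the loop only ever adds a constant to it,
while the address a + i * sizeof (*a) used to store into a[i] is a GIV,
being a linear function of `i'. The names here are purely illustrative.)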
33 Once induction variables are identified, strength reduction is applied to the
34 general induction variables, and induction variable elimination is applied to
35 the basic induction variables.
37 The pass also finds cases where a register is set within the loop by
38 zero-extending a narrower value, and changes these to zero the entire
39 register once before the loop and merely copy the low part within the
40 loop.
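(Roughly, a sequence inside the loop of the form

	(set (reg:SI r) (const_int 0))
	(set (strict_low_part (subreg:HI (reg:SI r) ...)) (mem:HI ...))

has the clearing of R hoisted to before the loop, leaving only the narrow
load of the low part inside; this is the `partial' movable case handled in
scan_loop, sketched here only schematically.)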
42 Most of the complexity is in heuristics to decide when it is
43 worthwhile to do these things. */
45 #include "config.h"
46 #include "system.h"
47 #include "coretypes.h"
48 #include "tm.h"
49 #include "rtl.h"
50 #include "tm_p.h"
51 #include "function.h"
52 #include "expr.h"
53 #include "hard-reg-set.h"
54 #include "basic-block.h"
55 #include "insn-config.h"
56 #include "regs.h"
57 #include "recog.h"
58 #include "flags.h"
59 #include "real.h"
60 #include "loop.h"
61 #include "cselib.h"
62 #include "except.h"
63 #include "toplev.h"
64 #include "predict.h"
65 #include "insn-flags.h"
66 #include "optabs.h"
67 #include "cfgloop.h"
68 #include "ggc.h"
70 /* Not really meaningful values, but at least something. */
71 #ifndef SIMULTANEOUS_PREFETCHES
72 #define SIMULTANEOUS_PREFETCHES 3
73 #endif
74 #ifndef PREFETCH_BLOCK
75 #define PREFETCH_BLOCK 32
76 #endif
77 #ifndef HAVE_prefetch
78 #define HAVE_prefetch 0
79 #define CODE_FOR_prefetch 0
80 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
81 #endif
83 /* Give up the prefetch optimizations once we exceed a given threshold.
84 It is unlikely that we would be able to optimize something in a loop
85 with so many detected prefetches. */
86 #define MAX_PREFETCHES 100
87 /* The number of prefetch blocks that are beneficial to fetch at once before
88 a loop with a known (and low) iteration count. */
89 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
90 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
91 since it is likely that the data are already in the cache. */
92 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
94 /* Parameterize some prefetch heuristics so they can be turned on and off
95 easily for performance testing on new architectures. These can be
96 defined in target-dependent files. */
98 /* Prefetch is worthwhile only when loads/stores are dense. */
99 #ifndef PREFETCH_ONLY_DENSE_MEM
100 #define PREFETCH_ONLY_DENSE_MEM 1
101 #endif
103 /* Define what we mean by "dense" loads and stores; this value divided by 256
104 is the minimum fraction of memory references needed for prefetching to be worthwhile. */
105 #ifndef PREFETCH_DENSE_MEM
106 #define PREFETCH_DENSE_MEM 220
107 #endif
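/* (For the default of 220, that threshold is 220/256, i.e. roughly 86%.)  */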
109 /* Do not prefetch for a loop whose iteration count is known to be low. */
110 #ifndef PREFETCH_NO_LOW_LOOPCNT
111 #define PREFETCH_NO_LOW_LOOPCNT 1
112 #endif
114 /* Define what we mean by a "low" iteration count. */
115 #ifndef PREFETCH_LOW_LOOPCNT
116 #define PREFETCH_LOW_LOOPCNT 32
117 #endif
119 /* Do not prefetch for a loop that contains a function call; such a loop is
120 probably not an internal loop. */
121 #ifndef PREFETCH_NO_CALL
122 #define PREFETCH_NO_CALL 1
123 #endif
125 /* Do not prefetch accesses with an extreme stride. */
126 #ifndef PREFETCH_NO_EXTREME_STRIDE
127 #define PREFETCH_NO_EXTREME_STRIDE 1
128 #endif
130 /* Define what we mean by an "extreme" stride. */
131 #ifndef PREFETCH_EXTREME_STRIDE
132 #define PREFETCH_EXTREME_STRIDE 4096
133 #endif
135 /* Define a limit to how far apart indices can be and still be merged
136 into a single prefetch. */
137 #ifndef PREFETCH_EXTREME_DIFFERENCE
138 #define PREFETCH_EXTREME_DIFFERENCE 4096
139 #endif
141 /* Issue prefetch instructions before the loop to fetch data to be used
142 in the first few loop iterations. */
143 #ifndef PREFETCH_BEFORE_LOOP
144 #define PREFETCH_BEFORE_LOOP 1
145 #endif
147 /* Do not handle reversed order prefetches (negative stride). */
148 #ifndef PREFETCH_NO_REVERSE_ORDER
149 #define PREFETCH_NO_REVERSE_ORDER 1
150 #endif
152 /* Prefetch even if the GIV is in conditional code. */
153 #ifndef PREFETCH_CONDITIONAL
154 #define PREFETCH_CONDITIONAL 1
155 #endif
157 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
158 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
160 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
161 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
162 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
164 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
165 ((REGNO) < FIRST_PSEUDO_REGISTER \
166 ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
169 /* Vector mapping INSN_UIDs to luids.
170 The luids are like uids but always increase monotonically.
171 We use them to see whether a jump comes from outside a given loop. */
173 int *uid_luid;
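/* (For example, a jump target whose luid is smaller than the luid of the
   loop's start note, or larger than that of its end note, must lie outside
   that loop; the INSN_IN_RANGE_P macro below encodes this kind of test.)  */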
175 /* Indexed by INSN_UID, contains a pointer to the (innermost) loop
176 that the insn is contained in. */
178 struct loop **uid_loop;
180 /* 1 + largest uid of any insn. */
182 int max_uid_for_loop;
184 /* Number of loops detected in current function. Used as index to the
185 next few tables. */
187 static int max_loop_num;
189 /* Bound on pseudo register number before loop optimization.
190 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
191 unsigned int max_reg_before_loop;
193 /* The value to pass to the next call of reg_scan_update. */
194 static int loop_max_reg;
196 /* During the analysis of a loop, a chain of `struct movable's
197 is made to record all the movable insns found.
198 Then the entire chain can be scanned to decide which to move. */
200 struct movable
202 rtx insn; /* A movable insn */
203 rtx set_src; /* The expression this reg is set from. */
204 rtx set_dest; /* The destination of this SET. */
205 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
206 of any registers used within the LIBCALL. */
207 int consec; /* Number of consecutive following insns
208 that must be moved with this one. */
209 unsigned int regno; /* The register it sets */
210 short lifetime; /* lifetime of that register;
211 may be adjusted when matching movables
212 that load the same value are found. */
213 short savings; /* Number of insns we can move for this reg,
214 including other movables that force this
215 or match this one. */
216 ENUM_BITFIELD(machine_mode) savemode : 8; /* Nonzero means it is a mode for
217 a low part that we should avoid changing when
218 clearing the rest of the reg. */
219 unsigned int cond : 1; /* 1 if only conditionally movable */
220 unsigned int force : 1; /* 1 means MUST move this insn */
221 unsigned int global : 1; /* 1 means reg is live outside this loop */
222 /* If PARTIAL is 1, GLOBAL means something different:
223 that the reg is live outside the range from where it is set
224 to the following label. */
225 unsigned int done : 1; /* 1 inhibits further processing of this */
227 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
228 In particular, moving it does not make it
229 invariant. */
230 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
231 load SRC, rather than copying INSN. */
232 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
233 first insn of a consecutive sets group. */
234 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
235 unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
236 the original insn with a copy from that
237 pseudo, rather than deleting it. */
238 struct movable *match; /* First entry for same value */
239 struct movable *forces; /* An insn that must be moved if this is */
240 struct movable *next;
244 FILE *loop_dump_stream;
246 /* Forward declarations. */
248 static void invalidate_loops_containing_label (rtx);
249 static void find_and_verify_loops (rtx, struct loops *);
250 static void mark_loop_jump (rtx, struct loop *);
251 static void prescan_loop (struct loop *);
252 static int reg_in_basic_block_p (rtx, rtx);
253 static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
254 static int labels_in_range_p (rtx, int);
255 static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
256 static void note_addr_stored (rtx, rtx, void *);
257 static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
258 static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
259 static rtx find_regs_nested (rtx, rtx);
260 static void scan_loop (struct loop*, int);
261 #if 0
262 static void replace_call_address (rtx, rtx, rtx);
263 #endif
264 static rtx skip_consec_insns (rtx, int);
265 static int libcall_benefit (rtx);
266 static rtx libcall_other_reg (rtx, rtx);
267 static void record_excess_regs (rtx, rtx, rtx *);
268 static void ignore_some_movables (struct loop_movables *);
269 static void force_movables (struct loop_movables *);
270 static void combine_movables (struct loop_movables *, struct loop_regs *);
271 static int num_unmoved_movables (const struct loop *);
272 static int regs_match_p (rtx, rtx, struct loop_movables *);
273 static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
274 struct loop_regs *);
275 static void add_label_notes (rtx, rtx);
276 static void move_movables (struct loop *loop, struct loop_movables *, int,
277 int);
278 static void loop_movables_add (struct loop_movables *, struct movable *);
279 static void loop_movables_free (struct loop_movables *);
280 static int count_nonfixed_reads (const struct loop *, rtx);
281 static void loop_bivs_find (struct loop *);
282 static void loop_bivs_init_find (struct loop *);
283 static void loop_bivs_check (struct loop *);
284 static void loop_givs_find (struct loop *);
285 static void loop_givs_check (struct loop *);
286 static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
287 static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
288 struct induction *, rtx);
289 static void loop_givs_dead_check (struct loop *, struct iv_class *);
290 static void loop_givs_reduce (struct loop *, struct iv_class *);
291 static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
292 static void loop_ivs_free (struct loop *);
293 static void strength_reduce (struct loop *, int);
294 static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
295 static int valid_initial_value_p (rtx, rtx, int, rtx);
296 static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
297 static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
298 rtx, rtx *, int, int);
299 static void check_final_value (const struct loop *, struct induction *);
300 static void loop_ivs_dump (const struct loop *, FILE *, int);
301 static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
302 static void loop_biv_dump (const struct induction *, FILE *, int);
303 static void loop_giv_dump (const struct induction *, FILE *, int);
304 static void record_giv (const struct loop *, struct induction *, rtx, rtx,
305 rtx, rtx, rtx, rtx, int, enum g_types, int, int,
306 rtx *);
307 static void update_giv_derive (const struct loop *, rtx);
308 static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
309 static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
310 rtx, rtx, rtx *, rtx *, rtx **);
311 static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
312 static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
313 rtx *, rtx *, int, int *, enum machine_mode);
314 static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
315 rtx *, rtx *, rtx *);
316 static int check_dbra_loop (struct loop *, int);
317 static rtx express_from_1 (rtx, rtx, rtx);
318 static rtx combine_givs_p (struct induction *, struct induction *);
319 static int cmp_combine_givs_stats (const void *, const void *);
320 static void combine_givs (struct loop_regs *, struct iv_class *);
321 static int product_cheap_p (rtx, rtx);
322 static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
323 int, int);
324 static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
325 struct iv_class *, int, basic_block, rtx);
326 static int last_use_this_basic_block (rtx, rtx);
327 static void record_initial (rtx, rtx, void *);
328 static void update_reg_last_use (rtx, rtx);
329 static rtx next_insn_in_loop (const struct loop *, rtx);
330 static void loop_regs_scan (const struct loop *, int);
331 static int count_insns_in_loop (const struct loop *);
332 static int find_mem_in_note_1 (rtx *, void *);
333 static rtx find_mem_in_note (rtx);
334 static void load_mems (const struct loop *);
335 static int insert_loop_mem (rtx *, void *);
336 static int replace_loop_mem (rtx *, void *);
337 static void replace_loop_mems (rtx, rtx, rtx, int);
338 static int replace_loop_reg (rtx *, void *);
339 static void replace_loop_regs (rtx insn, rtx, rtx);
340 static void note_reg_stored (rtx, rtx, void *);
341 static void try_copy_prop (const struct loop *, rtx, unsigned int);
342 static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
343 static rtx check_insn_for_givs (struct loop *, rtx, int, int);
344 static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
345 static rtx gen_add_mult (rtx, rtx, rtx, rtx);
346 static void loop_regs_update (const struct loop *, rtx);
347 static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
349 static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
350 static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
351 rtx, rtx);
352 static rtx loop_call_insn_hoist (const struct loop *, rtx);
353 static rtx loop_insn_sink_or_swim (const struct loop *, rtx);
355 static void loop_dump_aux (const struct loop *, FILE *, int);
356 static void loop_delete_insns (rtx, rtx);
357 static HOST_WIDE_INT remove_constant_addition (rtx *);
358 static rtx gen_load_of_final_value (rtx, rtx);
359 void debug_ivs (const struct loop *);
360 void debug_iv_class (const struct iv_class *);
361 void debug_biv (const struct induction *);
362 void debug_giv (const struct induction *);
363 void debug_loop (const struct loop *);
364 void debug_loops (const struct loops *);
366 typedef struct loop_replace_args
368 rtx match;
369 rtx replacement;
370 rtx insn;
371 } loop_replace_args;
373 /* Nonzero iff INSN is between START and END, inclusive. */
374 #define INSN_IN_RANGE_P(INSN, START, END) \
375 (INSN_UID (INSN) < max_uid_for_loop \
376 && INSN_LUID (INSN) >= INSN_LUID (START) \
377 && INSN_LUID (INSN) <= INSN_LUID (END))
379 /* Indirect_jump_in_function is computed once per function. */
380 static int indirect_jump_in_function;
381 static int indirect_jump_in_function_p (rtx);
383 static int compute_luids (rtx, rtx, int);
385 static int biv_elimination_giv_has_0_offset (struct induction *,
386 struct induction *, rtx);
388 /* Benefit penalty if a giv is not replaceable, i.e. we must emit an insn to
389 copy the value of the strength-reduced giv to its original register. */
390 static int copy_cost;
392 /* Cost of using a register, to normalize the benefits of a giv. */
393 static int reg_address_cost;
395 void
396 init_loop (void)
398 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
400 reg_address_cost = address_cost (reg, SImode);
402 copy_cost = COSTS_N_INSNS (1);
405 /* Compute the mapping from uids to luids.
406 LUIDs are numbers assigned to insns, like uids,
407 except that luids increase monotonically through the code.
408 Start at insn START and stop just before END. Assign LUIDs
409 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
410 static int
411 compute_luids (rtx start, rtx end, int prev_luid)
413 int i;
414 rtx insn;
416 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
418 if (INSN_UID (insn) >= max_uid_for_loop)
419 continue;
420 /* Don't assign luids to line-number NOTEs, so that the distance in
421 luids between two insns is not affected by -g. */
422 if (!NOTE_P (insn)
423 || NOTE_LINE_NUMBER (insn) <= 0)
424 uid_luid[INSN_UID (insn)] = ++i;
425 else
426 /* Give a line number note the same luid as preceding insn. */
427 uid_luid[INSN_UID (insn)] = i;
429 return i + 1;
432 /* Entry point of this file. Perform loop optimization
433 on the current function. F is the first insn of the function
434 and DUMPFILE is a stream for output of a trace of actions taken
435 (or 0 if none should be output). */
437 void
438 loop_optimize (rtx f, FILE *dumpfile, int flags)
440 rtx insn;
441 int i;
442 struct loops loops_data;
443 struct loops *loops = &loops_data;
444 struct loop_info *loops_info;
446 loop_dump_stream = dumpfile;
448 init_recog_no_volatile ();
450 max_reg_before_loop = max_reg_num ();
451 loop_max_reg = max_reg_before_loop;
453 regs_may_share = 0;
455 /* Count the number of loops. */
457 max_loop_num = 0;
458 for (insn = f; insn; insn = NEXT_INSN (insn))
460 if (NOTE_P (insn)
461 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
462 max_loop_num++;
465 /* Don't waste time if no loops. */
466 if (max_loop_num == 0)
467 return;
469 loops->num = max_loop_num;
471 /* Get size to use for tables indexed by uids.
472 Leave some space for labels allocated by find_and_verify_loops. */
473 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
475 uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
476 uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));
478 /* Allocate storage for array of loops. */
479 loops->array = xcalloc (loops->num, sizeof (struct loop));
481 /* Find and process each loop.
482 First, find them, and record them in order of their beginnings. */
483 find_and_verify_loops (f, loops);
485 /* Allocate and initialize auxiliary loop information. */
486 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
487 for (i = 0; i < (int) loops->num; i++)
488 loops->array[i].aux = loops_info + i;
490 /* Now find all register lifetimes. This must be done after
491 find_and_verify_loops, because it might reorder the insns in the
492 function. */
493 reg_scan (f, max_reg_before_loop, 1);
495 /* This must occur after reg_scan so that registers created by gcse
496 will have entries in the register tables.
498 We could have added a call to reg_scan after gcse_main in toplev.c,
499 but moving this call to init_alias_analysis is more efficient. */
500 init_alias_analysis ();
502 /* See if we went too far. Note that get_max_uid already returns
503 one more than the maximum uid of all insns. */
504 if (get_max_uid () > max_uid_for_loop)
505 abort ();
506 /* Now reset it to the actual size we need. See above. */
507 max_uid_for_loop = get_max_uid ();
509 /* find_and_verify_loops has already called compute_luids, but it
510 might have rearranged code afterwards, so we need to recompute
511 the luids now. */
512 compute_luids (f, NULL_RTX, 0);
514 /* Don't leave gaps in uid_luid for insns that have been
515 deleted. It is possible that the first or last insn
516 using some register has been deleted by cross-jumping.
517 Make sure that uid_luid for that former insn's uid
518 points to the general area where that insn used to be. */
519 for (i = 0; i < max_uid_for_loop; i++)
521 uid_luid[0] = uid_luid[i];
522 if (uid_luid[0] != 0)
523 break;
525 for (i = 0; i < max_uid_for_loop; i++)
526 if (uid_luid[i] == 0)
527 uid_luid[i] = uid_luid[i - 1];
529 /* Determine if the function has an indirect jump. On some systems
530 this prevents low-overhead loop instructions from being used. */
531 indirect_jump_in_function = indirect_jump_in_function_p (f);
533 /* Now scan the loops, last ones first, since this means inner ones are done
534 before outer ones. */
535 for (i = max_loop_num - 1; i >= 0; i--)
537 struct loop *loop = &loops->array[i];
539 if (! loop->invalid && loop->end)
541 scan_loop (loop, flags);
542 ggc_collect ();
546 end_alias_analysis ();
548 /* Clean up. */
549 for (i = 0; i < (int) loops->num; i++)
550 free (loops_info[i].mems);
552 free (uid_luid);
553 free (uid_loop);
554 free (loops_info);
555 free (loops->array);
558 /* Returns the next insn, in execution order, after INSN. LOOP->START and
559 LOOP->END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
560 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
561 insn-stream; it is used with loops that are entered near the
562 bottom. */
564 static rtx
565 next_insn_in_loop (const struct loop *loop, rtx insn)
567 insn = NEXT_INSN (insn);
569 if (insn == loop->end)
571 if (loop->top)
572 /* Go to the top of the loop, and continue there. */
573 insn = loop->top;
574 else
575 /* We're done. */
576 insn = NULL_RTX;
579 if (insn == loop->scan_start)
580 /* We're done. */
581 insn = NULL_RTX;
583 return insn;
586 /* Find any register references hidden inside X and add them to
587 the dependency list DEPS. This is used to look inside CLOBBER (MEM ...)
588 patterns when checking whether a PARALLEL can be pulled out of a loop. */
590 static rtx
591 find_regs_nested (rtx deps, rtx x)
593 enum rtx_code code = GET_CODE (x);
594 if (code == REG)
595 deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
596 else
598 const char *fmt = GET_RTX_FORMAT (code);
599 int i, j;
600 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
602 if (fmt[i] == 'e')
603 deps = find_regs_nested (deps, XEXP (x, i));
604 else if (fmt[i] == 'E')
605 for (j = 0; j < XVECLEN (x, i); j++)
606 deps = find_regs_nested (deps, XVECEXP (x, i, j));
609 return deps;
612 /* Optimize one loop described by LOOP. */
614 /* ??? Could also move memory writes out of loops if the destination address
615 is invariant, the source is invariant, the memory write is not volatile,
616 and if we can prove that no read inside the loop can read this address
617 before the write occurs. If there is a read of this address after the
618 write, then we can also mark the memory read as invariant. */
620 static void
621 scan_loop (struct loop *loop, int flags)
623 struct loop_info *loop_info = LOOP_INFO (loop);
624 struct loop_regs *regs = LOOP_REGS (loop);
625 int i;
626 rtx loop_start = loop->start;
627 rtx loop_end = loop->end;
628 rtx p;
629 /* 1 if we are scanning insns that could be executed zero times. */
630 int maybe_never = 0;
631 /* 1 if we are scanning insns that might never be executed
632 due to a subroutine call which might exit before they are reached. */
633 int call_passed = 0;
634 /* Number of insns in the loop. */
635 int insn_count;
636 int tem;
637 rtx temp, update_start, update_end;
638 /* The SET from an insn, if it is the only SET in the insn. */
639 rtx set, set1;
640 /* Chain describing insns movable in current loop. */
641 struct loop_movables *movables = LOOP_MOVABLES (loop);
642 /* Ratio of extra register life span we can justify
643 for saving an instruction. More if the loop doesn't call subroutines,
644 since in that case saving an insn makes more difference
645 and more registers are available. */
646 int threshold;
647 int in_libcall;
649 loop->top = 0;
651 movables->head = 0;
652 movables->last = 0;
654 /* Determine whether this loop starts with a jump down to a test at
655 the end. This will occur for a small number of loops with a test
656 that is too complex to duplicate in front of the loop.
658 We search for the first insn or label in the loop, skipping NOTEs.
659 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
660 (because we might have a loop executed only once that contains a
661 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
662 (in case we have a degenerate loop).
664 Note that if we mistakenly think that a loop is entered at the top
665 when, in fact, it is entered at the exit test, the only effect will be
666 slightly poorer optimization. Making the opposite error can generate
667 incorrect code. Since very few loops now start with a jump to the
668 exit test, the code here to detect that case is very conservative. */
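  /* (Schematically, such a loop looks like

	  NOTE_INSN_LOOP_BEG
	  jump to L2
      L1: loop body
      L2: exit test; conditional jump back to L1
	  NOTE_INSN_LOOP_END

     in which case LOOP->SCAN_START becomes L2 and LOOP->TOP becomes L1.
     This is a rough sketch only.)  */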
670 for (p = NEXT_INSN (loop_start);
671 p != loop_end
672 && !LABEL_P (p) && ! INSN_P (p)
673 && (!NOTE_P (p)
674 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
675 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
676 p = NEXT_INSN (p))
679 loop->scan_start = p;
681 /* If loop end is the end of the current function, then emit a
682 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
683 note insn. This is the position we use when sinking insns out of
684 the loop. */
685 if (NEXT_INSN (loop->end) != 0)
686 loop->sink = NEXT_INSN (loop->end);
687 else
688 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
690 /* Set up variables describing this loop. */
691 prescan_loop (loop);
692 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
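  /* (For instance, with no calls in the loop and 31 non-fixed registers this
     evaluates to 2 * 32 = 64; a loop containing calls would get half that.)  */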
694 /* If loop has a jump before the first label,
695 the true entry is the target of that jump.
696 Start scan from there.
697 But record in LOOP->TOP the place where the end-test jumps
698 back to so we can scan that after the end of the loop. */
699 if (JUMP_P (p)
700 /* Loop entry must be unconditional jump (and not a RETURN) */
701 && any_uncondjump_p (p)
702 && JUMP_LABEL (p) != 0
703 /* Check to see whether the jump actually
704 jumps out of the loop (meaning it's no loop).
705 This case can happen for things like
706 do {..} while (0). If this label was generated previously
707 by loop, we can't tell anything about it and have to reject
708 the loop. */
709 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
711 loop->top = next_label (loop->scan_start);
712 loop->scan_start = JUMP_LABEL (p);
715 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
716 as required by loop_reg_used_before_p. So skip such loops. (This
717 test may never be true, but it's best to play it safe.)
719 Also, skip loops where we do not start scanning at a label. This
720 test also rejects loops starting with a JUMP_INSN that failed the
721 test above. */
723 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
724 || !LABEL_P (loop->scan_start))
726 if (loop_dump_stream)
727 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
728 INSN_UID (loop_start), INSN_UID (loop_end));
729 return;
732 /* Allocate extra space for REGs that might be created by load_mems.
733 We allocate a little extra slop as well, in the hopes that we
734 won't have to reallocate the regs array. */
735 loop_regs_scan (loop, loop_info->mems_idx + 16);
736 insn_count = count_insns_in_loop (loop);
738 if (loop_dump_stream)
739 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
740 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
742 /* Scan through the loop finding insns that are safe to move.
743 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
744 this reg will be considered invariant for subsequent insns.
745 We consider whether subsequent insns use the reg
746 in deciding whether it is worth actually moving.
748 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
749 and therefore it is possible that the insns we are scanning
750 would never be executed. At such times, we must make sure
751 that it is safe to execute the insn once instead of zero times.
752 When MAYBE_NEVER is 0, all insns will be executed at least once
753 so that is not a problem. */
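  /* (As a sketch of the encoding used below: regs->array[I].set_in_loop
     normally holds the number of times reg I is set in the loop; when an
     insn setting reg I is recorded as movable it is overwritten with -1,
     or -2 if the movable will be re-emitted via emit_move_insn, and any
     entries still negative after move_movables are restored from
     n_times_set further down.)  */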
755 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
756 p != NULL_RTX;
757 p = next_insn_in_loop (loop, p))
759 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
760 in_libcall--;
761 if (NONJUMP_INSN_P (p))
763 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
764 if (temp)
765 in_libcall++;
766 if (! in_libcall
767 && (set = single_set (p))
768 && REG_P (SET_DEST (set))
769 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
770 && SET_DEST (set) != pic_offset_table_rtx
771 #endif
772 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
774 int tem1 = 0;
775 int tem2 = 0;
776 int move_insn = 0;
777 int insert_temp = 0;
778 rtx src = SET_SRC (set);
779 rtx dependencies = 0;
781 /* Figure out what to use as a source of this insn. If a
782 REG_EQUIV note is given or if a REG_EQUAL note with a
783 constant operand is specified, use it as the source and
784 mark that we should move this insn by calling
785 emit_move_insn rather than duplicating the insn.
787 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
788 note is present. */
789 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
790 if (temp)
791 src = XEXP (temp, 0), move_insn = 1;
792 else
794 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
795 if (temp && CONSTANT_P (XEXP (temp, 0)))
796 src = XEXP (temp, 0), move_insn = 1;
797 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
799 src = XEXP (temp, 0);
800 /* A libcall block can use regs that don't appear in
801 the equivalent expression. To move the libcall,
802 we must move those regs too. */
803 dependencies = libcall_other_reg (p, src);
807 /* For parallels, add any possible uses to the dependencies, as
808 we can't move the insn without resolving them first.
809 MEMs inside CLOBBERs may also reference registers; these
810 count as implicit uses. */
811 if (GET_CODE (PATTERN (p)) == PARALLEL)
813 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
815 rtx x = XVECEXP (PATTERN (p), 0, i);
816 if (GET_CODE (x) == USE)
817 dependencies
818 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
819 dependencies);
820 else if (GET_CODE (x) == CLOBBER
821 && MEM_P (XEXP (x, 0)))
822 dependencies = find_regs_nested (dependencies,
823 XEXP (XEXP (x, 0), 0));
827 if (/* The register is used in basic blocks other
828 than the one where it is set (meaning that
829 something after this point in the loop might
830 depend on its value before the set). */
831 ! reg_in_basic_block_p (p, SET_DEST (set))
832 /* And the set is not guaranteed to be executed once
833 the loop starts, or the value before the set is
834 needed before the set occurs...
836 ??? Note we have quadratic behavior here, mitigated
837 by the fact that the previous test will often fail for
838 large loops. Rather than re-scanning the entire loop
839 each time for register usage, we should build tables
840 of the register usage and use them here instead. */
841 && (maybe_never
842 || loop_reg_used_before_p (loop, set, p)))
843 /* It is unsafe to move the set. However, it may be OK to
844 move the source into a new pseudo, and substitute a
845 reg-to-reg copy for the original insn.
847 This code used to consider it OK to move a set of a variable
848 which was not created by the user and not used in an exit
849 test.
850 That behavior is incorrect and was removed. */
851 insert_temp = 1;
853 /* Don't try to optimize a MODE_CC set with a constant
854 source. It probably will be combined with a conditional
855 jump. */
856 if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
857 && CONSTANT_P (src))
859 /* Don't try to optimize a register that was made
860 by loop-optimization for an inner loop.
861 We don't know its life-span, so we can't compute
862 the benefit. */
863 else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
865 /* Don't move the source and add a reg-to-reg copy:
866 - with -Os (this certainly increases size),
867 - if the mode doesn't support copy operations (obviously),
868 - if the source is already a reg (the motion will gain nothing),
869 - if the source is a legitimate constant (likewise). */
870 else if (insert_temp
871 && (optimize_size
872 || ! can_copy_p (GET_MODE (SET_SRC (set)))
873 || REG_P (SET_SRC (set))
874 || (CONSTANT_P (SET_SRC (set))
875 && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
877 else if ((tem = loop_invariant_p (loop, src))
878 && (dependencies == 0
879 || (tem2
880 = loop_invariant_p (loop, dependencies)) != 0)
881 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
882 || (tem1
883 = consec_sets_invariant_p
884 (loop, SET_DEST (set),
885 regs->array[REGNO (SET_DEST (set))].set_in_loop,
886 p)))
887 /* If the insn can cause a trap (such as divide by zero),
888 we can't move it unless it's guaranteed to be executed
889 once the loop is entered. Even a function call might
890 prevent the trap insn from being reached
891 (since it might exit!) */
892 && ! ((maybe_never || call_passed)
893 && may_trap_p (src)))
895 struct movable *m;
896 int regno = REGNO (SET_DEST (set));
898 /* A potential lossage is a case where two insns
899 can be combined as long as they are both in the loop, but
900 we move one of them outside the loop. For large loops,
901 this can lose. The most common case of this is the address
902 of a function being called.
904 Therefore, if we are in a loop with calls (a "large loop")
905 and this register is marked as being used exactly once,
906 see if we can replace the usage of
907 this register with the source of this SET. If we can,
908 delete this insn.
910 Don't do this if P has a REG_RETVAL note or if we have
911 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
913 if (loop_info->has_call
914 && regs->array[regno].single_usage != 0
915 && regs->array[regno].single_usage != const0_rtx
916 && REGNO_FIRST_UID (regno) == INSN_UID (p)
917 && (REGNO_LAST_UID (regno)
918 == INSN_UID (regs->array[regno].single_usage))
919 && regs->array[regno].set_in_loop == 1
920 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
921 && ! side_effects_p (SET_SRC (set))
922 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
923 && (! SMALL_REGISTER_CLASSES
924 || (! (REG_P (SET_SRC (set))
925 && (REGNO (SET_SRC (set))
926 < FIRST_PSEUDO_REGISTER))))
927 && regno >= FIRST_PSEUDO_REGISTER
928 /* This test is not redundant; SET_SRC (set) might be
929 a call-clobbered register and the life of REGNO
930 might span a call. */
931 && ! modified_between_p (SET_SRC (set), p,
932 regs->array[regno].single_usage)
933 && no_labels_between_p (p,
934 regs->array[regno].single_usage)
935 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
936 regs->array[regno].single_usage))
938 /* Replace any usage in a REG_EQUAL note. Must copy
939 the new source, so that we don't get rtx sharing
940 between the SET_SOURCE and REG_NOTES of insn p. */
941 REG_NOTES (regs->array[regno].single_usage)
942 = (replace_rtx
943 (REG_NOTES (regs->array[regno].single_usage),
944 SET_DEST (set), copy_rtx (SET_SRC (set))));
946 delete_insn (p);
947 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
948 i++)
949 regs->array[regno+i].set_in_loop = 0;
950 continue;
953 m = xmalloc (sizeof (struct movable));
954 m->next = 0;
955 m->insn = p;
956 m->set_src = src;
957 m->dependencies = dependencies;
958 m->set_dest = SET_DEST (set);
959 m->force = 0;
960 m->consec
961 = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
962 m->done = 0;
963 m->forces = 0;
964 m->partial = 0;
965 m->move_insn = move_insn;
966 m->move_insn_first = 0;
967 m->insert_temp = insert_temp;
968 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
969 m->savemode = VOIDmode;
970 m->regno = regno;
971 /* Set M->cond if either loop_invariant_p
972 or consec_sets_invariant_p returned 2
973 (only conditionally invariant). */
974 m->cond = ((tem | tem1 | tem2) > 1);
975 m->global = LOOP_REG_GLOBAL_P (loop, regno);
976 m->match = 0;
977 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
978 m->savings = regs->array[regno].n_times_set;
979 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
980 m->savings += libcall_benefit (p);
981 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
982 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
983 /* Add M to the end of the chain MOVABLES. */
984 loop_movables_add (movables, m);
986 if (m->consec > 0)
988 /* It is possible for the first instruction to have a
989 REG_EQUAL note but a non-invariant SET_SRC, so we must
990 remember the status of the first instruction in case
991 the last instruction doesn't have a REG_EQUAL note. */
992 m->move_insn_first = m->move_insn;
994 /* Skip this insn, not checking REG_LIBCALL notes. */
995 p = next_nonnote_insn (p);
996 /* Skip the consecutive insns, if there are any. */
997 p = skip_consec_insns (p, m->consec);
998 /* Back up to the last insn of the consecutive group. */
999 p = prev_nonnote_insn (p);
1001 /* We must now reset m->move_insn, m->is_equiv, and
1002 possibly m->set_src to correspond to the effects of
1003 all the insns. */
1004 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1005 if (temp)
1006 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1007 else
1009 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1010 if (temp && CONSTANT_P (XEXP (temp, 0)))
1011 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1012 else
1013 m->move_insn = 0;
1016 m->is_equiv
1017 = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1020 /* If this register is always set within a STRICT_LOW_PART
1021 or set to zero, then its high bytes are constant.
1022 So clear them outside the loop and within the loop
1023 just load the low bytes.
1024 We must check that the machine has an instruction to do so.
1025 Also, if the value loaded into the register
1026 depends on the same register, this cannot be done. */
1027 else if (SET_SRC (set) == const0_rtx
1028 && NONJUMP_INSN_P (NEXT_INSN (p))
1029 && (set1 = single_set (NEXT_INSN (p)))
1030 && GET_CODE (set1) == SET
1031 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
1032 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
1033 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
1034 == SET_DEST (set))
1035 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1037 int regno = REGNO (SET_DEST (set));
1038 if (regs->array[regno].set_in_loop == 2)
1040 struct movable *m;
1041 m = xmalloc (sizeof (struct movable));
1042 m->next = 0;
1043 m->insn = p;
1044 m->set_dest = SET_DEST (set);
1045 m->dependencies = 0;
1046 m->force = 0;
1047 m->consec = 0;
1048 m->done = 0;
1049 m->forces = 0;
1050 m->move_insn = 0;
1051 m->move_insn_first = 0;
1052 m->insert_temp = insert_temp;
1053 m->partial = 1;
1054 /* If the insn may not be executed on some cycles,
1055 we can't clear the whole reg; clear just high part.
1056 Not even if the reg is used only within this loop.
1057 Consider this:
1058 while (1)
1059 while (s != t) {
1060 if (foo ()) x = *s;
1061 use (x);
1062 }
1063 Clearing x before the inner loop could clobber a value
1064 being saved from the last time around the outer loop.
1065 However, if the reg is not used outside this loop
1066 and all uses of the register are in the same
1067 basic block as the store, there is no problem.
1069 If this insn was made by loop, we don't know its
1070 INSN_LUID and hence must make a conservative
1071 assumption. */
1072 m->global = (INSN_UID (p) >= max_uid_for_loop
1073 || LOOP_REG_GLOBAL_P (loop, regno)
1074 || (labels_in_range_p
1075 (p, REGNO_FIRST_LUID (regno))));
1076 if (maybe_never && m->global)
1077 m->savemode = GET_MODE (SET_SRC (set1));
1078 else
1079 m->savemode = VOIDmode;
1080 m->regno = regno;
1081 m->cond = 0;
1082 m->match = 0;
1083 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1084 m->savings = 1;
1085 for (i = 0;
1086 i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1087 i++)
1088 regs->array[regno+i].set_in_loop = -1;
1089 /* Add M to the end of the chain MOVABLES. */
1090 loop_movables_add (movables, m);
1095 /* Past a call insn, we get to insns which might not be executed
1096 because the call might exit. This matters for insns that trap.
1097 Constant and pure call insns always return, so they don't count. */
1098 else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
1099 call_passed = 1;
1100 /* Past a label or a jump, we get to insns for which we
1101 can't count on whether or how many times they will be
1102 executed during each iteration. Therefore, we can
1103 only move out sets of trivial variables
1104 (those not used after the loop). */
1105 /* Similar code appears twice in strength_reduce. */
1106 else if ((LABEL_P (p) || JUMP_P (p))
1107 /* If we enter the loop in the middle, and scan around to the
1108 beginning, don't set maybe_never for that. This must be an
1109 unconditional jump, otherwise the code at the top of the
1110 loop might never be executed. Unconditional jumps are
1111 followed by a barrier then the loop_end. */
1112 && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
1113 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1114 && any_uncondjump_p (p)))
1115 maybe_never = 1;
1118 /* If one movable subsumes another, ignore that other. */
1120 ignore_some_movables (movables);
1122 /* For each movable insn, see if the reg that it loads
1123 leads, when it dies, right into another conditionally movable insn.
1124 If so, record that the second insn "forces" the first one,
1125 since the second can be moved only if the first is. */
1127 force_movables (movables);
1129 /* See if there are multiple movable insns that load the same value.
1130 If there are, make all but the first point at the first one
1131 through the `match' field, and add the priorities of them
1132 all together as the priority of the first. */
1134 combine_movables (movables, regs);
1136 /* Now consider each movable insn to decide whether it is worth moving.
1137 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1139 For machines with few registers this increases code size, so do not
1140 move movables when optimizing for code size on such machines.
1141 (The 18 below is the value for i386.) */
1143 if (!optimize_size
1144 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1146 move_movables (loop, movables, threshold, insn_count);
1148 /* Recalculate regs->array if move_movables has created new
1149 registers. */
1150 if (max_reg_num () > regs->num)
1152 loop_regs_scan (loop, 0);
1153 for (update_start = loop_start;
1154 PREV_INSN (update_start)
1155 && !LABEL_P (PREV_INSN (update_start));
1156 update_start = PREV_INSN (update_start))
1158 update_end = NEXT_INSN (loop_end);
1160 reg_scan_update (update_start, update_end, loop_max_reg);
1161 loop_max_reg = max_reg_num ();
1165 /* Now candidates that still are negative are those not moved.
1166 Change regs->array[I].set_in_loop to indicate that those are not actually
1167 invariant. */
1168 for (i = 0; i < regs->num; i++)
1169 if (regs->array[i].set_in_loop < 0)
1170 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1172 /* Now that we've moved some things out of the loop, we might be able to
1173 hoist even more memory references. */
1174 load_mems (loop);
1176 /* Recalculate regs->array if load_mems has created new registers. */
1177 if (max_reg_num () > regs->num)
1178 loop_regs_scan (loop, 0);
1180 for (update_start = loop_start;
1181 PREV_INSN (update_start)
1182 && !LABEL_P (PREV_INSN (update_start));
1183 update_start = PREV_INSN (update_start))
1185 update_end = NEXT_INSN (loop_end);
1187 reg_scan_update (update_start, update_end, loop_max_reg);
1188 loop_max_reg = max_reg_num ();
1190 if (flag_strength_reduce)
1192 if (update_end && LABEL_P (update_end))
1193 /* Ensure our label doesn't go away. */
1194 LABEL_NUSES (update_end)++;
1196 strength_reduce (loop, flags);
1198 reg_scan_update (update_start, update_end, loop_max_reg);
1199 loop_max_reg = max_reg_num ();
1201 if (update_end && LABEL_P (update_end)
1202 && --LABEL_NUSES (update_end) == 0)
1203 delete_related_insns (update_end);
1207 /* The movable information was needed through strength reduction; free it now. */
1208 loop_movables_free (movables);
1210 free (regs->array);
1211 regs->array = 0;
1212 regs->num = 0;
1215 /* Add elements to *OUTPUT to record all the pseudo-regs
1216 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1218 static void
1219 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1221 enum rtx_code code;
1222 const char *fmt;
1223 int i;
1225 code = GET_CODE (in_this);
1227 switch (code)
1229 case PC:
1230 case CC0:
1231 case CONST_INT:
1232 case CONST_DOUBLE:
1233 case CONST:
1234 case SYMBOL_REF:
1235 case LABEL_REF:
1236 return;
1238 case REG:
1239 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1240 && ! reg_mentioned_p (in_this, not_in_this))
1241 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1242 return;
1244 default:
1245 break;
1248 fmt = GET_RTX_FORMAT (code);
1249 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1251 int j;
1253 switch (fmt[i])
1255 case 'E':
1256 for (j = 0; j < XVECLEN (in_this, i); j++)
1257 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1258 break;
1260 case 'e':
1261 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1262 break;
1267 /* Check what regs are referred to in the libcall block ending with INSN,
1268 aside from those mentioned in the equivalent value.
1269 If there are none, return 0.
1270 If there are one or more, return an EXPR_LIST containing all of them. */
1272 static rtx
1273 libcall_other_reg (rtx insn, rtx equiv)
1275 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1276 rtx p = XEXP (note, 0);
1277 rtx output = 0;
1279 /* First, find all the regs used in the libcall block
1280 that are not mentioned as inputs to the result. */
1282 while (p != insn)
1284 if (INSN_P (p))
1285 record_excess_regs (PATTERN (p), equiv, &output);
1286 p = NEXT_INSN (p);
1289 return output;
1292 /* Return 1 if all uses of REG
1293 are between INSN and the end of the basic block. */
1295 static int
1296 reg_in_basic_block_p (rtx insn, rtx reg)
1298 int regno = REGNO (reg);
1299 rtx p;
1301 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1302 return 0;
1304 /* Search this basic block for the already recorded last use of the reg. */
1305 for (p = insn; p; p = NEXT_INSN (p))
1307 switch (GET_CODE (p))
1309 case NOTE:
1310 break;
1312 case INSN:
1313 case CALL_INSN:
1314 /* Ordinary insn: if this is the last use, we win. */
1315 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1316 return 1;
1317 break;
1319 case JUMP_INSN:
1320 /* Jump insn: if this is the last use, we win. */
1321 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1322 return 1;
1323 /* Otherwise, it's the end of the basic block, so we lose. */
1324 return 0;
1326 case CODE_LABEL:
1327 case BARRIER:
1328 /* It's the end of the basic block, so we lose. */
1329 return 0;
1331 default:
1332 break;
1336 /* The "last use" that was recorded can't be found after the first
1337 use. This can happen when the last use was deleted while
1338 processing an inner loop, this inner loop was then completely
1339 unrolled, and the outer loop is always exited after the inner loop,
1340 so that everything after the first use becomes a single basic block. */
1341 return 1;
1344 /* Compute the benefit of eliminating the insns in the block whose
1345 last insn is LAST. This may be a group of insns used to compute a
1346 value directly or can contain a library call. */
1348 static int
1349 libcall_benefit (rtx last)
1351 rtx insn;
1352 int benefit = 0;
1354 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1355 insn != last; insn = NEXT_INSN (insn))
1357 if (CALL_P (insn))
1358 benefit += 10; /* Assume at least this many insns in a library
1359 routine. */
1360 else if (NONJUMP_INSN_P (insn)
1361 && GET_CODE (PATTERN (insn)) != USE
1362 && GET_CODE (PATTERN (insn)) != CLOBBER)
1363 benefit++;
1366 return benefit;
1369 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1371 static rtx
1372 skip_consec_insns (rtx insn, int count)
1374 for (; count > 0; count--)
1376 rtx temp;
1378 /* If first insn of libcall sequence, skip to end. */
1379 /* Do this at start of loop, since INSN is guaranteed to
1380 be an insn here. */
1381 if (!NOTE_P (insn)
1382 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1383 insn = XEXP (temp, 0);
1386 insn = NEXT_INSN (insn);
1387 while (NOTE_P (insn));
1390 return insn;
1393 /* Ignore any movable whose insn falls within a libcall
1394 which is part of another movable.
1395 We make use of the fact that the movable for the libcall value
1396 was made later and so appears later on the chain. */
1398 static void
1399 ignore_some_movables (struct loop_movables *movables)
1401 struct movable *m, *m1;
1403 for (m = movables->head; m; m = m->next)
1405 /* Is this a movable for the value of a libcall? */
1406 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1407 if (note)
1409 rtx insn;
1410 /* Check for earlier movables inside that range,
1411 and mark them invalid. We cannot use LUIDs here because
1412 insns created by loop.c for prior loops don't have LUIDs.
1413 Rather than reject all such insns from movables, we just
1414 explicitly check each insn in the libcall (since invariant
1415 libcalls aren't that common). */
1416 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1417 for (m1 = movables->head; m1 != m; m1 = m1->next)
1418 if (m1->insn == insn)
1419 m1->done = 1;
1424 /* For each movable insn, see if the reg that it loads
1425 leads, when it dies, right into another conditionally movable insn.
1426 If so, record that the second insn "forces" the first one,
1427 since the second can be moved only if the first is. */
1429 static void
1430 force_movables (struct loop_movables *movables)
1432 struct movable *m, *m1;
1434 for (m1 = movables->head; m1; m1 = m1->next)
1435 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1436 if (!m1->partial && !m1->done)
1438 int regno = m1->regno;
1439 for (m = m1->next; m; m = m->next)
1440 /* ??? Could this be a bug? What if CSE caused the
1441 register of M1 to be used after this insn?
1442 Since CSE does not update regno_last_uid,
1443 this insn M->insn might not be where it dies.
1444 But very likely this doesn't matter; what matters is
1445 that M's reg is computed from M1's reg. */
1446 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1447 && !m->done)
1448 break;
1449 if (m != 0 && m->set_src == m1->set_dest
1450 /* If m->consec, m->set_src isn't valid. */
1451 && m->consec == 0)
1452 m = 0;
1454 /* Increase the priority of moving the first insn
1455 since it permits the second to be moved as well.
1456 Likewise for insns already forced by the first insn. */
1457 if (m != 0)
1459 struct movable *m2;
1461 m->forces = m1;
1462 for (m2 = m1; m2; m2 = m2->forces)
1464 m2->lifetime += m->lifetime;
1465 m2->savings += m->savings;
1471 /* Find invariant expressions that are equal and can be combined into
1472 one register. */
1474 static void
1475 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1477 struct movable *m;
1478 char *matched_regs = xmalloc (regs->num);
1479 enum machine_mode mode;
1481 /* Regs that are set more than once are not allowed to match
1482 or be matched. I'm no longer sure why not. */
1483 /* Only pseudo registers are allowed to match or be matched,
1484 since move_movables does not validate the change. */
1485 /* Perhaps testing m->consec_sets would be more appropriate here? */
1487 for (m = movables->head; m; m = m->next)
1488 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1489 && m->regno >= FIRST_PSEUDO_REGISTER
1490 && !m->insert_temp
1491 && !m->partial)
1493 struct movable *m1;
1494 int regno = m->regno;
1496 memset (matched_regs, 0, regs->num);
1497 matched_regs[regno] = 1;
1499 /* We want later insns to match the first one. Don't make the first
1500 one match any later ones. So start this loop at m->next. */
1501 for (m1 = m->next; m1; m1 = m1->next)
1502 if (m != m1 && m1->match == 0
1503 && !m1->insert_temp
1504 && regs->array[m1->regno].n_times_set == 1
1505 && m1->regno >= FIRST_PSEUDO_REGISTER
1506 /* A reg used outside the loop mustn't be eliminated. */
1507 && !m1->global
1508 /* A reg used for zero-extending mustn't be eliminated. */
1509 && !m1->partial
1510 && (matched_regs[m1->regno]
1513 /* Can combine regs with different modes loaded from the
1514 same constant only if the modes are the same or
1515 if both are integer modes with M wider or the same
1516 width as M1. The check for integer is redundant, but
1517 safe, since the only case of differing destination
1518 modes with equal sources is when both sources are
1519 VOIDmode, i.e., CONST_INT. */
1520 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1521 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1522 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1523 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1524 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1525 /* See if the source of M1 says it matches M. */
1526 && ((REG_P (m1->set_src)
1527 && matched_regs[REGNO (m1->set_src)])
1528 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1529 movables, regs))))
1530 && ((m->dependencies == m1->dependencies)
1531 || rtx_equal_p (m->dependencies, m1->dependencies)))
1533 m->lifetime += m1->lifetime;
1534 m->savings += m1->savings;
1535 m1->done = 1;
1536 m1->match = m;
1537 matched_regs[m1->regno] = 1;
1541 /* Now combine the regs used for zero-extension.
1542 This can be done for those not marked `global'
1543 provided their lives don't overlap. */
1545 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1546 mode = GET_MODE_WIDER_MODE (mode))
1548 struct movable *m0 = 0;
1550 /* Combine all the registers for extension from mode MODE.
1551 Don't combine any that are used outside this loop. */
1552 for (m = movables->head; m; m = m->next)
1553 if (m->partial && ! m->global
1554 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1556 struct movable *m1;
1558 int first = REGNO_FIRST_LUID (m->regno);
1559 int last = REGNO_LAST_LUID (m->regno);
1561 if (m0 == 0)
1563 /* First one: don't check for overlap, just record it. */
1564 m0 = m;
1565 continue;
1568 /* Make sure they extend to the same mode.
1569 (Almost always true.) */
1570 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1571 continue;
1573 /* We already have one: check for overlap with those
1574 already combined together. */
1575 for (m1 = movables->head; m1 != m; m1 = m1->next)
1576 if (m1 == m0 || (m1->partial && m1->match == m0))
1577 if (! (REGNO_FIRST_LUID (m1->regno) > last
1578 || REGNO_LAST_LUID (m1->regno) < first))
1579 goto overlap;
1581 /* No overlap: we can combine this with the others. */
1582 m0->lifetime += m->lifetime;
1583 m0->savings += m->savings;
1584 m->done = 1;
1585 m->match = m0;
1587 overlap:
1592 /* Clean up. */
1593 free (matched_regs);
1596 /* Returns the number of movable instructions in LOOP that were not
1597 moved outside the loop. */
1599 static int
1600 num_unmoved_movables (const struct loop *loop)
1602 int num = 0;
1603 struct movable *m;
1605 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1606 if (!m->done)
1607 ++num;
1609 return num;
1613 /* Return 1 if regs X and Y will become the same if moved. */
1615 static int
1616 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1618 unsigned int xn = REGNO (x);
1619 unsigned int yn = REGNO (y);
1620 struct movable *mx, *my;
1622 for (mx = movables->head; mx; mx = mx->next)
1623 if (mx->regno == xn)
1624 break;
1626 for (my = movables->head; my; my = my->next)
1627 if (my->regno == yn)
1628 break;
1630 return (mx && my
1631 && ((mx->match == my->match && mx->match != 0)
1632 || mx->match == my
1633 || mx == my->match));
1636 /* Return 1 if X and Y are identical-looking rtx's.
1637 This is the Lisp function EQUAL for rtx arguments.
1639 If two registers are matching movables or a movable register and an
1640 equivalent constant, consider them equal. */
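/* (For instance, a pseudo and a CONST_INT compare equal here if some movable
   loads exactly that constant into the pseudo and will be re-emitted with
   emit_move_insn; see the set_in_loop == -2 checks below.)  */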
1642 static int
1643 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
1644 struct loop_regs *regs)
1646 int i;
1647 int j;
1648 struct movable *m;
1649 enum rtx_code code;
1650 const char *fmt;
1652 if (x == y)
1653 return 1;
1654 if (x == 0 || y == 0)
1655 return 0;
1657 code = GET_CODE (x);
1659 /* If we have a register and a constant, they may sometimes be
1660 equal. */
1661 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
1662 && CONSTANT_P (y))
1664 for (m = movables->head; m; m = m->next)
1665 if (m->move_insn && m->regno == REGNO (x)
1666 && rtx_equal_p (m->set_src, y))
1667 return 1;
1669 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
1670 && CONSTANT_P (x))
1672 for (m = movables->head; m; m = m->next)
1673 if (m->move_insn && m->regno == REGNO (y)
1674 && rtx_equal_p (m->set_src, x))
1675 return 1;
1678 /* Otherwise, rtx's of different codes cannot be equal. */
1679 if (code != GET_CODE (y))
1680 return 0;
1682 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1683 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1685 if (GET_MODE (x) != GET_MODE (y))
1686 return 0;
1688 /* These three types of rtx's can be compared nonrecursively. */
1689 if (code == REG)
1690 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1692 if (code == LABEL_REF)
1693 return XEXP (x, 0) == XEXP (y, 0);
1694 if (code == SYMBOL_REF)
1695 return XSTR (x, 0) == XSTR (y, 0);
1697 /* Compare the elements. If any pair of corresponding elements
1698 fails to match, return 0 for the whole thing. */
1700 fmt = GET_RTX_FORMAT (code);
1701 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1703 switch (fmt[i])
1705 case 'w':
1706 if (XWINT (x, i) != XWINT (y, i))
1707 return 0;
1708 break;
1710 case 'i':
1711 if (XINT (x, i) != XINT (y, i))
1712 return 0;
1713 break;
1715 case 'E':
1716 /* Two vectors must have the same length. */
1717 if (XVECLEN (x, i) != XVECLEN (y, i))
1718 return 0;
1720 /* And the corresponding elements must match. */
1721 for (j = 0; j < XVECLEN (x, i); j++)
1722 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1723 movables, regs) == 0)
1724 return 0;
1725 break;
1727 case 'e':
1728 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1729 == 0)
1730 return 0;
1731 break;
1733 case 's':
1734 if (strcmp (XSTR (x, i), XSTR (y, i)))
1735 return 0;
1736 break;
1738 case 'u':
1739 /* These are just backpointers, so they don't matter. */
1740 break;
1742 case '0':
1743 break;
1745 /* It is believed that rtx's at this level will never
1746 contain anything but integers and other rtx's,
1747 except for within LABEL_REFs and SYMBOL_REFs. */
1748 default:
1749 abort ();
1752 return 1;
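/* A hypothetical fragment showing how the equality above is meant to be
   used when matching movables: M and M1 are assumed to be movables whose
   SET_SRC fields are being compared, as in the matching code earlier in
   this file.  */
#if 0
if (rtx_equal_for_loop_p (m->set_src, m1->set_src, movables, regs))
  {
    /* The two insns load the same value; one register can be replaced
       by the other once the insns are hoisted.  */
    m1->match = m;
  }
#endif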
1755 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1756 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1757 references is incremented once for each added note. */
1759 static void
1760 add_label_notes (rtx x, rtx insns)
1762 enum rtx_code code = GET_CODE (x);
1763 int i, j;
1764 const char *fmt;
1765 rtx insn;
1767 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1769 /* This code used to ignore labels that referred to dispatch tables to
1770 avoid flow generating (slightly) worse code.
1772 We no longer ignore such label references (see LABEL_REF handling in
1773 mark_jump_label for additional information). */
1774 for (insn = insns; insn; insn = NEXT_INSN (insn))
1775 if (reg_mentioned_p (XEXP (x, 0), insn))
1777 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1778 REG_NOTES (insn));
1779 if (LABEL_P (XEXP (x, 0)))
1780 LABEL_NUSES (XEXP (x, 0))++;
1784 fmt = GET_RTX_FORMAT (code);
1785 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1787 if (fmt[i] == 'e')
1788 add_label_notes (XEXP (x, i), insns);
1789 else if (fmt[i] == 'E')
1790 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1791 add_label_notes (XVECEXP (x, i, j), insns);
1795 /* Scan MOVABLES, and move the insns that deserve to be moved.
1796 If two matching movables are combined, replace one reg with the
1797 other throughout. */
1799 static void
1800 move_movables (struct loop *loop, struct loop_movables *movables,
1801 int threshold, int insn_count)
1803 struct loop_regs *regs = LOOP_REGS (loop);
1804 int nregs = regs->num;
1805 rtx new_start = 0;
1806 struct movable *m;
1807 rtx p;
1808 rtx loop_start = loop->start;
1809 rtx loop_end = loop->end;
1810 /* Map of pseudo-register replacements to handle combining
1811 when we move several insns that load the same value
1812 into different pseudo-registers. */
1813 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
1814 char *already_moved = xcalloc (nregs, sizeof (char));
1816 for (m = movables->head; m; m = m->next)
1818 /* Describe this movable insn. */
1820 if (loop_dump_stream)
1822 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1823 INSN_UID (m->insn), m->regno, m->lifetime);
1824 if (m->consec > 0)
1825 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1826 if (m->cond)
1827 fprintf (loop_dump_stream, "cond ");
1828 if (m->force)
1829 fprintf (loop_dump_stream, "force ");
1830 if (m->global)
1831 fprintf (loop_dump_stream, "global ");
1832 if (m->done)
1833 fprintf (loop_dump_stream, "done ");
1834 if (m->move_insn)
1835 fprintf (loop_dump_stream, "move-insn ");
1836 if (m->match)
1837 fprintf (loop_dump_stream, "matches %d ",
1838 INSN_UID (m->match->insn));
1839 if (m->forces)
1840 fprintf (loop_dump_stream, "forces %d ",
1841 INSN_UID (m->forces->insn));
1844 /* Ignore the insn if it's already done (it matched something else).
1845 Otherwise, see if it is now safe to move. */
1847 if (!m->done
1848 && (! m->cond
1849 || (1 == loop_invariant_p (loop, m->set_src)
1850 && (m->dependencies == 0
1851 || 1 == loop_invariant_p (loop, m->dependencies))
1852 && (m->consec == 0
1853 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1854 m->consec + 1,
1855 m->insn))))
1856 && (! m->forces || m->forces->done))
1858 int regno;
1859 rtx p;
1860 int savings = m->savings;
1862 /* We have an insn that is safe to move.
1863 Compute its desirability. */
1865 p = m->insn;
1866 regno = m->regno;
1868 if (loop_dump_stream)
1869 fprintf (loop_dump_stream, "savings %d ", savings);
1871 if (regs->array[regno].moved_once && loop_dump_stream)
1872 fprintf (loop_dump_stream, "halved since already moved ");
1874 /* An insn MUST be moved if we already moved something else
1875 which is safe only if this one is moved too: that is,
1876 if already_moved[REGNO] is nonzero. */
1878 /* An insn is desirable to move if the new lifetime of the
1879 register is no more than THRESHOLD times the old lifetime.
1880 If it's not desirable, it means the loop is so big
1881 that moving won't speed things up much,
1882 and it is liable to make register usage worse. */
1884 /* It is also desirable to move if it can be moved at no
1885 extra cost because something else was already moved. */
1887 if (already_moved[regno]
1888 || flag_move_all_movables
1889 || (threshold * savings * m->lifetime) >=
1890 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1891 || (m->forces && m->forces->done
1892 && regs->array[m->forces->regno].n_times_set == 1))
1894 int count;
1895 struct movable *m1;
1896 rtx first = NULL_RTX;
1897 rtx newreg = NULL_RTX;
1899 if (m->insert_temp)
1900 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
1902 /* Now move the insns that set the reg. */
1904 if (m->partial && m->match)
1906 rtx newpat, i1;
1907 rtx r1, r2;
1908 /* Find the end of this chain of matching regs.
1909 Thus, we load each reg in the chain from that one reg.
1910 And that reg is loaded with 0 directly,
1911 since it has ->match == 0. */
1912 for (m1 = m; m1->match; m1 = m1->match);
1913 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1914 SET_DEST (PATTERN (m1->insn)));
1915 i1 = loop_insn_hoist (loop, newpat);
1917 /* Mark the moved, invariant reg as being allowed to
1918 share a hard reg with the other matching invariant. */
1919 REG_NOTES (i1) = REG_NOTES (m->insn);
1920 r1 = SET_DEST (PATTERN (m->insn));
1921 r2 = SET_DEST (PATTERN (m1->insn));
1922 regs_may_share
1923 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1924 gen_rtx_EXPR_LIST (VOIDmode, r2,
1925 regs_may_share));
1926 delete_insn (m->insn);
1928 if (new_start == 0)
1929 new_start = i1;
1931 if (loop_dump_stream)
1932 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1934 /* If we are to re-generate the item being moved with a
1935 new move insn, first delete what we have and then emit
1936 the move insn before the loop. */
1937 else if (m->move_insn)
1939 rtx i1, temp, seq;
1941 for (count = m->consec; count >= 0; count--)
1943 /* If this is the first insn of a library call sequence,
1944 something is very wrong. */
1945 if (!NOTE_P (p)
1946 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1947 abort ();
1949 /* If this is the last insn of a libcall sequence, then
1950 delete every insn in the sequence except the last.
1951 The last insn is handled in the normal manner. */
1952 if (!NOTE_P (p)
1953 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1955 temp = XEXP (temp, 0);
1956 while (temp != p)
1957 temp = delete_insn (temp);
1960 temp = p;
1961 p = delete_insn (p);
1963 /* simplify_giv_expr expects that it can walk the insns
1964 at m->insn forwards and see this old sequence we are
1965 tossing here. delete_insn does preserve the next
1966 pointers, but when we skip over a NOTE we must fix
1967 it up. Otherwise that code walks into the non-deleted
1968 insn stream. */
1969 while (p && NOTE_P (p))
1970 p = NEXT_INSN (temp) = NEXT_INSN (p);
1972 if (m->insert_temp)
1974 /* Replace the original insn with a move from
1975 our newly created temp. */
1976 start_sequence ();
1977 emit_move_insn (m->set_dest, newreg);
1978 seq = get_insns ();
1979 end_sequence ();
1980 emit_insn_before (seq, p);
1984 start_sequence ();
1985 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
1986 m->set_src);
1987 seq = get_insns ();
1988 end_sequence ();
1990 add_label_notes (m->set_src, seq);
1992 i1 = loop_insn_hoist (loop, seq);
1993 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1994 set_unique_reg_note (i1,
1995 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1996 m->set_src);
1998 if (loop_dump_stream)
1999 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2001 /* The more regs we move, the less we like moving them. */
2002 threshold -= 3;
2004 else
2006 for (count = m->consec; count >= 0; count--)
2008 rtx i1, temp;
2010 /* If first insn of libcall sequence, skip to end. */
2011 /* Do this at start of loop, since p is guaranteed to
2012 be an insn here. */
2013 if (!NOTE_P (p)
2014 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2015 p = XEXP (temp, 0);
2017 /* If last insn of libcall sequence, move all
2018 insns except the last before the loop. The last
2019 insn is handled in the normal manner. */
2020 if (!NOTE_P (p)
2021 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2023 rtx fn_address = 0;
2024 rtx fn_reg = 0;
2025 rtx fn_address_insn = 0;
2027 first = 0;
2028 for (temp = XEXP (temp, 0); temp != p;
2029 temp = NEXT_INSN (temp))
2031 rtx body;
2032 rtx n;
2033 rtx next;
2035 if (NOTE_P (temp))
2036 continue;
2038 body = PATTERN (temp);
2040 /* Find the next insn after TEMP,
2041 not counting USE or NOTE insns. */
2042 for (next = NEXT_INSN (temp); next != p;
2043 next = NEXT_INSN (next))
2044 if (! (NONJUMP_INSN_P (next)
2045 && GET_CODE (PATTERN (next)) == USE)
2046 && !NOTE_P (next))
2047 break;
2049 /* If that is the call, this may be the insn
2050 that loads the function address.
2052 Extract the function address from the insn
2053 that loads it into a register.
2054 If this insn was cse'd, we get incorrect code.
2056 So emit a new move insn that copies the
2057 function address into the register that the
2058 call insn will use. flow.c will delete any
2059 redundant stores that we have created. */
2060 if (CALL_P (next)
2061 && GET_CODE (body) == SET
2062 && REG_P (SET_DEST (body))
2063 && (n = find_reg_note (temp, REG_EQUAL,
2064 NULL_RTX)))
2066 fn_reg = SET_SRC (body);
2067 if (!REG_P (fn_reg))
2068 fn_reg = SET_DEST (body);
2069 fn_address = XEXP (n, 0);
2070 fn_address_insn = temp;
2072 /* We have the call insn.
2073 If it uses the register we suspect it might,
2074 load it with the correct address directly. */
2075 if (CALL_P (temp)
2076 && fn_address != 0
2077 && reg_referenced_p (fn_reg, body))
2078 loop_insn_emit_after (loop, 0, fn_address_insn,
2079 gen_move_insn
2080 (fn_reg, fn_address));
2082 if (CALL_P (temp))
2084 i1 = loop_call_insn_hoist (loop, body);
2085 /* Because the USAGE information potentially
2086 contains objects other than hard registers
2087 we need to copy it. */
2088 if (CALL_INSN_FUNCTION_USAGE (temp))
2089 CALL_INSN_FUNCTION_USAGE (i1)
2090 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2092 else
2093 i1 = loop_insn_hoist (loop, body);
2094 if (first == 0)
2095 first = i1;
2096 if (temp == fn_address_insn)
2097 fn_address_insn = i1;
2098 REG_NOTES (i1) = REG_NOTES (temp);
2099 REG_NOTES (temp) = NULL;
2100 delete_insn (temp);
2102 if (new_start == 0)
2103 new_start = first;
2105 if (m->savemode != VOIDmode)
2107 /* P sets REG to zero; but we should clear only
2108 the bits that are not covered by the mode
2109 m->savemode. */
2110 rtx reg = m->set_dest;
2111 rtx sequence;
2112 rtx tem;
2114 start_sequence ();
2115 tem = expand_simple_binop
2116 (GET_MODE (reg), AND, reg,
2117 GEN_INT ((((HOST_WIDE_INT) 1
2118 << GET_MODE_BITSIZE (m->savemode)))
2119 - 1),
2120 reg, 1, OPTAB_LIB_WIDEN);
2121 if (tem == 0)
2122 abort ();
2123 if (tem != reg)
2124 emit_move_insn (reg, tem);
2125 sequence = get_insns ();
2126 end_sequence ();
2127 i1 = loop_insn_hoist (loop, sequence);
2129 else if (CALL_P (p))
2131 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2132 /* Because the USAGE information potentially
2133 contains objects other than hard registers
2134 we need to copy it. */
2135 if (CALL_INSN_FUNCTION_USAGE (p))
2136 CALL_INSN_FUNCTION_USAGE (i1)
2137 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2139 else if (count == m->consec && m->move_insn_first)
2141 rtx seq;
2142 /* The SET_SRC might not be invariant, so we must
2143 use the REG_EQUAL note. */
2144 start_sequence ();
2145 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2146 m->set_src);
2147 seq = get_insns ();
2148 end_sequence ();
2150 add_label_notes (m->set_src, seq);
2152 i1 = loop_insn_hoist (loop, seq);
2153 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2154 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2155 : REG_EQUAL, m->set_src);
2157 else if (m->insert_temp)
2159 rtx *reg_map2 = xcalloc (REGNO (newreg),
2160 sizeof(rtx));
2161 reg_map2 [m->regno] = newreg;
2163 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2164 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2165 free (reg_map2);
2167 else
2168 i1 = loop_insn_hoist (loop, PATTERN (p));
2170 if (REG_NOTES (i1) == 0)
2172 REG_NOTES (i1) = REG_NOTES (p);
2173 REG_NOTES (p) = NULL;
2175 /* If there is a REG_EQUAL note present whose value
2176 is not loop invariant, then delete it, since it
2177 may cause problems with later optimization passes.
2178 It is possible for cse to create such notes
2179 like this as a result of record_jump_cond. */
2181 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2182 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2183 remove_note (i1, temp);
2186 if (new_start == 0)
2187 new_start = i1;
2189 if (loop_dump_stream)
2190 fprintf (loop_dump_stream, " moved to %d",
2191 INSN_UID (i1));
2193 /* If library call, now fix the REG_NOTES that contain
2194 insn pointers, namely REG_LIBCALL on FIRST
2195 and REG_RETVAL on I1. */
2196 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2198 XEXP (temp, 0) = first;
2199 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2200 XEXP (temp, 0) = i1;
2203 temp = p;
2204 delete_insn (p);
2205 p = NEXT_INSN (p);
2207 /* simplify_giv_expr expects that it can walk the insns
2208 at m->insn forwards and see this old sequence we are
2209 tossing here. delete_insn does preserve the next
2210 pointers, but when we skip over a NOTE we must fix
2211 it up. Otherwise that code walks into the non-deleted
2212 insn stream. */
2213 while (p && NOTE_P (p))
2214 p = NEXT_INSN (temp) = NEXT_INSN (p);
2216 if (m->insert_temp)
2218 rtx seq;
2219 /* Replace the original insn with a move from
2220 our newly created temp. */
2221 start_sequence ();
2222 emit_move_insn (m->set_dest, newreg);
2223 seq = get_insns ();
2224 end_sequence ();
2225 emit_insn_before (seq, p);
2229 /* The more regs we move, the less we like moving them. */
2230 threshold -= 3;
2233 m->done = 1;
2235 if (!m->insert_temp)
2237 /* Any other movable that loads the same register
2238 MUST be moved. */
2239 already_moved[regno] = 1;
2241 /* This reg has been moved out of one loop. */
2242 regs->array[regno].moved_once = 1;
2244 /* The reg set here is now invariant. */
2245 if (! m->partial)
2247 int i;
2248 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2249 regs->array[regno+i].set_in_loop = 0;
2252 /* Change the length-of-life info for the register
2253 to say it lives at least the full length of this loop.
2254 This will help guide optimizations in outer loops. */
2256 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2257 /* This is the old insn before all the moved insns.
2258 We can't use the moved insn because it is out of range
2259 in uid_luid. Only the old insns have luids. */
2260 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2261 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2262 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2265 /* Combine with this moved insn any other matching movables. */
2267 if (! m->partial)
2268 for (m1 = movables->head; m1; m1 = m1->next)
2269 if (m1->match == m)
2271 rtx temp;
2273 /* Schedule the reg loaded by M1
2274 for replacement so that it shares the reg of M.
2275 If the modes differ (only possible in restricted
2276 circumstances), make a SUBREG.
2278 Note this assumes that the target dependent files
2279 treat REG and SUBREG equally, including within
2280 GO_IF_LEGITIMATE_ADDRESS and in all the
2281 predicates since we never verify that replacing the
2282 original register with a SUBREG results in a
2283 recognizable insn. */
2284 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2285 reg_map[m1->regno] = m->set_dest;
2286 else
2287 reg_map[m1->regno]
2288 = gen_lowpart_common (GET_MODE (m1->set_dest),
2289 m->set_dest);
2291 /* Get rid of the matching insn
2292 and prevent further processing of it. */
2293 m1->done = 1;
2295 /* If library call, delete all insns. */
2296 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2297 NULL_RTX)))
2298 delete_insn_chain (XEXP (temp, 0), m1->insn);
2299 else
2300 delete_insn (m1->insn);
2302 /* Any other movable that loads the same register
2303 MUST be moved. */
2304 already_moved[m1->regno] = 1;
2306 /* The reg merged here is now invariant,
2307 if the reg it matches is invariant. */
2308 if (! m->partial)
2310 int i;
2311 for (i = 0;
2312 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2313 i++)
2314 regs->array[m1->regno+i].set_in_loop = 0;
2318 else if (loop_dump_stream)
2319 fprintf (loop_dump_stream, "not desirable");
2321 else if (loop_dump_stream && !m->match)
2322 fprintf (loop_dump_stream, "not safe");
2324 if (loop_dump_stream)
2325 fprintf (loop_dump_stream, "\n");
2328 if (new_start == 0)
2329 new_start = loop_start;
2331 /* Go through all the instructions in the loop, making
2332 all the register substitutions scheduled in REG_MAP. */
2333 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2334 if (INSN_P (p))
2336 replace_regs (PATTERN (p), reg_map, nregs, 0);
2337 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2338 INSN_CODE (p) = -1;
2341 /* Clean up. */
2342 free (reg_map);
2343 free (already_moved);
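/* A sketch of the core profitability comparison used above, pulled out as
   a hypothetical predicate; the real test also accepts insns that are
   forced, already matched, or covered by flag_move_all_movables.  */
#if 0
static int
movable_profitable_p (int threshold, int savings, int lifetime,
		      int insn_count, int moved_once)
{
  /* Scaled savings must beat the loop size; the bar is doubled for a
     register that was already moved out of another loop.  */
  return ((threshold * savings * lifetime)
	  >= (moved_once ? insn_count * 2 : insn_count));
}
#endif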
2347 static void
2348 loop_movables_add (struct loop_movables *movables, struct movable *m)
2350 if (movables->head == 0)
2351 movables->head = m;
2352 else
2353 movables->last->next = m;
2354 movables->last = m;
2358 static void
2359 loop_movables_free (struct loop_movables *movables)
2361 struct movable *m;
2362 struct movable *m_next;
2364 for (m = movables->head; m; m = m_next)
2366 m_next = m->next;
2367 free (m);
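/* A minimal usage sketch for the two helpers above, assuming a caller that
   builds the list itself; field initialization is abbreviated.  */
#if 0
{
  struct loop_movables movables;
  struct movable *m;

  movables.head = 0;
  movables.last = 0;

  m = xcalloc (1, sizeof (struct movable));
  /* ... fill in m->insn, m->set_dest, m->set_src, m->regno, ...  */
  loop_movables_add (&movables, m);

  /* ... scan and move the recorded movables ...  */

  loop_movables_free (&movables);
}
#endif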
2371 #if 0
2372 /* Scan X and replace the address of any MEM in it with ADDR.
2373 REG is the address that MEM should have before the replacement. */
2375 static void
2376 replace_call_address (rtx x, rtx reg, rtx addr)
2378 enum rtx_code code;
2379 int i;
2380 const char *fmt;
2382 if (x == 0)
2383 return;
2384 code = GET_CODE (x);
2385 switch (code)
2387 case PC:
2388 case CC0:
2389 case CONST_INT:
2390 case CONST_DOUBLE:
2391 case CONST:
2392 case SYMBOL_REF:
2393 case LABEL_REF:
2394 case REG:
2395 return;
2397 case SET:
2398 /* Short cut for very common case. */
2399 replace_call_address (XEXP (x, 1), reg, addr);
2400 return;
2402 case CALL:
2403 /* Short cut for very common case. */
2404 replace_call_address (XEXP (x, 0), reg, addr);
2405 return;
2407 case MEM:
2408 /* If this MEM uses a reg other than the one we expected,
2409 something is wrong. */
2410 if (XEXP (x, 0) != reg)
2411 abort ();
2412 XEXP (x, 0) = addr;
2413 return;
2415 default:
2416 break;
2419 fmt = GET_RTX_FORMAT (code);
2420 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2422 if (fmt[i] == 'e')
2423 replace_call_address (XEXP (x, i), reg, addr);
2424 else if (fmt[i] == 'E')
2426 int j;
2427 for (j = 0; j < XVECLEN (x, i); j++)
2428 replace_call_address (XVECEXP (x, i, j), reg, addr);
2432 #endif
2434 /* Return the number of memory refs to addresses that vary
2435 in the rtx X. */
2437 static int
2438 count_nonfixed_reads (const struct loop *loop, rtx x)
2440 enum rtx_code code;
2441 int i;
2442 const char *fmt;
2443 int value;
2445 if (x == 0)
2446 return 0;
2448 code = GET_CODE (x);
2449 switch (code)
2451 case PC:
2452 case CC0:
2453 case CONST_INT:
2454 case CONST_DOUBLE:
2455 case CONST:
2456 case SYMBOL_REF:
2457 case LABEL_REF:
2458 case REG:
2459 return 0;
2461 case MEM:
2462 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2463 + count_nonfixed_reads (loop, XEXP (x, 0)));
2465 default:
2466 break;
2469 value = 0;
2470 fmt = GET_RTX_FORMAT (code);
2471 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2473 if (fmt[i] == 'e')
2474 value += count_nonfixed_reads (loop, XEXP (x, i));
2475 if (fmt[i] == 'E')
2477 int j;
2478 for (j = 0; j < XVECLEN (x, i); j++)
2479 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2482 return value;
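/* A hypothetical illustration of what counts as a nonfixed read: IV_REG is
   assumed to be a register stepped inside LOOP, so the address of the MEM
   below is not loop invariant and the MEM contributes one read.  */
#if 0
rtx addr = gen_rtx_PLUS (Pmode, iv_reg, GEN_INT (4));
rtx mem = gen_rtx_MEM (SImode, addr);
int nreads = count_nonfixed_reads (loop, mem);   /* 1 in this case.  */
#endif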
2485 /* Scan a loop setting the elements `loops_enclosed',
2486 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2487 `unknown_address_altered', `unknown_constant_address_altered', and
2488 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2489 list `store_mems' in LOOP. */
2491 static void
2492 prescan_loop (struct loop *loop)
2494 int level = 1;
2495 rtx insn;
2496 struct loop_info *loop_info = LOOP_INFO (loop);
2497 rtx start = loop->start;
2498 rtx end = loop->end;
2499 /* The label after END. Jumping here is just like falling off the
2500 end of the loop. We use next_nonnote_insn instead of next_label
2501 as a hedge against the (pathological) case where some actual insn
2502 might end up between the two. */
2503 rtx exit_target = next_nonnote_insn (end);
2505 loop_info->has_indirect_jump = indirect_jump_in_function;
2506 loop_info->pre_header_has_call = 0;
2507 loop_info->has_call = 0;
2508 loop_info->has_nonconst_call = 0;
2509 loop_info->has_prefetch = 0;
2510 loop_info->has_volatile = 0;
2511 loop_info->has_tablejump = 0;
2512 loop_info->has_multiple_exit_targets = 0;
2513 loop->level = 1;
2515 loop_info->unknown_address_altered = 0;
2516 loop_info->unknown_constant_address_altered = 0;
2517 loop_info->store_mems = NULL_RTX;
2518 loop_info->first_loop_store_insn = NULL_RTX;
2519 loop_info->mems_idx = 0;
2520 loop_info->num_mem_sets = 0;
2521 /* If the loop optimizer runs twice, this was set on the first pass for the second. */
2522 loop_info->preconditioned = NOTE_PRECONDITIONED (end);
2524 for (insn = start; insn && !LABEL_P (insn);
2525 insn = PREV_INSN (insn))
2527 if (CALL_P (insn))
2529 loop_info->pre_header_has_call = 1;
2530 break;
2534 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2535 insn = NEXT_INSN (insn))
2537 switch (GET_CODE (insn))
2539 case NOTE:
2540 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2542 ++level;
2543 /* Count number of loops contained in this one. */
2544 loop->level++;
2546 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2547 --level;
2548 break;
2550 case CALL_INSN:
2551 if (! CONST_OR_PURE_CALL_P (insn))
2553 loop_info->unknown_address_altered = 1;
2554 loop_info->has_nonconst_call = 1;
2556 else if (pure_call_p (insn))
2557 loop_info->has_nonconst_call = 1;
2558 loop_info->has_call = 1;
2559 if (can_throw_internal (insn))
2560 loop_info->has_multiple_exit_targets = 1;
2561 break;
2563 case JUMP_INSN:
2564 if (! loop_info->has_multiple_exit_targets)
2566 rtx set = pc_set (insn);
2568 if (set)
2570 rtx src = SET_SRC (set);
2571 rtx label1, label2;
2573 if (GET_CODE (src) == IF_THEN_ELSE)
2575 label1 = XEXP (src, 1);
2576 label2 = XEXP (src, 2);
2578 else
2580 label1 = src;
2581 label2 = NULL_RTX;
2584 do
2586 if (label1 && label1 != pc_rtx)
2588 if (GET_CODE (label1) != LABEL_REF)
2590 /* Something tricky. */
2591 loop_info->has_multiple_exit_targets = 1;
2592 break;
2594 else if (XEXP (label1, 0) != exit_target
2595 && LABEL_OUTSIDE_LOOP_P (label1))
2597 /* A jump outside the current loop. */
2598 loop_info->has_multiple_exit_targets = 1;
2599 break;
2603 label1 = label2;
2604 label2 = NULL_RTX;
2606 while (label1);
2608 else
2610 /* A return, or something tricky. */
2611 loop_info->has_multiple_exit_targets = 1;
2614 /* Fall through. */
2616 case INSN:
2617 if (volatile_refs_p (PATTERN (insn)))
2618 loop_info->has_volatile = 1;
2620 if (JUMP_P (insn)
2621 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2622 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2623 loop_info->has_tablejump = 1;
2625 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2626 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2627 loop_info->first_loop_store_insn = insn;
2629 if (flag_non_call_exceptions && can_throw_internal (insn))
2630 loop_info->has_multiple_exit_targets = 1;
2631 break;
2633 default:
2634 break;
2638 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2639 if (/* An exception thrown by a called function might land us
2640 anywhere. */
2641 ! loop_info->has_nonconst_call
2642 /* We don't want loads for MEMs moved to a location before the
2643 one at which their stack memory becomes allocated. (Note
2644 that this is not a problem for malloc, etc., since those
2645 require actual function calls.) */
2646 && ! current_function_calls_alloca
2647 /* There are ways to leave the loop other than falling off the
2648 end. */
2649 && ! loop_info->has_multiple_exit_targets)
2650 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2651 insn = NEXT_INSN (insn))
2652 for_each_rtx (&insn, insert_loop_mem, loop_info);
2654 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2655 that loop_invariant_p and load_mems can use true_dependence
2656 to determine what is really clobbered. */
2657 if (loop_info->unknown_address_altered)
2659 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2661 loop_info->store_mems
2662 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2664 if (loop_info->unknown_constant_address_altered)
2666 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2667 MEM_READONLY_P (mem) = 1;
2668 loop_info->store_mems
2669 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
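/* A sketch of how a later phase might consult the flags gathered above;
   LOOP is hypothetical here, and only a couple of the fields are shown.  */
#if 0
{
  struct loop_info *loop_info = LOOP_INFO (loop);

  if (loop_info->has_call || loop_info->has_multiple_exit_targets)
    /* Be conservative: calls and extra exit targets block most
       transformations.  */
    return;

  if (loop_info->unknown_address_altered)
    /* store_mems now holds a BLKmode wildcard, so loop_invariant_p will
       treat every MEM read as potentially clobbered.  */
    ;
}
#endif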
2673 /* Invalidate all loops containing LABEL. */
2675 static void
2676 invalidate_loops_containing_label (rtx label)
2678 struct loop *loop;
2679 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2680 loop->invalid = 1;
2683 /* Scan the function looking for loops. Record the start and end of each loop.
2684 Also mark as invalid loops any loops that contain a setjmp or are branched
2685 to from outside the loop. */
2687 static void
2688 find_and_verify_loops (rtx f, struct loops *loops)
2690 rtx insn;
2691 rtx label;
2692 int num_loops;
2693 struct loop *current_loop;
2694 struct loop *next_loop;
2695 struct loop *loop;
2697 num_loops = loops->num;
2699 compute_luids (f, NULL_RTX, 0);
2701 /* If there are jumps to undefined labels,
2702 treat them as jumps out of any/all loops.
2703 This also avoids writing past end of tables when there are no loops. */
2704 uid_loop[0] = NULL;
2706 /* Find boundaries of loops, mark which loops are contained within
2707 loops, and invalidate loops that have setjmp. */
2709 num_loops = 0;
2710 current_loop = NULL;
2711 for (insn = f; insn; insn = NEXT_INSN (insn))
2713 if (NOTE_P (insn))
2714 switch (NOTE_LINE_NUMBER (insn))
2716 case NOTE_INSN_LOOP_BEG:
2717 next_loop = loops->array + num_loops;
2718 next_loop->num = num_loops;
2719 num_loops++;
2720 next_loop->start = insn;
2721 next_loop->outer = current_loop;
2722 current_loop = next_loop;
2723 break;
2725 case NOTE_INSN_LOOP_END:
2726 if (! current_loop)
2727 abort ();
2729 current_loop->end = insn;
2730 current_loop = current_loop->outer;
2731 break;
2733 default:
2734 break;
2737 if (CALL_P (insn)
2738 && find_reg_note (insn, REG_SETJMP, NULL))
2740 /* In this case, we must invalidate our current loop and any
2741 enclosing loop. */
2742 for (loop = current_loop; loop; loop = loop->outer)
2744 loop->invalid = 1;
2745 if (loop_dump_stream)
2746 fprintf (loop_dump_stream,
2747 "\nLoop at %d ignored due to setjmp.\n",
2748 INSN_UID (loop->start));
2752 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2753 enclosing loop, but this doesn't matter. */
2754 uid_loop[INSN_UID (insn)] = current_loop;
2757 /* Any loop containing a label used in an initializer must be invalidated,
2758 because it can be jumped into from anywhere. */
2759 for (label = forced_labels; label; label = XEXP (label, 1))
2760 invalidate_loops_containing_label (XEXP (label, 0));
2762 /* Any loop containing a label used for an exception handler must be
2763 invalidated, because it can be jumped into from anywhere. */
2764 for_each_eh_label (invalidate_loops_containing_label);
2766 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2767 loop that it is not contained within, that loop is marked invalid.
2768 If any INSN or CALL_INSN uses a label's address, then the loop containing
2769 that label is marked invalid, because it could be jumped into from
2770 anywhere.
2772 Also look for blocks of code ending in an unconditional branch that
2773 exits the loop. If such a block is surrounded by a conditional
2774 branch around the block, move the block elsewhere (see below) and
2775 invert the jump to point to the code block. This may eliminate a
2776 label in our loop and will simplify processing by both us and a
2777 possible second cse pass. */
2779 for (insn = f; insn; insn = NEXT_INSN (insn))
2780 if (INSN_P (insn))
2782 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2784 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
2786 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2787 if (note)
2788 invalidate_loops_containing_label (XEXP (note, 0));
2791 if (!JUMP_P (insn))
2792 continue;
2794 mark_loop_jump (PATTERN (insn), this_loop);
2796 /* See if this is an unconditional branch outside the loop. */
2797 if (this_loop
2798 && (GET_CODE (PATTERN (insn)) == RETURN
2799 || (any_uncondjump_p (insn)
2800 && onlyjump_p (insn)
2801 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2802 != this_loop)))
2803 && get_max_uid () < max_uid_for_loop)
2805 rtx p;
2806 rtx our_next = next_real_insn (insn);
2807 rtx last_insn_to_move = NEXT_INSN (insn);
2808 struct loop *dest_loop;
2809 struct loop *outer_loop = NULL;
2811 /* Go backwards until we reach the start of the loop, a label,
2812 or a JUMP_INSN. */
2813 for (p = PREV_INSN (insn);
2814 !LABEL_P (p)
2815 && ! (NOTE_P (p)
2816 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2817 && !JUMP_P (p);
2818 p = PREV_INSN (p))
2821 /* Check for the case where we have a jump to an inner nested
2822 loop, and do not perform the optimization in that case. */
2824 if (JUMP_LABEL (insn))
2826 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2827 if (dest_loop)
2829 for (outer_loop = dest_loop; outer_loop;
2830 outer_loop = outer_loop->outer)
2831 if (outer_loop == this_loop)
2832 break;
2836 /* Make sure that the target of P is within the current loop. */
2838 if (JUMP_P (p) && JUMP_LABEL (p)
2839 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2840 outer_loop = this_loop;
2842 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2843 we have a block of code to try to move.
2845 We look backward and then forward from the target of INSN
2846 to find a BARRIER at the same loop depth as the target.
2847 If we find such a BARRIER, we make a new label for the start
2848 of the block, invert the jump in P and point it to that label,
2849 and move the block of code to the spot we found. */
2851 if (! outer_loop
2852 && JUMP_P (p)
2853 && JUMP_LABEL (p) != 0
2854 /* Just ignore jumps to labels that were never emitted.
2855 These always indicate compilation errors. */
2856 && INSN_UID (JUMP_LABEL (p)) != 0
2857 && any_condjump_p (p) && onlyjump_p (p)
2858 && next_real_insn (JUMP_LABEL (p)) == our_next
2859 /* If it's not safe to move the sequence, then we
2860 mustn't try. */
2861 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2862 &last_insn_to_move))
2864 rtx target
2865 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2866 struct loop *target_loop = uid_loop[INSN_UID (target)];
2867 rtx loc, loc2;
2868 rtx tmp;
2870 /* Search for possible garbage past the conditional jumps
2871 and look for the last barrier. */
2872 for (tmp = last_insn_to_move;
2873 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
2874 if (BARRIER_P (tmp))
2875 last_insn_to_move = tmp;
2877 for (loc = target; loc; loc = PREV_INSN (loc))
2878 if (BARRIER_P (loc)
2879 /* Don't move things inside a tablejump. */
2880 && ((loc2 = next_nonnote_insn (loc)) == 0
2881 || !LABEL_P (loc2)
2882 || (loc2 = next_nonnote_insn (loc2)) == 0
2883 || !JUMP_P (loc2)
2884 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2885 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2886 && uid_loop[INSN_UID (loc)] == target_loop)
2887 break;
2889 if (loc == 0)
2890 for (loc = target; loc; loc = NEXT_INSN (loc))
2891 if (BARRIER_P (loc)
2892 /* Don't move things inside a tablejump. */
2893 && ((loc2 = next_nonnote_insn (loc)) == 0
2894 || !LABEL_P (loc2)
2895 || (loc2 = next_nonnote_insn (loc2)) == 0
2896 || !JUMP_P (loc2)
2897 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2898 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2899 && uid_loop[INSN_UID (loc)] == target_loop)
2900 break;
2902 if (loc)
2904 rtx cond_label = JUMP_LABEL (p);
2905 rtx new_label = get_label_after (p);
2907 /* Ensure our label doesn't go away. */
2908 LABEL_NUSES (cond_label)++;
2910 /* Verify that uid_loop is large enough and that
2911 we can invert P. */
2912 if (invert_jump (p, new_label, 1))
2914 rtx q, r;
2916 /* If no suitable BARRIER was found, create a suitable
2917 one before TARGET. Since TARGET is a fall through
2918 path, we'll need to insert a jump around our block
2919 and add a BARRIER before TARGET.
2921 This creates an extra unconditional jump outside
2922 the loop. However, the benefits of removing rarely
2923 executed instructions from inside the loop usually
2924 outweigh the cost of the extra unconditional jump
2925 outside the loop. */
2926 if (loc == 0)
2928 rtx temp;
2930 temp = gen_jump (JUMP_LABEL (insn));
2931 temp = emit_jump_insn_before (temp, target);
2932 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2933 LABEL_NUSES (JUMP_LABEL (insn))++;
2934 loc = emit_barrier_before (target);
2937 /* Include the BARRIER after INSN and copy the
2938 block after LOC. */
2939 if (squeeze_notes (&new_label, &last_insn_to_move))
2940 abort ();
2941 reorder_insns (new_label, last_insn_to_move, loc);
2943 /* All those insns are now in TARGET_LOOP. */
2944 for (q = new_label;
2945 q != NEXT_INSN (last_insn_to_move);
2946 q = NEXT_INSN (q))
2947 uid_loop[INSN_UID (q)] = target_loop;
2949 /* The label jumped to by INSN is no longer a loop
2950 exit. Unless INSN does not have a label (e.g.,
2951 it is a RETURN insn), search loop->exit_labels
2952 to find its label_ref, and remove it. Also turn
2953 off LABEL_OUTSIDE_LOOP_P bit. */
2954 if (JUMP_LABEL (insn))
2956 for (q = 0, r = this_loop->exit_labels; r;
2958 q = r, r = LABEL_NEXTREF (r))
2959 if (XEXP (r, 0) == JUMP_LABEL (insn))
2961 LABEL_OUTSIDE_LOOP_P (r) = 0;
2962 if (q)
2963 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2964 else
2965 this_loop->exit_labels = LABEL_NEXTREF (r);
2966 break;
2969 for (loop = this_loop; loop && loop != target_loop;
2970 loop = loop->outer)
2971 loop->exit_count--;
2973 /* If we didn't find it, then something is
2974 wrong. */
2975 if (! r)
2976 abort ();
2979 /* P is now a jump outside the loop, so it must be put
2980 in loop->exit_labels, and marked as such.
2981 The easiest way to do this is to just call
2982 mark_loop_jump again for P. */
2983 mark_loop_jump (PATTERN (p), this_loop);
2985 /* If INSN now jumps to the insn after it,
2986 delete INSN. */
2987 if (JUMP_LABEL (insn) != 0
2988 && (next_real_insn (JUMP_LABEL (insn))
2989 == next_real_insn (insn)))
2990 delete_related_insns (insn);
2993 /* Continue the loop after where the conditional
2994 branch used to jump, since the only branch insn
2995 in the block (if it still remains) is an inter-loop
2996 branch and hence needs no processing. */
2997 insn = NEXT_INSN (cond_label);
2999 if (--LABEL_NUSES (cond_label) == 0)
3000 delete_related_insns (cond_label);
3002 /* This loop will be continued with NEXT_INSN (insn). */
3003 insn = PREV_INSN (insn);
3010 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
3011 loops it is contained in, mark the target loop invalid.
3013 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3015 static void
3016 mark_loop_jump (rtx x, struct loop *loop)
3018 struct loop *dest_loop;
3019 struct loop *outer_loop;
3020 int i;
3022 switch (GET_CODE (x))
3024 case PC:
3025 case USE:
3026 case CLOBBER:
3027 case REG:
3028 case MEM:
3029 case CONST_INT:
3030 case CONST_DOUBLE:
3031 case RETURN:
3032 return;
3034 case CONST:
3035 /* There could be a label reference in here. */
3036 mark_loop_jump (XEXP (x, 0), loop);
3037 return;
3039 case PLUS:
3040 case MINUS:
3041 case MULT:
3042 mark_loop_jump (XEXP (x, 0), loop);
3043 mark_loop_jump (XEXP (x, 1), loop);
3044 return;
3046 case LO_SUM:
3047 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3048 mark_loop_jump (XEXP (x, 1), loop);
3049 return;
3051 case SIGN_EXTEND:
3052 case ZERO_EXTEND:
3053 mark_loop_jump (XEXP (x, 0), loop);
3054 return;
3056 case LABEL_REF:
3057 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3059 /* Link together all labels that branch outside the loop. This
3060 is used by final_[bg]iv_value and the loop unrolling code. Also
3061 mark this LABEL_REF so we know that this branch should predict
3062 false. */
3064 /* A check to make sure the label is not in an inner nested loop,
3065 since this does not count as a loop exit. */
3066 if (dest_loop)
3068 for (outer_loop = dest_loop; outer_loop;
3069 outer_loop = outer_loop->outer)
3070 if (outer_loop == loop)
3071 break;
3073 else
3074 outer_loop = NULL;
3076 if (loop && ! outer_loop)
3078 LABEL_OUTSIDE_LOOP_P (x) = 1;
3079 LABEL_NEXTREF (x) = loop->exit_labels;
3080 loop->exit_labels = x;
3082 for (outer_loop = loop;
3083 outer_loop && outer_loop != dest_loop;
3084 outer_loop = outer_loop->outer)
3085 outer_loop->exit_count++;
3088 /* If this is inside a loop, but not in the current loop or one enclosed
3089 by it, it invalidates at least one loop. */
3091 if (! dest_loop)
3092 return;
3094 /* We must invalidate every nested loop containing the target of this
3095 label, except those that also contain the jump insn. */
3097 for (; dest_loop; dest_loop = dest_loop->outer)
3099 /* Stop when we reach a loop that also contains the jump insn. */
3100 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3101 if (dest_loop == outer_loop)
3102 return;
3104 /* If we get here, we know we need to invalidate a loop. */
3105 if (loop_dump_stream && ! dest_loop->invalid)
3106 fprintf (loop_dump_stream,
3107 "\nLoop at %d ignored due to multiple entry points.\n",
3108 INSN_UID (dest_loop->start));
3110 dest_loop->invalid = 1;
3112 return;
3114 case SET:
3115 /* If this is not setting pc, ignore. */
3116 if (SET_DEST (x) == pc_rtx)
3117 mark_loop_jump (SET_SRC (x), loop);
3118 return;
3120 case IF_THEN_ELSE:
3121 mark_loop_jump (XEXP (x, 1), loop);
3122 mark_loop_jump (XEXP (x, 2), loop);
3123 return;
3125 case PARALLEL:
3126 case ADDR_VEC:
3127 for (i = 0; i < XVECLEN (x, 0); i++)
3128 mark_loop_jump (XVECEXP (x, 0, i), loop);
3129 return;
3131 case ADDR_DIFF_VEC:
3132 for (i = 0; i < XVECLEN (x, 1); i++)
3133 mark_loop_jump (XVECEXP (x, 1, i), loop);
3134 return;
3136 default:
3137 /* Strictly speaking this is not a jump into the loop, only a possible
3138 jump out of the loop. However, we have no way to link the destination
3139 of this jump onto the list of exit labels. To be safe we mark this
3140 loop and any containing loops as invalid. */
3141 if (loop)
3143 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3145 if (loop_dump_stream && ! outer_loop->invalid)
3146 fprintf (loop_dump_stream,
3147 "\nLoop at %d ignored due to unknown exit jump.\n",
3148 INSN_UID (outer_loop->start));
3149 outer_loop->invalid = 1;
3152 return;
3156 /* Return nonzero if there is a label in the range from
3157 insn INSN to and including the insn whose luid is END.
3158 INSN must have an assigned luid (i.e., it must not have
3159 been previously created by loop.c). */
3161 static int
3162 labels_in_range_p (rtx insn, int end)
3164 while (insn && INSN_LUID (insn) <= end)
3166 if (LABEL_P (insn))
3167 return 1;
3168 insn = NEXT_INSN (insn);
3171 return 0;
3174 /* Record that a memory reference X is being set. */
3176 static void
3177 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3178 void *data ATTRIBUTE_UNUSED)
3180 struct loop_info *loop_info = data;
3182 if (x == 0 || !MEM_P (x))
3183 return;
3185 /* Count number of memory writes.
3186 This affects heuristics in strength_reduce. */
3187 loop_info->num_mem_sets++;
3189 /* BLKmode MEM means all memory is clobbered. */
3190 if (GET_MODE (x) == BLKmode)
3192 if (MEM_READONLY_P (x))
3193 loop_info->unknown_constant_address_altered = 1;
3194 else
3195 loop_info->unknown_address_altered = 1;
3197 return;
3200 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3201 loop_info->store_mems);
3204 /* X is a value modified by an INSN that references a biv inside a loop
3205 exit test (ie, X is somehow related to the value of the biv). If X
3206 is a pseudo that is used more than once, then the biv is (effectively)
3207 used more than once. DATA is a pointer to a loop_regs structure. */
3209 static void
3210 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3212 struct loop_regs *regs = (struct loop_regs *) data;
3214 if (x == 0)
3215 return;
3217 while (GET_CODE (x) == STRICT_LOW_PART
3218 || GET_CODE (x) == SIGN_EXTRACT
3219 || GET_CODE (x) == ZERO_EXTRACT
3220 || GET_CODE (x) == SUBREG)
3221 x = XEXP (x, 0);
3223 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3224 return;
3226 /* If we do not have usage information, or if we know the register
3227 is used more than once, note that fact for check_dbra_loop. */
3228 if (REGNO (x) >= max_reg_before_loop
3229 || ! regs->array[REGNO (x)].single_usage
3230 || regs->array[REGNO (x)].single_usage == const0_rtx)
3231 regs->multiple_uses = 1;
3234 /* Return nonzero if the rtx X is invariant over the current loop.
3236 The value is 2 if we refer to something only conditionally invariant.
3238 A memory ref is invariant if it is not volatile and does not conflict
3239 with anything stored in `loop_info->store_mems'. */
3242 loop_invariant_p (const struct loop *loop, rtx x)
3244 struct loop_info *loop_info = LOOP_INFO (loop);
3245 struct loop_regs *regs = LOOP_REGS (loop);
3246 int i;
3247 enum rtx_code code;
3248 const char *fmt;
3249 int conditional = 0;
3250 rtx mem_list_entry;
3252 if (x == 0)
3253 return 1;
3254 code = GET_CODE (x);
3255 switch (code)
3257 case CONST_INT:
3258 case CONST_DOUBLE:
3259 case SYMBOL_REF:
3260 case CONST:
3261 return 1;
3263 case LABEL_REF:
3264 /* A LABEL_REF is normally invariant, however, if we are unrolling
3265 loops, and this label is inside the loop, then it isn't invariant.
3266 This is because each unrolled copy of the loop body will have
3267 a copy of this label. If this was invariant, then an insn loading
3268 the address of this label into a register might get moved outside
3269 the loop, and then each loop body would end up using the same label.
3271 We don't know the loop bounds here though, so just fail for all
3272 labels. */
3273 if (flag_old_unroll_loops)
3274 return 0;
3275 else
3276 return 1;
3278 case PC:
3279 case CC0:
3280 case UNSPEC_VOLATILE:
3281 return 0;
3283 case REG:
3284 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3285 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3286 && ! current_function_has_nonlocal_goto)
3287 return 1;
3289 if (LOOP_INFO (loop)->has_call
3290 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3291 return 0;
3293 /* Out-of-range regs can occur when we are called from unrolling.
3294 These registers created by the unroller are set in the loop,
3295 hence are never invariant.
3296 Other out-of-range regs can be generated by load_mems; those that
3297 are written to in the loop are not invariant, while those that are
3298 not written to are invariant. It would be easy for load_mems
3299 to set n_times_set correctly for these registers, however, there
3300 is no easy way to distinguish them from registers created by the
3301 unroller. */
3303 if (REGNO (x) >= (unsigned) regs->num)
3304 return 0;
3306 if (regs->array[REGNO (x)].set_in_loop < 0)
3307 return 2;
3309 return regs->array[REGNO (x)].set_in_loop == 0;
3311 case MEM:
3312 /* Volatile memory references must be rejected. Do this before
3313 checking for read-only items, so that volatile read-only items
3314 will be rejected also. */
3315 if (MEM_VOLATILE_P (x))
3316 return 0;
3318 /* See if there is any dependence between a store and this load. */
3319 mem_list_entry = loop_info->store_mems;
3320 while (mem_list_entry)
3322 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3323 x, rtx_varies_p))
3324 return 0;
3326 mem_list_entry = XEXP (mem_list_entry, 1);
3329 /* It's not invalidated by a store in memory
3330 but we must still verify the address is invariant. */
3331 break;
3333 case ASM_OPERANDS:
3334 /* Don't mess with insns declared volatile. */
3335 if (MEM_VOLATILE_P (x))
3336 return 0;
3337 break;
3339 default:
3340 break;
3343 fmt = GET_RTX_FORMAT (code);
3344 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3346 if (fmt[i] == 'e')
3348 int tem = loop_invariant_p (loop, XEXP (x, i));
3349 if (tem == 0)
3350 return 0;
3351 if (tem == 2)
3352 conditional = 1;
3354 else if (fmt[i] == 'E')
3356 int j;
3357 for (j = 0; j < XVECLEN (x, i); j++)
3359 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3360 if (tem == 0)
3361 return 0;
3362 if (tem == 2)
3363 conditional = 1;
3369 return 1 + conditional;
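/* The tristate result above is typically consumed as follows; this caller
   is hypothetical and X is assumed to be some rtx seen in LOOP.  */
#if 0
switch (loop_invariant_p (loop, x))
  {
  case 0:
    /* Varies within the loop; cannot be hoisted.  */
    break;
  case 1:
    /* Unconditionally invariant.  */
    break;
  case 2:
    /* Invariant only on conditionally executed paths; hoisting it
       requires more care (see the m->cond handling above).  */
    break;
  }
#endif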
3372 /* Return nonzero if all the insns in the loop that set REG
3373 are INSN and the immediately following insns,
3374 and if each of those insns sets REG in an invariant way
3375 (not counting uses of REG in them).
3377 The value is 2 if some of these insns are only conditionally invariant.
3379 We assume that INSN itself is the first set of REG
3380 and that its source is invariant. */
3382 static int
3383 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3384 rtx insn)
3386 struct loop_regs *regs = LOOP_REGS (loop);
3387 rtx p = insn;
3388 unsigned int regno = REGNO (reg);
3389 rtx temp;
3390 /* Number of sets we have to insist on finding after INSN. */
3391 int count = n_sets - 1;
3392 int old = regs->array[regno].set_in_loop;
3393 int value = 0;
3394 int this;
3396 /* If N_SETS hit the limit, we can't rely on its value. */
3397 if (n_sets == 127)
3398 return 0;
3400 regs->array[regno].set_in_loop = 0;
3402 while (count > 0)
3404 enum rtx_code code;
3405 rtx set;
3407 p = NEXT_INSN (p);
3408 code = GET_CODE (p);
3410 /* If library call, skip to end of it. */
3411 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3412 p = XEXP (temp, 0);
3414 this = 0;
3415 if (code == INSN
3416 && (set = single_set (p))
3417 && REG_P (SET_DEST (set))
3418 && REGNO (SET_DEST (set)) == regno)
3420 this = loop_invariant_p (loop, SET_SRC (set));
3421 if (this != 0)
3422 value |= this;
3423 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3425 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3426 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3427 notes are OK. */
3428 this = (CONSTANT_P (XEXP (temp, 0))
3429 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3430 && loop_invariant_p (loop, XEXP (temp, 0))));
3431 if (this != 0)
3432 value |= this;
3435 if (this != 0)
3436 count--;
3437 else if (code != NOTE)
3439 regs->array[regno].set_in_loop = old;
3440 return 0;
3444 regs->array[regno].set_in_loop = old;
3445 /* If loop_invariant_p ever returned 2, we return 2. */
3446 return 1 + (value & 2);
3449 /* Look at all uses (not sets) of registers in X. For each, if it is
3450 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3451 a different insn, set USAGE[REGNO] to const0_rtx. */
3453 static void
3454 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3456 enum rtx_code code = GET_CODE (x);
3457 const char *fmt = GET_RTX_FORMAT (code);
3458 int i, j;
3460 if (code == REG)
3461 regs->array[REGNO (x)].single_usage
3462 = (regs->array[REGNO (x)].single_usage != 0
3463 && regs->array[REGNO (x)].single_usage != insn)
3464 ? const0_rtx : insn;
3466 else if (code == SET)
3468 /* Don't count SET_DEST if it is a REG; otherwise count things
3469 in SET_DEST because if a register is partially modified, it won't
3470 show up as a potential movable so we don't care how USAGE is set
3471 for it. */
3472 if (!REG_P (SET_DEST (x)))
3473 find_single_use_in_loop (regs, insn, SET_DEST (x));
3474 find_single_use_in_loop (regs, insn, SET_SRC (x));
3476 else
3477 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3479 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3480 find_single_use_in_loop (regs, insn, XEXP (x, i));
3481 else if (fmt[i] == 'E')
3482 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3483 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
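/* The single_usage field written above encodes three states; a hypothetical
   consumer (REGNO is assumed to be a pseudo seen in the loop) reads it
   like this.  */
#if 0
rtx usage = regs->array[regno].single_usage;

if (usage == 0)
  ;   /* The register is never used in the loop.  */
else if (usage == const0_rtx)
  ;   /* The register is used in more than one insn.  */
else
  ;   /* USAGE is the single insn that uses the register.  */
#endif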
3487 /* Count and record any set in X which is contained in INSN. Update
3488 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3489 in X. */
3491 static void
3492 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3494 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3495 /* Don't move a reg that has an explicit clobber.
3496 It's not worth the pain to try to do it correctly. */
3497 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3499 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3501 rtx dest = SET_DEST (x);
3502 while (GET_CODE (dest) == SUBREG
3503 || GET_CODE (dest) == ZERO_EXTRACT
3504 || GET_CODE (dest) == SIGN_EXTRACT
3505 || GET_CODE (dest) == STRICT_LOW_PART)
3506 dest = XEXP (dest, 0);
3507 if (REG_P (dest))
3509 int i;
3510 int regno = REGNO (dest);
3511 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3513 /* If this is the first setting of this reg
3514 in current basic block, and it was set before,
3515 it must be set in two basic blocks, so it cannot
3516 be moved out of the loop. */
3517 if (regs->array[regno].set_in_loop > 0
3518 && last_set[regno] == 0)
3519 regs->array[regno+i].may_not_optimize = 1;
3520 /* If this is not first setting in current basic block,
3521 see if reg was used in between previous one and this.
3522 If so, neither one can be moved. */
3523 if (last_set[regno] != 0
3524 && reg_used_between_p (dest, last_set[regno], insn))
3525 regs->array[regno+i].may_not_optimize = 1;
3526 if (regs->array[regno+i].set_in_loop < 127)
3527 ++regs->array[regno+i].set_in_loop;
3528 last_set[regno+i] = insn;
3534 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3535 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3536 contained in insn INSN is used by any insn that precedes INSN in
3537 cyclic order starting from the loop entry point.
3539 We don't want to use INSN_LUID here because if we restrict INSN to those
3540 that have a valid INSN_LUID, it means we cannot move an invariant out
3541 from an inner loop past two loops. */
3543 static int
3544 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3546 rtx reg = SET_DEST (set);
3547 rtx p;
3549 /* Scan forward checking for register usage. If we hit INSN, we
3550 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3551 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3553 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3554 return 1;
3556 if (p == loop->end)
3557 p = loop->start;
3560 return 0;
3564 /* Information we collect about arrays that we might want to prefetch. */
3565 struct prefetch_info
3567 struct iv_class *class; /* Class this prefetch is based on. */
3568 struct induction *giv; /* GIV this prefetch is based on. */
3569 rtx base_address; /* Start prefetching from this address plus
3570 index. */
3571 HOST_WIDE_INT index;
3572 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3573 iteration. */
3574 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3575 prefetch area in one iteration. */
3576 unsigned int total_bytes; /* Total bytes loop will access in this block.
3577 This is set only for loops with known
3578 iteration counts and is 0xffffffff
3579 otherwise. */
3580 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3581 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3582 unsigned int write : 1; /* 1 for read/write prefetches. */
3585 /* Data used by check_store function. */
3586 struct check_store_data
3588 rtx mem_address;
3589 int mem_write;
3592 static void check_store (rtx, rtx, void *);
3593 static void emit_prefetch_instructions (struct loop *);
3594 static int rtx_equal_for_prefetch_p (rtx, rtx);
3596 /* Set mem_write when mem_address is found. Used as callback to
3597 note_stores. */
3598 static void
3599 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3601 struct check_store_data *d = (struct check_store_data *) data;
3603 if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3604 d->mem_write = 1;
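/* A sketch of the intended use of check_store: INSN and ADDR are
   hypothetical, and the question asked is whether INSN writes through
   ADDR.  */
#if 0
{
  struct check_store_data d;

  d.mem_address = addr;
  d.mem_write = 0;
  note_stores (PATTERN (insn), check_store, &d);
  if (d.mem_write)
    {
      /* INSN stores through ADDR, so any prefetch for this address
	 should be a write prefetch.  */
    }
}
#endif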
3607 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3608 important to get some addresses combined. Later more sophisticated
3609 transformations can be added when necessary.
3611 ??? The same trick with swapping operands is done at several other places.
3612 It would be nice to develop some common way to handle this. */
3614 static int
3615 rtx_equal_for_prefetch_p (rtx x, rtx y)
3617 int i;
3618 int j;
3619 enum rtx_code code = GET_CODE (x);
3620 const char *fmt;
3622 if (x == y)
3623 return 1;
3624 if (code != GET_CODE (y))
3625 return 0;
3627 if (COMMUTATIVE_ARITH_P (x))
3629 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3630 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3631 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3632 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3635 /* Compare the elements. If any pair of corresponding elements fails to
3636 match, return 0 for the whole thing. */
3638 fmt = GET_RTX_FORMAT (code);
3639 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3641 switch (fmt[i])
3643 case 'w':
3644 if (XWINT (x, i) != XWINT (y, i))
3645 return 0;
3646 break;
3648 case 'i':
3649 if (XINT (x, i) != XINT (y, i))
3650 return 0;
3651 break;
3653 case 'E':
3654 /* Two vectors must have the same length. */
3655 if (XVECLEN (x, i) != XVECLEN (y, i))
3656 return 0;
3658 /* And the corresponding elements must match. */
3659 for (j = 0; j < XVECLEN (x, i); j++)
3660 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3661 XVECEXP (y, i, j)) == 0)
3662 return 0;
3663 break;
3665 case 'e':
3666 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3667 return 0;
3668 break;
3670 case 's':
3671 if (strcmp (XSTR (x, i), XSTR (y, i)))
3672 return 0;
3673 break;
3675 case 'u':
3676 /* These are just backpointers, so they don't matter. */
3677 break;
3679 case '0':
3680 break;
3682 /* It is believed that rtx's at this level will never
3683 contain anything but integers and other rtx's,
3684 except for within LABEL_REFs and SYMBOL_REFs. */
3685 default:
3686 abort ();
3689 return 1;
3692 /* Remove constant addition value from the expression X (when present)
3693 and return it. */
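/* For example (illustrative only): given *X == (plus (reg 60) (const_int 16))
   this returns 16 and leaves (reg 60) in *X; given a bare (const_int 8) it
   returns 8 and stores const0_rtx in *X.  */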
3695 static HOST_WIDE_INT
3696 remove_constant_addition (rtx *x)
3698 HOST_WIDE_INT addval = 0;
3699 rtx exp = *x;
3701 /* Avoid clobbering a shared CONST expression. */
3702 if (GET_CODE (exp) == CONST)
3704 if (GET_CODE (XEXP (exp, 0)) == PLUS
3705 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3706 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3708 *x = XEXP (XEXP (exp, 0), 0);
3709 return INTVAL (XEXP (XEXP (exp, 0), 1));
3711 return 0;
3714 if (GET_CODE (exp) == CONST_INT)
3716 addval = INTVAL (exp);
3717 *x = const0_rtx;
3720 /* For a PLUS expression, recurse on both operands. */
3721 else if (GET_CODE (exp) == PLUS)
3723 addval += remove_constant_addition (&XEXP (exp, 0));
3724 addval += remove_constant_addition (&XEXP (exp, 1));
3726 /* If one of the operands reduced to a constant, remove the resulting
3727 extra zero from the expression. */
3728 if (XEXP (exp, 0) == const0_rtx)
3729 *x = XEXP (exp, 1);
3730 else if (XEXP (exp, 1) == const0_rtx)
3731 *x = XEXP (exp, 0);
3734 return addval;
3737 /* Attempt to identify accesses to arrays that are most likely to cause cache
3738 misses, and emit prefetch instructions a few prefetch blocks forward.
3740 To detect the arrays we use the GIV information that was collected by the
3741 strength reduction pass.
3743 The prefetch instructions are generated after the GIV information has been
3744 collected and before the strength reduction process. The new GIVs are
3745 injected into the strength reduction tables, so the prefetch addresses are
3746 optimized as well.
3748 GIVs are split into base address, stride, and constant addition values.
3749 GIVs with the same base address, stride, and close addition values are
3750 combined into a single prefetch. Writes to GIVs are also detected, so that
3751 prefetch-for-write instructions can be used for the blocks we write to, on
3752 machines that support write prefetches.
3754 Several heuristics are used to determine when to prefetch. They are
3755 controlled by defined symbols that can be overridden for each target. */
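/* As a rough source-level sketch (illustrative only, not literally what this
   RTL pass emits), a loop such as

	for (i = 0; i < n; i++)
	  sum += a[i];

   ends up behaving roughly as if the body also executed

	__builtin_prefetch ((char *) &a[i] + AHEAD * PREFETCH_BLOCK, 0, 3);

   each iteration, where AHEAD is derived below from SIMULTANEOUS_PREFETCHES
   and the number of prefetches needed, plus a few prefetches of the first
   cache lines emitted ahead of the loop itself.  */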
3757 static void
3758 emit_prefetch_instructions (struct loop *loop)
3760 int num_prefetches = 0;
3761 int num_real_prefetches = 0;
3762 int num_real_write_prefetches = 0;
3763 int num_prefetches_before = 0;
3764 int num_write_prefetches_before = 0;
3765 int ahead = 0;
3766 int i;
3767 struct iv_class *bl;
3768 struct induction *iv;
3769 struct prefetch_info info[MAX_PREFETCHES];
3770 struct loop_ivs *ivs = LOOP_IVS (loop);
3772 if (!HAVE_prefetch)
3773 return;
3775 /* Consider only loops without calls. When a call is made, the loop is
3776 probably slow enough that prefetching would gain little. */
3777 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3779 if (loop_dump_stream)
3780 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3782 return;
3785 /* Don't prefetch in loops known to have few iterations. */
3786 if (PREFETCH_NO_LOW_LOOPCNT
3787 && LOOP_INFO (loop)->n_iterations
3788 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3790 if (loop_dump_stream)
3791 fprintf (loop_dump_stream,
3792 "Prefetch: ignoring loop: not enough iterations.\n");
3793 return;
3796 /* Search all induction variables and pick those interesting for the prefetch
3797 machinery. */
3798 for (bl = ivs->list; bl; bl = bl->next)
3800 struct induction *biv = bl->biv, *biv1;
3801 int basestride = 0;
3803 biv1 = biv;
3805 /* Expect all BIVs to be executed in each iteration. This makes our
3806 analysis more conservative. */
3807 while (biv1)
3809 /* Discard non-constant additions that we can't handle well yet, and
3810 BIVs that are executed multiple times; such BIVs ought to be
3811 handled in the nested loop. We accept not_every_iteration BIVs,
3812 since these only result in larger strides and make our
3813 heuristics more conservative. */
3814 if (GET_CODE (biv->add_val) != CONST_INT)
3816 if (loop_dump_stream)
3818 fprintf (loop_dump_stream,
3819 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3820 REGNO (biv->src_reg), INSN_UID (biv->insn));
3821 print_rtl (loop_dump_stream, biv->add_val);
3822 fprintf (loop_dump_stream, "\n");
3824 break;
3827 if (biv->maybe_multiple)
3829 if (loop_dump_stream)
3831 fprintf (loop_dump_stream,
3832 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3833 REGNO (biv->src_reg), INSN_UID (biv->insn));
3834 print_rtl (loop_dump_stream, biv->add_val);
3835 fprintf (loop_dump_stream, "\n");
3837 break;
3840 basestride += INTVAL (biv1->add_val);
3841 biv1 = biv1->next_iv;
3844 if (biv1 || !basestride)
3845 continue;
3847 for (iv = bl->giv; iv; iv = iv->next_iv)
3849 rtx address;
3850 rtx temp;
3851 HOST_WIDE_INT index = 0;
3852 int add = 1;
3853 HOST_WIDE_INT stride = 0;
3854 int stride_sign = 1;
3855 struct check_store_data d;
3856 const char *ignore_reason = NULL;
3857 int size = GET_MODE_SIZE (GET_MODE (iv));
3859 /* See whether an induction variable is interesting to us and if
3860 not, report the reason. */
3861 if (iv->giv_type != DEST_ADDR)
3862 ignore_reason = "giv is not a destination address";
3864 /* We are interested only in constant stride memory references
3865 in order to be able to compute density easily. */
3866 else if (GET_CODE (iv->mult_val) != CONST_INT)
3867 ignore_reason = "stride is not constant";
3869 else
3871 stride = INTVAL (iv->mult_val) * basestride;
3872 if (stride < 0)
3874 stride = -stride;
3875 stride_sign = -1;
3878 /* On some targets, reversed order prefetches are not
3879 worthwhile. */
3880 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3881 ignore_reason = "reversed order stride";
3883 /* Prefetch of accesses with an extreme stride might not be
3884 worthwhile, either. */
3885 else if (PREFETCH_NO_EXTREME_STRIDE
3886 && stride > PREFETCH_EXTREME_STRIDE)
3887 ignore_reason = "extreme stride";
3889 /* Ignore GIVs with varying add values; we can't predict the
3890 value for the next iteration. */
3891 else if (!loop_invariant_p (loop, iv->add_val))
3892 ignore_reason = "giv has varying add value";
3894 /* Ignore GIVs in the nested loops; they ought to have been
3895 handled already. */
3896 else if (iv->maybe_multiple)
3897 ignore_reason = "giv is in nested loop";
3900 if (ignore_reason != NULL)
3902 if (loop_dump_stream)
3903 fprintf (loop_dump_stream,
3904 "Prefetch: ignoring giv at %d: %s.\n",
3905 INSN_UID (iv->insn), ignore_reason);
3906 continue;
3909 /* Determine the pointer to the basic array we are examining. It is
3910 the sum of the BIV's initial value and the GIV's add_val. */
3911 address = copy_rtx (iv->add_val);
3912 temp = copy_rtx (bl->initial_value);
3914 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3915 index = remove_constant_addition (&address);
3917 d.mem_write = 0;
3918 d.mem_address = *iv->location;
3920 /* When the GIV is not always executed, we might be better off by
3921 not dirtying the cache pages. */
3922 if (PREFETCH_CONDITIONAL || iv->always_executed)
3923 note_stores (PATTERN (iv->insn), check_store, &d);
3924 else
3926 if (loop_dump_stream)
3927 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3928 INSN_UID (iv->insn), "in conditional code.");
3929 continue;
3932 /* Attempt to find another prefetch to the same array and see if we
3933 can merge this one. */
3934 for (i = 0; i < num_prefetches; i++)
3935 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3936 && stride == info[i].stride)
3938 /* If both access the same array (the same location,
3939 differing only by a small constant index), merge the
3940 prefetches. Just prefetch for the later access; the
3941 earlier one will be covered by the previous iteration's
3942 prefetch. The artificial threshold should not be too
3943 small, but also not larger than the small portion of
3944 memory usually traversed by a single loop. */
3945 if (index >= info[i].index
3946 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3948 info[i].write |= d.mem_write;
3949 info[i].bytes_accessed += size;
3950 info[i].index = index;
3951 info[i].giv = iv;
3952 info[i].class = bl;
3953 info[num_prefetches].base_address = address;
3954 add = 0;
3955 break;
3958 if (index < info[i].index
3959 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3961 info[i].write |= d.mem_write;
3962 info[i].bytes_accessed += size;
3963 add = 0;
3964 break;
3968 /* Merging failed. */
3969 if (add)
3971 info[num_prefetches].giv = iv;
3972 info[num_prefetches].class = bl;
3973 info[num_prefetches].index = index;
3974 info[num_prefetches].stride = stride;
3975 info[num_prefetches].base_address = address;
3976 info[num_prefetches].write = d.mem_write;
3977 info[num_prefetches].bytes_accessed = size;
3978 num_prefetches++;
3979 if (num_prefetches >= MAX_PREFETCHES)
3981 if (loop_dump_stream)
3982 fprintf (loop_dump_stream,
3983 "Maximal number of prefetches exceeded.\n");
3984 return;
3990 for (i = 0; i < num_prefetches; i++)
3992 int density;
3994 /* Attempt to calculate the total number of bytes fetched by all
3995 iterations of the loop. Avoid overflow. */
3996 if (LOOP_INFO (loop)->n_iterations
3997 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
3998 >= LOOP_INFO (loop)->n_iterations))
3999 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4000 else
4001 info[i].total_bytes = 0xffffffff;
4003 density = info[i].bytes_accessed * 100 / info[i].stride;
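/* Worked example (illustrative): a giv that reads one 4-byte word per
   16-byte stride gives density = 4 * 100 / 16 = 25, i.e. 25% of each
   stride's bytes are actually touched; this is what gets compared against
   PREFETCH_DENSE_MEM below.  */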
4005 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4006 if (PREFETCH_ONLY_DENSE_MEM)
4007 if (density * 256 > PREFETCH_DENSE_MEM * 100
4008 && (info[i].total_bytes / PREFETCH_BLOCK
4009 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4011 info[i].prefetch_before_loop = 1;
4012 info[i].prefetch_in_loop
4013 = (info[i].total_bytes / PREFETCH_BLOCK
4014 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4016 else
4018 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4019 if (loop_dump_stream)
4020 fprintf (loop_dump_stream,
4021 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4022 INSN_UID (info[i].giv->insn), density);
4024 else
4025 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4027 /* Find how many prefetch instructions we'll use within the loop. */
4028 if (info[i].prefetch_in_loop != 0)
4030 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4031 / PREFETCH_BLOCK);
4032 num_real_prefetches += info[i].prefetch_in_loop;
4033 if (info[i].write)
4034 num_real_write_prefetches += info[i].prefetch_in_loop;
4038 /* Determine how many iterations ahead to prefetch within the loop, based
4039 on how many prefetches we currently expect to do within the loop. */
4040 if (num_real_prefetches != 0)
4042 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4044 if (loop_dump_stream)
4045 fprintf (loop_dump_stream,
4046 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4047 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4048 num_real_prefetches = 0, num_real_write_prefetches = 0;
4051 /* We'll also use AHEAD to determine how many prefetch instructions to
4052 emit before a loop, so don't leave it zero. */
4053 if (ahead == 0)
4054 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
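/* Worked example (illustrative): with SIMULTANEOUS_PREFETCHES of 3 and two
   prefetch insns per iteration, AHEAD = 3 / 2 = 1; with four prefetch insns
   the division yields zero, the in-loop prefetches are dropped above, and
   AHEAD falls back to PREFETCH_BLOCKS_BEFORE_LOOP_MAX here for the
   before-loop prefetches.  */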
4056 for (i = 0; i < num_prefetches; i++)
4058 /* Update if we've decided not to prefetch anything within the loop. */
4059 if (num_real_prefetches == 0)
4060 info[i].prefetch_in_loop = 0;
4062 /* Find how many prefetch instructions we'll use before the loop. */
4063 if (info[i].prefetch_before_loop != 0)
4065 int n = info[i].total_bytes / PREFETCH_BLOCK;
4066 if (n > ahead)
4067 n = ahead;
4068 info[i].prefetch_before_loop = n;
4069 num_prefetches_before += n;
4070 if (info[i].write)
4071 num_write_prefetches_before += n;
4074 if (loop_dump_stream)
4076 if (info[i].prefetch_in_loop == 0
4077 && info[i].prefetch_before_loop == 0)
4078 continue;
4079 fprintf (loop_dump_stream, "Prefetch insn: %d",
4080 INSN_UID (info[i].giv->insn));
4081 fprintf (loop_dump_stream,
4082 "; in loop: %d; before: %d; %s\n",
4083 info[i].prefetch_in_loop,
4084 info[i].prefetch_before_loop,
4085 info[i].write ? "read/write" : "read only");
4086 fprintf (loop_dump_stream,
4087 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4088 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4089 info[i].bytes_accessed, info[i].total_bytes);
4090 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4091 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4092 info[i].index, info[i].stride);
4093 print_rtl (loop_dump_stream, info[i].base_address);
4094 fprintf (loop_dump_stream, "\n");
4098 if (num_real_prefetches + num_prefetches_before > 0)
4100 /* Record that this loop uses prefetch instructions. */
4101 LOOP_INFO (loop)->has_prefetch = 1;
4103 if (loop_dump_stream)
4105 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4106 num_real_prefetches, num_real_write_prefetches);
4107 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4108 num_prefetches_before, num_write_prefetches_before);
4112 for (i = 0; i < num_prefetches; i++)
4114 int y;
4116 for (y = 0; y < info[i].prefetch_in_loop; y++)
4118 rtx loc = copy_rtx (*info[i].giv->location);
4119 rtx insn;
4120 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4121 rtx before_insn = info[i].giv->insn;
4122 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4123 rtx seq;
4125 /* We can save some effort by offsetting the address on
4126 architectures with offsettable memory references. */
4127 if (offsettable_address_p (0, VOIDmode, loc))
4128 loc = plus_constant (loc, bytes_ahead);
4129 else
4131 rtx reg = gen_reg_rtx (Pmode);
4132 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4133 GEN_INT (bytes_ahead), reg,
4134 0, before_insn);
4135 loc = reg;
4138 start_sequence ();
4139 /* Make sure the address operand is valid for prefetch. */
4140 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4141 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4142 loc = force_reg (Pmode, loc);
4143 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4144 GEN_INT (3)));
4145 seq = get_insns ();
4146 end_sequence ();
4147 emit_insn_before (seq, before_insn);
4149 /* Check all insns emitted and record the new GIV
4150 information. */
4151 insn = NEXT_INSN (prev_insn);
4152 while (insn != before_insn)
4154 insn = check_insn_for_givs (loop, insn,
4155 info[i].giv->always_executed,
4156 info[i].giv->maybe_multiple);
4157 insn = NEXT_INSN (insn);
4161 if (PREFETCH_BEFORE_LOOP)
4163 /* Emit insns before the loop to fetch the first cache lines or,
4164 if we're not prefetching within the loop, everything we expect
4165 to need. */
4166 for (y = 0; y < info[i].prefetch_before_loop; y++)
4168 rtx reg = gen_reg_rtx (Pmode);
4169 rtx loop_start = loop->start;
4170 rtx init_val = info[i].class->initial_value;
4171 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4172 info[i].giv->add_val,
4173 GEN_INT (y * PREFETCH_BLOCK));
4175 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4176 non-constant INIT_VAL to have the same mode as REG, which
4177 in this case we know to be Pmode. */
4178 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4180 rtx seq;
4182 start_sequence ();
4183 init_val = convert_to_mode (Pmode, init_val, 0);
4184 seq = get_insns ();
4185 end_sequence ();
4186 loop_insn_emit_before (loop, 0, loop_start, seq);
4188 loop_iv_add_mult_emit_before (loop, init_val,
4189 info[i].giv->mult_val,
4190 add_val, reg, 0, loop_start);
4191 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4192 GEN_INT (3)),
4193 loop_start);
4198 return;
4201 /* Communication with routines called via `note_stores'. */
4203 static rtx note_insn;
4205 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4207 static rtx addr_placeholder;
4209 /* ??? Unfinished optimizations, and possible future optimizations,
4210 for the strength reduction code. */
4212 /* ??? The interaction of biv elimination, and recognition of 'constant'
4213 bivs, may cause problems. */
4215 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4216 performance problems.
4218 Perhaps don't eliminate things that can be combined with an addressing
4219 mode. Find all givs that have the same biv, mult_val, and add_val;
4220 then for each giv, check to see if its only use dies in a following
4221 memory address. If so, generate a new memory address and check to see
4222 if it is valid. If it is valid, then store the modified memory address,
4223 otherwise, mark the giv as not done so that it will get its own iv. */
4225 /* ??? Could try to optimize branches when it is known that a biv is always
4226 positive. */
4228 /* ??? When replacing a biv in a compare insn, we should replace it with the
4229 closest giv so that an optimized branch can still be recognized by the
4230 combiner, e.g. the VAX acb insn. */
4232 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4233 was rerun in loop_optimize whenever a register was added or moved.
4234 Also, some of the optimizations could be a little less conservative. */
4236 /* Scan the loop body and call FNCALL for each insn. In addition to the
4237 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4238 callback.
4240 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed at
4241 least once for every loop iteration except for the last one.
4243 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4244 every loop iteration.
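/* For instance (an illustrative sketch): given a body containing

	if (cond) goto skip;  x = ...;  skip: ...;

   the insns between the conditional jump and the label are flagged
   NOT_EVERY_ITERATION, while insns that follow a CODE_LABEL which some later
   jump in the loop can branch back to are flagged MAYBE_MULTIPLE.  */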
4246 void
4247 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4249 int not_every_iteration = 0;
4250 int maybe_multiple = 0;
4251 int past_loop_latch = 0;
4252 rtx p;
4254 /* If loop_scan_start points to the loop exit test, we have to be wary of
4255 subversive use of gotos inside expression statements. */
4256 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4257 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4259 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4260 for (p = next_insn_in_loop (loop, loop->scan_start);
4261 p != NULL_RTX;
4262 p = next_insn_in_loop (loop, p))
4264 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4266 /* Past CODE_LABEL, we get to insns that may be executed multiple
4267 times. The only way we can be sure that they can't is if every
4268 jump insn between here and the end of the loop either
4269 returns, exits the loop, is a jump to a location that is still
4270 behind the label, or is a jump to the loop start. */
4272 if (LABEL_P (p))
4274 rtx insn = p;
4276 maybe_multiple = 0;
4278 while (1)
4280 insn = NEXT_INSN (insn);
4281 if (insn == loop->scan_start)
4282 break;
4283 if (insn == loop->end)
4285 if (loop->top != 0)
4286 insn = loop->top;
4287 else
4288 break;
4289 if (insn == loop->scan_start)
4290 break;
4293 if (JUMP_P (insn)
4294 && GET_CODE (PATTERN (insn)) != RETURN
4295 && (!any_condjump_p (insn)
4296 || (JUMP_LABEL (insn) != 0
4297 && JUMP_LABEL (insn) != loop->scan_start
4298 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4300 maybe_multiple = 1;
4301 break;
4306 /* Past a jump, we get to insns for which we can't count
4307 on whether they will be executed during each iteration. */
4308 /* This code appears twice in strength_reduce. There is also similar
4309 code in scan_loop. */
4310 if (JUMP_P (p)
4311 /* If we enter the loop in the middle, and scan around to the
4312 beginning, don't set not_every_iteration for that.
4313 This can be any kind of jump, since we want to know if insns
4314 will be executed if the loop is executed. */
4315 && !(JUMP_LABEL (p) == loop->top
4316 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4317 && any_uncondjump_p (p))
4318 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4320 rtx label = 0;
4322 /* If this is a jump outside the loop, then it also doesn't
4323 matter. Check to see if the target of this branch is on the
4324 loop->exits_labels list. */
4326 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4327 if (XEXP (label, 0) == JUMP_LABEL (p))
4328 break;
4330 if (!label)
4331 not_every_iteration = 1;
4334 /* Note if we pass a loop latch. If we do, then we cannot clear
4335 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4336 a loop since a jump before the last CODE_LABEL may have started
4337 a new loop iteration.
4339 Note that LOOP_TOP is only set for rotated loops and we need
4340 this check for all loops, so compare against the CODE_LABEL
4341 which immediately follows LOOP_START. */
4342 if (JUMP_P (p)
4343 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4344 past_loop_latch = 1;
4346 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4347 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4348 or not an insn is known to be executed each iteration of the
4349 loop, whether or not any iterations are known to occur.
4351 Therefore, if we have just passed a label and have no more labels
4352 between here and the test insn of the loop, and we have not passed
4353 a jump to the top of the loop, then we know these insns will be
4354 executed each iteration. */
4356 if (not_every_iteration
4357 && !past_loop_latch
4358 && LABEL_P (p)
4359 && no_labels_between_p (p, loop->end))
4360 not_every_iteration = 0;
4364 static void
4365 loop_bivs_find (struct loop *loop)
4367 struct loop_regs *regs = LOOP_REGS (loop);
4368 struct loop_ivs *ivs = LOOP_IVS (loop);
4369 /* Temporary list pointers for traversing ivs->list. */
4370 struct iv_class *bl, **backbl;
4372 ivs->list = 0;
4374 for_each_insn_in_loop (loop, check_insn_for_bivs);
4376 /* Scan ivs->list to remove all regs that proved not to be bivs.
4377 Make a sanity check against regs->n_times_set. */
4378 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4380 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4381 /* The above happens if the register was modified via a SUBREG, etc. */
4382 /* Make sure it is not recognized as a basic induction var: */
4383 || regs->array[bl->regno].n_times_set != bl->biv_count
4384 /* If it was never incremented, it is an invariant that we decided
4385 not to move, so leave it alone. */
4386 || ! bl->incremented)
4388 if (loop_dump_stream)
4389 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4390 bl->regno,
4391 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4392 ? "not induction variable"
4393 : (! bl->incremented ? "never incremented"
4394 : "count error")));
4396 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4397 *backbl = bl->next;
4399 else
4401 backbl = &bl->next;
4403 if (loop_dump_stream)
4404 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4410 /* Determine how BIVS are initialized by looking through pre-header
4411 extended basic block. */
4412 static void
4413 loop_bivs_init_find (struct loop *loop)
4415 struct loop_ivs *ivs = LOOP_IVS (loop);
4416 /* Temporary list pointers for traversing ivs->list. */
4417 struct iv_class *bl;
4418 int call_seen;
4419 rtx p;
4421 /* Find initial value for each biv by searching backwards from loop_start,
4422 halting at first label. Also record any test condition. */
4424 call_seen = 0;
4425 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4427 rtx test;
4429 note_insn = p;
4431 if (CALL_P (p))
4432 call_seen = 1;
4434 if (INSN_P (p))
4435 note_stores (PATTERN (p), record_initial, ivs);
4437 /* Record any test of a biv that branches around the loop if there is no
4438 store between it and the start of the loop. We only care about tests with
4439 constants and registers, and only certain of those. */
4440 if (JUMP_P (p)
4441 && JUMP_LABEL (p) != 0
4442 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4443 && (test = get_condition_for_loop (loop, p)) != 0
4444 && REG_P (XEXP (test, 0))
4445 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4446 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4447 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4448 && bl->init_insn == 0)
4450 /* If an NE test, we have an initial value! */
4451 if (GET_CODE (test) == NE)
4453 bl->init_insn = p;
4454 bl->init_set = gen_rtx_SET (VOIDmode,
4455 XEXP (test, 0), XEXP (test, 1));
4457 else
4458 bl->initial_test = test;
4464 /* Look at each biv and see if we can say anything better about its
4465 initial value from any initializing insns set up above. (This is done
4466 in two passes to avoid missing SETs in a PARALLEL.) */
4467 static void
4468 loop_bivs_check (struct loop *loop)
4470 struct loop_ivs *ivs = LOOP_IVS (loop);
4471 /* Temporary list pointers for traversing ivs->list. */
4472 struct iv_class *bl;
4473 struct iv_class **backbl;
4475 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4477 rtx src;
4478 rtx note;
4480 if (! bl->init_insn)
4481 continue;
4483 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4484 is a constant, use that value. */
4485 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4486 && CONSTANT_P (XEXP (note, 0)))
4487 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4488 && CONSTANT_P (XEXP (note, 0))))
4489 src = XEXP (note, 0);
4490 else
4491 src = SET_SRC (bl->init_set);
4493 if (loop_dump_stream)
4494 fprintf (loop_dump_stream,
4495 "Biv %d: initialized at insn %d: initial value ",
4496 bl->regno, INSN_UID (bl->init_insn));
4498 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4499 || GET_MODE (src) == VOIDmode)
4500 && valid_initial_value_p (src, bl->init_insn,
4501 LOOP_INFO (loop)->pre_header_has_call,
4502 loop->start))
4504 bl->initial_value = src;
4506 if (loop_dump_stream)
4508 print_simple_rtl (loop_dump_stream, src);
4509 fputc ('\n', loop_dump_stream);
4512 /* If we can't make it a giv,
4513 let the biv keep its initial value of "itself". */
4514 else if (loop_dump_stream)
4515 fprintf (loop_dump_stream, "is complex\n");
4520 /* Search the loop for general induction variables. */
4522 static void
4523 loop_givs_find (struct loop* loop)
4525 for_each_insn_in_loop (loop, check_insn_for_givs);
4529 /* For each giv for which we still don't know whether or not it is
4530 replaceable, check to see if it is replaceable because its final value
4531 can be calculated. */
4533 static void
4534 loop_givs_check (struct loop *loop)
4536 struct loop_ivs *ivs = LOOP_IVS (loop);
4537 struct iv_class *bl;
4539 for (bl = ivs->list; bl; bl = bl->next)
4541 struct induction *v;
4543 for (v = bl->giv; v; v = v->next_iv)
4544 if (! v->replaceable && ! v->not_replaceable)
4545 check_final_value (loop, v);
4550 /* Return nonzero if it is possible to eliminate the biv BL provided
4551 all givs are reduced. This is possible if either the reg is not
4552 used outside the loop, or we can compute what its final value will
4553 be. */
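/* For example (illustrative): a counter that is used only by its own
   increment, by givs that have all been strength reduced, and by the loop's
   exit test can be eliminated once maybe_eliminate_biv rewrites that exit
   test in terms of one of the reduced givs.  */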
4555 static int
4556 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
4557 int threshold, int insn_count)
4559 /* For architectures with a decrement_and_branch_until_zero insn,
4560 don't do this if we put a REG_NONNEG note on the endtest for this
4561 biv. */
4563 #ifdef HAVE_decrement_and_branch_until_zero
4564 if (bl->nonneg)
4566 if (loop_dump_stream)
4567 fprintf (loop_dump_stream,
4568 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4569 return 0;
4571 #endif
4573 /* Check whether the biv is used outside the loop or has a final value.
4574 Compare against bl->init_insn rather than loop->start. We aren't
4575 concerned with any uses of the biv between init_insn and
4576 loop->start since these won't be affected by the value of the biv
4577 elsewhere in the function, so long as init_insn doesn't use the
4578 biv itself. */
4580 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4581 && bl->init_insn
4582 && INSN_UID (bl->init_insn) < max_uid_for_loop
4583 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4584 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4585 || (bl->final_value = final_biv_value (loop, bl)))
4586 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4588 if (loop_dump_stream)
4590 fprintf (loop_dump_stream,
4591 "Cannot eliminate biv %d.\n",
4592 bl->regno);
4593 fprintf (loop_dump_stream,
4594 "First use: insn %d, last use: insn %d.\n",
4595 REGNO_FIRST_UID (bl->regno),
4596 REGNO_LAST_UID (bl->regno));
4598 return 0;
4602 /* Reduce each giv of BL that we have decided to reduce. */
4604 static void
4605 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
4607 struct induction *v;
4609 for (v = bl->giv; v; v = v->next_iv)
4611 struct induction *tv;
4612 if (! v->ignore && v->same == 0)
4614 int auto_inc_opt = 0;
4616 /* If the code for derived givs immediately below has already
4617 allocated a new_reg, we must keep it. */
4618 if (! v->new_reg)
4619 v->new_reg = gen_reg_rtx (v->mode);
4621 #ifdef AUTO_INC_DEC
4622 /* If the target has auto-increment addressing modes, and
4623 this is an address giv, then try to put the increment
4624 immediately after its use, so that flow can create an
4625 auto-increment addressing mode. */
4626 /* Don't do this for loops entered at the bottom, to avoid
4627 this invalid transformation:
4628 jmp L; -> jmp L;
4629 TOP: TOP:
4630 use giv use giv
4631 L: inc giv
4632 inc biv L:
4633 test biv test giv
4634 cbr TOP cbr TOP
4636 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4637 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4638 /* We don't handle reversed biv's because bl->biv->insn
4639 does not have a valid INSN_LUID. */
4640 && ! bl->reversed
4641 && v->always_executed && ! v->maybe_multiple
4642 && INSN_UID (v->insn) < max_uid_for_loop
4643 && !loop->top)
4645 /* If other giv's have been combined with this one, then
4646 this will work only if all uses of the other giv's occur
4647 before this giv's insn. This is difficult to check.
4649 We simplify this by looking for the common case where
4650 there is one DEST_REG giv, and this giv's insn is the
4651 last use of the dest_reg of that DEST_REG giv. If the
4652 increment occurs after the address giv, then we can
4653 perform the optimization. (Otherwise, the increment
4654 would have to go before other_giv, and we would not be
4655 able to combine it with the address giv to get an
4656 auto-inc address.) */
4657 if (v->combined_with)
4659 struct induction *other_giv = 0;
4661 for (tv = bl->giv; tv; tv = tv->next_iv)
4662 if (tv->same == v)
4664 if (other_giv)
4665 break;
4666 else
4667 other_giv = tv;
4669 if (! tv && other_giv
4670 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4671 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4672 == INSN_UID (v->insn))
4673 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4674 auto_inc_opt = 1;
4676 /* Check for case where increment is before the address
4677 giv. Do this test in "loop order". */
4678 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4679 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4680 || (INSN_LUID (bl->biv->insn)
4681 > INSN_LUID (loop->scan_start))))
4682 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4683 && (INSN_LUID (loop->scan_start)
4684 < INSN_LUID (bl->biv->insn))))
4685 auto_inc_opt = -1;
4686 else
4687 auto_inc_opt = 1;
4689 #ifdef HAVE_cc0
4691 rtx prev;
4693 /* We can't put an insn immediately after one setting
4694 cc0, or immediately before one using cc0. */
4695 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4696 || (auto_inc_opt == -1
4697 && (prev = prev_nonnote_insn (v->insn)) != 0
4698 && INSN_P (prev)
4699 && sets_cc0_p (PATTERN (prev))))
4700 auto_inc_opt = 0;
4702 #endif
4704 if (auto_inc_opt)
4705 v->auto_inc_opt = 1;
4707 #endif
4709 /* For each place where the biv is incremented, add an insn
4710 to increment the new, reduced reg for the giv. */
4711 for (tv = bl->biv; tv; tv = tv->next_iv)
4713 rtx insert_before;
4715 /* Skip if location is the same as a previous one. */
4716 if (tv->same)
4717 continue;
4718 if (! auto_inc_opt)
4719 insert_before = NEXT_INSN (tv->insn);
4720 else if (auto_inc_opt == 1)
4721 insert_before = NEXT_INSN (v->insn);
4722 else
4723 insert_before = v->insn;
4725 if (tv->mult_val == const1_rtx)
4726 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4727 v->new_reg, v->new_reg,
4728 0, insert_before);
4729 else /* tv->mult_val == const0_rtx */
4730 /* A multiply is acceptable here
4731 since this is presumed to be seldom executed. */
4732 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4733 v->add_val, v->new_reg,
4734 0, insert_before);
4737 /* Add code at loop start to initialize giv's reduced reg. */
4739 loop_iv_add_mult_hoist (loop,
4740 extend_value_for_giv (v, bl->initial_value),
4741 v->mult_val, v->add_val, v->new_reg);
4747 /* Check for givs whose first use is their definition and whose
4748 last use is the definition of another giv. If so, it is likely
4749 dead and should not be used to derive another giv nor to
4750 eliminate a biv. */
4752 static void
4753 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
4755 struct induction *v;
4757 for (v = bl->giv; v; v = v->next_iv)
4759 if (v->ignore
4760 || (v->same && v->same->ignore))
4761 continue;
4763 if (v->giv_type == DEST_REG
4764 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4766 struct induction *v1;
4768 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4769 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4770 v->maybe_dead = 1;
4776 static void
4777 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
4779 struct induction *v;
4781 for (v = bl->giv; v; v = v->next_iv)
4783 if (v->same && v->same->ignore)
4784 v->ignore = 1;
4786 if (v->ignore)
4787 continue;
4789 /* Update expression if this was combined, in case other giv was
4790 replaced. */
4791 if (v->same)
4792 v->new_reg = replace_rtx (v->new_reg,
4793 v->same->dest_reg, v->same->new_reg);
4795 /* See if this register is known to be a pointer to something. If
4796 so, see if we can find the alignment. First see if there is a
4797 destination register that is a pointer. If so, this shares the
4798 alignment too. Next see if we can deduce anything from the
4799 computational information. If not, and this is a DEST_ADDR
4800 giv, at least we know that it's a pointer, though we don't know
4801 the alignment. */
4802 if (REG_P (v->new_reg)
4803 && v->giv_type == DEST_REG
4804 && REG_POINTER (v->dest_reg))
4805 mark_reg_pointer (v->new_reg,
4806 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4807 else if (REG_P (v->new_reg)
4808 && REG_POINTER (v->src_reg))
4810 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4812 if (align == 0
4813 || GET_CODE (v->add_val) != CONST_INT
4814 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4815 align = 0;
4817 mark_reg_pointer (v->new_reg, align);
4819 else if (REG_P (v->new_reg)
4820 && REG_P (v->add_val)
4821 && REG_POINTER (v->add_val))
4823 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4825 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4826 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4827 align = 0;
4829 mark_reg_pointer (v->new_reg, align);
4831 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
4832 mark_reg_pointer (v->new_reg, 0);
4834 if (v->giv_type == DEST_ADDR)
4835 /* Store reduced reg as the address in the memref where we found
4836 this giv. */
4837 validate_change (v->insn, v->location, v->new_reg, 0);
4838 else if (v->replaceable)
4840 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4842 else
4844 rtx original_insn = v->insn;
4845 rtx note;
4847 /* Not replaceable; emit an insn to set the original giv reg from
4848 the reduced giv, same as above. */
4849 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4850 gen_move_insn (v->dest_reg,
4851 v->new_reg));
4853 /* The original insn may have a REG_EQUAL note. This note is
4854 now incorrect and may result in invalid substitutions later.
4855 The original insn is dead, but may be part of a libcall
4856 sequence, which doesn't seem worth the bother of handling. */
4857 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4858 if (note)
4859 remove_note (original_insn, note);
4862 /* When a loop is reversed, givs which depend on the reversed
4863 biv, and which are live outside the loop, must be set to their
4864 correct final value. This insn is only needed if the giv is
4865 not replaceable. The correct final value is the same as the
4866 value that the giv starts the reversed loop with. */
4867 if (bl->reversed && ! v->replaceable)
4868 loop_iv_add_mult_sink (loop,
4869 extend_value_for_giv (v, bl->initial_value),
4870 v->mult_val, v->add_val, v->dest_reg);
4871 else if (v->final_value)
4872 loop_insn_sink_or_swim (loop,
4873 gen_load_of_final_value (v->dest_reg,
4874 v->final_value));
4876 if (loop_dump_stream)
4878 fprintf (loop_dump_stream, "giv at %d reduced to ",
4879 INSN_UID (v->insn));
4880 print_simple_rtl (loop_dump_stream, v->new_reg);
4881 fprintf (loop_dump_stream, "\n");
4887 static int
4888 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
4889 struct iv_class *bl, struct induction *v,
4890 rtx test_reg)
4892 int add_cost;
4893 int benefit;
4895 benefit = v->benefit;
4896 PUT_MODE (test_reg, v->mode);
4897 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4898 test_reg, test_reg);
4900 /* Reduce benefit if not replaceable, since we will insert a
4901 move-insn to replace the insn that calculates this giv. Don't do
4902 this unless the giv is a user variable, since it will often be
4903 marked non-replaceable because of the duplication of the exit
4904 code outside the loop. In such a case, the copies we insert are
4905 dead and will be deleted. So they don't have a cost. Similar
4906 situations exist. */
4907 /* ??? The new final_[bg]iv_value code does a much better job of
4908 finding replaceable giv's, and hence this code may no longer be
4909 necessary. */
4910 if (! v->replaceable && ! bl->eliminable
4911 && REG_USERVAR_P (v->dest_reg))
4912 benefit -= copy_cost;
4914 /* Decrease the benefit to count the add-insns that we will insert
4915 to increment the reduced reg for the giv. ??? This can
4916 overestimate the run-time cost of the additional insns, e.g. if
4917 there are multiple basic blocks that increment the biv, but only
4918 one of these blocks is executed during each iteration. There is
4919 no good way to detect cases like this with the current structure
4920 of the loop optimizer. This code is more accurate for
4921 determining code size than run-time benefits. */
4922 benefit -= add_cost * bl->biv_count;
4924 /* Decide whether to strength-reduce this giv or to leave the code
4925 unchanged (recompute it from the biv each time it is used). This
4926 decision can be made independently for each giv. */
4928 #ifdef AUTO_INC_DEC
4929 /* Attempt to guess whether autoincrement will handle some of the
4930 new add insns; if so, increase BENEFIT (undo the subtraction of
4931 add_cost that was done above). */
4932 if (v->giv_type == DEST_ADDR
4933 /* Increasing the benefit is risky, since this is only a guess.
4934 Avoid increasing register pressure in cases where there would
4935 be no other benefit from reducing this giv. */
4936 && benefit > 0
4937 && GET_CODE (v->mult_val) == CONST_INT)
4939 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4941 if (HAVE_POST_INCREMENT
4942 && INTVAL (v->mult_val) == size)
4943 benefit += add_cost * bl->biv_count;
4944 else if (HAVE_PRE_INCREMENT
4945 && INTVAL (v->mult_val) == size)
4946 benefit += add_cost * bl->biv_count;
4947 else if (HAVE_POST_DECREMENT
4948 && -INTVAL (v->mult_val) == size)
4949 benefit += add_cost * bl->biv_count;
4950 else if (HAVE_PRE_DECREMENT
4951 && -INTVAL (v->mult_val) == size)
4952 benefit += add_cost * bl->biv_count;
4954 #endif
4956 return benefit;
4960 /* Free IV structures for LOOP. */
4962 static void
4963 loop_ivs_free (struct loop *loop)
4965 struct loop_ivs *ivs = LOOP_IVS (loop);
4966 struct iv_class *iv = ivs->list;
4968 free (ivs->regs);
4970 while (iv)
4972 struct iv_class *next = iv->next;
4973 struct induction *induction;
4974 struct induction *next_induction;
4976 for (induction = iv->biv; induction; induction = next_induction)
4978 next_induction = induction->next_iv;
4979 free (induction);
4981 for (induction = iv->giv; induction; induction = next_induction)
4983 next_induction = induction->next_iv;
4984 free (induction);
4987 free (iv);
4988 iv = next;
4993 /* Perform strength reduction and induction variable elimination.
4995 Pseudo registers created during this function will be beyond the
4996 last valid index in several tables including
4997 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4998 problem here, because the added registers cannot be givs outside of
4999 their loop, and hence will never be reconsidered. But scan_loop
5000 must check regnos to make sure they are in bounds. */
5002 static void
5003 strength_reduce (struct loop *loop, int flags)
5005 struct loop_info *loop_info = LOOP_INFO (loop);
5006 struct loop_regs *regs = LOOP_REGS (loop);
5007 struct loop_ivs *ivs = LOOP_IVS (loop);
5008 rtx p;
5009 /* Temporary list pointer for traversing ivs->list. */
5010 struct iv_class *bl;
5011 /* Ratio of extra register life span we can justify
5012 for saving an instruction. It is higher if the loop doesn't call
5013 subroutines, since in that case saving an insn makes more difference
5014 and more registers are available. */
5015 /* ??? Could set this to the last value of threshold in move_movables. */
5016 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
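/* Illustrative numbers: on a target with, say, 29 non-fixed registers a
   call-free loop gets threshold = 2 * (3 + 29) = 64, and a loop containing a
   call gets half of that; the value feeds the
   v->lifetime * threshold * benefit < insn_count test below.  */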
5017 /* Map of pseudo-register replacements. */
5018 rtx *reg_map = NULL;
5019 int reg_map_size;
5020 int unrolled_insn_copies = 0;
5021 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5022 int insn_count = count_insns_in_loop (loop);
5024 addr_placeholder = gen_reg_rtx (Pmode);
5026 ivs->n_regs = max_reg_before_loop;
5027 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
5029 /* Find all BIVs in loop. */
5030 loop_bivs_find (loop);
5032 /* Exit if there are no bivs. */
5033 if (! ivs->list)
5035 /* We can still unroll the loop anyway, but indicate that there is no
5036 strength reduction info available. */
5037 if (flags & LOOP_UNROLL)
5038 unroll_loop (loop, insn_count, 0);
5040 loop_ivs_free (loop);
5041 return;
5044 /* Determine how BIVS are initialized by looking through pre-header
5045 extended basic block. */
5046 loop_bivs_init_find (loop);
5048 /* Look at each biv and see if we can say anything better about its
5049 initial value from any initializing insns set up above. */
5050 loop_bivs_check (loop);
5052 /* Search the loop for general induction variables. */
5053 loop_givs_find (loop);
5055 /* Try to calculate and save the number of loop iterations. This is
5056 set to zero if the actual number cannot be calculated. This must
5057 be called after all givs have been identified, since otherwise it may
5058 fail if the iteration variable is a giv. */
5059 loop_iterations (loop);
5061 #ifdef HAVE_prefetch
5062 if (flags & LOOP_PREFETCH)
5063 emit_prefetch_instructions (loop);
5064 #endif
5066 /* Now for each giv for which we still don't know whether or not it is
5067 replaceable, check to see if it is replaceable because its final value
5068 can be calculated. This must be done after loop_iterations is called,
5069 so that final_giv_value will work correctly. */
5070 loop_givs_check (loop);
5072 /* Try to prove that the loop counter variable (if any) is always
5073 nonnegative; if so, record that fact with a REG_NONNEG note
5074 so that "decrement and branch until zero" insn can be used. */
5075 check_dbra_loop (loop, insn_count);
5077 /* Create reg_map to hold substitutions for replaceable giv regs.
5078 Some givs might have been made from biv increments, so look at
5079 ivs->reg_iv_type for a suitable size. */
5080 reg_map_size = ivs->n_regs;
5081 reg_map = xcalloc (reg_map_size, sizeof (rtx));
5083 /* Examine each iv class for feasibility of strength reduction/induction
5084 variable elimination. */
5086 for (bl = ivs->list; bl; bl = bl->next)
5088 struct induction *v;
5089 int benefit;
5091 /* Test whether it will be possible to eliminate this biv
5092 provided all givs are reduced. */
5093 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5095 /* This will be true at the end, if all givs which depend on this
5096 biv have been strength reduced.
5097 We can't (currently) eliminate the biv unless this is so. */
5098 bl->all_reduced = 1;
5100 /* Check each extension dependent giv in this class to see if its
5101 root biv is safe from wrapping in the interior mode. */
5102 check_ext_dependent_givs (loop, bl);
5104 /* Combine all giv's for this iv_class. */
5105 combine_givs (regs, bl);
5107 for (v = bl->giv; v; v = v->next_iv)
5109 struct induction *tv;
5111 if (v->ignore || v->same)
5112 continue;
5114 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5116 /* If an insn is not to be strength reduced, then set its ignore
5117 flag, and clear bl->all_reduced. */
5119 /* A giv that depends on a reversed biv must be reduced if it is
5120 used after the loop exit; otherwise, it would have the wrong
5121 value after the loop exit. To keep it simple, just reduce all
5122 such givs whether or not we know they are used after the loop
5123 exit. */
5125 if (! flag_reduce_all_givs
5126 && v->lifetime * threshold * benefit < insn_count
5127 && ! bl->reversed)
5129 if (loop_dump_stream)
5130 fprintf (loop_dump_stream,
5131 "giv of insn %d not worth while, %d vs %d.\n",
5132 INSN_UID (v->insn),
5133 v->lifetime * threshold * benefit, insn_count);
5134 v->ignore = 1;
5135 bl->all_reduced = 0;
5137 else
5139 /* Check that we can increment the reduced giv without a
5140 multiply insn. If not, reject it. */
5142 for (tv = bl->biv; tv; tv = tv->next_iv)
5143 if (tv->mult_val == const1_rtx
5144 && ! product_cheap_p (tv->add_val, v->mult_val))
5146 if (loop_dump_stream)
5147 fprintf (loop_dump_stream,
5148 "giv of insn %d: would need a multiply.\n",
5149 INSN_UID (v->insn));
5150 v->ignore = 1;
5151 bl->all_reduced = 0;
5152 break;
5157 /* Check for givs whose first use is their definition and whose
5158 last use is the definition of another giv. If so, it is likely
5159 dead and should not be used to derive another giv nor to
5160 eliminate a biv. */
5161 loop_givs_dead_check (loop, bl);
5163 /* Reduce each giv that we decided to reduce. */
5164 loop_givs_reduce (loop, bl);
5166 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5167 as not reduced.
5169 For each giv register that can be reduced now: if replaceable,
5170 substitute reduced reg wherever the old giv occurs;
5171 else add new move insn "giv_reg = reduced_reg". */
5172 loop_givs_rescan (loop, bl, reg_map);
5174 /* All the givs based on the biv bl have been reduced if they
5175 merit it. */
5177 /* For each giv not marked as maybe dead that has been combined with a
5178 second giv, clear any "maybe dead" mark on that second giv.
5179 v->new_reg will either be or refer to the register of the giv it
5180 combined with.
5182 Doing this clearing avoids problems in biv elimination where
5183 a giv's new_reg is a complex value that can't be put in the
5184 insn but the giv combined with (with a reg as new_reg) is
5185 marked maybe_dead. Since the register will be used in either
5186 case, we'd prefer it be used from the simpler giv. */
5188 for (v = bl->giv; v; v = v->next_iv)
5189 if (! v->maybe_dead && v->same)
5190 v->same->maybe_dead = 0;
5192 /* Try to eliminate the biv, if it is a candidate.
5193 This won't work if ! bl->all_reduced,
5194 since the givs we planned to use might not have been reduced.
5196 We have to be careful that we didn't initially think we could
5197 eliminate this biv because of a giv that we now think may be
5198 dead and shouldn't be used as a biv replacement.
5200 Also, there is the possibility that we may have a giv that looks
5201 like it can be used to eliminate a biv, but the resulting insn
5202 isn't valid. This can happen, for example, on the 88k, where a
5203 JUMP_INSN can compare a register only with zero. Attempts to
5204 replace it with a compare with a constant will fail.
5206 Note that in cases where this call fails, we may have replaced some
5207 of the occurrences of the biv with a giv, but no harm was done in
5208 doing so in the rare cases where it can occur. */
5210 if (bl->all_reduced == 1 && bl->eliminable
5211 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5213 /* ?? If we created a new test to bypass the loop entirely,
5214 or otherwise drop straight in, based on this test, then
5215 we might want to rewrite it also. This way some later
5216 pass has more hope of removing the initialization of this
5217 biv entirely. */
5219 /* If final_value != 0, then the biv may be used after the loop ends
5220 and we must emit an insn to set it just in case.
5222 Reversed bivs already have an insn after the loop setting their
5223 value, so we don't need another one. We can't calculate the
5224 proper final value for such a biv here anyway. */
5225 if (bl->final_value && ! bl->reversed)
5226 loop_insn_sink_or_swim (loop,
5227 gen_load_of_final_value (bl->biv->dest_reg,
5228 bl->final_value));
5230 if (loop_dump_stream)
5231 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5232 bl->regno);
5234 /* See above note wrt final_value. But since we couldn't eliminate
5235 the biv, we must set the value after the loop instead of before. */
5236 else if (bl->final_value && ! bl->reversed)
5237 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5238 bl->final_value));
5241 /* Go through all the instructions in the loop, making all the
5242 register substitutions scheduled in REG_MAP. */
5244 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5245 if (INSN_P (p))
5247 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5248 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5249 INSN_CODE (p) = -1;
5252 if (loop_info->n_iterations > 0)
5254 /* When we completely unroll a loop we will likely not need the increment
5255 of the loop BIV and we will not need the conditional branch at the
5256 end of the loop. */
5257 unrolled_insn_copies = insn_count - 2;
5259 #ifdef HAVE_cc0
5260 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5261 need the comparison before the conditional branch at the end of the
5262 loop. */
5263 unrolled_insn_copies -= 1;
5264 #endif
5266 /* We'll need one copy for each loop iteration. */
5267 unrolled_insn_copies *= loop_info->n_iterations;
5269 /* A little slop to account for the ability to remove initialization
5270 code, better CSE, and other secondary benefits of completely
5271 unrolling some loops. */
5272 unrolled_insn_copies -= 1;
5274 /* Clamp the value. */
5275 if (unrolled_insn_copies < 0)
5276 unrolled_insn_copies = 0;
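/* Illustrative numbers (without HAVE_cc0): a 10-insn loop body known to
   iterate twice estimates (10 - 2) * 2 - 1 = 15 unrolled copies, more than
   the 10 rolled insns, so LOOP_AUTO_UNROLL leaves it alone; with a single
   known iteration the estimate is 7 and the loop is unrolled.  */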
5279 /* Unroll loops from within strength reduction so that we can use the
5280 induction variable information that strength_reduce has already
5281 collected. Always unroll loops that would be as small or smaller
5282 unrolled than when rolled. */
5283 if ((flags & LOOP_UNROLL)
5284 || ((flags & LOOP_AUTO_UNROLL)
5285 && loop_info->n_iterations > 0
5286 && unrolled_insn_copies <= insn_count))
5287 unroll_loop (loop, insn_count, 1);
5289 if (loop_dump_stream)
5290 fprintf (loop_dump_stream, "\n");
5292 loop_ivs_free (loop);
5293 if (reg_map)
5294 free (reg_map);
5297 /* Record all basic induction variables calculated in the insn. */
5298 static rtx
5299 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
5300 int maybe_multiple)
5302 struct loop_ivs *ivs = LOOP_IVS (loop);
5303 rtx set;
5304 rtx dest_reg;
5305 rtx inc_val;
5306 rtx mult_val;
5307 rtx *location;
5309 if (NONJUMP_INSN_P (p)
5310 && (set = single_set (p))
5311 && REG_P (SET_DEST (set)))
5313 dest_reg = SET_DEST (set);
5314 if (REGNO (dest_reg) < max_reg_before_loop
5315 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5316 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5318 if (basic_induction_var (loop, SET_SRC (set),
5319 GET_MODE (SET_SRC (set)),
5320 dest_reg, p, &inc_val, &mult_val,
5321 &location))
5323 /* It is a possible basic induction variable.
5324 Create and initialize an induction structure for it. */
5326 struct induction *v = xmalloc (sizeof (struct induction));
5328 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5329 not_every_iteration, maybe_multiple);
5330 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5332 else if (REGNO (dest_reg) < ivs->n_regs)
5333 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5336 return p;
5339 /* Record all givs calculated in the insn.
5340 A register is a giv if: it is only set once, it is a function of a
5341 biv and a constant (or invariant), and it is not a biv. */
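/* Example (illustrative): if register i is a biv stepped by 4 each iteration,
   an insn computing r = i * 8 + 32 makes r a DEST_REG giv with mult_val 8 and
   add_val 32, while the address inside a reference like MEM (i + 100) becomes
   a DEST_ADDR giv via find_mem_givs.  */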
5342 static rtx
5343 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
5344 int maybe_multiple)
5346 struct loop_regs *regs = LOOP_REGS (loop);
5348 rtx set;
5349 /* Look for a general induction variable in a register. */
5350 if (NONJUMP_INSN_P (p)
5351 && (set = single_set (p))
5352 && REG_P (SET_DEST (set))
5353 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5355 rtx src_reg;
5356 rtx dest_reg;
5357 rtx add_val;
5358 rtx mult_val;
5359 rtx ext_val;
5360 int benefit;
5361 rtx regnote = 0;
5362 rtx last_consec_insn;
5364 dest_reg = SET_DEST (set);
5365 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5366 return p;
5368 if (/* SET_SRC is a giv. */
5369 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5370 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5371 /* Equivalent expression is a giv. */
5372 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5373 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5374 &add_val, &mult_val, &ext_val, 0,
5375 &benefit, VOIDmode)))
5376 /* Don't try to handle any regs made by loop optimization.
5377 We have nothing on them in regno_first_uid, etc. */
5378 && REGNO (dest_reg) < max_reg_before_loop
5379 /* Don't recognize a BASIC_INDUCT_VAR here. */
5380 && dest_reg != src_reg
5381 /* This must be the only place where the register is set. */
5382 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5383 /* or all sets must be consecutive and make a giv. */
5384 || (benefit = consec_sets_giv (loop, benefit, p,
5385 src_reg, dest_reg,
5386 &add_val, &mult_val, &ext_val,
5387 &last_consec_insn))))
5389 struct induction *v = xmalloc (sizeof (struct induction));
5391 /* If this is a library call, increase benefit. */
5392 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5393 benefit += libcall_benefit (p);
5395 /* Skip the consecutive insns, if there are any. */
5396 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5397 p = last_consec_insn;
5399 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5400 ext_val, benefit, DEST_REG, not_every_iteration,
5401 maybe_multiple, (rtx*) 0);
5406 /* Look for givs which are memory addresses. */
5407 if (NONJUMP_INSN_P (p))
5408 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5409 maybe_multiple);
5411 /* Update the status of whether a giv can derive other givs. This can
5412 change when we pass a label or an insn that updates a biv. */
5413 if (INSN_P (p) || LABEL_P (p))
5414 update_giv_derive (loop, p);
5415 return p;
5418 /* Return 1 if X is a valid source for an initial value (or as value being
5419 compared against in an initial test).
5421 X must be either a register or constant and must not be clobbered between
5422 the current insn and the start of the loop.
5424 INSN is the insn containing X. */
5426 static int
5427 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
5429 if (CONSTANT_P (x))
5430 return 1;
5432 /* Only consider pseudos we know about initialized in insns whose luids
5433 we know. */
5434 if (!REG_P (x)
5435 || REGNO (x) >= max_reg_before_loop)
5436 return 0;
5438 /* Don't use call-clobbered registers across a call which clobbers them. On
5439 some machines, don't use any hard registers at all. */
5440 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5441 && (SMALL_REGISTER_CLASSES
5442 || (call_used_regs[REGNO (x)] && call_seen)))
5443 return 0;
5445 /* Don't use registers that have been clobbered before the start of the
5446 loop. */
5447 if (reg_set_between_p (x, insn, loop_start))
5448 return 0;
5450 return 1;
5453 /* Scan X for memory refs and check each memory address
5454 as a possible giv. INSN is the insn whose pattern X comes from.
5455 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5456 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5457 more than once in each loop iteration. */
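/* Illustrative sketch (not part of the pass itself): in a source loop such as

     for (i = 0; i < n; i++)
       a[i] = 0;

   the store's memory address is a + i*4 (assuming 4-byte elements), a linear
   function of the biv `i'.  find_mem_givs spots that address inside the MEM
   and records it as a DEST_ADDR giv with mult_val 4 and add_val a.  */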
5459 static void
5460 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
5461 int not_every_iteration, int maybe_multiple)
5463 int i, j;
5464 enum rtx_code code;
5465 const char *fmt;
5467 if (x == 0)
5468 return;
5470 code = GET_CODE (x);
5471 switch (code)
5473 case REG:
5474 case CONST_INT:
5475 case CONST:
5476 case CONST_DOUBLE:
5477 case SYMBOL_REF:
5478 case LABEL_REF:
5479 case PC:
5480 case CC0:
5481 case ADDR_VEC:
5482 case ADDR_DIFF_VEC:
5483 case USE:
5484 case CLOBBER:
5485 return;
5487 case MEM:
5489 rtx src_reg;
5490 rtx add_val;
5491 rtx mult_val;
5492 rtx ext_val;
5493 int benefit;
5495 /* This code used to disable creating GIVs with mult_val == 1 and
5496 add_val == 0. However, this leads to lost optimizations when
5497 it comes time to combine a set of related DEST_ADDR GIVs, since
5498 this one would not be seen. */
5500 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5501 &mult_val, &ext_val, 1, &benefit,
5502 GET_MODE (x)))
5504 /* Found one; record it. */
5505 struct induction *v = xmalloc (sizeof (struct induction));
5507 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5508 add_val, ext_val, benefit, DEST_ADDR,
5509 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5511 v->mem = x;
5514 return;
5516 default:
5517 break;
5520 /* Recursively scan the subexpressions for other mem refs. */
5522 fmt = GET_RTX_FORMAT (code);
5523 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5524 if (fmt[i] == 'e')
5525 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5526 maybe_multiple);
5527 else if (fmt[i] == 'E')
5528 for (j = 0; j < XVECLEN (x, i); j++)
5529 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5530 maybe_multiple);
5533 /* Fill in the data about one biv update.
5534 V is the `struct induction' in which we record the biv. (It is
5535 allocated by the caller, with xmalloc.)
5536 INSN is the insn that sets it.
5537 DEST_REG is the biv's reg.
5539 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5540 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5541 being set to INC_VAL.
5543 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5544 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5545 can be executed more than once per iteration. If MAYBE_MULTIPLE
5546 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5547 executed exactly once per iteration. */
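/* For illustration (hypothetical updates, not from any particular program):
   the biv update `i = i + 4' is recorded with INC_VAL == (const_int 4) and
   MULT_VAL == const1_rtx, whereas the plain assignment `i = 10' is recorded
   with INC_VAL == (const_int 10) and MULT_VAL == const0_rtx.  */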
5549 static void
5550 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
5551 rtx inc_val, rtx mult_val, rtx *location,
5552 int not_every_iteration, int maybe_multiple)
5554 struct loop_ivs *ivs = LOOP_IVS (loop);
5555 struct iv_class *bl;
5557 v->insn = insn;
5558 v->src_reg = dest_reg;
5559 v->dest_reg = dest_reg;
5560 v->mult_val = mult_val;
5561 v->add_val = inc_val;
5562 v->ext_dependent = NULL_RTX;
5563 v->location = location;
5564 v->mode = GET_MODE (dest_reg);
5565 v->always_computable = ! not_every_iteration;
5566 v->always_executed = ! not_every_iteration;
5567 v->maybe_multiple = maybe_multiple;
5568 v->same = 0;
5570 /* Add this to the reg's iv_class, creating a class
5571 if this is the first incrementation of the reg. */
5573 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5574 if (bl == 0)
5576 /* Create and initialize new iv_class. */
5578 bl = xmalloc (sizeof (struct iv_class));
5580 bl->regno = REGNO (dest_reg);
5581 bl->biv = 0;
5582 bl->giv = 0;
5583 bl->biv_count = 0;
5584 bl->giv_count = 0;
5586 /* Set initial value to the reg itself. */
5587 bl->initial_value = dest_reg;
5588 bl->final_value = 0;
5589 /* We haven't seen the initializing insn yet. */
5590 bl->init_insn = 0;
5591 bl->init_set = 0;
5592 bl->initial_test = 0;
5593 bl->incremented = 0;
5594 bl->eliminable = 0;
5595 bl->nonneg = 0;
5596 bl->reversed = 0;
5597 bl->total_benefit = 0;
5599 /* Add this class to ivs->list. */
5600 bl->next = ivs->list;
5601 ivs->list = bl;
5603 /* Put it in the array of biv register classes. */
5604 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5606 else
5608 /* Check if location is the same as a previous one. */
5609 struct induction *induction;
5610 for (induction = bl->biv; induction; induction = induction->next_iv)
5611 if (location == induction->location)
5613 v->same = induction;
5614 break;
5618 /* Update IV_CLASS entry for this biv. */
5619 v->next_iv = bl->biv;
5620 bl->biv = v;
5621 bl->biv_count++;
5622 if (mult_val == const1_rtx)
5623 bl->incremented = 1;
5625 if (loop_dump_stream)
5626 loop_biv_dump (v, loop_dump_stream, 0);
5629 /* Fill in the data about one giv.
5630 V is the `struct induction' in which we record the giv. (It is
5631 allocated by the caller, with xmalloc.)
5632 INSN is the insn that sets it.
5633 BENEFIT estimates the savings from deleting this insn.
5634 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5635 into a register or is used as a memory address.
5637 SRC_REG is the biv reg which the giv is computed from.
5638 DEST_REG is the giv's reg (if the giv is stored in a reg).
5639 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5640 LOCATION points to the place where this giv's value appears in INSN. */
5642 static void
5643 record_giv (const struct loop *loop, struct induction *v, rtx insn,
5644 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
5645 rtx ext_val, int benefit, enum g_types type,
5646 int not_every_iteration, int maybe_multiple, rtx *location)
5648 struct loop_ivs *ivs = LOOP_IVS (loop);
5649 struct induction *b;
5650 struct iv_class *bl;
5651 rtx set = single_set (insn);
5652 rtx temp;
5654 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5655 undo the MULT canonicalization that we performed earlier. */
5656 temp = simplify_rtx (add_val);
5657 if (temp
5658 && ! (GET_CODE (add_val) == MULT
5659 && GET_CODE (temp) == ASHIFT))
5660 add_val = temp;
5662 v->insn = insn;
5663 v->src_reg = src_reg;
5664 v->giv_type = type;
5665 v->dest_reg = dest_reg;
5666 v->mult_val = mult_val;
5667 v->add_val = add_val;
5668 v->ext_dependent = ext_val;
5669 v->benefit = benefit;
5670 v->location = location;
5671 v->cant_derive = 0;
5672 v->combined_with = 0;
5673 v->maybe_multiple = maybe_multiple;
5674 v->maybe_dead = 0;
5675 v->derive_adjustment = 0;
5676 v->same = 0;
5677 v->ignore = 0;
5678 v->new_reg = 0;
5679 v->final_value = 0;
5680 v->same_insn = 0;
5681 v->auto_inc_opt = 0;
5682 v->unrolled = 0;
5683 v->shared = 0;
5685 /* The v->always_computable field is used in update_giv_derive, to
5686 determine whether a giv can be used to derive another giv. For a
5687 DEST_REG giv, INSN computes a new value for the giv, so its value
5688 isn't computable if INSN isn't executed every iteration.
5689 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5690 it does not compute a new value. Hence the value is always computable
5691 regardless of whether INSN is executed each iteration. */
5693 if (type == DEST_ADDR)
5694 v->always_computable = 1;
5695 else
5696 v->always_computable = ! not_every_iteration;
5698 v->always_executed = ! not_every_iteration;
5700 if (type == DEST_ADDR)
5702 v->mode = GET_MODE (*location);
5703 v->lifetime = 1;
5705 else /* type == DEST_REG */
5707 v->mode = GET_MODE (SET_DEST (set));
5709 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5711 /* If the lifetime is zero, it means that this register is
5712 really a dead store. So mark this as a giv that can be
5713 ignored. This will not prevent the biv from being eliminated. */
5714 if (v->lifetime == 0)
5715 v->ignore = 1;
5717 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5718 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5721 /* Add the giv to the class of givs computed from one biv. */
5723 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5724 if (bl)
5726 v->next_iv = bl->giv;
5727 bl->giv = v;
5728 /* Don't count DEST_ADDR. This is supposed to count the number of
5729 insns that calculate givs. */
5730 if (type == DEST_REG)
5731 bl->giv_count++;
5732 bl->total_benefit += benefit;
5734 else
5735 /* Fatal error, biv missing for this giv? */
5736 abort ();
5738 if (type == DEST_ADDR)
5740 v->replaceable = 1;
5741 v->not_replaceable = 0;
5743 else
5745 /* The giv can be replaced outright by the reduced register only if all
5746 of the following conditions are true:
5747 - the insn that sets the giv is always executed on any iteration
5748 on which the giv is used at all
5749 (there are two ways to deduce this:
5750 either the insn is executed on every iteration,
5751 or all uses follow that insn in the same basic block),
5752 - the giv is not used outside the loop
5753 - no assignments to the biv occur during the giv's lifetime. */
5755 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5756 /* Previous line always fails if INSN was moved by loop opt. */
5757 && REGNO_LAST_LUID (REGNO (dest_reg))
5758 < INSN_LUID (loop->end)
5759 && (! not_every_iteration
5760 || last_use_this_basic_block (dest_reg, insn)))
5762 /* Now check that there are no assignments to the biv within the
5763 giv's lifetime. This requires two separate checks. */
5765 /* Check each biv update, and fail if any are between the first
5766 and last use of the giv.
5768 If this loop contains an inner loop that was unrolled, then
5769 the insn modifying the biv may have been emitted by the loop
5770 unrolling code, and hence does not have a valid luid. Just
5771 mark the biv as not replaceable in this case. It is not very
5772 useful as a biv, because it is used in two different loops.
5773 It is very unlikely that we would be able to optimize the giv
5774 using this biv anyway. */
5776 v->replaceable = 1;
5777 v->not_replaceable = 0;
5778 for (b = bl->biv; b; b = b->next_iv)
5780 if (INSN_UID (b->insn) >= max_uid_for_loop
5781 || ((INSN_LUID (b->insn)
5782 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5783 && (INSN_LUID (b->insn)
5784 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5786 v->replaceable = 0;
5787 v->not_replaceable = 1;
5788 break;
5792 /* If there are any backwards branches that go from after the
5793 biv update to before it, then this giv is not replaceable. */
5794 if (v->replaceable)
5795 for (b = bl->biv; b; b = b->next_iv)
5796 if (back_branch_in_range_p (loop, b->insn))
5798 v->replaceable = 0;
5799 v->not_replaceable = 1;
5800 break;
5803 else
5805 /* May still be replaceable, we don't have enough info here to
5806 decide. */
5807 v->replaceable = 0;
5808 v->not_replaceable = 0;
5812 /* Record whether the add_val contains a const_int, for later use by
5813 combine_givs. */
5815 rtx tem = add_val;
5817 v->no_const_addval = 1;
5818 if (tem == const0_rtx)
5820 else if (CONSTANT_P (add_val))
5821 v->no_const_addval = 0;
5822 if (GET_CODE (tem) == PLUS)
5824 while (1)
5826 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5827 tem = XEXP (tem, 0);
5828 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5829 tem = XEXP (tem, 1);
5830 else
5831 break;
5833 if (CONSTANT_P (XEXP (tem, 1)))
5834 v->no_const_addval = 0;
5838 if (loop_dump_stream)
5839 loop_giv_dump (v, loop_dump_stream, 0);
5842 /* All this does is determine whether a giv can be made replaceable because
5843 its final value can be calculated. This code can not be part of record_giv
5844 above, because final_giv_value requires that the number of loop iterations
5845 be known, and that can not be accurately calculated until after all givs
5846 have been identified. */
5848 static void
5849 check_final_value (const struct loop *loop, struct induction *v)
5851 rtx final_value = 0;
5853 /* DEST_ADDR givs will never reach here, because they are always marked
5854 replaceable above in record_giv. */
5856 /* The giv can be replaced outright by the reduced register only if all
5857 of the following conditions are true:
5858 - the insn that sets the giv is always executed on any iteration
5859 on which the giv is used at all
5860 (there are two ways to deduce this:
5861 either the insn is executed on every iteration,
5862 or all uses follow that insn in the same basic block),
5863 - its final value can be calculated (this condition is different
5864 than the one above in record_giv)
5865 - it is not used before it is set
5866 - no assignments to the biv occur during the giv's lifetime. */
5868 #if 0
5869 /* This is only called now when replaceable is known to be false. */
5870 /* Clear replaceable, so that it won't confuse final_giv_value. */
5871 v->replaceable = 0;
5872 #endif
5874 if ((final_value = final_giv_value (loop, v))
5875 && (v->always_executed
5876 || last_use_this_basic_block (v->dest_reg, v->insn)))
5878 int biv_increment_seen = 0, before_giv_insn = 0;
5879 rtx p = v->insn;
5880 rtx last_giv_use;
5882 v->replaceable = 1;
5883 v->not_replaceable = 0;
5885 /* When trying to determine whether or not a biv increment occurs
5886 during the lifetime of the giv, we can ignore uses of the variable
5887 outside the loop because final_value is true. Hence we can not
5888 use regno_last_uid and regno_first_uid as above in record_giv. */
5890 /* Search the loop to determine whether any assignments to the
5891 biv occur during the giv's lifetime. Start with the insn
5892 that sets the giv, and search around the loop until we come
5893 back to that insn again.
5895 Also fail if there is a jump within the giv's lifetime that jumps
5896 to somewhere outside the lifetime but still within the loop. This
5897 catches spaghetti code where the execution order is not linear, and
5898 hence the above test fails. Here we assume that the giv lifetime
5899 does not extend from one iteration of the loop to the next, so as
5900 to make the test easier. Since the lifetime isn't known yet,
5901 this requires two loops. See also record_giv above. */
5903 last_giv_use = v->insn;
5905 while (1)
5907 p = NEXT_INSN (p);
5908 if (p == loop->end)
5910 before_giv_insn = 1;
5911 p = NEXT_INSN (loop->start);
5913 if (p == v->insn)
5914 break;
5916 if (INSN_P (p))
5918 /* It is possible for the BIV increment to use the GIV if we
5919 have a cycle. Thus we must be sure to check each insn for
5920 both BIV and GIV uses, and we must check for BIV uses
5921 first. */
5923 if (! biv_increment_seen
5924 && reg_set_p (v->src_reg, PATTERN (p)))
5925 biv_increment_seen = 1;
5927 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5929 if (biv_increment_seen || before_giv_insn)
5931 v->replaceable = 0;
5932 v->not_replaceable = 1;
5933 break;
5935 last_giv_use = p;
5940 /* Now that the lifetime of the giv is known, check for branches
5941 from within the lifetime to outside the lifetime if it is still
5942 replaceable. */
5944 if (v->replaceable)
5946 p = v->insn;
5947 while (1)
5949 p = NEXT_INSN (p);
5950 if (p == loop->end)
5951 p = NEXT_INSN (loop->start);
5952 if (p == last_giv_use)
5953 break;
5955 if (JUMP_P (p) && JUMP_LABEL (p)
5956 && LABEL_NAME (JUMP_LABEL (p))
5957 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5958 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5959 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5960 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5962 v->replaceable = 0;
5963 v->not_replaceable = 1;
5965 if (loop_dump_stream)
5966 fprintf (loop_dump_stream,
5967 "Found branch outside giv lifetime.\n");
5969 break;
5974 /* If it is replaceable, then save the final value. */
5975 if (v->replaceable)
5976 v->final_value = final_value;
5979 if (loop_dump_stream && v->replaceable)
5980 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5981 INSN_UID (v->insn), REGNO (v->dest_reg));
5984 /* Update the status of whether a giv can derive other givs.
5986 We need to do something special if there is or may be an update to the biv
5987 between the time the giv is defined and the time it is used to derive
5988 another giv.
5990 In addition, a giv that is only conditionally set is not allowed to
5991 derive another giv once a label has been passed.
5993 The cases we look at are when a label or an update to a biv is passed. */
5995 static void
5996 update_giv_derive (const struct loop *loop, rtx p)
5998 struct loop_ivs *ivs = LOOP_IVS (loop);
5999 struct iv_class *bl;
6000 struct induction *biv, *giv;
6001 rtx tem;
6002 int dummy;
6004 /* Search all IV classes, then all bivs, and finally all givs.
6006 There are three cases we are concerned with. First we have the situation
6007 of a giv that is only updated conditionally. In that case, it may not
6008 derive any givs after a label is passed.
6010 The second case is when a biv update occurs, or may occur, after the
6011 definition of a giv. For certain biv updates (see below) that are
6012 known to occur between the giv definition and use, we can adjust the
6013 giv definition. For others, or when the biv update is conditional,
6014 we must prevent the giv from deriving any other givs. There are two
6015 sub-cases within this case.
6017 If this is a label, we are concerned with any biv update that is done
6018 conditionally, since it may be done after the giv is defined followed by
6019 a branch here (actually, we need to pass both a jump and a label, but
6020 this extra tracking doesn't seem worth it).
6022 If this is a jump, we are concerned about any biv update that may be
6023 executed multiple times. We are actually only concerned about
6024 backward jumps, but it is probably not worth performing the test
6025 on the jump again here.
6027 If this is a biv update, we must adjust the giv status to show that a
6028 subsequent biv update was performed. If this adjustment cannot be done,
6029 the giv cannot derive further givs. */
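/* Worked example (illustrative figures only): suppose giv G was recorded as
   G = 3*B + 5 and we now pass the biv update `B = B + 2'.  Anything derived
   from G after this point sees the larger B, so G must be re-expressed as
   3*B + 5 - 6.  The loop below records that compensation: with
   biv->mult_val == const1_rtx, simplify_giv_expr of
   (mult biv->add_val giv->mult_val) yields (const_int 6), which is stored
   in G's derive_adjustment; if the product cannot be formed, G simply
   cannot derive further givs.  */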
6031 for (bl = ivs->list; bl; bl = bl->next)
6032 for (biv = bl->biv; biv; biv = biv->next_iv)
6033 if (LABEL_P (p) || JUMP_P (p)
6034 || biv->insn == p)
6036 /* Skip if location is the same as a previous one. */
6037 if (biv->same)
6038 continue;
6040 for (giv = bl->giv; giv; giv = giv->next_iv)
6042 /* If cant_derive is already true, there is no point in
6043 checking all of these conditions again. */
6044 if (giv->cant_derive)
6045 continue;
6047 /* If this giv is conditionally set and we have passed a label,
6048 it cannot derive anything. */
6049 if (LABEL_P (p) && ! giv->always_computable)
6050 giv->cant_derive = 1;
6052 /* Skip givs that have mult_val == 0, since
6053 they are really invariants. Also skip those that are
6054 replaceable, since we know their lifetime doesn't contain
6055 any biv update. */
6056 else if (giv->mult_val == const0_rtx || giv->replaceable)
6057 continue;
6059 /* The only way we can allow this giv to derive another
6060 is if this is a biv increment and we can form the product
6061 of biv->add_val and giv->mult_val. In this case, we will
6062 be able to compute a compensation. */
6063 else if (biv->insn == p)
6065 rtx ext_val_dummy;
6067 tem = 0;
6068 if (biv->mult_val == const1_rtx)
6069 tem = simplify_giv_expr (loop,
6070 gen_rtx_MULT (giv->mode,
6071 biv->add_val,
6072 giv->mult_val),
6073 &ext_val_dummy, &dummy);
6075 if (tem && giv->derive_adjustment)
6076 tem = simplify_giv_expr
6077 (loop,
6078 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6079 &ext_val_dummy, &dummy);
6081 if (tem)
6082 giv->derive_adjustment = tem;
6083 else
6084 giv->cant_derive = 1;
6086 else if ((LABEL_P (p) && ! biv->always_computable)
6087 || (JUMP_P (p) && biv->maybe_multiple))
6088 giv->cant_derive = 1;
6093 /* Check whether an insn is an increment legitimate for a basic induction var.
6094 X is the source of insn P, or a part of it.
6095 MODE is the mode in which X should be interpreted.
6097 DEST_REG is the putative biv, also the destination of the insn.
6098 We accept patterns of these forms:
6099 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6100 REG = INVARIANT + REG
6102 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6103 store the additive term into *INC_VAL, and store the place where
6104 we found the additive term into *LOCATION.
6106 If X is an assignment of an invariant into DEST_REG, we set
6107 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6109 We also want to detect a BIV when it corresponds to a variable
6110 whose mode was promoted. In that case, an increment
6111 of the variable may be a PLUS that adds a SUBREG of that variable to
6112 an invariant and then sign- or zero-extends the result of the PLUS
6113 into the variable.
6115 Most GIVs in such cases will be in the promoted mode, since that is
6116 probably the natural computation mode (and almost certainly the mode
6117 used for addresses) on the machine. So we view the pseudo-reg containing
6118 the variable as the BIV, as if it were simply incremented.
6120 Note that treating the entire pseudo as a BIV will result in making
6121 simple increments to any GIVs based on it. However, if the variable
6122 overflows in its declared mode but not its promoted mode, the result will
6123 be incorrect. This is acceptable if the variable is signed, since
6124 overflows in such cases are undefined, but not if it is unsigned, since
6125 those overflows are defined. So we only check for SIGN_EXTEND and
6126 not ZERO_EXTEND.
6128 If we cannot find a biv, we return 0. */
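/* Illustrative example (schematic RTL, not from any particular port): the C
   update `i = i + 4' gives an insn pattern roughly of the form

     (set (reg i) (plus (reg i) (const_int 4)))

   Here X is the PLUS, DEST_REG is (reg i), and we return 1 with
   *MULT_VAL == const1_rtx, *INC_VAL == (const_int 4) and *LOCATION pointing
   at the operand that holds the invariant.  For `i = 10' in the innermost
   loop we would instead return 1 with *MULT_VAL == const0_rtx and
   *INC_VAL == (const_int 10).  */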
6130 static int
6131 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
6132 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
6133 rtx **location)
6135 enum rtx_code code;
6136 rtx *argp, arg;
6137 rtx insn, set = 0, last, inc;
6139 code = GET_CODE (x);
6140 *location = NULL;
6141 switch (code)
6143 case PLUS:
6144 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6145 || (GET_CODE (XEXP (x, 0)) == SUBREG
6146 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6147 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6149 argp = &XEXP (x, 1);
6151 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6152 || (GET_CODE (XEXP (x, 1)) == SUBREG
6153 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6154 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6156 argp = &XEXP (x, 0);
6158 else
6159 return 0;
6161 arg = *argp;
6162 if (loop_invariant_p (loop, arg) != 1)
6163 return 0;
6165 /* convert_modes can emit new instructions, e.g. when arg is a loop
6166 invariant MEM and dest_reg has a different mode.
6167 These instructions would be emitted after the end of the function
6168 and then *inc_val would be an uninitialized pseudo.
6169 Detect this and bail in this case.
6170 Other alternatives to solve this would be: introducing a convert_modes
6171 variant which is allowed to fail but not allowed to emit new
6172 instructions; emitting these instructions before the loop start and
6173 letting them be garbage collected if *inc_val is never used; or saving
6174 the *inc_val initialization sequence generated here and emitting it at
6175 some suitable place when *inc_val is actually going to be used. */
6176 last = get_last_insn ();
6177 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6178 if (get_last_insn () != last)
6180 delete_insns_since (last);
6181 return 0;
6184 *inc_val = inc;
6185 *mult_val = const1_rtx;
6186 *location = argp;
6187 return 1;
6189 case SUBREG:
6190 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
6191 handle addition of promoted variables.
6192 ??? The comment at the start of this function is wrong: promoted
6193 variable increments don't look like it says they do. */
6194 return basic_induction_var (loop, SUBREG_REG (x),
6195 GET_MODE (SUBREG_REG (x)),
6196 dest_reg, p, inc_val, mult_val, location);
6198 case REG:
6199 /* If this register is assigned in a previous insn, look at its
6200 source, but don't go outside the loop or past a label. */
6202 /* If this sets a register to itself, we would repeat any previous
6203 biv increment if we applied this strategy blindly. */
6204 if (rtx_equal_p (dest_reg, x))
6205 return 0;
6207 insn = p;
6208 while (1)
6210 rtx dest;
6213 insn = PREV_INSN (insn);
6215 while (insn && NOTE_P (insn)
6216 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6218 if (!insn)
6219 break;
6220 set = single_set (insn);
6221 if (set == 0)
6222 break;
6223 dest = SET_DEST (set);
6224 if (dest == x
6225 || (GET_CODE (dest) == SUBREG
6226 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6227 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6228 && SUBREG_REG (dest) == x))
6229 return basic_induction_var (loop, SET_SRC (set),
6230 (GET_MODE (SET_SRC (set)) == VOIDmode
6231 ? GET_MODE (x)
6232 : GET_MODE (SET_SRC (set))),
6233 dest_reg, insn,
6234 inc_val, mult_val, location);
6236 while (GET_CODE (dest) == SIGN_EXTRACT
6237 || GET_CODE (dest) == ZERO_EXTRACT
6238 || GET_CODE (dest) == SUBREG
6239 || GET_CODE (dest) == STRICT_LOW_PART)
6240 dest = XEXP (dest, 0);
6241 if (dest == x)
6242 break;
6244 /* Fall through. */
6246 /* Can accept constant setting of biv only when inside inner most loop.
6247 Otherwise, a biv of an inner loop may be incorrectly recognized
6248 as a biv of the outer loop,
6249 causing code to be moved INTO the inner loop. */
6250 case MEM:
6251 if (loop_invariant_p (loop, x) != 1)
6252 return 0;
6253 case CONST_INT:
6254 case SYMBOL_REF:
6255 case CONST:
6256 /* convert_modes aborts if we try to convert to or from CCmode, so just
6257 exclude that case. It is very unlikely that a condition code value
6258 would be a useful iterator anyway. convert_modes aborts if we try to
6259 convert a float mode to non-float or vice versa too. */
6260 if (loop->level == 1
6261 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6262 && GET_MODE_CLASS (mode) != MODE_CC)
6264 /* Possible bug here? Perhaps we don't know the mode of X. */
6265 last = get_last_insn ();
6266 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6267 if (get_last_insn () != last)
6269 delete_insns_since (last);
6270 return 0;
6273 *inc_val = inc;
6274 *mult_val = const0_rtx;
6275 return 1;
6277 else
6278 return 0;
6280 case SIGN_EXTEND:
6281 /* Ignore this BIV if signed arithmetic overflow is defined. */
6282 if (flag_wrapv)
6283 return 0;
6284 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6285 dest_reg, p, inc_val, mult_val, location);
6287 case ASHIFTRT:
6288 /* Similar, since this can be a sign extension. */
6289 for (insn = PREV_INSN (p);
6290 (insn && NOTE_P (insn)
6291 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6292 insn = PREV_INSN (insn))
6295 if (insn)
6296 set = single_set (insn);
6298 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6299 && set && SET_DEST (set) == XEXP (x, 0)
6300 && GET_CODE (XEXP (x, 1)) == CONST_INT
6301 && INTVAL (XEXP (x, 1)) >= 0
6302 && GET_CODE (SET_SRC (set)) == ASHIFT
6303 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6304 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6305 GET_MODE (XEXP (x, 0)),
6306 dest_reg, insn, inc_val, mult_val,
6307 location);
6308 return 0;
6310 default:
6311 return 0;
6315 /* A general induction variable (giv) is any quantity that is a linear
6316 function of a basic induction variable,
6317 i.e. giv = biv * mult_val + add_val.
6318 The coefficients can be any loop invariant quantity.
6319 A giv need not be computed directly from the biv;
6320 it can be computed by way of other givs. */
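/* Illustrative sketch (hypothetical source, not part of the pass): with `i'
   a biv, the statement `j = i * 4 + 12;' inside the loop makes `j' a
   DEST_REG giv with mult_val 4 and add_val 12, and a further statement
   `k = j + 3;' makes `k' a giv as well -- computed by way of `j' rather
   than directly from `i' -- with mult_val 4 and add_val 15.  */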
6322 /* Determine whether X computes a giv.
6323 If it does, return nonzero and add the benefit from eliminating
6324 the computation of X to *PBENEFIT;
6325 set *SRC_REG to the register of the biv that it is computed from;
6326 set *ADD_VAL and *MULT_VAL to the coefficients,
6327 such that the value of X is biv * mult + add. */
6329 static int
6330 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
6331 rtx *add_val, rtx *mult_val, rtx *ext_val,
6332 int is_addr, int *pbenefit,
6333 enum machine_mode addr_mode)
6335 struct loop_ivs *ivs = LOOP_IVS (loop);
6336 rtx orig_x = x;
6338 /* If this is an invariant, forget it, it isn't a giv. */
6339 if (loop_invariant_p (loop, x) == 1)
6340 return 0;
6342 *pbenefit = 0;
6343 *ext_val = NULL_RTX;
6344 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6345 if (x == 0)
6346 return 0;
6348 switch (GET_CODE (x))
6350 case USE:
6351 case CONST_INT:
6352 /* Since this is now an invariant and wasn't before, it must be a giv
6353 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6354 with. */
6355 *src_reg = ivs->list->biv->dest_reg;
6356 *mult_val = const0_rtx;
6357 *add_val = x;
6358 break;
6360 case REG:
6361 /* This is equivalent to a BIV. */
6362 *src_reg = x;
6363 *mult_val = const1_rtx;
6364 *add_val = const0_rtx;
6365 break;
6367 case PLUS:
6368 /* Either (plus (biv) (invar)) or
6369 (plus (mult (biv) (invar_1)) (invar_2)). */
6370 if (GET_CODE (XEXP (x, 0)) == MULT)
6372 *src_reg = XEXP (XEXP (x, 0), 0);
6373 *mult_val = XEXP (XEXP (x, 0), 1);
6375 else
6377 *src_reg = XEXP (x, 0);
6378 *mult_val = const1_rtx;
6380 *add_val = XEXP (x, 1);
6381 break;
6383 case MULT:
6384 /* ADD_VAL is zero. */
6385 *src_reg = XEXP (x, 0);
6386 *mult_val = XEXP (x, 1);
6387 *add_val = const0_rtx;
6388 break;
6390 default:
6391 abort ();
6394 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6395 unless they are CONST_INT). */
6396 if (GET_CODE (*add_val) == USE)
6397 *add_val = XEXP (*add_val, 0);
6398 if (GET_CODE (*mult_val) == USE)
6399 *mult_val = XEXP (*mult_val, 0);
6401 if (is_addr)
6402 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6403 else
6404 *pbenefit += rtx_cost (orig_x, SET);
6406 /* Always return true if this is a giv so it will be detected as such,
6407 even if the benefit is zero or negative. This allows elimination
6408 of bivs that might otherwise not be eliminated. */
6409 return 1;
6412 /* Given an expression, X, try to form it as a linear function of a biv.
6413 We will canonicalize it to be of the form
6414 (plus (mult (BIV) (invar_1))
6415 (invar_2))
6416 with possible degeneracies.
6418 The invariant expressions must each be of a form that can be used as a
6419 machine operand. We surround them with a USE rtx (a hack, but localized
6420 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6421 routine; it is the caller's responsibility to strip them.
6423 If no such canonicalization is possible (i.e., two bivs are used or an
6424 expression that is neither invariant nor a biv or giv), this routine
6425 returns 0.
6427 For a nonzero return, the result will have a code of CONST_INT, USE,
6428 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6430 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
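/* Worked example (illustrative): with B a biv, the expression
   (mult (plus (reg B) (const_int 3)) (const_int 2)) is canonicalized by
   distributing the multiplication into
   (plus (mult (reg B) (const_int 2)) (const_int 6)),
   i.e. invar_1 == 2 and invar_2 == 6.  A loop-invariant register R used as
   an operand would instead come back wrapped in a USE, e.g.
   (plus (mult (reg B) (const_int 2)) (use (reg R))).  */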
6432 static rtx sge_plus (enum machine_mode, rtx, rtx);
6433 static rtx sge_plus_constant (rtx, rtx);
6435 static rtx
6436 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
6438 struct loop_ivs *ivs = LOOP_IVS (loop);
6439 struct loop_regs *regs = LOOP_REGS (loop);
6440 enum machine_mode mode = GET_MODE (x);
6441 rtx arg0, arg1;
6442 rtx tem;
6444 /* If this is not an integer mode, or if we cannot do arithmetic in this
6445 mode, this can't be a giv. */
6446 if (mode != VOIDmode
6447 && (GET_MODE_CLASS (mode) != MODE_INT
6448 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6449 return NULL_RTX;
6451 switch (GET_CODE (x))
6453 case PLUS:
6454 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6455 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6456 if (arg0 == 0 || arg1 == 0)
6457 return NULL_RTX;
6459 /* Put constant last, CONST_INT last if both constant. */
6460 if ((GET_CODE (arg0) == USE
6461 || GET_CODE (arg0) == CONST_INT)
6462 && ! ((GET_CODE (arg0) == USE
6463 && GET_CODE (arg1) == USE)
6464 || GET_CODE (arg1) == CONST_INT))
6465 tem = arg0, arg0 = arg1, arg1 = tem;
6467 /* Handle addition of zero, then addition of an invariant. */
6468 if (arg1 == const0_rtx)
6469 return arg0;
6470 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6471 switch (GET_CODE (arg0))
6473 case CONST_INT:
6474 case USE:
6475 /* Adding two invariants must result in an invariant, so enclose
6476 addition operation inside a USE and return it. */
6477 if (GET_CODE (arg0) == USE)
6478 arg0 = XEXP (arg0, 0);
6479 if (GET_CODE (arg1) == USE)
6480 arg1 = XEXP (arg1, 0);
6482 if (GET_CODE (arg0) == CONST_INT)
6483 tem = arg0, arg0 = arg1, arg1 = tem;
6484 if (GET_CODE (arg1) == CONST_INT)
6485 tem = sge_plus_constant (arg0, arg1);
6486 else
6487 tem = sge_plus (mode, arg0, arg1);
6489 if (GET_CODE (tem) != CONST_INT)
6490 tem = gen_rtx_USE (mode, tem);
6491 return tem;
6493 case REG:
6494 case MULT:
6495 /* biv + invar or mult + invar. Return sum. */
6496 return gen_rtx_PLUS (mode, arg0, arg1);
6498 case PLUS:
6499 /* (a + invar_1) + invar_2. Associate. */
6500 return
6501 simplify_giv_expr (loop,
6502 gen_rtx_PLUS (mode,
6503 XEXP (arg0, 0),
6504 gen_rtx_PLUS (mode,
6505 XEXP (arg0, 1),
6506 arg1)),
6507 ext_val, benefit);
6509 default:
6510 abort ();
6513 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6514 MULT to reduce cases. */
6515 if (REG_P (arg0))
6516 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6517 if (REG_P (arg1))
6518 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6520 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6521 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6522 Recurse to associate the second PLUS. */
6523 if (GET_CODE (arg1) == MULT)
6524 tem = arg0, arg0 = arg1, arg1 = tem;
6526 if (GET_CODE (arg1) == PLUS)
6527 return
6528 simplify_giv_expr (loop,
6529 gen_rtx_PLUS (mode,
6530 gen_rtx_PLUS (mode, arg0,
6531 XEXP (arg1, 0)),
6532 XEXP (arg1, 1)),
6533 ext_val, benefit);
6535 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6536 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6537 return NULL_RTX;
6539 if (!rtx_equal_p (arg0, arg1))
6540 return NULL_RTX;
6542 return simplify_giv_expr (loop,
6543 gen_rtx_MULT (mode,
6544 XEXP (arg0, 0),
6545 gen_rtx_PLUS (mode,
6546 XEXP (arg0, 1),
6547 XEXP (arg1, 1))),
6548 ext_val, benefit);
6550 case MINUS:
6551 /* Handle "a - b" as "a + b * (-1)". */
6552 return simplify_giv_expr (loop,
6553 gen_rtx_PLUS (mode,
6554 XEXP (x, 0),
6555 gen_rtx_MULT (mode,
6556 XEXP (x, 1),
6557 constm1_rtx)),
6558 ext_val, benefit);
6560 case MULT:
6561 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6562 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6563 if (arg0 == 0 || arg1 == 0)
6564 return NULL_RTX;
6566 /* Put constant last, CONST_INT last if both constant. */
6567 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6568 && GET_CODE (arg1) != CONST_INT)
6569 tem = arg0, arg0 = arg1, arg1 = tem;
6571 /* If second argument is not now constant, not giv. */
6572 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6573 return NULL_RTX;
6575 /* Handle multiply by 0 or 1. */
6576 if (arg1 == const0_rtx)
6577 return const0_rtx;
6579 else if (arg1 == const1_rtx)
6580 return arg0;
6582 switch (GET_CODE (arg0))
6584 case REG:
6585 /* biv * invar. Done. */
6586 return gen_rtx_MULT (mode, arg0, arg1);
6588 case CONST_INT:
6589 /* Product of two constants. */
6590 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6592 case USE:
6593 /* invar * invar is a giv, but attempt to simplify it somehow. */
6594 if (GET_CODE (arg1) != CONST_INT)
6595 return NULL_RTX;
6597 arg0 = XEXP (arg0, 0);
6598 if (GET_CODE (arg0) == MULT)
6600 /* (invar_0 * invar_1) * invar_2. Associate. */
6601 return simplify_giv_expr (loop,
6602 gen_rtx_MULT (mode,
6603 XEXP (arg0, 0),
6604 gen_rtx_MULT (mode,
6605 XEXP (arg0, 1),
6607 arg1)),
6608 ext_val, benefit);
6610 /* Propagate the MULT expressions to the innermost nodes. */
6611 else if (GET_CODE (arg0) == PLUS)
6613 /* (invar_0 + invar_1) * invar_2. Distribute. */
6614 return simplify_giv_expr (loop,
6615 gen_rtx_PLUS (mode,
6616 gen_rtx_MULT (mode,
6617 XEXP (arg0, 0),
6619 arg1),
6620 gen_rtx_MULT (mode,
6621 XEXP (arg0, 1),
6623 arg1)),
6624 ext_val, benefit);
6626 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6628 case MULT:
6629 /* (a * invar_1) * invar_2. Associate. */
6630 return simplify_giv_expr (loop,
6631 gen_rtx_MULT (mode,
6632 XEXP (arg0, 0),
6633 gen_rtx_MULT (mode,
6634 XEXP (arg0, 1),
6635 arg1)),
6636 ext_val, benefit);
6638 case PLUS:
6639 /* (a + invar_1) * invar_2. Distribute. */
6640 return simplify_giv_expr (loop,
6641 gen_rtx_PLUS (mode,
6642 gen_rtx_MULT (mode,
6643 XEXP (arg0, 0),
6644 arg1),
6645 gen_rtx_MULT (mode,
6646 XEXP (arg0, 1),
6647 arg1)),
6648 ext_val, benefit);
6650 default:
6651 abort ();
6654 case ASHIFT:
6655 /* Shift by constant is multiply by power of two. */
6656 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6657 return 0;
6659 return
6660 simplify_giv_expr (loop,
6661 gen_rtx_MULT (mode,
6662 XEXP (x, 0),
6663 GEN_INT ((HOST_WIDE_INT) 1
6664 << INTVAL (XEXP (x, 1)))),
6665 ext_val, benefit);
6667 case NEG:
6668 /* "-a" is "a * (-1)" */
6669 return simplify_giv_expr (loop,
6670 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6671 ext_val, benefit);
6673 case NOT:
6674 /* "~a" is "-a - 1". Silly, but easy. */
6675 return simplify_giv_expr (loop,
6676 gen_rtx_MINUS (mode,
6677 gen_rtx_NEG (mode, XEXP (x, 0)),
6678 const1_rtx),
6679 ext_val, benefit);
6681 case USE:
6682 /* Already in proper form for invariant. */
6683 return x;
6685 case SIGN_EXTEND:
6686 case ZERO_EXTEND:
6687 case TRUNCATE:
6688 /* Conditionally recognize extensions of simple IVs. After we've
6689 computed loop traversal counts and verified the range of the
6690 source IV, we'll reevaluate this as a GIV. */
6691 if (*ext_val == NULL_RTX)
6693 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6694 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
6696 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6697 return arg0;
6700 goto do_default;
6702 case REG:
6703 /* If this is a new register, we can't deal with it. */
6704 if (REGNO (x) >= max_reg_before_loop)
6705 return 0;
6707 /* Check for biv or giv. */
6708 switch (REG_IV_TYPE (ivs, REGNO (x)))
6710 case BASIC_INDUCT:
6711 return x;
6712 case GENERAL_INDUCT:
6714 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6716 /* Form expression from giv and add benefit. Ensure this giv
6717 can derive another and subtract any needed adjustment if so. */
6719 /* Increasing the benefit here is risky. The only case in which it
6720 is arguably correct is if this is the only use of V. In other
6721 cases, this will artificially inflate the benefit of the current
6722 giv, and lead to suboptimal code. Thus, it is disabled, since
6723 potentially not reducing an only marginally beneficial giv is
6724 less harmful than reducing many givs that are not really
6725 beneficial. */
6727 rtx single_use = regs->array[REGNO (x)].single_usage;
6728 if (single_use && single_use != const0_rtx)
6729 *benefit += v->benefit;
6732 if (v->cant_derive)
6733 return 0;
6735 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6736 v->src_reg, v->mult_val),
6737 v->add_val);
6739 if (v->derive_adjustment)
6740 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6741 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6742 if (*ext_val)
6744 if (!v->ext_dependent)
6745 return arg0;
6747 else
6749 *ext_val = v->ext_dependent;
6750 return arg0;
6752 return 0;
6755 default:
6756 do_default:
6757 /* If it isn't an induction variable, and it is invariant, we
6758 may be able to simplify things further by looking through
6759 the bits we just moved outside the loop. */
6760 if (loop_invariant_p (loop, x) == 1)
6762 struct movable *m;
6763 struct loop_movables *movables = LOOP_MOVABLES (loop);
6765 for (m = movables->head; m; m = m->next)
6766 if (rtx_equal_p (x, m->set_dest))
6768 /* Ok, we found a match. Substitute and simplify. */
6770 /* If we match another movable, we must use that, as
6771 this one is going away. */
6772 if (m->match)
6773 return simplify_giv_expr (loop, m->match->set_dest,
6774 ext_val, benefit);
6776 /* If consec is nonzero, this is a member of a group of
6777 instructions that were moved together. We handle this
6778 case only to the point of seeking to the last insn and
6779 looking for a REG_EQUAL. Fail if we don't find one. */
6780 if (m->consec != 0)
6782 int i = m->consec;
6783 tem = m->insn;
6786 tem = NEXT_INSN (tem);
6788 while (--i > 0);
6790 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6791 if (tem)
6792 tem = XEXP (tem, 0);
6794 else
6796 tem = single_set (m->insn);
6797 if (tem)
6798 tem = SET_SRC (tem);
6801 if (tem)
6803 /* What we are most interested in is pointer
6804 arithmetic on invariants -- only take
6805 patterns we may be able to do something with. */
6806 if (GET_CODE (tem) == PLUS
6807 || GET_CODE (tem) == MULT
6808 || GET_CODE (tem) == ASHIFT
6809 || GET_CODE (tem) == CONST_INT
6810 || GET_CODE (tem) == SYMBOL_REF)
6812 tem = simplify_giv_expr (loop, tem, ext_val,
6813 benefit);
6814 if (tem)
6815 return tem;
6817 else if (GET_CODE (tem) == CONST
6818 && GET_CODE (XEXP (tem, 0)) == PLUS
6819 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6820 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6822 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6823 ext_val, benefit);
6824 if (tem)
6825 return tem;
6828 break;
6831 break;
6834 /* Fall through to general case. */
6835 default:
6836 /* If invariant, return as USE (unless CONST_INT).
6837 Otherwise, not giv. */
6838 if (GET_CODE (x) == USE)
6839 x = XEXP (x, 0);
6841 if (loop_invariant_p (loop, x) == 1)
6843 if (GET_CODE (x) == CONST_INT)
6844 return x;
6845 if (GET_CODE (x) == CONST
6846 && GET_CODE (XEXP (x, 0)) == PLUS
6847 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6848 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6849 x = XEXP (x, 0);
6850 return gen_rtx_USE (mode, x);
6852 else
6853 return 0;
6857 /* This routine folds invariants such that there is only ever one
6858 CONST_INT in the summation. It is only used by simplify_giv_expr. */
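/* Small illustration: applied to x == (plus (reg A) (const_int 3)) and
   c == (const_int 5), sge_plus_constant returns
   (plus (reg A) (const_int 8)), so at most one CONST_INT ever remains in
   the folded sum.  */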
6860 static rtx
6861 sge_plus_constant (rtx x, rtx c)
6863 if (GET_CODE (x) == CONST_INT)
6864 return GEN_INT (INTVAL (x) + INTVAL (c));
6865 else if (GET_CODE (x) != PLUS)
6866 return gen_rtx_PLUS (GET_MODE (x), x, c);
6867 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6869 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6870 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6872 else if (GET_CODE (XEXP (x, 0)) == PLUS
6873 || GET_CODE (XEXP (x, 1)) != PLUS)
6875 return gen_rtx_PLUS (GET_MODE (x),
6876 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6878 else
6880 return gen_rtx_PLUS (GET_MODE (x),
6881 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6885 static rtx
6886 sge_plus (enum machine_mode mode, rtx x, rtx y)
6888 while (GET_CODE (y) == PLUS)
6890 rtx a = XEXP (y, 0);
6891 if (GET_CODE (a) == CONST_INT)
6892 x = sge_plus_constant (x, a);
6893 else
6894 x = gen_rtx_PLUS (mode, x, a);
6895 y = XEXP (y, 1);
6897 if (GET_CODE (y) == CONST_INT)
6898 x = sge_plus_constant (x, y);
6899 else
6900 x = gen_rtx_PLUS (mode, x, y);
6901 return x;
6904 /* Help detect a giv that is calculated by several consecutive insns;
6905 for example,
6906 giv = biv * M
6907 giv = giv + A
6908 The caller has already identified the first insn P as having a giv as dest;
6909 we check that all other insns that set the same register follow
6910 immediately after P, that they alter nothing else,
6911 and that the result of the last is still a giv.
6913 The value is 0 if the reg set in P is not really a giv.
6914 Otherwise, the value is the amount gained by eliminating
6915 all the consecutive insns that compute the value.
6917 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6918 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6920 The coefficients of the ultimate giv value are stored in
6921 *MULT_VAL and *ADD_VAL. */
6923 static int
6924 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
6925 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
6926 rtx *ext_val, rtx *last_consec_insn)
6928 struct loop_ivs *ivs = LOOP_IVS (loop);
6929 struct loop_regs *regs = LOOP_REGS (loop);
6930 int count;
6931 enum rtx_code code;
6932 int benefit;
6933 rtx temp;
6934 rtx set;
6936 /* Indicate that this is a giv so that we can update the value produced in
6937 each insn of the multi-insn sequence.
6939 This induction structure will be used only by the call to
6940 general_induction_var below, so we can allocate it on our stack.
6941 If this is a giv, our caller will replace the induct var entry with
6942 a new induction structure. */
6943 struct induction *v;
6945 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6946 return 0;
6948 v = alloca (sizeof (struct induction));
6949 v->src_reg = src_reg;
6950 v->mult_val = *mult_val;
6951 v->add_val = *add_val;
6952 v->benefit = first_benefit;
6953 v->cant_derive = 0;
6954 v->derive_adjustment = 0;
6955 v->ext_dependent = NULL_RTX;
6957 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6958 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6960 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6962 while (count > 0)
6964 p = NEXT_INSN (p);
6965 code = GET_CODE (p);
6967 /* If libcall, skip to end of call sequence. */
6968 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6969 p = XEXP (temp, 0);
6971 if (code == INSN
6972 && (set = single_set (p))
6973 && REG_P (SET_DEST (set))
6974 && SET_DEST (set) == dest_reg
6975 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6976 add_val, mult_val, ext_val, 0,
6977 &benefit, VOIDmode)
6978 /* Giv created by equivalent expression. */
6979 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6980 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6981 add_val, mult_val, ext_val, 0,
6982 &benefit, VOIDmode)))
6983 && src_reg == v->src_reg)
6985 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6986 benefit += libcall_benefit (p);
6988 count--;
6989 v->mult_val = *mult_val;
6990 v->add_val = *add_val;
6991 v->benefit += benefit;
6993 else if (code != NOTE)
6995 /* Allow insns that set something other than this giv to a
6996 constant. Such insns are needed on machines which cannot
6997 include long constants and should not disqualify a giv. */
6998 if (code == INSN
6999 && (set = single_set (p))
7000 && SET_DEST (set) != dest_reg
7001 && CONSTANT_P (SET_SRC (set)))
7002 continue;
7004 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7005 return 0;
7009 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7010 *last_consec_insn = p;
7011 return v->benefit;
7014 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7015 represented by G1. If no such expression can be found, or it is clear that
7016 it cannot possibly be a valid address, 0 is returned.
7018 To perform the computation, we note that
7019 G1 = x * v + a and
7020 G2 = y * v + b
7021 where `v' is the biv.
7023 So G2 = (y/x) * G1 + (b - a*y/x).
7025 Note that MULT = y/x.
7027 Update: A and B are now allowed to be additive expressions such that
7028 B contains all variables in A. That is, computing B-A will not require
7029 subtracting variables. */
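/* Worked example (illustrative numbers): if G1 = 2*v + 1 and G2 = 6*v + 7,
   then MULT = y/x = 3 and the addend is b - a*y/x = 7 - 1*3 = 4, so G2 can
   be rewritten as 3*G1 + 4 and can reuse the register that represents G1.  */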
7031 static rtx
7032 express_from_1 (rtx a, rtx b, rtx mult)
7034 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7036 if (mult == const0_rtx)
7037 return b;
7039 /* If MULT is not 1, we cannot handle A with non-constants, since we
7040 would then be required to subtract multiples of the registers in A.
7041 This is theoretically possible, and may even apply to some Fortran
7042 constructs, but it is a lot of work and we do not attempt it here. */
7044 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7045 return NULL_RTX;
7047 /* In general these structures are sorted top to bottom (down the PLUS
7048 chain), but not left to right across the PLUS. If B is a higher
7049 order giv than A, we can strip one level and recurse. If A is higher
7050 order, we'll eventually bail out, but won't know that until the end.
7051 If they are the same, we'll strip one level around this loop. */
7053 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7055 rtx ra, rb, oa, ob, tmp;
7057 ra = XEXP (a, 0), oa = XEXP (a, 1);
7058 if (GET_CODE (ra) == PLUS)
7059 tmp = ra, ra = oa, oa = tmp;
7061 rb = XEXP (b, 0), ob = XEXP (b, 1);
7062 if (GET_CODE (rb) == PLUS)
7063 tmp = rb, rb = ob, ob = tmp;
7065 if (rtx_equal_p (ra, rb))
7066 /* We matched: remove one reg completely. */
7067 a = oa, b = ob;
7068 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7069 /* An alternate match. */
7070 a = oa, b = rb;
7071 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7072 /* An alternate match. */
7073 a = ra, b = ob;
7074 else
7076 /* Indicates an extra register in B. Strip one level from B and
7077 recurse, hoping B was the higher order expression. */
7078 ob = express_from_1 (a, ob, mult);
7079 if (ob == NULL_RTX)
7080 return NULL_RTX;
7081 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7085 /* Here we are at the last level of A, go through the cases hoping to
7086 get rid of everything but a constant. */
7088 if (GET_CODE (a) == PLUS)
7090 rtx ra, oa;
7092 ra = XEXP (a, 0), oa = XEXP (a, 1);
7093 if (rtx_equal_p (oa, b))
7094 oa = ra;
7095 else if (!rtx_equal_p (ra, b))
7096 return NULL_RTX;
7098 if (GET_CODE (oa) != CONST_INT)
7099 return NULL_RTX;
7101 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7103 else if (GET_CODE (a) == CONST_INT)
7105 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7107 else if (CONSTANT_P (a))
7109 enum machine_mode mode_a = GET_MODE (a);
7110 enum machine_mode mode_b = GET_MODE (b);
7111 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7112 return simplify_gen_binary (MINUS, mode, b, a);
7114 else if (GET_CODE (b) == PLUS)
7116 if (rtx_equal_p (a, XEXP (b, 0)))
7117 return XEXP (b, 1);
7118 else if (rtx_equal_p (a, XEXP (b, 1)))
7119 return XEXP (b, 0);
7120 else
7121 return NULL_RTX;
7123 else if (rtx_equal_p (a, b))
7124 return const0_rtx;
7126 return NULL_RTX;
7130 static rtx express_from (struct induction *g1, struct induction *g2)
7132 rtx mult, add;
7134 /* The value that G1 will be multiplied by must be a constant integer. Also,
7135 the only chance we have of getting a valid address is if y/x (see above
7136 for notation) is also an integer. */
7137 if (GET_CODE (g1->mult_val) == CONST_INT
7138 && GET_CODE (g2->mult_val) == CONST_INT)
7140 if (g1->mult_val == const0_rtx
7141 || (g1->mult_val == constm1_rtx
7142 && INTVAL (g2->mult_val)
7143 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
7144 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7145 return NULL_RTX;
7146 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7148 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7149 mult = const1_rtx;
7150 else
7152 /* ??? Find out if the one is a multiple of the other? */
7153 return NULL_RTX;
7156 add = express_from_1 (g1->add_val, g2->add_val, mult);
7157 if (add == NULL_RTX)
7159 /* Failed. If we've got a multiplication factor between G1 and G2,
7160 scale G1's addend and try again. */
7161 if (INTVAL (mult) > 1)
7163 rtx g1_add_val = g1->add_val;
7164 if (GET_CODE (g1_add_val) == MULT
7165 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7167 HOST_WIDE_INT m;
7168 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7169 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7170 XEXP (g1_add_val, 0), GEN_INT (m));
7172 else
7174 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7175 mult);
7178 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7181 if (add == NULL_RTX)
7182 return NULL_RTX;
7184 /* Form simplified final result. */
7185 if (mult == const0_rtx)
7186 return add;
7187 else if (mult == const1_rtx)
7188 mult = g1->dest_reg;
7189 else
7190 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7192 if (add == const0_rtx)
7193 return mult;
7194 else
7196 if (GET_CODE (add) == PLUS
7197 && CONSTANT_P (XEXP (add, 1)))
7199 rtx tem = XEXP (add, 1);
7200 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7201 add = tem;
7204 return gen_rtx_PLUS (g2->mode, mult, add);
7208 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7209 represented by G1. This indicates that G2 should be combined with G1 and
7210 that G2 can use (either directly or via an address expression) a register
7211 used to represent G1. */
7213 static rtx
7214 combine_givs_p (struct induction *g1, struct induction *g2)
7216 rtx comb, ret;
7218 /* With the introduction of ext dependent givs, we must care for modes.
7219 G2 must not use a wider mode than G1. */
7220 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7221 return NULL_RTX;
7223 ret = comb = express_from (g1, g2);
7224 if (comb == NULL_RTX)
7225 return NULL_RTX;
7226 if (g1->mode != g2->mode)
7227 ret = gen_lowpart (g2->mode, comb);
7229 /* If these givs are identical, they can be combined. We use the results
7230 of express_from because the addends are not in a canonical form, so
7231 rtx_equal_p is a weaker test. */
7232 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7233 combination to be the other way round. */
7234 if (comb == g1->dest_reg
7235 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7237 return ret;
7240 /* If G2 can be expressed as a function of G1 and that function is valid
7241 as an address and no more expensive than using a register for G2,
7242 the expression of G2 in terms of G1 can be used. */
7243 if (ret != NULL_RTX
7244 && g2->giv_type == DEST_ADDR
7245 && memory_address_p (GET_MODE (g2->mem), ret))
7246 return ret;
7248 return NULL_RTX;
7251 /* Check each extension dependent giv in this class to see if its
7252 root biv is safe from wrapping in the interior mode, which would
7253 make the giv illegal. */
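/* Example of the hazard (illustrative): if the biv lives in QImode, starts
   at 0 and is incremented by 1 for 300 iterations, a giv recorded as
   (zero_extend:SI (reg:QI biv)) stops being a linear function of the biv
   once the QImode value wraps past 255.  The checks below accept the
   extension only when the known start value, increment and iteration count
   prove that no such wrap occurs in the interior mode.  */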
7255 static void
7256 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
7258 struct loop_info *loop_info = LOOP_INFO (loop);
7259 int ze_ok = 0, se_ok = 0, info_ok = 0;
7260 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7261 HOST_WIDE_INT start_val;
7262 unsigned HOST_WIDE_INT u_end_val = 0;
7263 unsigned HOST_WIDE_INT u_start_val = 0;
7264 rtx incr = pc_rtx;
7265 struct induction *v;
7267 /* Make sure the iteration data is available. We must have
7268 constants in order to be certain of no overflow. */
7269 if (loop_info->n_iterations > 0
7270 && bl->initial_value
7271 && GET_CODE (bl->initial_value) == CONST_INT
7272 && (incr = biv_total_increment (bl))
7273 && GET_CODE (incr) == CONST_INT
7274 /* Make sure the host can represent the arithmetic. */
7275 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7277 unsigned HOST_WIDE_INT abs_incr, total_incr;
7278 HOST_WIDE_INT s_end_val;
7279 int neg_incr;
7281 info_ok = 1;
7282 start_val = INTVAL (bl->initial_value);
7283 u_start_val = start_val;
7285 neg_incr = 0, abs_incr = INTVAL (incr);
7286 if (INTVAL (incr) < 0)
7287 neg_incr = 1, abs_incr = -abs_incr;
7288 total_incr = abs_incr * loop_info->n_iterations;
7290 /* Check for host arithmetic overflow. */
7291 if (total_incr / loop_info->n_iterations == abs_incr)
7293 unsigned HOST_WIDE_INT u_max;
7294 HOST_WIDE_INT s_max;
7296 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7297 s_end_val = u_end_val;
7298 u_max = GET_MODE_MASK (biv_mode);
7299 s_max = u_max >> 1;
7301 /* Check zero extension of biv ok. */
7302 if (start_val >= 0
7303 /* Check for host arithmetic overflow. */
7304 && (neg_incr
7305 ? u_end_val < u_start_val
7306 : u_end_val > u_start_val)
7307 /* Check for target arithmetic overflow. */
7308 && (neg_incr
7309 ? 1 /* taken care of with host overflow */
7310 : u_end_val <= u_max))
7312 ze_ok = 1;
7315 /* Check sign extension of biv ok. */
7316 /* ??? While it is true that overflow with signed and pointer
7317 arithmetic is undefined, I fear too many programmers don't
7318 keep this fact in mind -- myself included on occasion.
7319 So we leave the signed overflow optimizations alone. */
7320 if (start_val >= -s_max - 1
7321 /* Check for host arithmetic overflow. */
7322 && (neg_incr
7323 ? s_end_val < start_val
7324 : s_end_val > start_val)
7325 /* Check for target arithmetic overflow. */
7326 && (neg_incr
7327 ? s_end_val >= -s_max - 1
7328 : s_end_val <= s_max))
7330 se_ok = 1;
7335 /* If we know the BIV is compared at run-time against an
7336 invariant value, and the increment is +/- 1, we may also
7337 be able to prove that the BIV cannot overflow. */
7338 else if (bl->biv->src_reg == loop_info->iteration_var
7339 && loop_info->comparison_value
7340 && loop_invariant_p (loop, loop_info->comparison_value)
7341 && (incr = biv_total_increment (bl))
7342 && GET_CODE (incr) == CONST_INT)
7344 /* If the increment is +1, and the exit test is a <,
7345 the BIV cannot overflow. (For <=, we have the
7346 problematic case that the comparison value might
7347 be the maximum value of the range.) */
7348 if (INTVAL (incr) == 1)
7350 if (loop_info->comparison_code == LT)
7351 se_ok = ze_ok = 1;
7352 else if (loop_info->comparison_code == LTU)
7353 ze_ok = 1;
7356 /* Likewise for increment -1 and exit test >. */
7357 if (INTVAL (incr) == -1)
7359 if (loop_info->comparison_code == GT)
7360 se_ok = ze_ok = 1;
7361 else if (loop_info->comparison_code == GTU)
7362 ze_ok = 1;
7366 /* Invalidate givs that fail the tests. */
7367 for (v = bl->giv; v; v = v->next_iv)
7368 if (v->ext_dependent)
7370 enum rtx_code code = GET_CODE (v->ext_dependent);
7371 int ok = 0;
7373 switch (code)
7375 case SIGN_EXTEND:
7376 ok = se_ok;
7377 break;
7378 case ZERO_EXTEND:
7379 ok = ze_ok;
7380 break;
7382 case TRUNCATE:
7383 /* We don't know whether this value is being used as either
7384 signed or unsigned, so to safely truncate we must satisfy
7385 both. The initial check here verifies the BIV itself;
7386 once that is successful we may check its range wrt the
7387 derived GIV. This works only if we were able to determine
7388 constant start and end values above. */
7389 if (se_ok && ze_ok && info_ok)
7391 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7392 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7394 /* We know from the above that both endpoints are nonnegative,
7395 and that there is no wrapping. Verify that both endpoints
7396 are within the (signed) range of the outer mode. */
7397 if (u_start_val <= max && u_end_val <= max)
7398 ok = 1;
7400 break;
7402 default:
7403 abort ();
7406 if (ok)
7408 if (loop_dump_stream)
7410 fprintf (loop_dump_stream,
7411 "Verified ext dependent giv at %d of reg %d\n",
7412 INSN_UID (v->insn), bl->regno);
7415 else
7417 if (loop_dump_stream)
7419 const char *why;
7421 if (info_ok)
7422 why = "biv iteration values overflowed";
7423 else
7425 if (incr == pc_rtx)
7426 incr = biv_total_increment (bl);
7427 if (incr == const1_rtx)
7428 why = "biv iteration info incomplete; incr by 1";
7429 else
7430 why = "biv iteration info incomplete";
7433 fprintf (loop_dump_stream,
7434 "Failed ext dependent giv at %d, %s\n",
7435 INSN_UID (v->insn), why);
7437 v->ignore = 1;
7438 bl->all_reduced = 0;
7443 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7445 rtx
7446 extend_value_for_giv (struct induction *v, rtx value)
7448 rtx ext_dep = v->ext_dependent;
7450 if (! ext_dep)
7451 return value;
7453 /* Recall that check_ext_dependent_givs verified that the known bounds
7454 of a biv did not overflow or wrap with respect to the extension for
7455 the giv. Therefore, constants need no additional adjustment. */
7456 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7457 return value;
7459 /* Otherwise, we must adjust the value to compensate for the
7460 differing modes of the biv and the giv. */
7461 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7464 struct combine_givs_stats
7466 int giv_number;
7467 int total_benefit;
7470 static int
7471 cmp_combine_givs_stats (const void *xp, const void *yp)
7473 const struct combine_givs_stats * const x =
7474 (const struct combine_givs_stats *) xp;
7475 const struct combine_givs_stats * const y =
7476 (const struct combine_givs_stats *) yp;
7477 int d;
7478 d = y->total_benefit - x->total_benefit;
7479 /* Stabilize the sort. */
7480 if (!d)
7481 d = x->giv_number - y->giv_number;
7482 return d;
7485 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7486 any other. If so, point SAME to the giv combined with and set NEW_REG to
7487 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7488 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
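/* A rough sketch of a successful combination: given two DEST_ADDR givs
   derived from the same biv, say

	g1 = base + 4*i		g2 = base + 4*i + 4,

   combine_givs_p can express g2 in terms of g1's destination register as
   (plus g1_reg 4), so g2->new_reg is set to that expression and g2->same
   points to g1; only g1 then needs a reduced register of its own.  */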
7490 static void
7491 combine_givs (struct loop_regs *regs, struct iv_class *bl)
7493 /* Additional benefit to add for being combined multiple times. */
7494 const int extra_benefit = 3;
7496 struct induction *g1, *g2, **giv_array;
7497 int i, j, k, giv_count;
7498 struct combine_givs_stats *stats;
7499 rtx *can_combine;
7501 /* Count givs, because bl->giv_count is incorrect here. */
7502 giv_count = 0;
7503 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7504 if (!g1->ignore)
7505 giv_count++;
7507 giv_array = alloca (giv_count * sizeof (struct induction *));
7508 i = 0;
7509 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7510 if (!g1->ignore)
7511 giv_array[i++] = g1;
7513 stats = xcalloc (giv_count, sizeof (*stats));
7514 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
7516 for (i = 0; i < giv_count; i++)
7518 int this_benefit;
7519 rtx single_use;
7521 g1 = giv_array[i];
7522 stats[i].giv_number = i;
7524 /* If a DEST_REG GIV is used only once, do not allow it to combine
7525 with anything, for in doing so we will gain nothing that cannot
7526 be had by simply letting the GIV with which we would have combined
7527 be reduced on its own. The lossage shows up in particular with
7528 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7529 be seen elsewhere as well. */
7530 if (g1->giv_type == DEST_REG
7531 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7532 && single_use != const0_rtx)
7533 continue;
7535 this_benefit = g1->benefit;
7536 /* Add an additional weight for zero addends. */
7537 if (g1->no_const_addval)
7538 this_benefit += 1;
7540 for (j = 0; j < giv_count; j++)
7542 rtx this_combine;
7544 g2 = giv_array[j];
7545 if (g1 != g2
7546 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7548 can_combine[i * giv_count + j] = this_combine;
7549 this_benefit += g2->benefit + extra_benefit;
7552 stats[i].total_benefit = this_benefit;
7555 /* Iterate, combining until we can't. */
7556 restart:
7557 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7559 if (loop_dump_stream)
7561 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7562 for (k = 0; k < giv_count; k++)
7564 g1 = giv_array[stats[k].giv_number];
7565 if (!g1->combined_with && !g1->same)
7566 fprintf (loop_dump_stream, " {%d, %d}",
7567 INSN_UID (giv_array[stats[k].giv_number]->insn),
7568 stats[k].total_benefit);
7570 putc ('\n', loop_dump_stream);
7573 for (k = 0; k < giv_count; k++)
7575 int g1_add_benefit = 0;
7577 i = stats[k].giv_number;
7578 g1 = giv_array[i];
7580 /* If it has already been combined, skip. */
7581 if (g1->combined_with || g1->same)
7582 continue;
7584 for (j = 0; j < giv_count; j++)
7586 g2 = giv_array[j];
7587 if (g1 != g2 && can_combine[i * giv_count + j]
7588 /* If it has already been combined, skip. */
7589 && ! g2->same && ! g2->combined_with)
7591 int l;
7593 g2->new_reg = can_combine[i * giv_count + j];
7594 g2->same = g1;
7595 /* For destination, we now may replace by mem expression instead
7596 of register. This changes the costs considerably, so add the
7597 compensation. */
7598 if (g2->giv_type == DEST_ADDR)
7599 g2->benefit = (g2->benefit + reg_address_cost
7600 - address_cost (g2->new_reg,
7601 GET_MODE (g2->mem)));
7602 g1->combined_with++;
7603 g1->lifetime += g2->lifetime;
7605 g1_add_benefit += g2->benefit;
7607 /* ??? The new final_[bg]iv_value code does a much better job
7608 of finding replaceable giv's, and hence this code may no
7609 longer be necessary. */
7610 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7611 g1_add_benefit -= copy_cost;
7613 /* To help optimize the next set of combinations, remove
7614 this giv from the benefits of other potential mates. */
7615 for (l = 0; l < giv_count; ++l)
7617 int m = stats[l].giv_number;
7618 if (can_combine[m * giv_count + j])
7619 stats[l].total_benefit -= g2->benefit + extra_benefit;
7622 if (loop_dump_stream)
7623 fprintf (loop_dump_stream,
7624 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7625 INSN_UID (g2->insn), INSN_UID (g1->insn),
7626 g1->benefit, g1_add_benefit, g1->lifetime);
7630 /* To help optimize the next set of combinations, remove
7631 this giv from the benefits of other potential mates. */
7632 if (g1->combined_with)
7634 for (j = 0; j < giv_count; ++j)
7636 int m = stats[j].giv_number;
7637 if (can_combine[m * giv_count + i])
7638 stats[j].total_benefit -= g1->benefit + extra_benefit;
7641 g1->benefit += g1_add_benefit;
7643 /* We've finished with this giv, and everything it touched.
7644 Restart the combination so that proper weights for the
7645 rest of the givs are properly taken into account. */
7646 /* ??? Ideally we would compact the arrays at this point, so
7647 as to not cover old ground. But sanely compacting
7648 can_combine is tricky. */
7649 goto restart;
7653 /* Clean up. */
7654 free (stats);
7655 free (can_combine);
7658 /* Generate sequence for REG = B * M + A. B is the initial value of
7659 the basic induction variable, M a multiplicative constant, A an
7660 additive constant and REG the destination register. */
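/* For example, initializing the reduced register of a giv whose mult_val
   is 4 and whose add_val is 10, from a biv with initial value 3,
   conceptually computes REG = 3 * 4 + 10 = 22; the exact insn sequence is
   whatever expand_mult_add produces for the target.  */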
7662 static rtx
7663 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
7665 rtx seq;
7666 rtx result;
7668 start_sequence ();
7669 /* Use unsigned arithmetic. */
7670 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7671 if (reg != result)
7672 emit_move_insn (reg, result);
7673 seq = get_insns ();
7674 end_sequence ();
7676 return seq;
7680 /* Update registers created in insn sequence SEQ. */
7682 static void
7683 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
7685 rtx insn;
7687 /* Update register info for alias analysis. */
7689 insn = seq;
7690 while (insn != NULL_RTX)
7692 rtx set = single_set (insn);
7694 if (set && REG_P (SET_DEST (set)))
7695 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7697 insn = NEXT_INSN (insn);
7702 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
7703 is the initial value of the basic induction variable, M a
7704 multiplicative constant, A an additive constant and REG the
7705 destination register. */
7707 void
7708 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
7709 rtx reg, basic_block before_bb, rtx before_insn)
7711 rtx seq;
7713 if (! before_insn)
7715 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7716 return;
7719 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7720 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7722 /* Increase the lifetime of any invariants moved further in code. */
7723 update_reg_last_use (a, before_insn);
7724 update_reg_last_use (b, before_insn);
7725 update_reg_last_use (m, before_insn);
7727 /* It is possible that the expansion created lots of new registers.
7728 Iterate over the sequence we just created and record them all. We
7729 must do this before inserting the sequence. */
7730 loop_regs_update (loop, seq);
7732 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7736 /* Emit insns in loop pre-header to set REG = B * M + A. B is the
7737 initial value of the basic induction variable, M a multiplicative
7738 constant, A an additive constant and REG the destination
7739 register. */
7741 void
7742 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7744 rtx seq;
7746 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7747 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7749 /* Increase the lifetime of any invariants moved further in code.
7750 ???? Is this really necessary? */
7751 update_reg_last_use (a, loop->sink);
7752 update_reg_last_use (b, loop->sink);
7753 update_reg_last_use (m, loop->sink);
7755 /* It is possible that the expansion created lots of new registers.
7756 Iterate over the sequence we just created and record them all. We
7757 must do this before inserting the sequence. */
7758 loop_regs_update (loop, seq);
7760 loop_insn_sink (loop, seq);
7764 /* Emit insns after loop to set REG = B * M + A. B is the initial
7765 value of the basic induction variable, M a multiplicative constant,
7766 A an additive constant and REG the destination register. */
7768 void
7769 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7771 rtx seq;
7773 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7774 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7776 /* It is possible that the expansion created lots of new registers.
7777 Iterate over the sequence we just created and record them all. We
7778 must do this before inserting the sequence. */
7779 loop_regs_update (loop, seq);
7781 loop_insn_hoist (loop, seq);
7786 /* Similar to gen_add_mult, but compute cost rather than generating
7787 sequence. */
7789 static int
7790 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
7792 int cost = 0;
7793 rtx last, result;
7795 start_sequence ();
7796 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7797 if (reg != result)
7798 emit_move_insn (reg, result);
7799 last = get_last_insn ();
7800 while (last)
7802 rtx t = single_set (last);
7803 if (t)
7804 cost += rtx_cost (SET_SRC (t), SET);
7805 last = PREV_INSN (last);
7807 end_sequence ();
7808 return cost;
7811 /* Test whether A * B can be computed without
7812 an actual multiply insn. Value is 1 if so.
7814 ??? This function stinks because it generates a ton of wasted RTL
7815 ??? and as a result fragments GC memory to no end. There are other
7816 ??? places in the compiler which are invoked a lot and do the same
7817 ??? thing, generate wasted RTL just to see if something is possible. */
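/* Roughly: a multiply by a constant such as 4 or 5 usually expands to one
   or two shift/add insns and is therefore considered cheap, while a
   constant that needs a real MULT pattern, a library call, or more than
   three insns makes this function return 0.  */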
7819 static int
7820 product_cheap_p (rtx a, rtx b)
7822 rtx tmp;
7823 int win, n_insns;
7825 /* If only one is constant, make it B. */
7826 if (GET_CODE (a) == CONST_INT)
7827 tmp = a, a = b, b = tmp;
7829 /* If first constant, both constant, so don't need multiply. */
7830 if (GET_CODE (a) == CONST_INT)
7831 return 1;
7833 /* If second not constant, neither is constant, so would need multiply. */
7834 if (GET_CODE (b) != CONST_INT)
7835 return 0;
7837 /* One operand is constant, so might not need multiply insn. Generate the
7838 code for the multiply and see if a call or multiply, or long sequence
7839 of insns is generated. */
7841 start_sequence ();
7842 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7843 tmp = get_insns ();
7844 end_sequence ();
7846 win = 1;
7847 if (INSN_P (tmp))
7849 n_insns = 0;
7850 while (tmp != NULL_RTX)
7852 rtx next = NEXT_INSN (tmp);
7854 if (++n_insns > 3
7855 || !NONJUMP_INSN_P (tmp)
7856 || (GET_CODE (PATTERN (tmp)) == SET
7857 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7858 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7859 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7860 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7862 win = 0;
7863 break;
7866 tmp = next;
7869 else if (GET_CODE (tmp) == SET
7870 && GET_CODE (SET_SRC (tmp)) == MULT)
7871 win = 0;
7872 else if (GET_CODE (tmp) == PARALLEL
7873 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7874 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7875 win = 0;
7877 return win;
7880 /* Check to see if loop can be terminated by a "decrement and branch until
7881 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7882 Also try reversing an increment loop to a decrement loop
7883 to see if the optimization can be performed.
7884 Value is nonzero if optimization was performed. */
7886 /* This is useful even if the architecture doesn't have such an insn,
7887 because it might change a loop which increments from 0 to n to a loop
7888 which decrements from n to 0. A loop that decrements to zero is usually
7889 faster than one that increments from zero. */
7891 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7892 such as approx_final_value, biv_total_increment, loop_iterations, and
7893 final_[bg]iv_value. */
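/* As an illustrative sketch (assuming the counter is used for nothing but
   counting), the reversal turns

	for (i = 0; i < n; i++)
	  do_something ();

   into the equivalent count-down form

	for (i = n - 1; i >= 0; i--)
	  do_something ();

   which still runs the body n times but ends with a compare against zero
   and, with the REG_NONNEG note, can use a decrement-and-branch
   instruction on targets that have one.  */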
7895 static int
7896 check_dbra_loop (struct loop *loop, int insn_count)
7898 struct loop_info *loop_info = LOOP_INFO (loop);
7899 struct loop_regs *regs = LOOP_REGS (loop);
7900 struct loop_ivs *ivs = LOOP_IVS (loop);
7901 struct iv_class *bl;
7902 rtx reg;
7903 enum machine_mode mode;
7904 rtx jump_label;
7905 rtx final_value;
7906 rtx start_value;
7907 rtx new_add_val;
7908 rtx comparison;
7909 rtx before_comparison;
7910 rtx p;
7911 rtx jump;
7912 rtx first_compare;
7913 int compare_and_branch;
7914 rtx loop_start = loop->start;
7915 rtx loop_end = loop->end;
7917 /* If last insn is a conditional branch, and the insn before tests a
7918 register value, try to optimize it. Otherwise, we can't do anything. */
7920 jump = PREV_INSN (loop_end);
7921 comparison = get_condition_for_loop (loop, jump);
7922 if (comparison == 0)
7923 return 0;
7924 if (!onlyjump_p (jump))
7925 return 0;
7927 /* Try to compute whether the compare/branch at the loop end is one or
7928 two instructions. */
7929 get_condition (jump, &first_compare, false, true);
7930 if (first_compare == jump)
7931 compare_and_branch = 1;
7932 else if (first_compare == prev_nonnote_insn (jump))
7933 compare_and_branch = 2;
7934 else
7935 return 0;
7938 /* If more than one condition is present to control the loop, then
7939 do not proceed, as this function does not know how to rewrite
7940 loop tests with more than one condition.
7942 Look backwards from the first insn in the last comparison
7943 sequence and see if we've got another comparison sequence. */
7945 rtx jump1;
7946 if ((jump1 = prev_nonnote_insn (first_compare))
7947 && JUMP_P (jump1))
7948 return 0;
7951 /* Check all of the bivs to see if the compare uses one of them.
7952 Skip biv's set more than once because we can't guarantee that
7953 it will be zero on the last iteration. Also skip if the biv is
7954 used between its update and the test insn. */
7956 for (bl = ivs->list; bl; bl = bl->next)
7958 if (bl->biv_count == 1
7959 && ! bl->biv->maybe_multiple
7960 && bl->biv->dest_reg == XEXP (comparison, 0)
7961 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7962 first_compare))
7963 break;
7966 /* Try swapping the comparison to identify a suitable biv. */
7967 if (!bl)
7968 for (bl = ivs->list; bl; bl = bl->next)
7969 if (bl->biv_count == 1
7970 && ! bl->biv->maybe_multiple
7971 && bl->biv->dest_reg == XEXP (comparison, 1)
7972 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7973 first_compare))
7975 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
7976 VOIDmode,
7977 XEXP (comparison, 1),
7978 XEXP (comparison, 0));
7979 break;
7982 if (! bl)
7983 return 0;
7985 /* Look for the case where the basic induction variable is always
7986 nonnegative, and equals zero on the last iteration.
7987 In this case, add a reg_note REG_NONNEG, which allows the
7988 m68k DBRA instruction to be used. */
7990 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
7991 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7992 && GET_CODE (bl->biv->add_val) == CONST_INT
7993 && INTVAL (bl->biv->add_val) < 0)
7995 /* Initial value must be greater than 0,
7996 init_val % -dec_value == 0 to ensure that it equals zero on
7997 the last iteration */
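/* Worked example: initial value 12, add_val -4, exit test `i != 0'.
   The biv takes the values 12, 8, 4, 0; it is never negative and is
   exactly zero when the loop exits, and 12 % 4 == 0 confirms this,
   so the REG_NONNEG note may be added.  */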
7999 if (GET_CODE (bl->initial_value) == CONST_INT
8000 && INTVAL (bl->initial_value) > 0
8001 && (INTVAL (bl->initial_value)
8002 % (-INTVAL (bl->biv->add_val))) == 0)
8004 /* Register always nonnegative, add REG_NOTE to branch. */
8005 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8006 REG_NOTES (jump)
8007 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8008 REG_NOTES (jump));
8009 bl->nonneg = 1;
8011 return 1;
8014 /* If the decrement is 1 and the value was tested as >= 0 before
8015 the loop, then we can safely optimize. */
8016 for (p = loop_start; p; p = PREV_INSN (p))
8018 if (LABEL_P (p))
8019 break;
8020 if (!JUMP_P (p))
8021 continue;
8023 before_comparison = get_condition_for_loop (loop, p);
8024 if (before_comparison
8025 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8026 && (GET_CODE (before_comparison) == LT
8027 || GET_CODE (before_comparison) == LTU)
8028 && XEXP (before_comparison, 1) == const0_rtx
8029 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8030 && INTVAL (bl->biv->add_val) == -1)
8032 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8033 REG_NOTES (jump)
8034 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8035 REG_NOTES (jump));
8036 bl->nonneg = 1;
8038 return 1;
8042 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8043 && INTVAL (bl->biv->add_val) > 0)
8045 /* Try to change inc to dec, so can apply above optimization. */
8046 /* Can do this if:
8047 all registers modified are induction variables or invariant,
8048 all memory references have non-overlapping addresses
8049 (obviously true if only one write)
8050 allow 2 insns for the compare/jump at the end of the loop. */
8051 /* Also, we must avoid any instructions which use both the reversed
8052 biv and another biv. Such instructions will fail if the loop is
8053 reversed. We meet this condition by requiring that either
8054 no_use_except_counting is true, or else that there is only
8055 one biv. */
8056 int num_nonfixed_reads = 0;
8057 /* 1 if the iteration var is used only to count iterations. */
8058 int no_use_except_counting = 0;
8059 /* 1 if the loop has no memory store, or it has a single memory store
8060 which is reversible. */
8061 int reversible_mem_store = 1;
8063 if (bl->giv_count == 0
8064 && !loop->exit_count
8065 && !loop_info->has_multiple_exit_targets)
8067 rtx bivreg = regno_reg_rtx[bl->regno];
8068 struct iv_class *blt;
8070 /* If there are no givs for this biv, and the only exit is the
8071 fall through at the end of the loop, then
8072 see if perhaps there are no uses except to count. */
8073 no_use_except_counting = 1;
8074 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8075 if (INSN_P (p))
8077 rtx set = single_set (p);
8079 if (set && REG_P (SET_DEST (set))
8080 && REGNO (SET_DEST (set)) == bl->regno)
8081 /* An insn that sets the biv is okay. */
8083 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
8084 /* An insn that doesn't mention the biv is okay. */
8086 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8087 || p == prev_nonnote_insn (loop_end))
8089 /* If either of these insns uses the biv and sets a pseudo
8090 that has more than one usage, then the biv has uses
8091 other than counting since it's used to derive a value
8092 that is used more than one time. */
8093 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8094 regs);
8095 if (regs->multiple_uses)
8097 no_use_except_counting = 0;
8098 break;
8101 else
8103 no_use_except_counting = 0;
8104 break;
8108 /* A biv has uses besides counting if it is used to set
8109 another biv. */
8110 for (blt = ivs->list; blt; blt = blt->next)
8111 if (blt->init_set
8112 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8114 no_use_except_counting = 0;
8115 break;
8119 if (no_use_except_counting)
8120 /* No need to worry about MEMs. */
8122 else if (loop_info->num_mem_sets <= 1)
8124 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8125 if (INSN_P (p))
8126 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8128 /* If the loop has a single store, and the destination address is
8129 invariant, then we can't reverse the loop, because this address
8130 might then have the wrong value at loop exit.
8131 This would work if the source was invariant also, however, in that
8132 case, the insn should have been moved out of the loop. */
8134 if (loop_info->num_mem_sets == 1)
8136 struct induction *v;
8138 /* If we could prove that each of the memory locations
8139 written to was different, then we could reverse the
8140 store -- but we don't presently have any way of
8141 knowing that. */
8142 reversible_mem_store = 0;
8144 /* If the store depends on a register that is set after the
8145 store, it depends on the initial value, and is thus not
8146 reversible. */
8147 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8149 if (v->giv_type == DEST_REG
8150 && reg_mentioned_p (v->dest_reg,
8151 PATTERN (loop_info->first_loop_store_insn))
8152 && loop_insn_first_p (loop_info->first_loop_store_insn,
8153 v->insn))
8154 reversible_mem_store = 0;
8158 else
8159 return 0;
8161 /* This code only acts for innermost loops. Also it simplifies
8162 the memory address check by only reversing loops with
8163 zero or one memory access.
8164 Two memory accesses could involve parts of the same array,
8165 and that can't be reversed.
8166 If the biv is used only for counting, then we don't need to worry
8167 about all these things. */
8169 if ((num_nonfixed_reads <= 1
8170 && ! loop_info->has_nonconst_call
8171 && ! loop_info->has_prefetch
8172 && ! loop_info->has_volatile
8173 && reversible_mem_store
8174 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8175 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8176 && (bl == ivs->list && bl->next == 0))
8177 || (no_use_except_counting && ! loop_info->has_prefetch))
8179 rtx tem;
8181 /* Loop can be reversed. */
8182 if (loop_dump_stream)
8183 fprintf (loop_dump_stream, "Can reverse loop\n");
8185 /* Now check other conditions:
8187 The increment must be a constant, as must the initial value,
8188 and the comparison code must be LT.
8190 This test can probably be improved since +/- 1 in the constant
8191 can be obtained by changing LT to LE and vice versa; this is
8192 confusing. */
8194 if (comparison
8195 /* for constants, LE gets turned into LT */
8196 && (GET_CODE (comparison) == LT
8197 || (GET_CODE (comparison) == LE
8198 && no_use_except_counting)
8199 || GET_CODE (comparison) == LTU))
8201 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8202 rtx initial_value, comparison_value;
8203 int nonneg = 0;
8204 enum rtx_code cmp_code;
8205 int comparison_const_width;
8206 unsigned HOST_WIDE_INT comparison_sign_mask;
8207 bool keep_first_compare;
8209 add_val = INTVAL (bl->biv->add_val);
8210 comparison_value = XEXP (comparison, 1);
8211 if (GET_MODE (comparison_value) == VOIDmode)
8212 comparison_const_width
8213 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8214 else
8215 comparison_const_width
8216 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8217 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8218 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8219 comparison_sign_mask
8220 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8222 /* If the comparison value is not a loop invariant, then we
8223 can not reverse this loop.
8225 ??? If the insns which initialize the comparison value as
8226 a whole compute an invariant result, then we could move
8227 them out of the loop and proceed with loop reversal. */
8228 if (! loop_invariant_p (loop, comparison_value))
8229 return 0;
8231 if (GET_CODE (comparison_value) == CONST_INT)
8232 comparison_val = INTVAL (comparison_value);
8233 initial_value = bl->initial_value;
8235 /* Normalize the initial value if it is an integer and
8236 has no other use except as a counter. This will allow
8237 a few more loops to be reversed. */
8238 if (no_use_except_counting
8239 && GET_CODE (comparison_value) == CONST_INT
8240 && GET_CODE (initial_value) == CONST_INT)
8242 comparison_val = comparison_val - INTVAL (bl->initial_value);
8243 /* The code below requires comparison_val to be a multiple
8244 of add_val in order to do the loop reversal, so
8245 round up comparison_val to a multiple of add_val.
8246 Since comparison_value is constant, we know that the
8247 current comparison code is LT. */
8248 comparison_val = comparison_val + add_val - 1;
8249 comparison_val
8250 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8251 /* We postpone overflow checks for COMPARISON_VAL here;
8252 even if there is an overflow, we might still be able to
8253 reverse the loop, if converting the loop exit test to
8254 NE is possible. */
8255 initial_value = const0_rtx;
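/* Worked example of the normalization: initial value 3, add_val 2,
   exit test `i < 12'.  comparison_val becomes 12 - 3 = 9, is rounded
   up to 10 (the next multiple of add_val), and the initial value
   becomes 0.  The normalized loop 0, 2, 4, 6, 8 runs five times,
   just like the original 3, 5, 7, 9, 11.  */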
8258 /* First check if we can do a vanilla loop reversal. */
8259 if (initial_value == const0_rtx
8260 && GET_CODE (comparison_value) == CONST_INT
8261 /* Now do postponed overflow checks on COMPARISON_VAL. */
8262 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8263 & comparison_sign_mask))
8265 /* Register will always be nonnegative, with value
8266 0 on last iteration */
8267 add_adjust = add_val;
8268 nonneg = 1;
8269 cmp_code = GE;
8271 else
8272 return 0;
8274 if (GET_CODE (comparison) == LE)
8275 add_adjust -= add_val;
8277 /* If the initial value is not zero, or if the comparison
8278 value is not an exact multiple of the increment, then we
8279 can not reverse this loop. */
8280 if (initial_value == const0_rtx
8281 && GET_CODE (comparison_value) == CONST_INT)
8283 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8284 return 0;
8286 else
8288 if (! no_use_except_counting || add_val != 1)
8289 return 0;
8292 final_value = comparison_value;
8294 /* Reset these in case we normalized the initial value
8295 and comparison value above. */
8296 if (GET_CODE (comparison_value) == CONST_INT
8297 && GET_CODE (initial_value) == CONST_INT)
8299 comparison_value = GEN_INT (comparison_val);
8300 final_value
8301 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8303 bl->initial_value = initial_value;
8305 /* Save some info needed to produce the new insns. */
8306 reg = bl->biv->dest_reg;
8307 mode = GET_MODE (reg);
8308 jump_label = condjump_label (PREV_INSN (loop_end));
8309 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8311 /* Set start_value; if this is not a CONST_INT, we need
8312 to generate a SUB.
8313 Initialize biv to start_value before loop start.
8314 The old initializing insn will be deleted as a
8315 dead store by flow.c. */
8316 if (initial_value == const0_rtx
8317 && GET_CODE (comparison_value) == CONST_INT)
8319 start_value
8320 = gen_int_mode (comparison_val - add_adjust, mode);
8321 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8323 else if (GET_CODE (initial_value) == CONST_INT)
8325 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8326 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8328 if (add_insn == 0)
8329 return 0;
8331 start_value
8332 = gen_rtx_PLUS (mode, comparison_value, offset);
8333 loop_insn_hoist (loop, add_insn);
8334 if (GET_CODE (comparison) == LE)
8335 final_value = gen_rtx_PLUS (mode, comparison_value,
8336 GEN_INT (add_val));
8338 else if (! add_adjust)
8340 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8341 initial_value);
8343 if (sub_insn == 0)
8344 return 0;
8345 start_value
8346 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8347 loop_insn_hoist (loop, sub_insn);
8349 else
8350 /* We could handle the other cases too, but it'll be
8351 better to have a testcase first. */
8352 return 0;
8354 /* We may not have a single insn which can increment a reg, so
8355 create a sequence to hold all the insns from expand_inc. */
8356 start_sequence ();
8357 expand_inc (reg, new_add_val);
8358 tem = get_insns ();
8359 end_sequence ();
8361 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8362 delete_insn (bl->biv->insn);
8364 /* Update biv info to reflect its new status. */
8365 bl->biv->insn = p;
8366 bl->initial_value = start_value;
8367 bl->biv->add_val = new_add_val;
8369 /* Update loop info. */
8370 loop_info->initial_value = reg;
8371 loop_info->initial_equiv_value = reg;
8372 loop_info->final_value = const0_rtx;
8373 loop_info->final_equiv_value = const0_rtx;
8374 loop_info->comparison_value = const0_rtx;
8375 loop_info->comparison_code = cmp_code;
8376 loop_info->increment = new_add_val;
8378 /* Inc LABEL_NUSES so that delete_insn will
8379 not delete the label. */
8380 LABEL_NUSES (XEXP (jump_label, 0))++;
8382 /* If we have a separate comparison insn that does more
8383 than just set cc0, the result of the comparison might
8384 be used outside the loop. */
8385 keep_first_compare = (compare_and_branch == 2
8386 #ifdef HAVE_CC0
8387 && sets_cc0_p (first_compare) <= 0
8388 #endif
8391 /* Emit an insn after the end of the loop to set the biv's
8392 proper exit value if it is used anywhere outside the loop. */
8393 if (keep_first_compare
8394 || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8395 || ! bl->init_insn
8396 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8397 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8399 if (keep_first_compare)
8400 loop_insn_sink (loop, PATTERN (first_compare));
8402 /* Delete compare/branch at end of loop. */
8403 delete_related_insns (PREV_INSN (loop_end));
8404 if (compare_and_branch == 2)
8405 delete_related_insns (first_compare);
8407 /* Add new compare/branch insn at end of loop. */
8408 start_sequence ();
8409 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8410 mode, 0,
8411 XEXP (jump_label, 0));
8412 tem = get_insns ();
8413 end_sequence ();
8414 emit_jump_insn_before (tem, loop_end);
8416 for (tem = PREV_INSN (loop_end);
8417 tem && !JUMP_P (tem);
8418 tem = PREV_INSN (tem))
8421 if (tem)
8422 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8424 if (nonneg)
8426 if (tem)
8428 /* Increment of LABEL_NUSES done above. */
8429 /* Register is now always nonnegative,
8430 so add REG_NONNEG note to the branch. */
8431 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8432 REG_NOTES (tem));
8434 bl->nonneg = 1;
8437 /* No insn may reference both the reversed and another biv or it
8438 will fail (see comment near the top of the loop reversal
8439 code).
8440 Earlier on, we have verified that the biv has no use except
8441 counting, or it is the only biv in this loop.
8442 However, the code that computes no_use_except_counting does
8443 not verify reg notes. It's possible to have an insn that
8444 references another biv, and has a REG_EQUAL note with an
8445 expression based on the reversed biv. To avoid this case,
8446 remove all REG_EQUAL notes based on the reversed biv
8447 here. */
8448 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8449 if (INSN_P (p))
8451 rtx *pnote;
8452 rtx set = single_set (p);
8453 /* If this is a set of a GIV based on the reversed biv, any
8454 REG_EQUAL notes should still be correct. */
8455 if (! set
8456 || !REG_P (SET_DEST (set))
8457 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8458 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8459 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8460 for (pnote = &REG_NOTES (p); *pnote;)
8462 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8463 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8464 XEXP (*pnote, 0)))
8465 *pnote = XEXP (*pnote, 1);
8466 else
8467 pnote = &XEXP (*pnote, 1);
8471 /* Mark that this biv has been reversed. Each giv which depends
8472 on this biv, and which is also live past the end of the loop
8473 will have to be fixed up. */
8475 bl->reversed = 1;
8477 if (loop_dump_stream)
8479 fprintf (loop_dump_stream, "Reversed loop");
8480 if (bl->nonneg)
8481 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8482 else
8483 fprintf (loop_dump_stream, "\n");
8486 return 1;
8491 return 0;
8494 /* Verify whether the biv BL appears to be eliminable,
8495 based on the insns in the loop that refer to it.
8497 If ELIMINATE_P is nonzero, actually do the elimination.
8499 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8500 determine whether invariant insns should be placed inside or at the
8501 start of the loop. */
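/* A rough sketch of a successful elimination: if the biv I appears only
   in the exit comparison `i < n' and a reduced giv exists for the
   address p = base + 4*i, the comparison can be rewritten as
   `p < base + 4*n' (the new bound is computed once, outside the loop),
   after which no insn in the loop uses I and it can be deleted.  */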
8503 static int
8504 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
8505 int eliminate_p, int threshold, int insn_count)
8507 struct loop_ivs *ivs = LOOP_IVS (loop);
8508 rtx reg = bl->biv->dest_reg;
8509 rtx p;
8511 /* Scan all insns in the loop, stopping if we find one that uses the
8512 biv in a way that we cannot eliminate. */
8514 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8516 enum rtx_code code = GET_CODE (p);
8517 basic_block where_bb = 0;
8518 rtx where_insn = threshold >= insn_count ? 0 : p;
8519 rtx note;
8521 /* If this is a libcall that sets a giv, skip ahead to its end. */
8522 if (INSN_P (p))
8524 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8526 if (note)
8528 rtx last = XEXP (note, 0);
8529 rtx set = single_set (last);
8531 if (set && REG_P (SET_DEST (set)))
8533 unsigned int regno = REGNO (SET_DEST (set));
8535 if (regno < ivs->n_regs
8536 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8537 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8538 p = last;
8543 /* Closely examine the insn if the biv is mentioned. */
8544 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8545 && reg_mentioned_p (reg, PATTERN (p))
8546 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8547 eliminate_p, where_bb, where_insn))
8549 if (loop_dump_stream)
8550 fprintf (loop_dump_stream,
8551 "Cannot eliminate biv %d: biv used in insn %d.\n",
8552 bl->regno, INSN_UID (p));
8553 break;
8556 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
8557 if (eliminate_p
8558 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
8559 && reg_mentioned_p (reg, XEXP (note, 0)))
8560 remove_note (p, note);
8563 if (p == loop->end)
8565 if (loop_dump_stream)
8566 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8567 bl->regno, eliminate_p ? "was" : "can be");
8568 return 1;
8571 return 0;
8574 /* INSN and REFERENCE are instructions in the same insn chain.
8575 Return nonzero if INSN is first. */
8577 int
8578 loop_insn_first_p (rtx insn, rtx reference)
8580 rtx p, q;
8582 for (p = insn, q = reference;;)
8584 /* Start with test for not first so that INSN == REFERENCE yields not
8585 first. */
8586 if (q == insn || ! p)
8587 return 0;
8588 if (p == reference || ! q)
8589 return 1;
8591 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8592 previous insn, hence the <= comparison below does not work if
8593 P is a note. */
8594 if (INSN_UID (p) < max_uid_for_loop
8595 && INSN_UID (q) < max_uid_for_loop
8596 && !NOTE_P (p))
8597 return INSN_LUID (p) <= INSN_LUID (q);
8599 if (INSN_UID (p) >= max_uid_for_loop
8600 || NOTE_P (p))
8601 p = NEXT_INSN (p);
8602 if (INSN_UID (q) >= max_uid_for_loop)
8603 q = NEXT_INSN (q);
8607 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
8608 the offset that we have to take into account due to auto-increment /
8609 div derivation is zero. */
8610 static int
8611 biv_elimination_giv_has_0_offset (struct induction *biv,
8612 struct induction *giv, rtx insn)
8614 /* If the giv V had the auto-inc address optimization applied
8615 to it, and INSN occurs between the giv insn and the biv
8616 insn, then we'd have to adjust the value used here.
8617 This is rare, so we don't bother to make this possible. */
8618 if (giv->auto_inc_opt
8619 && ((loop_insn_first_p (giv->insn, insn)
8620 && loop_insn_first_p (insn, biv->insn))
8621 || (loop_insn_first_p (biv->insn, insn)
8622 && loop_insn_first_p (insn, giv->insn))))
8623 return 0;
8625 return 1;
8628 /* If BL appears in X (part of the pattern of INSN), see if we can
8629 eliminate its use. If so, return 1. If not, return 0.
8631 If BIV does not appear in X, return 1.
8633 If ELIMINATE_P is nonzero, actually do the elimination.
8634 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8635 Depending on how many items have been moved out of the loop, it
8636 will either be before INSN (when WHERE_INSN is nonzero) or at the
8637 start of the loop (when WHERE_INSN is zero). */
8639 static int
8640 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
8641 struct iv_class *bl, int eliminate_p,
8642 basic_block where_bb, rtx where_insn)
8644 enum rtx_code code = GET_CODE (x);
8645 rtx reg = bl->biv->dest_reg;
8646 enum machine_mode mode = GET_MODE (reg);
8647 struct induction *v;
8648 rtx arg, tem;
8649 #ifdef HAVE_cc0
8650 rtx new;
8651 #endif
8652 int arg_operand;
8653 const char *fmt;
8654 int i, j;
8656 switch (code)
8658 case REG:
8659 /* If we haven't already been able to do something with this BIV,
8660 we can't eliminate it. */
8661 if (x == reg)
8662 return 0;
8663 return 1;
8665 case SET:
8666 /* If this sets the BIV, it is not a problem. */
8667 if (SET_DEST (x) == reg)
8668 return 1;
8670 /* If this is an insn that defines a giv, it is also ok because
8671 it will go away when the giv is reduced. */
8672 for (v = bl->giv; v; v = v->next_iv)
8673 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8674 return 1;
8676 #ifdef HAVE_cc0
8677 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8679 /* Can replace with any giv that was reduced and
8680 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8681 Require a constant for MULT_VAL, so we know it's nonzero.
8682 ??? We disable this optimization to avoid potential
8683 overflows. */
8685 for (v = bl->giv; v; v = v->next_iv)
8686 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8687 && v->add_val == const0_rtx
8688 && ! v->ignore && ! v->maybe_dead && v->always_computable
8689 && v->mode == mode
8690 && 0)
8692 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8693 continue;
8695 if (! eliminate_p)
8696 return 1;
8698 /* If the giv has the opposite direction of change,
8699 then reverse the comparison. */
8700 if (INTVAL (v->mult_val) < 0)
8701 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8702 const0_rtx, v->new_reg);
8703 else
8704 new = v->new_reg;
8706 /* We can probably test that giv's reduced reg. */
8707 if (validate_change (insn, &SET_SRC (x), new, 0))
8708 return 1;
8711 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8712 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8713 Require a constant for MULT_VAL, so we know it's nonzero.
8714 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8715 overflow problem. */
8717 for (v = bl->giv; v; v = v->next_iv)
8718 if (GET_CODE (v->mult_val) == CONST_INT
8719 && v->mult_val != const0_rtx
8720 && ! v->ignore && ! v->maybe_dead && v->always_computable
8721 && v->mode == mode
8722 && (GET_CODE (v->add_val) == SYMBOL_REF
8723 || GET_CODE (v->add_val) == LABEL_REF
8724 || GET_CODE (v->add_val) == CONST
8725 || (REG_P (v->add_val)
8726 && REG_POINTER (v->add_val))))
8728 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8729 continue;
8731 if (! eliminate_p)
8732 return 1;
8734 /* If the giv has the opposite direction of change,
8735 then reverse the comparison. */
8736 if (INTVAL (v->mult_val) < 0)
8737 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8738 v->new_reg);
8739 else
8740 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8741 copy_rtx (v->add_val));
8743 /* Replace biv with the giv's reduced register. */
8744 update_reg_last_use (v->add_val, insn);
8745 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8746 return 1;
8748 /* Insn doesn't support that constant or invariant. Copy it
8749 into a register (it will be a loop invariant.) */
8750 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8752 loop_insn_emit_before (loop, 0, where_insn,
8753 gen_move_insn (tem,
8754 copy_rtx (v->add_val)));
8756 /* Substitute the new register for its invariant value in
8757 the compare expression. */
8758 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8759 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8760 return 1;
8763 #endif
8764 break;
8766 case COMPARE:
8767 case EQ: case NE:
8768 case GT: case GE: case GTU: case GEU:
8769 case LT: case LE: case LTU: case LEU:
8770 /* See if either argument is the biv. */
8771 if (XEXP (x, 0) == reg)
8772 arg = XEXP (x, 1), arg_operand = 1;
8773 else if (XEXP (x, 1) == reg)
8774 arg = XEXP (x, 0), arg_operand = 0;
8775 else
8776 break;
8778 if (CONSTANT_P (arg))
8780 /* First try to replace with any giv that has constant positive
8781 mult_val and constant add_val. We might be able to support
8782 negative mult_val, but it seems complex to do it in general. */
8784 for (v = bl->giv; v; v = v->next_iv)
8785 if (GET_CODE (v->mult_val) == CONST_INT
8786 && INTVAL (v->mult_val) > 0
8787 && (GET_CODE (v->add_val) == SYMBOL_REF
8788 || GET_CODE (v->add_val) == LABEL_REF
8789 || GET_CODE (v->add_val) == CONST
8790 || (REG_P (v->add_val)
8791 && REG_POINTER (v->add_val)))
8792 && ! v->ignore && ! v->maybe_dead && v->always_computable
8793 && v->mode == mode)
8795 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8796 continue;
8798 /* Don't eliminate if the linear combination that makes up
8799 the giv overflows when it is applied to ARG. */
8800 if (GET_CODE (arg) == CONST_INT)
8802 rtx add_val;
8804 if (GET_CODE (v->add_val) == CONST_INT)
8805 add_val = v->add_val;
8806 else
8807 add_val = const0_rtx;
8809 if (const_mult_add_overflow_p (arg, v->mult_val,
8810 add_val, mode, 1))
8811 continue;
8814 if (! eliminate_p)
8815 return 1;
8817 /* Replace biv with the giv's reduced reg. */
8818 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8820 /* If all constants are actually constant integers and
8821 the derived constant can be directly placed in the COMPARE,
8822 do so. */
8823 if (GET_CODE (arg) == CONST_INT
8824 && GET_CODE (v->add_val) == CONST_INT)
8826 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8827 v->add_val, mode, 1);
8829 else
8831 /* Otherwise, load it into a register. */
8832 tem = gen_reg_rtx (mode);
8833 loop_iv_add_mult_emit_before (loop, arg,
8834 v->mult_val, v->add_val,
8835 tem, where_bb, where_insn);
8838 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8840 if (apply_change_group ())
8841 return 1;
8844 /* Look for giv with positive constant mult_val and nonconst add_val.
8845 Insert insns to calculate new compare value.
8846 ??? Turn this off due to possible overflow. */
8848 for (v = bl->giv; v; v = v->next_iv)
8849 if (GET_CODE (v->mult_val) == CONST_INT
8850 && INTVAL (v->mult_val) > 0
8851 && ! v->ignore && ! v->maybe_dead && v->always_computable
8852 && v->mode == mode
8853 && 0)
8855 rtx tem;
8857 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8858 continue;
8860 if (! eliminate_p)
8861 return 1;
8863 tem = gen_reg_rtx (mode);
8865 /* Replace biv with giv's reduced register. */
8866 validate_change (insn, &XEXP (x, 1 - arg_operand),
8867 v->new_reg, 1);
8869 /* Compute value to compare against. */
8870 loop_iv_add_mult_emit_before (loop, arg,
8871 v->mult_val, v->add_val,
8872 tem, where_bb, where_insn);
8873 /* Use it in this insn. */
8874 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8875 if (apply_change_group ())
8876 return 1;
8879 else if (REG_P (arg) || MEM_P (arg))
8881 if (loop_invariant_p (loop, arg) == 1)
8883 /* Look for giv with constant positive mult_val and nonconst
8884 add_val. Insert insns to compute new compare value.
8885 ??? Turn this off due to possible overflow. */
8887 for (v = bl->giv; v; v = v->next_iv)
8888 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8889 && ! v->ignore && ! v->maybe_dead && v->always_computable
8890 && v->mode == mode
8891 && 0)
8893 rtx tem;
8895 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8896 continue;
8898 if (! eliminate_p)
8899 return 1;
8901 tem = gen_reg_rtx (mode);
8903 /* Replace biv with giv's reduced register. */
8904 validate_change (insn, &XEXP (x, 1 - arg_operand),
8905 v->new_reg, 1);
8907 /* Compute value to compare against. */
8908 loop_iv_add_mult_emit_before (loop, arg,
8909 v->mult_val, v->add_val,
8910 tem, where_bb, where_insn);
8911 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8912 if (apply_change_group ())
8913 return 1;
8917 /* This code has problems. Basically, you can't know when
8918 seeing if we will eliminate BL, whether a particular giv
8919 of ARG will be reduced. If it isn't going to be reduced,
8920 we can't eliminate BL. We can try forcing it to be reduced,
8921 but that can generate poor code.
8923 The problem is that the benefit of reducing TV, below should
8924 be increased if BL can actually be eliminated, but this means
8925 we might have to do a topological sort of the order in which
8926 we try to process biv. It doesn't seem worthwhile to do
8927 this sort of thing now. */
8929 #if 0
8930 /* Otherwise the reg compared with had better be a biv. */
8931 if (!REG_P (arg)
8932 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8933 return 0;
8935 /* Look for a pair of givs, one for each biv,
8936 with identical coefficients. */
8937 for (v = bl->giv; v; v = v->next_iv)
8939 struct induction *tv;
8941 if (v->ignore || v->maybe_dead || v->mode != mode)
8942 continue;
8944 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8945 tv = tv->next_iv)
8946 if (! tv->ignore && ! tv->maybe_dead
8947 && rtx_equal_p (tv->mult_val, v->mult_val)
8948 && rtx_equal_p (tv->add_val, v->add_val)
8949 && tv->mode == mode)
8951 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8952 continue;
8954 if (! eliminate_p)
8955 return 1;
8957 /* Replace biv with its giv's reduced reg. */
8958 XEXP (x, 1 - arg_operand) = v->new_reg;
8959 /* Replace other operand with the other giv's
8960 reduced reg. */
8961 XEXP (x, arg_operand) = tv->new_reg;
8962 return 1;
8965 #endif
8968 /* If we get here, the biv can't be eliminated. */
8969 return 0;
8971 case MEM:
8972 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8973 biv is used in it, since it will be replaced. */
8974 for (v = bl->giv; v; v = v->next_iv)
8975 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8976 return 1;
8977 break;
8979 default:
8980 break;
8983 /* See if any subexpression fails elimination. */
8984 fmt = GET_RTX_FORMAT (code);
8985 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8987 switch (fmt[i])
8989 case 'e':
8990 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8991 eliminate_p, where_bb, where_insn))
8992 return 0;
8993 break;
8995 case 'E':
8996 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8997 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8998 eliminate_p, where_bb, where_insn))
8999 return 0;
9000 break;
9004 return 1;
9007 /* Return nonzero if the last use of REG
9008 is in an insn following INSN in the same basic block. */
9010 static int
9011 last_use_this_basic_block (rtx reg, rtx insn)
9013 rtx n;
9014 for (n = insn;
9015 n && !LABEL_P (n) && !JUMP_P (n);
9016 n = NEXT_INSN (n))
9018 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9019 return 1;
9021 return 0;
9024 /* Called via `note_stores' to record the initial value of a biv. Here we
9025 just record the location of the set and process it later. */
9027 static void
9028 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
9030 struct loop_ivs *ivs = (struct loop_ivs *) data;
9031 struct iv_class *bl;
9033 if (!REG_P (dest)
9034 || REGNO (dest) >= ivs->n_regs
9035 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9036 return;
9038 bl = REG_IV_CLASS (ivs, REGNO (dest));
9040 /* If this is the first set found, record it. */
9041 if (bl->init_insn == 0)
9043 bl->init_insn = note_insn;
9044 bl->init_set = set;
9048 /* If any of the registers in X are "old" and currently have a last use earlier
9049 than INSN, update them to have a last use of INSN. Their actual last use
9050 will be the previous insn but it will not have a valid uid_luid so we can't
9051 use it. X must be a source expression only. */
9053 static void
9054 update_reg_last_use (rtx x, rtx insn)
9056 /* Check for the case where INSN does not have a valid luid. In this case,
9057 there is no need to modify the regno_last_uid, as this can only happen
9058 when code is inserted after the loop_end to set a pseudo's final value,
9059 and hence this insn will never be the last use of x.
9060 ???? This comment is not correct. See for example loop_givs_reduce.
9061 This may insert an insn before another new insn. */
9062 if (REG_P (x) && REGNO (x) < max_reg_before_loop
9063 && INSN_UID (insn) < max_uid_for_loop
9064 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9066 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9068 else
9070 int i, j;
9071 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9072 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9074 if (fmt[i] == 'e')
9075 update_reg_last_use (XEXP (x, i), insn);
9076 else if (fmt[i] == 'E')
9077 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9078 update_reg_last_use (XVECEXP (x, i, j), insn);
9083 /* Given an insn INSN and condition COND, return the condition in a
9084 canonical form to simplify testing by callers. Specifically:
9086 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9087 (2) Both operands will be machine operands; (cc0) will have been replaced.
9088 (3) If an operand is a constant, it will be the second operand.
9089 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9090 for GE, GEU, and LEU.
9092 If the condition cannot be understood, or is an inequality floating-point
9093 comparison which needs to be reversed, 0 will be returned.
9095 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
9097 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9098 insn used in locating the condition was found. If a replacement test
9099 of the condition is desired, it should be placed in front of that
9100 insn and we will be sure that the inputs are still valid.
9102 If WANT_REG is nonzero, we wish the condition to be relative to that
9103 register, if possible. Therefore, do not canonicalize the condition
9104 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
9105 to be a compare to a CC mode register.
9107 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
9108 and at INSN. */
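/* For example, (le (reg X) (const_int 4)) is returned as
   (lt (reg X) (const_int 5)), (geu (reg X) (const_int 7)) as
   (gtu (reg X) (const_int 6)), and a condition written with the constant
   first, such as (gt (const_int 0) (reg X)), comes back with the operands
   swapped and the code adjusted, i.e. (lt (reg X) (const_int 0)).  */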
9110 rtx
9111 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
9112 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
9114 enum rtx_code code;
9115 rtx prev = insn;
9116 rtx set;
9117 rtx tem;
9118 rtx op0, op1;
9119 int reverse_code = 0;
9120 enum machine_mode mode;
9122 code = GET_CODE (cond);
9123 mode = GET_MODE (cond);
9124 op0 = XEXP (cond, 0);
9125 op1 = XEXP (cond, 1);
9127 if (reverse)
9128 code = reversed_comparison_code (cond, insn);
9129 if (code == UNKNOWN)
9130 return 0;
9132 if (earliest)
9133 *earliest = insn;
9135 /* If we are comparing a register with zero, see if the register is set
9136 in the previous insn to a COMPARE or a comparison operation. Perform
9137 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9138 in cse.c */
9140 while ((GET_RTX_CLASS (code) == RTX_COMPARE
9141 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
9142 && op1 == CONST0_RTX (GET_MODE (op0))
9143 && op0 != want_reg)
9145 /* Set nonzero when we find something of interest. */
9146 rtx x = 0;
9148 #ifdef HAVE_cc0
9149 /* If comparison with cc0, import actual comparison from compare
9150 insn. */
9151 if (op0 == cc0_rtx)
9153 if ((prev = prev_nonnote_insn (prev)) == 0
9154 || !NONJUMP_INSN_P (prev)
9155 || (set = single_set (prev)) == 0
9156 || SET_DEST (set) != cc0_rtx)
9157 return 0;
9159 op0 = SET_SRC (set);
9160 op1 = CONST0_RTX (GET_MODE (op0));
9161 if (earliest)
9162 *earliest = prev;
9164 #endif
9166 /* If this is a COMPARE, pick up the two things being compared. */
9167 if (GET_CODE (op0) == COMPARE)
9169 op1 = XEXP (op0, 1);
9170 op0 = XEXP (op0, 0);
9171 continue;
9173 else if (!REG_P (op0))
9174 break;
9176 /* Go back to the previous insn. Stop if it is not an INSN. We also
9177 stop if it isn't a single set or if it has a REG_INC note because
9178 we don't want to bother dealing with it. */
9180 if ((prev = prev_nonnote_insn (prev)) == 0
9181 || !NONJUMP_INSN_P (prev)
9182 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9183 break;
9185 set = set_of (op0, prev);
9187 if (set
9188 && (GET_CODE (set) != SET
9189 || !rtx_equal_p (SET_DEST (set), op0)))
9190 break;
9192 /* If this is setting OP0, get what it sets it to if it looks
9193 relevant. */
9194 if (set)
9196 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9197 #ifdef FLOAT_STORE_FLAG_VALUE
9198 REAL_VALUE_TYPE fsfv;
9199 #endif
9201 /* ??? We may not combine comparisons done in a CCmode with
9202 comparisons not done in a CCmode. This is to aid targets
9203 like Alpha that have an IEEE compliant EQ instruction, and
9204 a non-IEEE compliant BEQ instruction. The use of CCmode is
9205 actually artificial, simply to prevent the combination, but
9206 should not affect other platforms.
9208 However, we must allow VOIDmode comparisons to match either
9209 CCmode or non-CCmode comparison, because some ports have
9210 modeless comparisons inside branch patterns.
9212 ??? This mode check should perhaps look more like the mode check
9213 in simplify_comparison in combine. */
9215 if ((GET_CODE (SET_SRC (set)) == COMPARE
9216 || (((code == NE
9217 || (code == LT
9218 && GET_MODE_CLASS (inner_mode) == MODE_INT
9219 && (GET_MODE_BITSIZE (inner_mode)
9220 <= HOST_BITS_PER_WIDE_INT)
9221 && (STORE_FLAG_VALUE
9222 & ((HOST_WIDE_INT) 1
9223 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9224 #ifdef FLOAT_STORE_FLAG_VALUE
9225 || (code == LT
9226 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9227 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9228 REAL_VALUE_NEGATIVE (fsfv)))
9229 #endif
9231 && COMPARISON_P (SET_SRC (set))))
9232 && (((GET_MODE_CLASS (mode) == MODE_CC)
9233 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9234 || mode == VOIDmode || inner_mode == VOIDmode))
9235 x = SET_SRC (set);
9236 else if (((code == EQ
9237 || (code == GE
9238 && (GET_MODE_BITSIZE (inner_mode)
9239 <= HOST_BITS_PER_WIDE_INT)
9240 && GET_MODE_CLASS (inner_mode) == MODE_INT
9241 && (STORE_FLAG_VALUE
9242 & ((HOST_WIDE_INT) 1
9243 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9244 #ifdef FLOAT_STORE_FLAG_VALUE
9245 || (code == GE
9246 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9247 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9248 REAL_VALUE_NEGATIVE (fsfv)))
9249 #endif
9251 && COMPARISON_P (SET_SRC (set))
9252 && (((GET_MODE_CLASS (mode) == MODE_CC)
9253 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9254 || mode == VOIDmode || inner_mode == VOIDmode))
9257 reverse_code = 1;
9258 x = SET_SRC (set);
9260 else
9261 break;
9264 else if (reg_set_p (op0, prev))
9265 /* If this sets OP0, but not directly, we have to give up. */
9266 break;
9268 if (x)
9270 /* If the caller is expecting the condition to be valid at INSN,
9271 make sure X doesn't change before INSN. */
9272 if (valid_at_insn_p)
9273 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
9274 break;
9275 if (COMPARISON_P (x))
9276 code = GET_CODE (x);
9277 if (reverse_code)
9279 code = reversed_comparison_code (x, prev);
9280 if (code == UNKNOWN)
9281 return 0;
9282 reverse_code = 0;
9285 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9286 if (earliest)
9287 *earliest = prev;
9291 /* If constant is first, put it last. */
9292 if (CONSTANT_P (op0))
9293 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9295 /* If OP0 is the result of a comparison, we weren't able to find what
9296 was really being compared, so fail. */
9297 if (!allow_cc_mode
9298 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9299 return 0;
9301 /* Canonicalize any ordered comparison with integers involving equality
9302 if we can do computations in the relevant mode and we do not
9303 overflow. */
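/* For example, (le X 4) is rewritten as (lt X 5) and (geu X 1) as (gtu X 0);
   the guards in the switch below keep the adjusted constant from wrapping.  */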
9305 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
9306 && GET_CODE (op1) == CONST_INT
9307 && GET_MODE (op0) != VOIDmode
9308 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9310 HOST_WIDE_INT const_val = INTVAL (op1);
9311 unsigned HOST_WIDE_INT uconst_val = const_val;
9312 unsigned HOST_WIDE_INT max_val
9313 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9315 switch (code)
9317 case LE:
9318 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9319 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9320 break;
9322 /* When cross-compiling, const_val might be sign-extended from
9323 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9324 case GE:
9325 if ((HOST_WIDE_INT) (const_val & max_val)
9326 != (((HOST_WIDE_INT) 1
9327 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9328 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9329 break;
9331 case LEU:
9332 if (uconst_val < max_val)
9333 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9334 break;
9336 case GEU:
9337 if (uconst_val != 0)
9338 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9339 break;
9341 default:
9342 break;
9346 /* Never return CC0; return zero instead. */
9347 if (CC0_P (op0))
9348 return 0;
9350 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9353 /* Given a jump insn JUMP, return the condition that will cause it to branch
9354 to its JUMP_LABEL. If the condition cannot be understood, or is an
9355 inequality floating-point comparison which needs to be reversed, 0 will
9356 be returned.
9358 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9359 insn used in locating the condition was found. If a replacement test
9360 of the condition is desired, it should be placed in front of that
9361 insn and we will be sure that the inputs are still valid. If EARLIEST
9362 is null, the returned condition will be valid at INSN.
9364 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
9365 compare CC mode register.
9367 VALID_AT_INSN_P is the same as for canonicalize_condition. */
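/* A typical pattern handled here is roughly
   (set (pc) (if_then_else (gt (reg R) (const_int 0)) (label_ref L) (pc))),
   from which the condition (gt (reg R) (const_int 0)) is extracted; if the
   label appears in the else arm instead, the condition is reversed first.  */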
9369 rtx
9370 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
9372 rtx cond;
9373 int reverse;
9374 rtx set;
9376 /* If this is not a standard conditional jump, we can't parse it. */
9377 if (!JUMP_P (jump)
9378 || ! any_condjump_p (jump))
9379 return 0;
9380 set = pc_set (jump);
9382 cond = XEXP (SET_SRC (set), 0);
9384 /* If this branches to JUMP_LABEL when the condition is false, reverse
9385 the condition. */
9386 reverse
9387 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9388 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9390 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
9391 allow_cc_mode, valid_at_insn_p);
9394 /* Similar to above routine, except that we also put an invariant last
9395 unless both operands are invariants. */
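/* For example, (gt (const_int 10) (reg I)), where only the constant is
   loop-invariant, is returned as (lt (reg I) (const_int 10)).  */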
9397 static rtx
9398 get_condition_for_loop (const struct loop *loop, rtx x)
9400 rtx comparison = get_condition (x, (rtx*) 0, false, true);
9402 if (comparison == 0
9403 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9404 || loop_invariant_p (loop, XEXP (comparison, 1)))
9405 return comparison;
9407 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9408 XEXP (comparison, 1), XEXP (comparison, 0));
9411 /* Scan the function and determine whether it has indirect (computed) jumps.
9413 This is taken mostly from flow.c; similar code exists elsewhere
9414 in the compiler. It may be useful to put this into rtlanal.c. */
9415 static int
9416 indirect_jump_in_function_p (rtx start)
9418 rtx insn;
9420 for (insn = start; insn; insn = NEXT_INSN (insn))
9421 if (computed_jump_p (insn))
9422 return 1;
9424 return 0;
9427 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9428 documentation for LOOP_MEMS for the definition of `appropriate'.
9429 This function is called from prescan_loop via for_each_rtx. */
9431 static int
9432 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
9434 struct loop_info *loop_info = data;
9435 int i;
9436 rtx m = *mem;
9438 if (m == NULL_RTX)
9439 return 0;
9441 switch (GET_CODE (m))
9443 case MEM:
9444 break;
9446 case CLOBBER:
9447 /* We're not interested in MEMs that are only clobbered. */
9448 return -1;
9450 case CONST_DOUBLE:
9451 /* We're not interested in the MEM associated with a
9452 CONST_DOUBLE, so there's no need to traverse into this. */
9453 return -1;
9455 case EXPR_LIST:
9456 /* We're not interested in any MEMs that only appear in notes. */
9457 return -1;
9459 default:
9460 /* This is not a MEM. */
9461 return 0;
9464 /* See if we've already seen this MEM. */
9465 for (i = 0; i < loop_info->mems_idx; ++i)
9466 if (rtx_equal_p (m, loop_info->mems[i].mem))
9468 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
9469 loop_info->mems[i].mem = m;
9470 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9471 /* The modes of the two memory accesses are different. If
9472 this happens, something tricky is going on, and we just
9473 don't optimize accesses to this MEM. */
9474 loop_info->mems[i].optimize = 0;
9476 return 0;
9479 /* Resize the array, if necessary. */
9480 if (loop_info->mems_idx == loop_info->mems_allocated)
9482 if (loop_info->mems_allocated != 0)
9483 loop_info->mems_allocated *= 2;
9484 else
9485 loop_info->mems_allocated = 32;
9487 loop_info->mems = xrealloc (loop_info->mems,
9488 loop_info->mems_allocated * sizeof (loop_mem_info));
9491 /* Actually insert the MEM. */
9492 loop_info->mems[loop_info->mems_idx].mem = m;
9493 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9494 because we can't put it in a register. We still store it in the
9495 table, though, so that if we see the same address later, but in a
9496 non-BLK mode, we'll not think we can optimize it at that point. */
9497 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9498 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9499 ++loop_info->mems_idx;
9501 return 0;
9505 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9507 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9508 register that is modified by an insn between FROM and TO. If the
9509 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9510 more, stop incrementing it, to avoid overflow.
9512 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9513 register I is used, if it is only used once. Otherwise, it is set
9514 to 0 (for no uses) or const0_rtx for more than one use. This
9515 parameter may be zero, in which case this processing is not done.
9517 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9518 optimize register I. */
9520 static void
9521 loop_regs_scan (const struct loop *loop, int extra_size)
9523 struct loop_regs *regs = LOOP_REGS (loop);
9524 int old_nregs;
9525 /* last_set[n] is nonzero iff reg n has been set in the current
9526 basic block. In that case, it is the insn that last set reg n. */
9527 rtx *last_set;
9528 rtx insn;
9529 int i;
9531 old_nregs = regs->num;
9532 regs->num = max_reg_num ();
9534 /* Grow the regs array if not allocated or too small. */
9535 if (regs->num >= regs->size)
9537 regs->size = regs->num + extra_size;
9539 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
9541 /* Zero the new elements. */
9542 memset (regs->array + old_nregs, 0,
9543 (regs->size - old_nregs) * sizeof (*regs->array));
9546 /* Clear previously scanned fields but do not clear n_times_set. */
9547 for (i = 0; i < old_nregs; i++)
9549 regs->array[i].set_in_loop = 0;
9550 regs->array[i].may_not_optimize = 0;
9551 regs->array[i].single_usage = NULL_RTX;
9554 last_set = xcalloc (regs->num, sizeof (rtx));
9556 /* Scan the loop, recording register usage. */
9557 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9558 insn = NEXT_INSN (insn))
9560 if (INSN_P (insn))
9562 /* Record registers that have exactly one use. */
9563 find_single_use_in_loop (regs, insn, PATTERN (insn));
9565 /* Include uses in REG_EQUAL notes. */
9566 if (REG_NOTES (insn))
9567 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9569 if (GET_CODE (PATTERN (insn)) == SET
9570 || GET_CODE (PATTERN (insn)) == CLOBBER)
9571 count_one_set (regs, insn, PATTERN (insn), last_set);
9572 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9574 int i;
9575 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9576 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9577 last_set);
9581 if (LABEL_P (insn) || JUMP_P (insn))
9582 memset (last_set, 0, regs->num * sizeof (rtx));
9584 /* Invalidate all registers used for function argument passing.
9585 We check rtx_varies_p for the same reason as below, to allow
9586 optimizing PIC calculations. */
9587 if (CALL_P (insn))
9589 rtx link;
9590 for (link = CALL_INSN_FUNCTION_USAGE (insn);
9591 link;
9592 link = XEXP (link, 1))
9594 rtx op, reg;
9596 if (GET_CODE (op = XEXP (link, 0)) == USE
9597 && REG_P (reg = XEXP (op, 0))
9598 && rtx_varies_p (reg, 1))
9599 regs->array[REGNO (reg)].may_not_optimize = 1;
9604 /* Invalidate all hard registers clobbered by calls. With one exception:
9605 a call-clobbered PIC register is still function-invariant for our
9606 purposes, since we can hoist any PIC calculations out of the loop.
9607 Thus the call to rtx_varies_p. */
9608 if (LOOP_INFO (loop)->has_call)
9609 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9610 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9611 && rtx_varies_p (regno_reg_rtx[i], 1))
9613 regs->array[i].may_not_optimize = 1;
9614 regs->array[i].set_in_loop = 1;
9617 #ifdef AVOID_CCMODE_COPIES
9618 /* Don't try to move insns which set CC registers if we should not
9619 create CCmode register copies. */
9620 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9621 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9622 regs->array[i].may_not_optimize = 1;
9623 #endif
9625 /* Set regs->array[I].n_times_set for the new registers. */
9626 for (i = old_nregs; i < regs->num; i++)
9627 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9629 free (last_set);
9632 /* Returns the number of real INSNs in the LOOP. */
9634 static int
9635 count_insns_in_loop (const struct loop *loop)
9637 int count = 0;
9638 rtx insn;
9640 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9641 insn = NEXT_INSN (insn))
9642 if (INSN_P (insn))
9643 ++count;
9645 return count;
9648 /* Move MEMs into registers for the duration of the loop. */
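/* Roughly: for each suitable MEM recorded in LOOP_MEMS, allocate a shadow
   pseudo register, replace every reference to the MEM within the loop by
   that pseudo, load the pseudo from the MEM (or from a known constant or
   register equivalent) just before the loop, and, if the MEM is written
   inside the loop, store the pseudo back to the MEM on loop exit.  */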
9650 static void
9651 load_mems (const struct loop *loop)
9653 struct loop_info *loop_info = LOOP_INFO (loop);
9654 struct loop_regs *regs = LOOP_REGS (loop);
9655 int maybe_never = 0;
9656 int i;
9657 rtx p, prev_ebb_head;
9658 rtx label = NULL_RTX;
9659 rtx end_label;
9660 /* Nonzero if the next instruction may never be executed. */
9661 int next_maybe_never = 0;
9662 unsigned int last_max_reg = max_reg_num ();
9664 if (loop_info->mems_idx == 0)
9665 return;
9667 /* We cannot use next_label here because it skips over normal insns. */
9668 end_label = next_nonnote_insn (loop->end);
9669 if (end_label && !LABEL_P (end_label))
9670 end_label = NULL_RTX;
9672 /* Check to see if it's possible that some instructions in the loop are
9673 never executed. Also check if there is a goto out of the loop other
9674 than right after the end of the loop. */
9675 for (p = next_insn_in_loop (loop, loop->scan_start);
9676 p != NULL_RTX;
9677 p = next_insn_in_loop (loop, p))
9679 if (LABEL_P (p))
9680 maybe_never = 1;
9681 else if (JUMP_P (p)
9682 /* If we enter the loop in the middle, and scan
9683 around to the beginning, don't set maybe_never
9684 for that. This must be an unconditional jump,
9685 otherwise the code at the top of the loop might
9686 never be executed. Unconditional jumps are
9687 followed by a barrier and then the loop end. */
9688 && ! (JUMP_P (p)
9689 && JUMP_LABEL (p) == loop->top
9690 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9691 && any_uncondjump_p (p)))
9693 /* If this is a jump outside of the loop but not right
9694 after the end of the loop, we would have to emit new fixup
9695 sequences for each such label. */
9696 if (/* If we can't tell where control might go when this
9697 JUMP_INSN is executed, we must be conservative. */
9698 !JUMP_LABEL (p)
9699 || (JUMP_LABEL (p) != end_label
9700 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9701 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9702 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9703 return;
9705 if (!any_condjump_p (p))
9706 /* Something complicated. */
9707 maybe_never = 1;
9708 else
9709 /* If there are any more instructions in the loop, they
9710 might not be reached. */
9711 next_maybe_never = 1;
9713 else if (next_maybe_never)
9714 maybe_never = 1;
9717 /* Find start of the extended basic block that enters the loop. */
9718 for (p = loop->start;
9719 PREV_INSN (p) && !LABEL_P (p);
9720 p = PREV_INSN (p))
9722 prev_ebb_head = p;
9724 cselib_init (true);
9726 /* Build table of mems that get set to constant values before the
9727 loop. */
9728 for (; p != loop->start; p = NEXT_INSN (p))
9729 cselib_process_insn (p);
9731 /* Actually move the MEMs. */
9732 for (i = 0; i < loop_info->mems_idx; ++i)
9734 regset_head load_copies;
9735 regset_head store_copies;
9736 int written = 0;
9737 rtx reg;
9738 rtx mem = loop_info->mems[i].mem;
9739 rtx mem_list_entry;
9741 if (MEM_VOLATILE_P (mem)
9742 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9743 /* There's no telling whether or not MEM is modified. */
9744 loop_info->mems[i].optimize = 0;
9746 /* Go through the MEMs written to in the loop to see if this
9747 one is aliased by one of them. */
9748 mem_list_entry = loop_info->store_mems;
9749 while (mem_list_entry)
9751 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9752 written = 1;
9753 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9754 mem, rtx_varies_p))
9756 /* MEM is indeed aliased by this store. */
9757 loop_info->mems[i].optimize = 0;
9758 break;
9760 mem_list_entry = XEXP (mem_list_entry, 1);
9763 if (flag_float_store && written
9764 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9765 loop_info->mems[i].optimize = 0;
9767 /* If this MEM is written to, we must be sure that there
9768 are no reads from another MEM that aliases this one. */
9769 if (loop_info->mems[i].optimize && written)
9771 int j;
9773 for (j = 0; j < loop_info->mems_idx; ++j)
9775 if (j == i)
9776 continue;
9777 else if (true_dependence (mem,
9778 VOIDmode,
9779 loop_info->mems[j].mem,
9780 rtx_varies_p))
9782 /* It's not safe to hoist loop_info->mems[i] out of
9783 the loop because writes to it might not be
9784 seen by reads from loop_info->mems[j]. */
9785 loop_info->mems[i].optimize = 0;
9786 break;
9791 if (maybe_never && may_trap_p (mem))
9792 /* We can't access the MEM outside the loop; it might
9793 cause a trap that wouldn't have happened otherwise. */
9794 loop_info->mems[i].optimize = 0;
9796 if (!loop_info->mems[i].optimize)
9797 /* We thought we were going to lift this MEM out of the
9798 loop, but later discovered that we could not. */
9799 continue;
9801 INIT_REG_SET (&load_copies);
9802 INIT_REG_SET (&store_copies);
9804 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9805 order to keep scan_loop from moving stores to this MEM
9806 out of the loop just because this REG is neither a
9807 user-variable nor used in the loop test. */
9808 reg = gen_reg_rtx (GET_MODE (mem));
9809 REG_USERVAR_P (reg) = 1;
9810 loop_info->mems[i].reg = reg;
9812 /* Now, replace all references to the MEM with the
9813 corresponding pseudos. */
9814 maybe_never = 0;
9815 for (p = next_insn_in_loop (loop, loop->scan_start);
9816 p != NULL_RTX;
9817 p = next_insn_in_loop (loop, p))
9819 if (INSN_P (p))
9821 rtx set;
9823 set = single_set (p);
9825 /* See if this copies the mem into a register that isn't
9826 modified afterwards. We'll try to do copy propagation
9827 a little further on. */
9828 if (set
9829 /* @@@ This test is _way_ too conservative. */
9830 && ! maybe_never
9831 && REG_P (SET_DEST (set))
9832 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9833 && REGNO (SET_DEST (set)) < last_max_reg
9834 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9835 && rtx_equal_p (SET_SRC (set), mem))
9836 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9838 /* See if this copies the mem from a register that isn't
9839 modified afterwards. We'll try to remove the
9840 redundant copy later on by doing a little register
9841 renaming and copy propagation. This will help
9842 to untangle things for the BIV detection code. */
9843 if (set
9844 && ! maybe_never
9845 && REG_P (SET_SRC (set))
9846 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9847 && REGNO (SET_SRC (set)) < last_max_reg
9848 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9849 && rtx_equal_p (SET_DEST (set), mem))
9850 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9852 /* If this is a call which uses / clobbers this memory
9853 location, we must not change the interface here. */
9854 if (CALL_P (p)
9855 && reg_mentioned_p (loop_info->mems[i].mem,
9856 CALL_INSN_FUNCTION_USAGE (p)))
9858 cancel_changes (0);
9859 loop_info->mems[i].optimize = 0;
9860 break;
9862 else
9863 /* Replace the memory reference with the shadow register. */
9864 replace_loop_mems (p, loop_info->mems[i].mem,
9865 loop_info->mems[i].reg, written);
9868 if (LABEL_P (p)
9869 || JUMP_P (p))
9870 maybe_never = 1;
9873 if (! loop_info->mems[i].optimize)
9874 ; /* We found we couldn't do the replacement, so do nothing. */
9875 else if (! apply_change_group ())
9876 /* We couldn't replace all occurrences of the MEM. */
9877 loop_info->mems[i].optimize = 0;
9878 else
9880 /* Load the memory immediately before LOOP->START, which is
9881 the NOTE_LOOP_BEG. */
9882 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9883 rtx set;
9884 rtx best = mem;
9885 int j;
9886 struct elt_loc_list *const_equiv = 0;
9888 if (e)
9890 struct elt_loc_list *equiv;
9891 struct elt_loc_list *best_equiv = 0;
9892 for (equiv = e->locs; equiv; equiv = equiv->next)
9894 if (CONSTANT_P (equiv->loc))
9895 const_equiv = equiv;
9896 else if (REG_P (equiv->loc)
9897 /* Extending hard register lifetimes causes crashes
9898 on SRC targets. Doing so on non-SRC targets is
9899 probably also not a good idea, since we most
9900 probably have pseudoregister equivalence as
9901 well. */
9902 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9903 best_equiv = equiv;
9905 /* Use the constant equivalence if that is cheap enough. */
9906 if (! best_equiv)
9907 best_equiv = const_equiv;
9908 else if (const_equiv
9909 && (rtx_cost (const_equiv->loc, SET)
9910 <= rtx_cost (best_equiv->loc, SET)))
9912 best_equiv = const_equiv;
9913 const_equiv = 0;
9916 /* If best_equiv is nonzero, we know that MEM is set to a
9917 constant or register before the loop. We will use this
9918 knowledge to initialize the shadow register with that
9919 constant or reg rather than by loading from MEM. */
9920 if (best_equiv)
9921 best = copy_rtx (best_equiv->loc);
9924 set = gen_move_insn (reg, best);
9925 set = loop_insn_hoist (loop, set);
9926 if (REG_P (best))
9928 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9929 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9931 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9932 break;
9936 if (const_equiv)
9937 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9939 if (written)
9941 if (label == NULL_RTX)
9943 label = gen_label_rtx ();
9944 emit_label_after (label, loop->end);
9947 /* Store the memory immediately after END, which is
9948 the NOTE_LOOP_END. */
9949 set = gen_move_insn (copy_rtx (mem), reg);
9950 loop_insn_emit_after (loop, 0, label, set);
9953 if (loop_dump_stream)
9955 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9956 REGNO (reg), (written ? "r/w" : "r/o"));
9957 print_rtl (loop_dump_stream, mem);
9958 fputc ('\n', loop_dump_stream);
9961 /* Attempt a bit of copy propagation. This helps untangle the
9962 data flow, and enables {basic,general}_induction_var to find
9963 more bivs/givs. */
9964 EXECUTE_IF_SET_IN_REG_SET
9965 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9967 try_copy_prop (loop, reg, j);
9969 CLEAR_REG_SET (&load_copies);
9971 EXECUTE_IF_SET_IN_REG_SET
9972 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9974 try_swap_copy_prop (loop, reg, j);
9976 CLEAR_REG_SET (&store_copies);
9980 /* Now, we need to replace all references to the previous exit
9981 label with the new one. */
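/* This way, jumps that exited through the old label now pass through the
   new one and thus execute the store-back insns emitted above.  */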
9982 if (label != NULL_RTX && end_label != NULL_RTX)
9983 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9984 if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
9985 redirect_jump (p, label, false);
9987 cselib_finish ();
9990 /* For communication between note_reg_stored and its caller. */
9991 struct note_reg_stored_arg
9993 int set_seen;
9994 rtx reg;
9997 /* Called via note_stores, record in SET_SEEN whether X, which is written,
9998 is equal to ARG. */
9999 static void
10000 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
10002 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10003 if (t->reg == x)
10004 t->set_seen = 1;
10007 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10008 There must be exactly one insn that sets this pseudo; it will be
10009 deleted if all replacements succeed and we can prove that the register
10010 is not used after the loop. */
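/* For example, after load_mems has replaced a hoisted MEM by a shadow
   pseudo R, a loop insn that used to load the MEM into some pseudo P is
   now (set (reg P) (reg R)); this routine rewrites later uses of P to use
   R directly and, when P has no remaining uses, deletes the copy insn.  */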
10012 static void
10013 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
10015 /* This is the reg that we are copying from. */
10016 rtx reg_rtx = regno_reg_rtx[regno];
10017 rtx init_insn = 0;
10018 rtx insn;
10019 /* These help keep track of whether we replaced all uses of the reg. */
10020 int replaced_last = 0;
10021 int store_is_first = 0;
10023 for (insn = next_insn_in_loop (loop, loop->scan_start);
10024 insn != NULL_RTX;
10025 insn = next_insn_in_loop (loop, insn))
10027 rtx set;
10029 /* Only substitute within one extended basic block from the initializing
10030 insn. */
10031 if (LABEL_P (insn) && init_insn)
10032 break;
10034 if (! INSN_P (insn))
10035 continue;
10037 /* Is this the initializing insn? */
10038 set = single_set (insn);
10039 if (set
10040 && REG_P (SET_DEST (set))
10041 && REGNO (SET_DEST (set)) == regno)
10043 if (init_insn)
10044 abort ();
10046 init_insn = insn;
10047 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10048 store_is_first = 1;
10051 /* Only substitute after seeing the initializing insn. */
10052 if (init_insn && insn != init_insn)
10054 struct note_reg_stored_arg arg;
10056 replace_loop_regs (insn, reg_rtx, replacement);
10057 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10058 replaced_last = 1;
10060 /* Stop replacing when REPLACEMENT is modified. */
10061 arg.reg = replacement;
10062 arg.set_seen = 0;
10063 note_stores (PATTERN (insn), note_reg_stored, &arg);
10064 if (arg.set_seen)
10066 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10068 /* It is possible that we've turned a previously valid REG_EQUAL note
10069 into an invalid one: we changed REGNO to REPLACEMENT and, unlike
10070 REGNO, REPLACEMENT is modified here, so the note may no longer hold. */
10071 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10072 remove_note (insn, note);
10073 break;
10077 if (! init_insn)
10078 abort ();
10079 if (apply_change_group ())
10081 if (loop_dump_stream)
10082 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10083 if (store_is_first && replaced_last)
10085 rtx first;
10086 rtx retval_note;
10088 /* Assume we're just deleting INIT_INSN. */
10089 first = init_insn;
10090 /* Look for REG_RETVAL note. If we're deleting the end of
10091 the libcall sequence, the whole sequence can go. */
10092 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10093 /* If we found a REG_RETVAL note, find the first instruction
10094 in the sequence. */
10095 if (retval_note)
10096 first = XEXP (retval_note, 0);
10098 /* Delete the instructions. */
10099 loop_delete_insns (first, init_insn);
10101 if (loop_dump_stream)
10102 fprintf (loop_dump_stream, ".\n");
10106 /* Replace all the instructions from FIRST up to and including LAST
10107 with NOTE_INSN_DELETED notes. */
10109 static void
10110 loop_delete_insns (rtx first, rtx last)
10112 while (1)
10114 if (loop_dump_stream)
10115 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10116 INSN_UID (first));
10117 delete_insn (first);
10119 /* If this was the LAST instruction we're supposed to delete,
10120 we're done. */
10121 if (first == last)
10122 break;
10124 first = NEXT_INSN (first);
10128 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10129 loop LOOP if the order of the sets of these registers can be
10130 swapped. There must be exactly one insn within the loop that sets
10131 this pseudo followed immediately by a move insn that sets
10132 REPLACEMENT with REGNO. */
10133 static void
10134 try_swap_copy_prop (const struct loop *loop, rtx replacement,
10135 unsigned int regno)
10137 rtx insn;
10138 rtx set = NULL_RTX;
10139 unsigned int new_regno;
10141 new_regno = REGNO (replacement);
10143 for (insn = next_insn_in_loop (loop, loop->scan_start);
10144 insn != NULL_RTX;
10145 insn = next_insn_in_loop (loop, insn))
10147 /* Search for the insn that copies REGNO to NEW_REGNO. */
10148 if (INSN_P (insn)
10149 && (set = single_set (insn))
10150 && REG_P (SET_DEST (set))
10151 && REGNO (SET_DEST (set)) == new_regno
10152 && REG_P (SET_SRC (set))
10153 && REGNO (SET_SRC (set)) == regno)
10154 break;
10157 if (insn != NULL_RTX)
10159 rtx prev_insn;
10160 rtx prev_set;
10162 /* Some DEF-USE info would come in handy here to make this
10163 function more general. For now, just check the previous insn
10164 which is the most likely candidate for setting REGNO. */
10166 prev_insn = PREV_INSN (insn);
10168 if (INSN_P (insn)
10169 && (prev_set = single_set (prev_insn))
10170 && REG_P (SET_DEST (prev_set))
10171 && REGNO (SET_DEST (prev_set)) == regno)
10173 /* We have:
10174 (set (reg regno) (expr))
10175 (set (reg new_regno) (reg regno))
10177 so try converting this to:
10178 (set (reg new_regno) (expr))
10179 (set (reg regno) (reg new_regno))
10181 The former construct is often generated when a global
10182 variable used for an induction variable is shadowed by a
10183 register (NEW_REGNO). The latter construct improves the
10184 chances of GIV replacement and BIV elimination. */
10186 validate_change (prev_insn, &SET_DEST (prev_set),
10187 replacement, 1);
10188 validate_change (insn, &SET_DEST (set),
10189 SET_SRC (set), 1);
10190 validate_change (insn, &SET_SRC (set),
10191 replacement, 1);
10193 if (apply_change_group ())
10195 if (loop_dump_stream)
10196 fprintf (loop_dump_stream,
10197 " Swapped set of reg %d at %d with reg %d at %d.\n",
10198 regno, INSN_UID (insn),
10199 new_regno, INSN_UID (prev_insn));
10201 /* Update first use of REGNO. */
10202 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10203 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10205 /* Now perform copy propagation to hopefully
10206 remove all uses of REGNO within the loop. */
10207 try_copy_prop (loop, replacement, regno);
10213 /* Worker function for find_mem_in_note, called via for_each_rtx. */
10215 static int
10216 find_mem_in_note_1 (rtx *x, void *data)
10218 if (*x != NULL_RTX && MEM_P (*x))
10220 rtx *res = (rtx *) data;
10221 *res = *x;
10222 return 1;
10224 return 0;
10227 /* Returns the first MEM found in NOTE by depth-first search. */
10229 static rtx
10230 find_mem_in_note (rtx note)
10232 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
10233 return note;
10234 return NULL_RTX;
10237 /* Replace MEM with its associated pseudo register. This function is
10238 called from load_mems via for_each_rtx. DATA is actually a pointer
10239 to a structure describing the instruction currently being scanned
10240 and the MEM we are currently replacing. */
10242 static int
10243 replace_loop_mem (rtx *mem, void *data)
10245 loop_replace_args *args = (loop_replace_args *) data;
10246 rtx m = *mem;
10248 if (m == NULL_RTX)
10249 return 0;
10251 switch (GET_CODE (m))
10253 case MEM:
10254 break;
10256 case CONST_DOUBLE:
10257 /* We're not interested in the MEM associated with a
10258 CONST_DOUBLE, so there's no need to traverse into one. */
10259 return -1;
10261 default:
10262 /* This is not a MEM. */
10263 return 0;
10266 if (!rtx_equal_p (args->match, m))
10267 /* This is not the MEM we are currently replacing. */
10268 return 0;
10270 /* Actually replace the MEM. */
10271 validate_change (args->insn, mem, args->replacement, 1);
10273 return 0;
10276 static void
10277 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
10279 loop_replace_args args;
10281 args.insn = insn;
10282 args.match = mem;
10283 args.replacement = reg;
10285 for_each_rtx (&insn, replace_loop_mem, &args);
10287 /* If we hoist a mem write out of the loop, then REG_EQUAL
10288 notes referring to the mem are no longer valid. */
10289 if (written)
10291 rtx note, sub;
10292 rtx *link;
10294 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
10296 if (REG_NOTE_KIND (note) == REG_EQUAL
10297 && (sub = find_mem_in_note (note))
10298 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
10300 /* Remove the note. */
10301 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
10302 break;
10308 /* Replace one register with another. Called through for_each_rtx; PX points
10309 to the rtx being scanned. DATA is actually a pointer to
10310 a structure of arguments. */
10312 static int
10313 replace_loop_reg (rtx *px, void *data)
10315 rtx x = *px;
10316 loop_replace_args *args = (loop_replace_args *) data;
10318 if (x == NULL_RTX)
10319 return 0;
10321 if (x == args->match)
10322 validate_change (args->insn, px, args->replacement, 1);
10324 return 0;
10327 static void
10328 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
10330 loop_replace_args args;
10332 args.insn = insn;
10333 args.match = reg;
10334 args.replacement = replacement;
10336 for_each_rtx (&insn, replace_loop_reg, &args);
10339 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10340 (ignored in the interim). */
10342 static rtx
10343 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
10344 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
10345 rtx pattern)
10347 return emit_insn_after (pattern, where_insn);
10351 /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
10352 in basic block WHERE_BB (ignored in the interim) within the loop
10353 otherwise hoist PATTERN into the loop pre-header. */
10355 static rtx
10356 loop_insn_emit_before (const struct loop *loop,
10357 basic_block where_bb ATTRIBUTE_UNUSED,
10358 rtx where_insn, rtx pattern)
10360 if (! where_insn)
10361 return loop_insn_hoist (loop, pattern);
10362 return emit_insn_before (pattern, where_insn);
10366 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10367 WHERE_BB (ignored in the interim) within the loop. */
10369 static rtx
10370 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
10371 basic_block where_bb ATTRIBUTE_UNUSED,
10372 rtx where_insn, rtx pattern)
10374 return emit_call_insn_before (pattern, where_insn);
10378 /* Hoist insn for PATTERN into the loop pre-header. */
10380 static rtx
10381 loop_insn_hoist (const struct loop *loop, rtx pattern)
10383 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10387 /* Hoist call insn for PATTERN into the loop pre-header. */
10389 static rtx
10390 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
10392 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10396 /* Sink insn for PATTERN after the loop end. */
10398 static rtx
10399 loop_insn_sink (const struct loop *loop, rtx pattern)
10401 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10404 /* bl->final_value can be either general_operand or PLUS of general_operand
10405 and constant. Emit sequence of instructions to load it into REG. */
10406 static rtx
10407 gen_load_of_final_value (rtx reg, rtx final_value)
10409 rtx seq;
10410 start_sequence ();
10411 final_value = force_operand (final_value, reg);
10412 if (final_value != reg)
10413 emit_move_insn (reg, final_value);
10414 seq = get_insns ();
10415 end_sequence ();
10416 return seq;
10419 /* If the loop has multiple exits, emit insn for PATTERN before the
10420 loop to ensure that it will always be executed no matter how the
10421 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10422 since this is slightly more efficient. */
10424 static rtx
10425 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
10427 if (loop->exit_count)
10428 return loop_insn_hoist (loop, pattern);
10429 else
10430 return loop_insn_sink (loop, pattern);
10433 static void
10434 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
10436 struct iv_class *bl;
10437 int iv_num = 0;
10439 if (! loop || ! file)
10440 return;
10442 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10443 iv_num++;
10445 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10447 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10449 loop_iv_class_dump (bl, file, verbose);
10450 fputc ('\n', file);
10455 static void
10456 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
10457 int verbose ATTRIBUTE_UNUSED)
10459 struct induction *v;
10460 rtx incr;
10461 int i;
10463 if (! bl || ! file)
10464 return;
10466 fprintf (file, "IV class for reg %d, benefit %d\n",
10467 bl->regno, bl->total_benefit);
10469 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10470 if (bl->initial_value)
10472 fprintf (file, ", init val: ");
10473 print_simple_rtl (file, bl->initial_value);
10475 if (bl->initial_test)
10477 fprintf (file, ", init test: ");
10478 print_simple_rtl (file, bl->initial_test);
10480 fputc ('\n', file);
10482 if (bl->final_value)
10484 fprintf (file, " Final val: ");
10485 print_simple_rtl (file, bl->final_value);
10486 fputc ('\n', file);
10489 if ((incr = biv_total_increment (bl)))
10491 fprintf (file, " Total increment: ");
10492 print_simple_rtl (file, incr);
10493 fputc ('\n', file);
10496 /* List the increments. */
10497 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10499 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10500 print_simple_rtl (file, v->add_val);
10501 fputc ('\n', file);
10504 /* List the givs. */
10505 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10507 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10508 i, INSN_UID (v->insn), v->benefit);
10509 if (v->giv_type == DEST_ADDR)
10510 print_simple_rtl (file, v->mem);
10511 else
10512 print_simple_rtl (file, single_set (v->insn));
10513 fputc ('\n', file);
10518 static void
10519 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
10521 if (! v || ! file)
10522 return;
10524 fprintf (file,
10525 "Biv %d: insn %d",
10526 REGNO (v->dest_reg), INSN_UID (v->insn));
10527 fprintf (file, " const ");
10528 print_simple_rtl (file, v->add_val);
10530 if (verbose && v->final_value)
10532 fputc ('\n', file);
10533 fprintf (file, " final ");
10534 print_simple_rtl (file, v->final_value);
10537 fputc ('\n', file);
10541 static void
10542 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
10544 if (! v || ! file)
10545 return;
10547 if (v->giv_type == DEST_REG)
10548 fprintf (file, "Giv %d: insn %d",
10549 REGNO (v->dest_reg), INSN_UID (v->insn));
10550 else
10551 fprintf (file, "Dest address: insn %d",
10552 INSN_UID (v->insn));
10554 fprintf (file, " src reg %d benefit %d",
10555 REGNO (v->src_reg), v->benefit);
10556 fprintf (file, " lifetime %d",
10557 v->lifetime);
10559 if (v->replaceable)
10560 fprintf (file, " replaceable");
10562 if (v->no_const_addval)
10563 fprintf (file, " ncav");
10565 if (v->ext_dependent)
10567 switch (GET_CODE (v->ext_dependent))
10569 case SIGN_EXTEND:
10570 fprintf (file, " ext se");
10571 break;
10572 case ZERO_EXTEND:
10573 fprintf (file, " ext ze");
10574 break;
10575 case TRUNCATE:
10576 fprintf (file, " ext tr");
10577 break;
10578 default:
10579 abort ();
10583 fputc ('\n', file);
10584 fprintf (file, " mult ");
10585 print_simple_rtl (file, v->mult_val);
10587 fputc ('\n', file);
10588 fprintf (file, " add ");
10589 print_simple_rtl (file, v->add_val);
10591 if (verbose && v->final_value)
10593 fputc ('\n', file);
10594 fprintf (file, " final ");
10595 print_simple_rtl (file, v->final_value);
10598 fputc ('\n', file);
10602 void
10603 debug_ivs (const struct loop *loop)
10605 loop_ivs_dump (loop, stderr, 1);
10609 void
10610 debug_iv_class (const struct iv_class *bl)
10612 loop_iv_class_dump (bl, stderr, 1);
10616 void
10617 debug_biv (const struct induction *v)
10619 loop_biv_dump (v, stderr, 1);
10623 void
10624 debug_giv (const struct induction *v)
10626 loop_giv_dump (v, stderr, 1);
10630 #define LOOP_BLOCK_NUM_1(INSN) \
10631 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10633 /* The notes do not have an assigned block, so look at the next insn. */
10634 #define LOOP_BLOCK_NUM(INSN) \
10635 ((INSN) ? (NOTE_P (INSN) \
10636 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10637 : LOOP_BLOCK_NUM_1 (INSN)) \
10638 : -1)
10640 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10642 static void
10643 loop_dump_aux (const struct loop *loop, FILE *file,
10644 int verbose ATTRIBUTE_UNUSED)
10646 rtx label;
10648 if (! loop || ! file || !BB_HEAD (loop->first))
10649 return;
10651 /* Print diagnostics to compare our concept of a loop with
10652 what the loop notes say. */
10653 if (! PREV_INSN (BB_HEAD (loop->first))
10654 || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
10655 || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
10656 != NOTE_INSN_LOOP_BEG)
10657 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10658 INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
10659 if (! NEXT_INSN (BB_END (loop->last))
10660 || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
10661 || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
10662 != NOTE_INSN_LOOP_END)
10663 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10664 INSN_UID (NEXT_INSN (BB_END (loop->last))));
10666 if (loop->start)
10668 fprintf (file,
10669 ";; start %d (%d), end %d (%d)\n",
10670 LOOP_BLOCK_NUM (loop->start),
10671 LOOP_INSN_UID (loop->start),
10672 LOOP_BLOCK_NUM (loop->end),
10673 LOOP_INSN_UID (loop->end));
10674 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10675 LOOP_BLOCK_NUM (loop->top),
10676 LOOP_INSN_UID (loop->top),
10677 LOOP_BLOCK_NUM (loop->scan_start),
10678 LOOP_INSN_UID (loop->scan_start));
10679 fprintf (file, ";; exit_count %d", loop->exit_count);
10680 if (loop->exit_count)
10682 fputs (", labels:", file);
10683 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10685 fprintf (file, " %d ",
10686 LOOP_INSN_UID (XEXP (label, 0)));
10689 fputs ("\n", file);
10693 /* Call this function from the debugger to dump LOOP. */
10695 void
10696 debug_loop (const struct loop *loop)
10698 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10701 /* Call this function from the debugger to dump LOOPS. */
10703 void
10704 debug_loops (const struct loops *loops)
10706 flow_loops_dump (loops, stderr, loop_dump_aux, 1);