[official-gcc.git] / gcc / loop.c
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables.
27 Basic induction variables (BIVs) are pseudo registers that are set within
28 a loop only by incrementing or decrementing their value. General induction
29 variables (GIVs) are pseudo registers whose value is a linear function
30 of a basic induction variable. BIVs are recognized by `basic_induction_var';
31 GIVs by `general_induction_var'.
33 Once induction variables are identified, strength reduction is applied to the
34 general induction variables, and induction variable elimination is applied to
35 the basic induction variables.
37 It also finds cases where
38 a register is set within the loop by zero-extending a narrower value
39 and changes these to zero the entire register once before the loop
40 and merely copy the low part within the loop.
42 Most of the complexity is in heuristics to decide when it is
43 worthwhile to do these things. */
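/* Illustrative sketch only (not part of this pass; the variable names are
   hypothetical): in a source loop such as

	for (i = 0; i < n; i++)
	  a[i] = 0;

   the counter `i' is a biv, and the address expression `&a[i]' (a linear
   function of `i') is a giv.  Strength reduction replaces the per-iteration
   address computation with a pointer that is simply advanced each iteration,
   roughly

	for (p = &a[0]; p < &a[n]; p++)
	  *p = 0;

   after which biv elimination can remove `i' entirely, since the exit test
   has been rewritten in terms of the reduced giv `p'.  */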
45 #include "config.h"
46 #include "system.h"
47 #include "coretypes.h"
48 #include "tm.h"
49 #include "rtl.h"
50 #include "tm_p.h"
51 #include "function.h"
52 #include "expr.h"
53 #include "hard-reg-set.h"
54 #include "basic-block.h"
55 #include "insn-config.h"
56 #include "regs.h"
57 #include "recog.h"
58 #include "flags.h"
59 #include "real.h"
60 #include "loop.h"
61 #include "cselib.h"
62 #include "except.h"
63 #include "toplev.h"
64 #include "predict.h"
65 #include "insn-flags.h"
66 #include "optabs.h"
67 #include "cfgloop.h"
68 #include "ggc.h"
70 /* Not really meaningful values, but at least something. */
71 #ifndef SIMULTANEOUS_PREFETCHES
72 #define SIMULTANEOUS_PREFETCHES 3
73 #endif
74 #ifndef PREFETCH_BLOCK
75 #define PREFETCH_BLOCK 32
76 #endif
77 #ifndef HAVE_prefetch
78 #define HAVE_prefetch 0
79 #define CODE_FOR_prefetch 0
80 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
81 #endif
83 /* Give up the prefetch optimizations once we exceed a given threshold.
84 It is unlikely that we would be able to optimize something in a loop
85 with so many detected prefetches. */
86 #define MAX_PREFETCHES 100
87 /* The number of prefetch blocks that are beneficial to fetch at once before
88 a loop with a known (and low) iteration count. */
89 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
90 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
91 since it is likely that the data are already in the cache. */
92 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
94 /* Parameterize some prefetch heuristics so they can be turned on and off
95 easily for performance testing on new architectures. These can be
96 defined in target-dependent files. */
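/* Illustrative note (not from this file): because each of these parameters is
   guarded by #ifndef, a target can override a default simply by defining the
   macro in its own target headers before this file is compiled, e.g.

	#define PREFETCH_LOW_LOOPCNT 64

   (a hypothetical override, shown only to make the mechanism concrete).  */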
98 /* Prefetch is worthwhile only when loads/stores are dense. */
99 #ifndef PREFETCH_ONLY_DENSE_MEM
100 #define PREFETCH_ONLY_DENSE_MEM 1
101 #endif
103 /* Define what we mean by "dense" loads and stores. This value divided by 256
104 is the minimum fraction of memory references that are worth prefetching. */
105 #ifndef PREFETCH_DENSE_MEM
106 #define PREFETCH_DENSE_MEM 220
107 #endif
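/* For example, the default value of 220 corresponds to a density threshold
   of 220/256, i.e. roughly 86% of memory references.  */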
109 /* Do not prefetch for a loop whose iteration count is known to be low. */
110 #ifndef PREFETCH_NO_LOW_LOOPCNT
111 #define PREFETCH_NO_LOW_LOOPCNT 1
112 #endif
114 /* Define what we mean by a "low" iteration count. */
115 #ifndef PREFETCH_LOW_LOOPCNT
116 #define PREFETCH_LOW_LOOPCNT 32
117 #endif
119 /* Do not prefetch for a loop that contains a function call; such a loop is
120 probably not an internal loop. */
121 #ifndef PREFETCH_NO_CALL
122 #define PREFETCH_NO_CALL 1
123 #endif
125 /* Do not prefetch accesses with an extreme stride. */
126 #ifndef PREFETCH_NO_EXTREME_STRIDE
127 #define PREFETCH_NO_EXTREME_STRIDE 1
128 #endif
130 /* Define what we mean by an "extreme" stride. */
131 #ifndef PREFETCH_EXTREME_STRIDE
132 #define PREFETCH_EXTREME_STRIDE 4096
133 #endif
135 /* Define a limit to how far apart indices can be and still be merged
136 into a single prefetch. */
137 #ifndef PREFETCH_EXTREME_DIFFERENCE
138 #define PREFETCH_EXTREME_DIFFERENCE 4096
139 #endif
141 /* Issue prefetch instructions before the loop to fetch data to be used
142 in the first few loop iterations. */
143 #ifndef PREFETCH_BEFORE_LOOP
144 #define PREFETCH_BEFORE_LOOP 1
145 #endif
147 /* Do not handle reversed order prefetches (negative stride). */
148 #ifndef PREFETCH_NO_REVERSE_ORDER
149 #define PREFETCH_NO_REVERSE_ORDER 1
150 #endif
152 /* Prefetch even if the GIV is in conditional code. */
153 #ifndef PREFETCH_CONDITIONAL
154 #define PREFETCH_CONDITIONAL 1
155 #endif
157 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
158 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
160 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
161 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
162 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
164 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
165 ((REGNO) < FIRST_PSEUDO_REGISTER \
166 ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
169 /* Vector mapping INSN_UIDs to luids.
170 The luids are like uids but always increase monotonically.
171 We use them to see whether a jump comes from outside a given loop. */
173 int *uid_luid;
175 /* Indexed by INSN_UID, contains a pointer to the (innermost) loop
176 that the insn is contained in. */
178 struct loop **uid_loop;
180 /* 1 + largest uid of any insn. */
182 int max_uid_for_loop;
184 /* Number of loops detected in current function. Used as index to the
185 next few tables. */
187 static int max_loop_num;
189 /* Bound on pseudo register number before loop optimization.
190 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
191 unsigned int max_reg_before_loop;
193 /* The value to pass to the next call of reg_scan_update. */
194 static int loop_max_reg;
196 /* During the analysis of a loop, a chain of `struct movable's
197 is made to record all the movable insns found.
198 Then the entire chain can be scanned to decide which to move. */
200 struct movable
202 rtx insn; /* A movable insn */
203 rtx set_src; /* The expression this reg is set from. */
204 rtx set_dest; /* The destination of this SET. */
205 rtx dependencies; /* When INSN is a libcall, this is an EXPR_LIST
206 of any registers used within the LIBCALL. */
207 int consec; /* Number of consecutive following insns
208 that must be moved with this one. */
209 unsigned int regno; /* The register it sets */
210 short lifetime; /* lifetime of that register;
211 may be adjusted when matching movables
212 that load the same value are found. */
213 short savings; /* Number of insns we can move for this reg,
214 including other movables that force this
215 or match this one. */
216 ENUM_BITFIELD(machine_mode) savemode : 8; /* Nonzero means it is a mode for
217 a low part that we should avoid changing when
218 clearing the rest of the reg. */
219 unsigned int cond : 1; /* 1 if only conditionally movable */
220 unsigned int force : 1; /* 1 means MUST move this insn */
221 unsigned int global : 1; /* 1 means reg is live outside this loop */
222 /* If PARTIAL is 1, GLOBAL means something different:
223 that the reg is live outside the range from where it is set
224 to the following label. */
225 unsigned int done : 1; /* 1 inhibits further processing of this */
227 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
228 In particular, moving it does not make it
229 invariant. */
230 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
231 load SRC, rather than copying INSN. */
232 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
233 first insn of a consecutive sets group. */
234 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
235 unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
236 the original insn with a copy from that
237 pseudo, rather than deleting it. */
238 struct movable *match; /* First entry for same value */
239 struct movable *forces; /* An insn that must be moved if this is */
240 struct movable *next;
244 FILE *loop_dump_stream;
246 /* Forward declarations. */
248 static void invalidate_loops_containing_label (rtx);
249 static void find_and_verify_loops (rtx, struct loops *);
250 static void mark_loop_jump (rtx, struct loop *);
251 static void prescan_loop (struct loop *);
252 static int reg_in_basic_block_p (rtx, rtx);
253 static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
254 static int labels_in_range_p (rtx, int);
255 static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
256 static void note_addr_stored (rtx, rtx, void *);
257 static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
258 static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
259 static rtx find_regs_nested (rtx, rtx);
260 static void scan_loop (struct loop*, int);
261 #if 0
262 static void replace_call_address (rtx, rtx, rtx);
263 #endif
264 static rtx skip_consec_insns (rtx, int);
265 static int libcall_benefit (rtx);
266 static void ignore_some_movables (struct loop_movables *);
267 static void force_movables (struct loop_movables *);
268 static void combine_movables (struct loop_movables *, struct loop_regs *);
269 static int num_unmoved_movables (const struct loop *);
270 static int regs_match_p (rtx, rtx, struct loop_movables *);
271 static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
272 struct loop_regs *);
273 static void add_label_notes (rtx, rtx);
274 static void move_movables (struct loop *loop, struct loop_movables *, int,
275 int);
276 static void loop_movables_add (struct loop_movables *, struct movable *);
277 static void loop_movables_free (struct loop_movables *);
278 static int count_nonfixed_reads (const struct loop *, rtx);
279 static void loop_bivs_find (struct loop *);
280 static void loop_bivs_init_find (struct loop *);
281 static void loop_bivs_check (struct loop *);
282 static void loop_givs_find (struct loop *);
283 static void loop_givs_check (struct loop *);
284 static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
285 static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
286 struct induction *, rtx);
287 static void loop_givs_dead_check (struct loop *, struct iv_class *);
288 static void loop_givs_reduce (struct loop *, struct iv_class *);
289 static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
290 static void loop_ivs_free (struct loop *);
291 static void strength_reduce (struct loop *, int);
292 static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
293 static int valid_initial_value_p (rtx, rtx, int, rtx);
294 static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
295 static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
296 rtx, rtx *, int, int);
297 static void check_final_value (const struct loop *, struct induction *);
298 static void loop_ivs_dump (const struct loop *, FILE *, int);
299 static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
300 static void loop_biv_dump (const struct induction *, FILE *, int);
301 static void loop_giv_dump (const struct induction *, FILE *, int);
302 static void record_giv (const struct loop *, struct induction *, rtx, rtx,
303 rtx, rtx, rtx, rtx, int, enum g_types, int, int,
304 rtx *);
305 static void update_giv_derive (const struct loop *, rtx);
306 static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
307 static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
308 rtx, rtx, rtx *, rtx *, rtx **);
309 static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
310 static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
311 rtx *, rtx *, int, int *, enum machine_mode);
312 static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
313 rtx *, rtx *, rtx *);
314 static int check_dbra_loop (struct loop *, int);
315 static rtx express_from_1 (rtx, rtx, rtx);
316 static rtx combine_givs_p (struct induction *, struct induction *);
317 static int cmp_combine_givs_stats (const void *, const void *);
318 static void combine_givs (struct loop_regs *, struct iv_class *);
319 static int product_cheap_p (rtx, rtx);
320 static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
321 int, int);
322 static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
323 struct iv_class *, int, basic_block, rtx);
324 static int last_use_this_basic_block (rtx, rtx);
325 static void record_initial (rtx, rtx, void *);
326 static void update_reg_last_use (rtx, rtx);
327 static rtx next_insn_in_loop (const struct loop *, rtx);
328 static void loop_regs_scan (const struct loop *, int);
329 static int count_insns_in_loop (const struct loop *);
330 static int find_mem_in_note_1 (rtx *, void *);
331 static rtx find_mem_in_note (rtx);
332 static void load_mems (const struct loop *);
333 static int insert_loop_mem (rtx *, void *);
334 static int replace_loop_mem (rtx *, void *);
335 static void replace_loop_mems (rtx, rtx, rtx, int);
336 static int replace_loop_reg (rtx *, void *);
337 static void replace_loop_regs (rtx insn, rtx, rtx);
338 static void note_reg_stored (rtx, rtx, void *);
339 static void try_copy_prop (const struct loop *, rtx, unsigned int);
340 static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
341 static rtx check_insn_for_givs (struct loop *, rtx, int, int);
342 static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
343 static rtx gen_add_mult (rtx, rtx, rtx, rtx);
344 static void loop_regs_update (const struct loop *, rtx);
345 static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
347 static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
348 static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
349 rtx, rtx);
350 static rtx loop_call_insn_hoist (const struct loop *, rtx);
351 static rtx loop_insn_sink_or_swim (const struct loop *, rtx);
353 static void loop_dump_aux (const struct loop *, FILE *, int);
354 static void loop_delete_insns (rtx, rtx);
355 static HOST_WIDE_INT remove_constant_addition (rtx *);
356 static rtx gen_load_of_final_value (rtx, rtx);
357 void debug_ivs (const struct loop *);
358 void debug_iv_class (const struct iv_class *);
359 void debug_biv (const struct induction *);
360 void debug_giv (const struct induction *);
361 void debug_loop (const struct loop *);
362 void debug_loops (const struct loops *);
364 typedef struct loop_replace_args
366 rtx match;
367 rtx replacement;
368 rtx insn;
369 } loop_replace_args;
371 /* Nonzero iff INSN is between START and END, inclusive. */
372 #define INSN_IN_RANGE_P(INSN, START, END) \
373 (INSN_UID (INSN) < max_uid_for_loop \
374 && INSN_LUID (INSN) >= INSN_LUID (START) \
375 && INSN_LUID (INSN) <= INSN_LUID (END))
377 /* Indirect_jump_in_function is computed once per function. */
378 static int indirect_jump_in_function;
379 static int indirect_jump_in_function_p (rtx);
381 static int compute_luids (rtx, rtx, int);
383 static int biv_elimination_giv_has_0_offset (struct induction *,
384 struct induction *, rtx);
386 /* Benefit penalty if a giv is not replaceable, i.e. we must emit an insn to
387 copy the value of the strength-reduced giv to its original register. */
388 static int copy_cost;
390 /* Cost of using a register, to normalize the benefits of a giv. */
391 static int reg_address_cost;
393 void
394 init_loop (void)
396 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
398 reg_address_cost = address_cost (reg, SImode);
400 copy_cost = COSTS_N_INSNS (1);
403 /* Compute the mapping from uids to luids.
404 LUIDs are numbers assigned to insns, like uids,
405 except that luids increase monotonically through the code.
406 Start at insn START and stop just before END. Assign LUIDs
407 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
408 static int
409 compute_luids (rtx start, rtx end, int prev_luid)
411 int i;
412 rtx insn;
414 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
416 if (INSN_UID (insn) >= max_uid_for_loop)
417 continue;
418 /* Don't assign luids to line-number NOTEs, so that the distance in
419 luids between two insns is not affected by -g. */
420 if (GET_CODE (insn) != NOTE
421 || NOTE_LINE_NUMBER (insn) <= 0)
422 uid_luid[INSN_UID (insn)] = ++i;
423 else
424 /* Give a line number note the same luid as the preceding insn. */
425 uid_luid[INSN_UID (insn)] = i;
427 return i + 1;
430 /* Entry point of this file. Perform loop optimization
431 on the current function. F is the first insn of the function
432 and DUMPFILE is a stream for output of a trace of actions taken
433 (or 0 if none should be output). */
435 void
436 loop_optimize (rtx f, FILE *dumpfile, int flags)
438 rtx insn;
439 int i;
440 struct loops loops_data;
441 struct loops *loops = &loops_data;
442 struct loop_info *loops_info;
444 loop_dump_stream = dumpfile;
446 init_recog_no_volatile ();
448 max_reg_before_loop = max_reg_num ();
449 loop_max_reg = max_reg_before_loop;
451 regs_may_share = 0;
453 /* Count the number of loops. */
455 max_loop_num = 0;
456 for (insn = f; insn; insn = NEXT_INSN (insn))
458 if (GET_CODE (insn) == NOTE
459 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
460 max_loop_num++;
463 /* Don't waste time if no loops. */
464 if (max_loop_num == 0)
465 return;
467 loops->num = max_loop_num;
469 /* Get size to use for tables indexed by uids.
470 Leave some space for labels allocated by find_and_verify_loops. */
471 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
473 uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
474 uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));
476 /* Allocate storage for array of loops. */
477 loops->array = xcalloc (loops->num, sizeof (struct loop));
479 /* Find and process each loop.
480 First, find them, and record them in order of their beginnings. */
481 find_and_verify_loops (f, loops);
483 /* Allocate and initialize auxiliary loop information. */
484 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
485 for (i = 0; i < (int) loops->num; i++)
486 loops->array[i].aux = loops_info + i;
488 /* Now find all register lifetimes. This must be done after
489 find_and_verify_loops, because it might reorder the insns in the
490 function. */
491 reg_scan (f, max_reg_before_loop, 1);
493 /* This must occur after reg_scan so that registers created by gcse
494 will have entries in the register tables.
496 We could have added a call to reg_scan after gcse_main in toplev.c,
497 but moving this call to init_alias_analysis is more efficient. */
498 init_alias_analysis ();
500 /* See if we went too far. Note that get_max_uid already returns
501 one more than the maximum uid of all insns. */
502 if (get_max_uid () > max_uid_for_loop)
503 abort ();
504 /* Now reset it to the actual size we need. See above. */
505 max_uid_for_loop = get_max_uid ();
507 /* find_and_verify_loops has already called compute_luids, but it
508 might have rearranged code afterwards, so we need to recompute
509 the luids now. */
510 compute_luids (f, NULL_RTX, 0);
512 /* Don't leave gaps in uid_luid for insns that have been
513 deleted. It is possible that the first or last insn
514 using some register has been deleted by cross-jumping.
515 Make sure that uid_luid for that former insn's uid
516 points to the general area where that insn used to be. */
517 for (i = 0; i < max_uid_for_loop; i++)
519 uid_luid[0] = uid_luid[i];
520 if (uid_luid[0] != 0)
521 break;
523 for (i = 0; i < max_uid_for_loop; i++)
524 if (uid_luid[i] == 0)
525 uid_luid[i] = uid_luid[i - 1];
527 /* Determine if the function has an indirect jump. On some systems
528 this prevents low-overhead loop instructions from being used. */
529 indirect_jump_in_function = indirect_jump_in_function_p (f);
531 /* Now scan the loops, last ones first, since this means inner ones are done
532 before outer ones. */
533 for (i = max_loop_num - 1; i >= 0; i--)
535 struct loop *loop = &loops->array[i];
537 if (! loop->invalid && loop->end)
539 scan_loop (loop, flags);
540 ggc_collect ();
544 end_alias_analysis ();
546 /* Clean up. */
547 for (i = 0; i < (int) loops->num; i++)
548 free (loops_info[i].mems);
550 free (uid_luid);
551 free (uid_loop);
552 free (loops_info);
553 free (loops->array);
556 /* Returns the next insn, in execution order, after INSN. LOOP->START and
557 LOOP->END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
558 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
559 insn-stream; it is used with loops that are entered near the
560 bottom. */
562 static rtx
563 next_insn_in_loop (const struct loop *loop, rtx insn)
565 insn = NEXT_INSN (insn);
567 if (insn == loop->end)
569 if (loop->top)
570 /* Go to the top of the loop, and continue there. */
571 insn = loop->top;
572 else
573 /* We're done. */
574 insn = NULL_RTX;
577 if (insn == loop->scan_start)
578 /* We're done. */
579 insn = NULL_RTX;
581 return insn;
584 /* Find any register references hidden inside X and add them to
585 the dependency list DEPS. This is used to look inside CLOBBER (MEM ...)
586 patterns when checking whether a PARALLEL can be pulled out of a loop. */
588 static rtx
589 find_regs_nested (rtx deps, rtx x)
591 enum rtx_code code = GET_CODE (x);
592 if (code == REG)
593 deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
594 else
596 const char *fmt = GET_RTX_FORMAT (code);
597 int i, j;
598 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
600 if (fmt[i] == 'e')
601 deps = find_regs_nested (deps, XEXP (x, i));
602 else if (fmt[i] == 'E')
603 for (j = 0; j < XVECLEN (x, i); j++)
604 deps = find_regs_nested (deps, XVECEXP (x, i, j));
607 return deps;
610 /* Optimize one loop described by LOOP. */
612 /* ??? Could also move memory writes out of loops if the destination address
613 is invariant, the source is invariant, the memory write is not volatile,
614 and if we can prove that no read inside the loop can read this address
615 before the write occurs. If there is a read of this address after the
616 write, then we can also mark the memory read as invariant. */
618 static void
619 scan_loop (struct loop *loop, int flags)
621 struct loop_info *loop_info = LOOP_INFO (loop);
622 struct loop_regs *regs = LOOP_REGS (loop);
623 int i;
624 rtx loop_start = loop->start;
625 rtx loop_end = loop->end;
626 rtx p;
627 /* 1 if we are scanning insns that could be executed zero times. */
628 int maybe_never = 0;
629 /* 1 if we are scanning insns that might never be executed
630 due to a subroutine call which might exit before they are reached. */
631 int call_passed = 0;
632 /* Number of insns in the loop. */
633 int insn_count;
634 int tem;
635 rtx temp, update_start, update_end;
636 /* The SET from an insn, if it is the only SET in the insn. */
637 rtx set, set1;
638 /* Chain describing insns movable in current loop. */
639 struct loop_movables *movables = LOOP_MOVABLES (loop);
640 /* Ratio of extra register life span we can justify
641 for saving an instruction. More if loop doesn't call subroutines
642 since in that case saving an insn makes more difference
643 and more registers are available. */
644 int threshold;
645 /* Nonzero if we are scanning instructions in a sub-loop. */
646 int loop_depth = 0;
647 int in_libcall;
649 loop->top = 0;
651 movables->head = 0;
652 movables->last = 0;
654 /* Determine whether this loop starts with a jump down to a test at
655 the end. This will occur for a small number of loops with a test
656 that is too complex to duplicate in front of the loop.
658 We search for the first insn or label in the loop, skipping NOTEs.
659 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
660 (because we might have a loop executed only once that contains a
661 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
662 (in case we have a degenerate loop).
664 Note that if we mistakenly think that a loop is entered at the top
665 when, in fact, it is entered at the exit test, the only effect will be
666 slightly poorer optimization. Making the opposite error can generate
667 incorrect code. Since very few loops now start with a jump to the
668 exit test, the code here to detect that case is very conservative. */
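/* Illustrative sketch (the labels are hypothetical, not taken from any real
   insn stream): a loop whose exit test has been placed at the bottom
   typically looks like

	NOTE_INSN_LOOP_BEG
	  (jump L_test)
	L_top:
	  ... loop body ...
	L_test:
	  ... exit test, conditional jump back to L_top ...
	NOTE_INSN_LOOP_END

   in which case the loop is really entered at L_test, not at the first insn
   after the NOTE_INSN_LOOP_BEG.  */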
670 for (p = NEXT_INSN (loop_start);
671 p != loop_end
672 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
673 && (GET_CODE (p) != NOTE
674 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
675 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
676 p = NEXT_INSN (p))
679 loop->scan_start = p;
681 /* If loop end is the end of the current function, then emit a
682 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
683 note insn. This is the position we use when sinking insns out of
684 the loop. */
685 if (NEXT_INSN (loop->end) != 0)
686 loop->sink = NEXT_INSN (loop->end);
687 else
688 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
690 /* Set up variables describing this loop. */
691 prescan_loop (loop);
692 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
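/* For instance, on a hypothetical target with 30 non-fixed registers, the
   threshold would be 2 * (1 + 30) = 62 for a loop without calls and
   1 * (1 + 30) = 31 for a loop containing calls; the numbers are purely
   illustrative of the formula above.  */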
694 /* If loop has a jump before the first label,
695 the true entry is the target of that jump.
696 Start scan from there.
697 But record in LOOP->TOP the place where the end-test jumps
698 back to so we can scan that after the end of the loop. */
699 if (GET_CODE (p) == JUMP_INSN
700 /* Loop entry must be an unconditional jump (and not a RETURN) */
701 && any_uncondjump_p (p)
702 && JUMP_LABEL (p) != 0
703 /* Check to see whether the jump actually
704 jumps out of the loop (meaning it's no loop).
705 This case can happen for things like
706 do {..} while (0). If this label was generated previously
707 by loop, we can't tell anything about it and have to reject
708 the loop. */
709 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
711 loop->top = next_label (loop->scan_start);
712 loop->scan_start = JUMP_LABEL (p);
715 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
716 as required by loop_reg_used_before_p. So skip such loops. (This
717 test may never be true, but it's best to play it safe.)
719 Also, skip loops where we do not start scanning at a label. This
720 test also rejects loops starting with a JUMP_INSN that failed the
721 test above. */
723 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
724 || GET_CODE (loop->scan_start) != CODE_LABEL)
726 if (loop_dump_stream)
727 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
728 INSN_UID (loop_start), INSN_UID (loop_end));
729 return;
732 /* Allocate extra space for REGs that might be created by load_mems.
733 We allocate a little extra slop as well, in the hopes that we
734 won't have to reallocate the regs array. */
735 loop_regs_scan (loop, loop_info->mems_idx + 16);
736 insn_count = count_insns_in_loop (loop);
738 if (loop_dump_stream)
740 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
741 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
742 if (loop->cont)
743 fprintf (loop_dump_stream, "Continue at insn %d.\n",
744 INSN_UID (loop->cont));
747 /* Scan through the loop finding insns that are safe to move.
748 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
749 this reg will be considered invariant for subsequent insns.
750 We consider whether subsequent insns use the reg
751 in deciding whether it is worth actually moving.
753 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
754 and therefore it is possible that the insns we are scanning
755 would never be executed. At such times, we must make sure
756 that it is safe to execute the insn once instead of zero times.
757 When MAYBE_NEVER is 0, all insns will be executed at least once
758 so that is not a problem. */
760 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
761 p != NULL_RTX;
762 p = next_insn_in_loop (loop, p))
764 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
765 in_libcall--;
766 if (GET_CODE (p) == INSN)
768 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
769 if (temp)
770 in_libcall++;
771 if (! in_libcall
772 && (set = single_set (p))
773 && GET_CODE (SET_DEST (set)) == REG
774 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
775 && SET_DEST (set) != pic_offset_table_rtx
776 #endif
777 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
779 int tem1 = 0;
780 int tem2 = 0;
781 int move_insn = 0;
782 int insert_temp = 0;
783 rtx src = SET_SRC (set);
784 rtx dependencies = 0;
786 /* Figure out what to use as a source of this insn. If a
787 REG_EQUIV note is given or if a REG_EQUAL note with a
788 constant operand is specified, use it as the source and
789 mark that we should move this insn by calling
790 emit_move_insn rather than duplicating the insn.
792 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
793 note is present. */
794 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
795 if (temp)
796 src = XEXP (temp, 0), move_insn = 1;
797 else
799 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
800 if (temp && CONSTANT_P (XEXP (temp, 0)))
801 src = XEXP (temp, 0), move_insn = 1;
802 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
804 src = XEXP (temp, 0);
805 /* A libcall block can use regs that don't appear in
806 the equivalent expression. To move the libcall,
807 we must move those regs too. */
808 dependencies = libcall_other_reg (p, src);
812 /* For parallels, add any possible uses to the dependencies, as
813 we can't move the insn without resolving them first.
814 MEMs inside CLOBBERs may also reference registers; these
815 count as implicit uses. */
816 if (GET_CODE (PATTERN (p)) == PARALLEL)
818 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
820 rtx x = XVECEXP (PATTERN (p), 0, i);
821 if (GET_CODE (x) == USE)
822 dependencies
823 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
824 dependencies);
825 else if (GET_CODE (x) == CLOBBER
826 && GET_CODE (XEXP (x, 0)) == MEM)
827 dependencies = find_regs_nested (dependencies,
828 XEXP (XEXP (x, 0), 0));
832 if (/* The register is used in basic blocks other
833 than the one where it is set (meaning that
834 something after this point in the loop might
835 depend on its value before the set). */
836 ! reg_in_basic_block_p (p, SET_DEST (set))
837 /* And the set is not guaranteed to be executed once
838 the loop starts, or the value before the set is
839 needed before the set occurs...
841 ??? Note we have quadratic behavior here, mitigated
842 by the fact that the previous test will often fail for
843 large loops. Rather than re-scanning the entire loop
844 each time for register usage, we should build tables
845 of the register usage and use them here instead. */
846 && (maybe_never
847 || loop_reg_used_before_p (loop, set, p)))
848 /* It is unsafe to move the set. However, it may be OK to
849 move the source into a new pseudo, and substitute a
850 reg-to-reg copy for the original insn.
852 This code used to consider it OK to move a set of a variable
853 which was not created by the user and not used in an exit
854 test.
855 That behavior is incorrect and was removed. */
856 insert_temp = 1;
858 /* Don't try to optimize a MODE_CC set with a constant
859 source. It probably will be combined with a conditional
860 jump. */
861 if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
862 && CONSTANT_P (src))
864 /* Don't try to optimize a register that was made
865 by loop-optimization for an inner loop.
866 We don't know its life-span, so we can't compute
867 the benefit. */
868 else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
870 /* Don't move the source and add a reg-to-reg copy:
871 - with -Os (this certainly increases size),
872 - if the mode doesn't support copy operations (obviously),
873 - if the source is already a reg (the motion will gain nothing),
874 - if the source is a legitimate constant (likewise). */
875 else if (insert_temp
876 && (optimize_size
877 || ! can_copy_p (GET_MODE (SET_SRC (set)))
878 || GET_CODE (SET_SRC (set)) == REG
879 || (CONSTANT_P (SET_SRC (set))
880 && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
882 else if ((tem = loop_invariant_p (loop, src))
883 && (dependencies == 0
884 || (tem2
885 = loop_invariant_p (loop, dependencies)) != 0)
886 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
887 || (tem1
888 = consec_sets_invariant_p
889 (loop, SET_DEST (set),
890 regs->array[REGNO (SET_DEST (set))].set_in_loop,
891 p)))
892 /* If the insn can cause a trap (such as divide by zero),
893 we can't move it unless it's guaranteed to be executed
894 once the loop is entered. Even a function call might
895 prevent the trap insn from being reached
896 (since it might exit!) */
897 && ! ((maybe_never || call_passed)
898 && may_trap_p (src)))
900 struct movable *m;
901 int regno = REGNO (SET_DEST (set));
903 /* A potential lossage arises when two insns
904 can be combined as long as they are both in the loop, but
905 we move one of them outside the loop. For large loops,
906 this can lose. The most common case of this is the address
907 of a function being called.
909 Therefore, if this register is marked as being used
910 exactly once and we are in a loop with calls
911 (a "large loop"), see if we can replace the usage of
912 this register with the source of this SET. If we can,
913 delete this insn.
915 Don't do this if P has a REG_RETVAL note or if we have
916 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
918 if (loop_info->has_call
919 && regs->array[regno].single_usage != 0
920 && regs->array[regno].single_usage != const0_rtx
921 && REGNO_FIRST_UID (regno) == INSN_UID (p)
922 && (REGNO_LAST_UID (regno)
923 == INSN_UID (regs->array[regno].single_usage))
924 && regs->array[regno].set_in_loop == 1
925 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
926 && ! side_effects_p (SET_SRC (set))
927 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
928 && (! SMALL_REGISTER_CLASSES
929 || (! (GET_CODE (SET_SRC (set)) == REG
930 && (REGNO (SET_SRC (set))
931 < FIRST_PSEUDO_REGISTER))))
932 /* This test is not redundant; SET_SRC (set) might be
933 a call-clobbered register and the life of REGNO
934 might span a call. */
935 && ! modified_between_p (SET_SRC (set), p,
936 regs->array[regno].single_usage)
937 && no_labels_between_p (p,
938 regs->array[regno].single_usage)
939 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
940 regs->array[regno].single_usage))
942 /* Replace any usage in a REG_EQUAL note. Must copy
943 the new source, so that we don't get rtx sharing
944 between the SET_SOURCE and REG_NOTES of insn p. */
945 REG_NOTES (regs->array[regno].single_usage)
946 = (replace_rtx
947 (REG_NOTES (regs->array[regno].single_usage),
948 SET_DEST (set), copy_rtx (SET_SRC (set))));
950 delete_insn (p);
951 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
952 i++)
953 regs->array[regno+i].set_in_loop = 0;
954 continue;
957 m = xmalloc (sizeof (struct movable));
958 m->next = 0;
959 m->insn = p;
960 m->set_src = src;
961 m->dependencies = dependencies;
962 m->set_dest = SET_DEST (set);
963 m->force = 0;
964 m->consec
965 = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
966 m->done = 0;
967 m->forces = 0;
968 m->partial = 0;
969 m->move_insn = move_insn;
970 m->move_insn_first = 0;
971 m->insert_temp = insert_temp;
972 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
973 m->savemode = VOIDmode;
974 m->regno = regno;
975 /* Set M->cond if either loop_invariant_p
976 or consec_sets_invariant_p returned 2
977 (only conditionally invariant). */
978 m->cond = ((tem | tem1 | tem2) > 1);
979 m->global = LOOP_REG_GLOBAL_P (loop, regno);
980 m->match = 0;
981 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
982 m->savings = regs->array[regno].n_times_set;
983 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
984 m->savings += libcall_benefit (p);
985 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
986 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
987 /* Add M to the end of the chain MOVABLES. */
988 loop_movables_add (movables, m);
990 if (m->consec > 0)
992 /* It is possible for the first instruction to have a
993 REG_EQUAL note but a non-invariant SET_SRC, so we must
994 remember the status of the first instruction in case
995 the last instruction doesn't have a REG_EQUAL note. */
996 m->move_insn_first = m->move_insn;
998 /* Skip this insn, not checking REG_LIBCALL notes. */
999 p = next_nonnote_insn (p);
1000 /* Skip the consecutive insns, if there are any. */
1001 p = skip_consec_insns (p, m->consec);
1002 /* Back up to the last insn of the consecutive group. */
1003 p = prev_nonnote_insn (p);
1005 /* We must now reset m->move_insn, m->is_equiv, and
1006 possibly m->set_src to correspond to the effects of
1007 all the insns. */
1008 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1009 if (temp)
1010 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1011 else
1013 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1014 if (temp && CONSTANT_P (XEXP (temp, 0)))
1015 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1016 else
1017 m->move_insn = 0;
1020 m->is_equiv
1021 = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1024 /* If this register is always set within a STRICT_LOW_PART
1025 or set to zero, then its high bytes are constant.
1026 So clear them outside the loop and within the loop
1027 just load the low bytes.
1028 We must check that the machine has an instruction to do so.
1029 Also, if the value loaded into the register
1030 depends on the same register, this cannot be done. */
1031 else if (SET_SRC (set) == const0_rtx
1032 && GET_CODE (NEXT_INSN (p)) == INSN
1033 && (set1 = single_set (NEXT_INSN (p)))
1034 && GET_CODE (set1) == SET
1035 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
1036 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
1037 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
1038 == SET_DEST (set))
1039 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1041 int regno = REGNO (SET_DEST (set));
1042 if (regs->array[regno].set_in_loop == 2)
1044 struct movable *m;
1045 m = xmalloc (sizeof (struct movable));
1046 m->next = 0;
1047 m->insn = p;
1048 m->set_dest = SET_DEST (set);
1049 m->dependencies = 0;
1050 m->force = 0;
1051 m->consec = 0;
1052 m->done = 0;
1053 m->forces = 0;
1054 m->move_insn = 0;
1055 m->move_insn_first = 0;
1056 m->insert_temp = insert_temp;
1057 m->partial = 1;
1058 /* If the insn may not be executed on some cycles,
1059 we can't clear the whole reg; clear just high part.
1060 Not even if the reg is used only within this loop.
1061 Consider this:
1062 while (1)
1063 while (s != t) {
1064 if (foo ()) x = *s;
1065 use (x);
1067 Clearing x before the inner loop could clobber a value
1068 being saved from the last time around the outer loop.
1069 However, if the reg is not used outside this loop
1070 and all uses of the register are in the same
1071 basic block as the store, there is no problem.
1073 If this insn was made by loop, we don't know its
1074 INSN_LUID and hence must make a conservative
1075 assumption. */
1076 m->global = (INSN_UID (p) >= max_uid_for_loop
1077 || LOOP_REG_GLOBAL_P (loop, regno)
1078 || (labels_in_range_p
1079 (p, REGNO_FIRST_LUID (regno))));
1080 if (maybe_never && m->global)
1081 m->savemode = GET_MODE (SET_SRC (set1));
1082 else
1083 m->savemode = VOIDmode;
1084 m->regno = regno;
1085 m->cond = 0;
1086 m->match = 0;
1087 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1088 m->savings = 1;
1089 for (i = 0;
1090 i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1091 i++)
1092 regs->array[regno+i].set_in_loop = -1;
1093 /* Add M to the end of the chain MOVABLES. */
1094 loop_movables_add (movables, m);
1099 /* Past a call insn, we get to insns which might not be executed
1100 because the call might exit. This matters for insns that trap.
1101 Constant and pure call insns always return, so they don't count. */
1102 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1103 call_passed = 1;
1104 /* Past a label or a jump, we get to insns for which we
1105 can't count on whether or how many times they will be
1106 executed during each iteration. Therefore, we can
1107 only move out sets of trivial variables
1108 (those not used after the loop). */
1109 /* Similar code appears twice in strength_reduce. */
1110 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1111 /* If we enter the loop in the middle, and scan around to the
1112 beginning, don't set maybe_never for that. This must be an
1113 unconditional jump, otherwise the code at the top of the
1114 loop might never be executed. Unconditional jumps are
1115 followed by a barrier then the loop_end. */
1116 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1117 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1118 && any_uncondjump_p (p)))
1119 maybe_never = 1;
1120 else if (GET_CODE (p) == NOTE)
1122 /* At the virtual top of a converted loop, insns are again known to
1123 be executed: logically, the loop begins here even though the exit
1124 code has been duplicated. */
1125 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1126 maybe_never = call_passed = 0;
1127 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1128 loop_depth++;
1129 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1130 loop_depth--;
1134 /* If one movable subsumes another, ignore that other. */
1136 ignore_some_movables (movables);
1138 /* For each movable insn, see if the reg that it loads feeds, at the
1139 point where it dies, directly into another conditionally movable insn.
1140 If so, record that the second insn "forces" the first one,
1141 since the second can be moved only if the first is. */
1143 force_movables (movables);
1145 /* See if there are multiple movable insns that load the same value.
1146 If there are, make all but the first point at the first one
1147 through the `match' field, and add the priorities of them
1148 all together as the priority of the first. */
1150 combine_movables (movables, regs);
1152 /* Now consider each movable insn to decide whether it is worth moving.
1153 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1155 For machines with few registers this increases code size, so do not
1156 move movables when optimizing for code size on such machines.
1157 (The 18 below is the value for i386.) */
1159 if (!optimize_size
1160 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1162 move_movables (loop, movables, threshold, insn_count);
1164 /* Recalculate regs->array if move_movables has created new
1165 registers. */
1166 if (max_reg_num () > regs->num)
1168 loop_regs_scan (loop, 0);
1169 for (update_start = loop_start;
1170 PREV_INSN (update_start)
1171 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1172 update_start = PREV_INSN (update_start))
1174 update_end = NEXT_INSN (loop_end);
1176 reg_scan_update (update_start, update_end, loop_max_reg);
1177 loop_max_reg = max_reg_num ();
1181 /* Candidates whose set_in_loop is still negative are those not moved.
1182 Change regs->array[I].set_in_loop to indicate that those are not actually
1183 invariant. */
1184 for (i = 0; i < regs->num; i++)
1185 if (regs->array[i].set_in_loop < 0)
1186 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1188 /* Now that we've moved some things out of the loop, we might be able to
1189 hoist even more memory references. */
1190 load_mems (loop);
1192 /* Recalculate regs->array if load_mems has created new registers. */
1193 if (max_reg_num () > regs->num)
1194 loop_regs_scan (loop, 0);
1196 for (update_start = loop_start;
1197 PREV_INSN (update_start)
1198 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1199 update_start = PREV_INSN (update_start))
1201 update_end = NEXT_INSN (loop_end);
1203 reg_scan_update (update_start, update_end, loop_max_reg);
1204 loop_max_reg = max_reg_num ();
1206 if (flag_strength_reduce)
1208 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1209 /* Ensure our label doesn't go away. */
1210 LABEL_NUSES (update_end)++;
1212 strength_reduce (loop, flags);
1214 reg_scan_update (update_start, update_end, loop_max_reg);
1215 loop_max_reg = max_reg_num ();
1217 if (update_end && GET_CODE (update_end) == CODE_LABEL
1218 && --LABEL_NUSES (update_end) == 0)
1219 delete_related_insns (update_end);
1223 /* The movable information was needed by strength reduction; free it now. */
1224 loop_movables_free (movables);
1226 free (regs->array);
1227 regs->array = 0;
1228 regs->num = 0;
1231 /* Add elements to *OUTPUT to record all the pseudo-regs
1232 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1234 void
1235 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1237 enum rtx_code code;
1238 const char *fmt;
1239 int i;
1241 code = GET_CODE (in_this);
1243 switch (code)
1245 case PC:
1246 case CC0:
1247 case CONST_INT:
1248 case CONST_DOUBLE:
1249 case CONST:
1250 case SYMBOL_REF:
1251 case LABEL_REF:
1252 return;
1254 case REG:
1255 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1256 && ! reg_mentioned_p (in_this, not_in_this))
1257 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1258 return;
1260 default:
1261 break;
1264 fmt = GET_RTX_FORMAT (code);
1265 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1267 int j;
1269 switch (fmt[i])
1271 case 'E':
1272 for (j = 0; j < XVECLEN (in_this, i); j++)
1273 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1274 break;
1276 case 'e':
1277 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1278 break;
1283 /* Check what regs are referred to in the libcall block ending with INSN,
1284 aside from those mentioned in the equivalent value.
1285 If there are none, return 0.
1286 If there are one or more, return an EXPR_LIST containing all of them. */
1289 libcall_other_reg (rtx insn, rtx equiv)
1291 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1292 rtx p = XEXP (note, 0);
1293 rtx output = 0;
1295 /* First, find all the regs used in the libcall block
1296 that are not mentioned as inputs to the result. */
1298 while (p != insn)
1300 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1301 || GET_CODE (p) == CALL_INSN)
1302 record_excess_regs (PATTERN (p), equiv, &output);
1303 p = NEXT_INSN (p);
1306 return output;
1309 /* Return 1 if all uses of REG
1310 are between INSN and the end of the basic block. */
1312 static int
1313 reg_in_basic_block_p (rtx insn, rtx reg)
1315 int regno = REGNO (reg);
1316 rtx p;
1318 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1319 return 0;
1321 /* Search this basic block for the already recorded last use of the reg. */
1322 for (p = insn; p; p = NEXT_INSN (p))
1324 switch (GET_CODE (p))
1326 case NOTE:
1327 break;
1329 case INSN:
1330 case CALL_INSN:
1331 /* Ordinary insn: if this is the last use, we win. */
1332 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1333 return 1;
1334 break;
1336 case JUMP_INSN:
1337 /* Jump insn: if this is the last use, we win. */
1338 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1339 return 1;
1340 /* Otherwise, it's the end of the basic block, so we lose. */
1341 return 0;
1343 case CODE_LABEL:
1344 case BARRIER:
1345 /* It's the end of the basic block, so we lose. */
1346 return 0;
1348 default:
1349 break;
1353 /* The "last use" that was recorded can't be found after the first
1354 use. This can happen when the last use was deleted while
1355 processing an inner loop, this inner loop was then completely
1356 unrolled, and the outer loop is always exited after the inner loop,
1357 so that everything after the first use becomes a single basic block. */
1358 return 1;
1361 /* Compute the benefit of eliminating the insns in the block whose
1362 last insn is LAST. This may be a group of insns used to compute a
1363 value directly or can contain a library call. */
1365 static int
1366 libcall_benefit (rtx last)
1368 rtx insn;
1369 int benefit = 0;
1371 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1372 insn != last; insn = NEXT_INSN (insn))
1374 if (GET_CODE (insn) == CALL_INSN)
1375 benefit += 10; /* Assume at least this many insns in a library
1376 routine. */
1377 else if (GET_CODE (insn) == INSN
1378 && GET_CODE (PATTERN (insn)) != USE
1379 && GET_CODE (PATTERN (insn)) != CLOBBER)
1380 benefit++;
1383 return benefit;
1386 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1388 static rtx
1389 skip_consec_insns (rtx insn, int count)
1391 for (; count > 0; count--)
1393 rtx temp;
1395 /* If first insn of libcall sequence, skip to end. */
1396 /* Do this at start of loop, since INSN is guaranteed to
1397 be an insn here. */
1398 if (GET_CODE (insn) != NOTE
1399 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1400 insn = XEXP (temp, 0);
1403 insn = NEXT_INSN (insn);
1404 while (GET_CODE (insn) == NOTE);
1407 return insn;
1410 /* Ignore any movable whose insn falls within a libcall
1411 which is part of another movable.
1412 We make use of the fact that the movable for the libcall value
1413 was made later and so appears later on the chain. */
1415 static void
1416 ignore_some_movables (struct loop_movables *movables)
1418 struct movable *m, *m1;
1420 for (m = movables->head; m; m = m->next)
1422 /* Is this a movable for the value of a libcall? */
1423 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1424 if (note)
1426 rtx insn;
1427 /* Check for earlier movables inside that range,
1428 and mark them invalid. We cannot use LUIDs here because
1429 insns created by loop.c for prior loops don't have LUIDs.
1430 Rather than reject all such insns from movables, we just
1431 explicitly check each insn in the libcall (since invariant
1432 libcalls aren't that common). */
1433 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1434 for (m1 = movables->head; m1 != m; m1 = m1->next)
1435 if (m1->insn == insn)
1436 m1->done = 1;
1441 /* For each movable insn, see if the reg that it loads feeds, at the
1442 point where it dies, directly into another conditionally movable insn.
1443 If so, record that the second insn "forces" the first one,
1444 since the second can be moved only if the first is. */
1446 static void
1447 force_movables (struct loop_movables *movables)
1449 struct movable *m, *m1;
1451 for (m1 = movables->head; m1; m1 = m1->next)
1452 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1453 if (!m1->partial && !m1->done)
1455 int regno = m1->regno;
1456 for (m = m1->next; m; m = m->next)
1457 /* ??? Could this be a bug? What if CSE caused the
1458 register of M1 to be used after this insn?
1459 Since CSE does not update regno_last_uid,
1460 this insn M->insn might not be where it dies.
1461 But very likely this doesn't matter; what matters is
1462 that M's reg is computed from M1's reg. */
1463 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1464 && !m->done)
1465 break;
1466 if (m != 0 && m->set_src == m1->set_dest
1467 /* If m->consec, m->set_src isn't valid. */
1468 && m->consec == 0)
1469 m = 0;
1471 /* Increase the priority of moving the first insn
1472 since it permits the second to be moved as well.
1473 Likewise for insns already forced by the first insn. */
1474 if (m != 0)
1476 struct movable *m2;
1478 m->forces = m1;
1479 for (m2 = m1; m2; m2 = m2->forces)
1481 m2->lifetime += m->lifetime;
1482 m2->savings += m->savings;
1488 /* Find invariant expressions that are equal and can be combined into
1489 one register. */
1491 static void
1492 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1494 struct movable *m;
1495 char *matched_regs = xmalloc (regs->num);
1496 enum machine_mode mode;
1498 /* Regs that are set more than once are not allowed to match
1499 or be matched. I'm no longer sure why not. */
1500 /* Only pseudo registers are allowed to match or be matched,
1501 since move_movables does not validate the change. */
1502 /* Perhaps testing m->consec_sets would be more appropriate here? */
1504 for (m = movables->head; m; m = m->next)
1505 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1506 && m->regno >= FIRST_PSEUDO_REGISTER
1507 && !m->insert_temp
1508 && !m->partial)
1510 struct movable *m1;
1511 int regno = m->regno;
1513 memset (matched_regs, 0, regs->num);
1514 matched_regs[regno] = 1;
1516 /* We want later insns to match the first one. Don't make the first
1517 one match any later ones. So start this loop at m->next. */
1518 for (m1 = m->next; m1; m1 = m1->next)
1519 if (m != m1 && m1->match == 0
1520 && !m1->insert_temp
1521 && regs->array[m1->regno].n_times_set == 1
1522 && m1->regno >= FIRST_PSEUDO_REGISTER
1523 /* A reg used outside the loop mustn't be eliminated. */
1524 && !m1->global
1525 /* A reg used for zero-extending mustn't be eliminated. */
1526 && !m1->partial
1527 && (matched_regs[m1->regno]
1530 /* Can combine regs with different modes loaded from the
1531 same constant only if the modes are the same or
1532 if both are integer modes with M wider or the same
1533 width as M1. The check for integer is redundant, but
1534 safe, since the only case of differing destination
1535 modes with equal sources is when both sources are
1536 VOIDmode, i.e., CONST_INT. */
1537 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1538 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1539 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1540 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1541 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1542 /* See if the source of M1 says it matches M. */
1543 && ((GET_CODE (m1->set_src) == REG
1544 && matched_regs[REGNO (m1->set_src)])
1545 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1546 movables, regs))))
1547 && ((m->dependencies == m1->dependencies)
1548 || rtx_equal_p (m->dependencies, m1->dependencies)))
1550 m->lifetime += m1->lifetime;
1551 m->savings += m1->savings;
1552 m1->done = 1;
1553 m1->match = m;
1554 matched_regs[m1->regno] = 1;
1558 /* Now combine the regs used for zero-extension.
1559 This can be done for those not marked `global'
1560 provided their lives don't overlap. */
1562 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1563 mode = GET_MODE_WIDER_MODE (mode))
1565 struct movable *m0 = 0;
1567 /* Combine all the registers for extension from mode MODE.
1568 Don't combine any that are used outside this loop. */
1569 for (m = movables->head; m; m = m->next)
1570 if (m->partial && ! m->global
1571 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1573 struct movable *m1;
1575 int first = REGNO_FIRST_LUID (m->regno);
1576 int last = REGNO_LAST_LUID (m->regno);
1578 if (m0 == 0)
1580 /* First one: don't check for overlap, just record it. */
1581 m0 = m;
1582 continue;
1585 /* Make sure they extend to the same mode.
1586 (Almost always true.) */
1587 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1588 continue;
1590 /* We already have one: check for overlap with those
1591 already combined together. */
1592 for (m1 = movables->head; m1 != m; m1 = m1->next)
1593 if (m1 == m0 || (m1->partial && m1->match == m0))
1594 if (! (REGNO_FIRST_LUID (m1->regno) > last
1595 || REGNO_LAST_LUID (m1->regno) < first))
1596 goto overlap;
1598 /* No overlap: we can combine this with the others. */
1599 m0->lifetime += m->lifetime;
1600 m0->savings += m->savings;
1601 m->done = 1;
1602 m->match = m0;
1604 overlap:
1609 /* Clean up. */
1610 free (matched_regs);
1613 /* Returns the number of movable instructions in LOOP that were not
1614 moved outside the loop. */
1616 static int
1617 num_unmoved_movables (const struct loop *loop)
1619 int num = 0;
1620 struct movable *m;
1622 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1623 if (!m->done)
1624 ++num;
1626 return num;
1630 /* Return 1 if regs X and Y will become the same if moved. */
1632 static int
1633 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1635 unsigned int xn = REGNO (x);
1636 unsigned int yn = REGNO (y);
1637 struct movable *mx, *my;
1639 for (mx = movables->head; mx; mx = mx->next)
1640 if (mx->regno == xn)
1641 break;
1643 for (my = movables->head; my; my = my->next)
1644 if (my->regno == yn)
1645 break;
1647 return (mx && my
1648 && ((mx->match == my->match && mx->match != 0)
1649 || mx->match == my
1650 || mx == my->match));
1653 /* Return 1 if X and Y are identical-looking rtx's.
1654 This is the Lisp function EQUAL for rtx arguments.
1656 If two registers are matching movables or a movable register and an
1657 equivalent constant, consider them equal. */
1659 static int
1660 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
1661 struct loop_regs *regs)
1663 int i;
1664 int j;
1665 struct movable *m;
1666 enum rtx_code code;
1667 const char *fmt;
1669 if (x == y)
1670 return 1;
1671 if (x == 0 || y == 0)
1672 return 0;
1674 code = GET_CODE (x);
1676 /* If we have a register and a constant, they may sometimes be
1677 equal. */
1678 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1679 && CONSTANT_P (y))
1681 for (m = movables->head; m; m = m->next)
1682 if (m->move_insn && m->regno == REGNO (x)
1683 && rtx_equal_p (m->set_src, y))
1684 return 1;
1686 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1687 && CONSTANT_P (x))
1689 for (m = movables->head; m; m = m->next)
1690 if (m->move_insn && m->regno == REGNO (y)
1691 && rtx_equal_p (m->set_src, x))
1692 return 1;
1695 /* Otherwise, rtx's of different codes cannot be equal. */
1696 if (code != GET_CODE (y))
1697 return 0;
1699 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1700 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1702 if (GET_MODE (x) != GET_MODE (y))
1703 return 0;
1705 /* These three types of rtx's can be compared nonrecursively. */
1706 if (code == REG)
1707 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1709 if (code == LABEL_REF)
1710 return XEXP (x, 0) == XEXP (y, 0);
1711 if (code == SYMBOL_REF)
1712 return XSTR (x, 0) == XSTR (y, 0);
1714 /* Compare the elements. If any pair of corresponding elements
1715 fails to match, return 0 for the whole thing. */
1717 fmt = GET_RTX_FORMAT (code);
1718 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1720 switch (fmt[i])
1722 case 'w':
1723 if (XWINT (x, i) != XWINT (y, i))
1724 return 0;
1725 break;
1727 case 'i':
1728 if (XINT (x, i) != XINT (y, i))
1729 return 0;
1730 break;
1732 case 'E':
1733 /* Two vectors must have the same length. */
1734 if (XVECLEN (x, i) != XVECLEN (y, i))
1735 return 0;
1737 /* And the corresponding elements must match. */
1738 for (j = 0; j < XVECLEN (x, i); j++)
1739 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1740 movables, regs) == 0)
1741 return 0;
1742 break;
1744 case 'e':
1745 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1746 == 0)
1747 return 0;
1748 break;
1750 case 's':
1751 if (strcmp (XSTR (x, i), XSTR (y, i)))
1752 return 0;
1753 break;
1755 case 'u':
1756 /* These are just backpointers, so they don't matter. */
1757 break;
1759 case '0':
1760 break;
1762 /* It is believed that rtx's at this level will never
1763 contain anything but integers and other rtx's,
1764 except for within LABEL_REFs and SYMBOL_REFs. */
1765 default:
1766 abort ();
1769 return 1;
1772 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1773 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1774 references is incremented once for each added note. */
1776 static void
1777 add_label_notes (rtx x, rtx insns)
1779 enum rtx_code code = GET_CODE (x);
1780 int i, j;
1781 const char *fmt;
1782 rtx insn;
1784 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1786 /* This code used to ignore labels that referred to dispatch tables to
1787 avoid flow generating (slightly) worse code.
1789 We no longer ignore such label references (see LABEL_REF handling in
1790 mark_jump_label for additional information). */
1791 for (insn = insns; insn; insn = NEXT_INSN (insn))
1792 if (reg_mentioned_p (XEXP (x, 0), insn))
1794 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1795 REG_NOTES (insn));
1796 if (LABEL_P (XEXP (x, 0)))
1797 LABEL_NUSES (XEXP (x, 0))++;
1801 fmt = GET_RTX_FORMAT (code);
1802 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1804 if (fmt[i] == 'e')
1805 add_label_notes (XEXP (x, i), insns);
1806 else if (fmt[i] == 'E')
1807 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1808 add_label_notes (XVECEXP (x, i, j), insns);
1812 /* Scan MOVABLES, and move the insns that deserve to be moved.
1813 If two matching movables are combined, replace one reg with the
1814 other throughout. */
1816 static void
1817 move_movables (struct loop *loop, struct loop_movables *movables,
1818 int threshold, int insn_count)
1820 struct loop_regs *regs = LOOP_REGS (loop);
1821 int nregs = regs->num;
1822 rtx new_start = 0;
1823 struct movable *m;
1824 rtx p;
1825 rtx loop_start = loop->start;
1826 rtx loop_end = loop->end;
1827 /* Map of pseudo-register replacements to handle combining
1828 when we move several insns that load the same value
1829 into different pseudo-registers. */
1830 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
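/* already_moved[REGNO] is set once the movable that sets REGNO has been hoisted; any other movable loading the same register must then be moved as well (see below).  */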
1831 char *already_moved = xcalloc (nregs, sizeof (char));
1833 for (m = movables->head; m; m = m->next)
1835 /* Describe this movable insn. */
1837 if (loop_dump_stream)
1839 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1840 INSN_UID (m->insn), m->regno, m->lifetime);
1841 if (m->consec > 0)
1842 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1843 if (m->cond)
1844 fprintf (loop_dump_stream, "cond ");
1845 if (m->force)
1846 fprintf (loop_dump_stream, "force ");
1847 if (m->global)
1848 fprintf (loop_dump_stream, "global ");
1849 if (m->done)
1850 fprintf (loop_dump_stream, "done ");
1851 if (m->move_insn)
1852 fprintf (loop_dump_stream, "move-insn ");
1853 if (m->match)
1854 fprintf (loop_dump_stream, "matches %d ",
1855 INSN_UID (m->match->insn));
1856 if (m->forces)
1857 fprintf (loop_dump_stream, "forces %d ",
1858 INSN_UID (m->forces->insn));
1861 /* Ignore the insn if it's already done (it matched something else).
1862 Otherwise, see if it is now safe to move. */
1864 if (!m->done
1865 && (! m->cond
1866 || (1 == loop_invariant_p (loop, m->set_src)
1867 && (m->dependencies == 0
1868 || 1 == loop_invariant_p (loop, m->dependencies))
1869 && (m->consec == 0
1870 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1871 m->consec + 1,
1872 m->insn))))
1873 && (! m->forces || m->forces->done))
1875 int regno;
1876 rtx p;
1877 int savings = m->savings;
1879 /* We have an insn that is safe to move.
1880 Compute its desirability. */
1882 p = m->insn;
1883 regno = m->regno;
1885 if (loop_dump_stream)
1886 fprintf (loop_dump_stream, "savings %d ", savings);
1888 if (regs->array[regno].moved_once && loop_dump_stream)
1889 fprintf (loop_dump_stream, "halved since already moved ");
1891 /* An insn MUST be moved if we already moved something else
1892 which is safe only if this one is moved too: that is,
1893 if already_moved[REGNO] is nonzero. */
1895 /* An insn is desirable to move if the new lifetime of the
1896 register is no more than THRESHOLD times the old lifetime.
1897 If it's not desirable, it means the loop is so big
1898 that moving won't speed things up much,
1899 and it is liable to make register usage worse. */
1901 /* It is also desirable to move if it can be moved at no
1902 extra cost because something else was already moved. */
1904 if (already_moved[regno]
1905 || flag_move_all_movables
1906 || (threshold * savings * m->lifetime) >=
1907 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1908 || (m->forces && m->forces->done
1909 && regs->array[m->forces->regno].n_times_set == 1))
1911 int count;
1912 struct movable *m1;
1913 rtx first = NULL_RTX;
1914 rtx newreg = NULL_RTX;
1916 if (m->insert_temp)
1917 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
1919 /* Now move the insns that set the reg. */
1921 if (m->partial && m->match)
1923 rtx newpat, i1;
1924 rtx r1, r2;
1925 /* Find the end of this chain of matching regs.
1926 Thus, we load each reg in the chain from that one reg.
1927 And that reg is loaded with 0 directly,
1928 since it has ->match == 0. */
1929 for (m1 = m; m1->match; m1 = m1->match);
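/* (Empty loop body: we only walk M1 to the head of its match chain.)  */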
1930 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1931 SET_DEST (PATTERN (m1->insn)));
1932 i1 = loop_insn_hoist (loop, newpat);
1934 /* Mark the moved, invariant reg as being allowed to
1935 share a hard reg with the other matching invariant. */
1936 REG_NOTES (i1) = REG_NOTES (m->insn);
1937 r1 = SET_DEST (PATTERN (m->insn));
1938 r2 = SET_DEST (PATTERN (m1->insn));
1939 regs_may_share
1940 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1941 gen_rtx_EXPR_LIST (VOIDmode, r2,
1942 regs_may_share));
1943 delete_insn (m->insn);
1945 if (new_start == 0)
1946 new_start = i1;
1948 if (loop_dump_stream)
1949 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1951 /* If we are to re-generate the item being moved with a
1952 new move insn, first delete what we have and then emit
1953 the move insn before the loop. */
1954 else if (m->move_insn)
1956 rtx i1, temp, seq;
1958 for (count = m->consec; count >= 0; count--)
1960 /* If this is the first insn of a library call sequence,
1961 something is very wrong. */
1962 if (GET_CODE (p) != NOTE
1963 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1964 abort ();
1966 /* If this is the last insn of a libcall sequence, then
1967 delete every insn in the sequence except the last.
1968 The last insn is handled in the normal manner. */
1969 if (GET_CODE (p) != NOTE
1970 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1972 temp = XEXP (temp, 0);
1973 while (temp != p)
1974 temp = delete_insn (temp);
1977 temp = p;
1978 p = delete_insn (p);
1980 /* simplify_giv_expr expects that it can walk the insns
1981 at m->insn forwards and see this old sequence we are
1982 tossing here. delete_insn does preserve the next
1983 pointers, but when we skip over a NOTE we must fix
1984 it up. Otherwise that code walks into the non-deleted
1985 insn stream. */
1986 while (p && GET_CODE (p) == NOTE)
1987 p = NEXT_INSN (temp) = NEXT_INSN (p);
1989 if (m->insert_temp)
1991 /* Replace the original insn with a move from
1992 our newly created temp. */
1993 start_sequence ();
1994 emit_move_insn (m->set_dest, newreg);
1995 seq = get_insns ();
1996 end_sequence ();
1997 emit_insn_before (seq, p);
2001 start_sequence ();
2002 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2003 m->set_src);
2004 seq = get_insns ();
2005 end_sequence ();
2007 add_label_notes (m->set_src, seq);
2009 i1 = loop_insn_hoist (loop, seq);
2010 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2011 set_unique_reg_note (i1,
2012 m->is_equiv ? REG_EQUIV : REG_EQUAL,
2013 m->set_src);
2015 if (loop_dump_stream)
2016 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2018 /* The more regs we move, the less we like moving them. */
2019 threshold -= 3;
2021 else
2023 for (count = m->consec; count >= 0; count--)
2025 rtx i1, temp;
2027 /* If first insn of libcall sequence, skip to end. */
2028 /* Do this at start of loop, since p is guaranteed to
2029 be an insn here. */
2030 if (GET_CODE (p) != NOTE
2031 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2032 p = XEXP (temp, 0);
2034 /* If last insn of libcall sequence, move all
2035 insns except the last before the loop. The last
2036 insn is handled in the normal manner. */
2037 if (GET_CODE (p) != NOTE
2038 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2040 rtx fn_address = 0;
2041 rtx fn_reg = 0;
2042 rtx fn_address_insn = 0;
2044 first = 0;
2045 for (temp = XEXP (temp, 0); temp != p;
2046 temp = NEXT_INSN (temp))
2048 rtx body;
2049 rtx n;
2050 rtx next;
2052 if (GET_CODE (temp) == NOTE)
2053 continue;
2055 body = PATTERN (temp);
2057 /* Find the next insn after TEMP,
2058 not counting USE or NOTE insns. */
2059 for (next = NEXT_INSN (temp); next != p;
2060 next = NEXT_INSN (next))
2061 if (! (GET_CODE (next) == INSN
2062 && GET_CODE (PATTERN (next)) == USE)
2063 && GET_CODE (next) != NOTE)
2064 break;
2066 /* If that is the call, this may be the insn
2067 that loads the function address.
2069 Extract the function address from the insn
2070 that loads it into a register.
2071 If this insn was cse'd, we get incorrect code.
2073 So emit a new move insn that copies the
2074 function address into the register that the
2075 call insn will use. flow.c will delete any
2076 redundant stores that we have created. */
2077 if (GET_CODE (next) == CALL_INSN
2078 && GET_CODE (body) == SET
2079 && GET_CODE (SET_DEST (body)) == REG
2080 && (n = find_reg_note (temp, REG_EQUAL,
2081 NULL_RTX)))
2083 fn_reg = SET_SRC (body);
2084 if (GET_CODE (fn_reg) != REG)
2085 fn_reg = SET_DEST (body);
2086 fn_address = XEXP (n, 0);
2087 fn_address_insn = temp;
2089 /* We have the call insn.
2090 If it uses the register we suspect it might,
2091 load it with the correct address directly. */
2092 if (GET_CODE (temp) == CALL_INSN
2093 && fn_address != 0
2094 && reg_referenced_p (fn_reg, body))
2095 loop_insn_emit_after (loop, 0, fn_address_insn,
2096 gen_move_insn
2097 (fn_reg, fn_address));
2099 if (GET_CODE (temp) == CALL_INSN)
2101 i1 = loop_call_insn_hoist (loop, body);
2102 /* Because the USAGE information potentially
2103 contains objects other than hard registers
2104 we need to copy it. */
2105 if (CALL_INSN_FUNCTION_USAGE (temp))
2106 CALL_INSN_FUNCTION_USAGE (i1)
2107 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2109 else
2110 i1 = loop_insn_hoist (loop, body);
2111 if (first == 0)
2112 first = i1;
2113 if (temp == fn_address_insn)
2114 fn_address_insn = i1;
2115 REG_NOTES (i1) = REG_NOTES (temp);
2116 REG_NOTES (temp) = NULL;
2117 delete_insn (temp);
2119 if (new_start == 0)
2120 new_start = first;
2122 if (m->savemode != VOIDmode)
2124 /* P sets REG to zero; but we should clear only
2125 the bits that are not covered by the mode
2126 m->savemode. */
2127 rtx reg = m->set_dest;
2128 rtx sequence;
2129 rtx tem;
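/* AND REG with a mask of the low GET_MODE_BITSIZE (m->savemode) bits, clearing only the high-order bits.  */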
2131 start_sequence ();
2132 tem = expand_simple_binop
2133 (GET_MODE (reg), AND, reg,
2134 GEN_INT ((((HOST_WIDE_INT) 1
2135 << GET_MODE_BITSIZE (m->savemode)))
2136 - 1),
2137 reg, 1, OPTAB_LIB_WIDEN);
2138 if (tem == 0)
2139 abort ();
2140 if (tem != reg)
2141 emit_move_insn (reg, tem);
2142 sequence = get_insns ();
2143 end_sequence ();
2144 i1 = loop_insn_hoist (loop, sequence);
2146 else if (GET_CODE (p) == CALL_INSN)
2148 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2149 /* Because the USAGE information potentially
2150 contains objects other than hard registers
2151 we need to copy it. */
2152 if (CALL_INSN_FUNCTION_USAGE (p))
2153 CALL_INSN_FUNCTION_USAGE (i1)
2154 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2156 else if (count == m->consec && m->move_insn_first)
2158 rtx seq;
2159 /* The SET_SRC might not be invariant, so we must
2160 use the REG_EQUAL note. */
2161 start_sequence ();
2162 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2163 m->set_src);
2164 seq = get_insns ();
2165 end_sequence ();
2167 add_label_notes (m->set_src, seq);
2169 i1 = loop_insn_hoist (loop, seq);
2170 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2171 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2172 : REG_EQUAL, m->set_src);
2174 else if (m->insert_temp)
2176 rtx *reg_map2 = xcalloc (REGNO (newreg),
2177 sizeof(rtx));
2178 reg_map2 [m->regno] = newreg;
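/* Hoist a copy of the insn, rewriting references to register m->regno to the new temporary; the original insn is replaced below by a move from that temporary.  */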
2180 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2181 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2182 free (reg_map2);
2184 else
2185 i1 = loop_insn_hoist (loop, PATTERN (p));
2187 if (REG_NOTES (i1) == 0)
2189 REG_NOTES (i1) = REG_NOTES (p);
2190 REG_NOTES (p) = NULL;
2192 /* If there is a REG_EQUAL note present whose value
2193 is not loop invariant, then delete it, since it
2194 may cause problems with later optimization passes.
2195 It is possible for cse to create such notes
2196 as a result of record_jump_cond. */
2198 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2199 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2200 remove_note (i1, temp);
2203 if (new_start == 0)
2204 new_start = i1;
2206 if (loop_dump_stream)
2207 fprintf (loop_dump_stream, " moved to %d",
2208 INSN_UID (i1));
2210 /* If library call, now fix the REG_NOTES that contain
2211 insn pointers, namely REG_LIBCALL on FIRST
2212 and REG_RETVAL on I1. */
2213 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2215 XEXP (temp, 0) = first;
2216 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2217 XEXP (temp, 0) = i1;
2220 temp = p;
2221 delete_insn (p);
2222 p = NEXT_INSN (p);
2224 /* simplify_giv_expr expects that it can walk the insns
2225 at m->insn forwards and see this old sequence we are
2226 tossing here. delete_insn does preserve the next
2227 pointers, but when we skip over a NOTE we must fix
2228 it up. Otherwise that code walks into the non-deleted
2229 insn stream. */
2230 while (p && GET_CODE (p) == NOTE)
2231 p = NEXT_INSN (temp) = NEXT_INSN (p);
2233 if (m->insert_temp)
2235 rtx seq;
2236 /* Replace the original insn with a move from
2237 our newly created temp. */
2238 start_sequence ();
2239 emit_move_insn (m->set_dest, newreg);
2240 seq = get_insns ();
2241 end_sequence ();
2242 emit_insn_before (seq, p);
2246 /* The more regs we move, the less we like moving them. */
2247 threshold -= 3;
2250 m->done = 1;
2252 if (!m->insert_temp)
2254 /* Any other movable that loads the same register
2255 MUST be moved. */
2256 already_moved[regno] = 1;
2258 /* This reg has been moved out of one loop. */
2259 regs->array[regno].moved_once = 1;
2261 /* The reg set here is now invariant. */
2262 if (! m->partial)
2264 int i;
2265 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2266 regs->array[regno+i].set_in_loop = 0;
2269 /* Change the length-of-life info for the register
2270 to say it lives at least the full length of this loop.
2271 This will help guide optimizations in outer loops. */
2273 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2274 /* This is the old insn before all the moved insns.
2275 We can't use the moved insn because it is out of range
2276 in uid_luid. Only the old insns have luids. */
2277 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2278 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2279 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2282 /* Combine with this moved insn any other matching movables. */
2284 if (! m->partial)
2285 for (m1 = movables->head; m1; m1 = m1->next)
2286 if (m1->match == m)
2288 rtx temp;
2290 /* Schedule the reg loaded by M1
2291 for replacement so that it shares the reg of M.
2292 If the modes differ (only possible in restricted
2293 circumstances), make a SUBREG.
2295 Note this assumes that the target dependent files
2296 treat REG and SUBREG equally, including within
2297 GO_IF_LEGITIMATE_ADDRESS and in all the
2298 predicates since we never verify that replacing the
2299 original register with a SUBREG results in a
2300 recognizable insn. */
2301 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2302 reg_map[m1->regno] = m->set_dest;
2303 else
2304 reg_map[m1->regno]
2305 = gen_lowpart_common (GET_MODE (m1->set_dest),
2306 m->set_dest);
2308 /* Get rid of the matching insn
2309 and prevent further processing of it. */
2310 m1->done = 1;
2312 /* If library call, delete all insns. */
2313 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2314 NULL_RTX)))
2315 delete_insn_chain (XEXP (temp, 0), m1->insn);
2316 else
2317 delete_insn (m1->insn);
2319 /* Any other movable that loads the same register
2320 MUST be moved. */
2321 already_moved[m1->regno] = 1;
2323 /* The reg merged here is now invariant,
2324 if the reg it matches is invariant. */
2325 if (! m->partial)
2327 int i;
2328 for (i = 0;
2329 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2330 i++)
2331 regs->array[m1->regno+i].set_in_loop = 0;
2335 else if (loop_dump_stream)
2336 fprintf (loop_dump_stream, "not desirable");
2338 else if (loop_dump_stream && !m->match)
2339 fprintf (loop_dump_stream, "not safe");
2341 if (loop_dump_stream)
2342 fprintf (loop_dump_stream, "\n");
2345 if (new_start == 0)
2346 new_start = loop_start;
2348 /* Go through all the instructions in the loop, making
2349 all the register substitutions scheduled in REG_MAP. */
2350 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2351 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2352 || GET_CODE (p) == CALL_INSN)
2354 replace_regs (PATTERN (p), reg_map, nregs, 0);
2355 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
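/* Force re-recognition, since the replacements may have changed the insn's pattern.  */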
2356 INSN_CODE (p) = -1;
2359 /* Clean up. */
2360 free (reg_map);
2361 free (already_moved);
2365 static void
2366 loop_movables_add (struct loop_movables *movables, struct movable *m)
2368 if (movables->head == 0)
2369 movables->head = m;
2370 else
2371 movables->last->next = m;
2372 movables->last = m;
2376 static void
2377 loop_movables_free (struct loop_movables *movables)
2379 struct movable *m;
2380 struct movable *m_next;
2382 for (m = movables->head; m; m = m_next)
2384 m_next = m->next;
2385 free (m);
2389 #if 0
2390 /* Scan X and replace the address of any MEM in it with ADDR.
2391 REG is the address that MEM should have before the replacement. */
2393 static void
2394 replace_call_address (rtx x, rtx reg, rtx addr)
2396 enum rtx_code code;
2397 int i;
2398 const char *fmt;
2400 if (x == 0)
2401 return;
2402 code = GET_CODE (x);
2403 switch (code)
2405 case PC:
2406 case CC0:
2407 case CONST_INT:
2408 case CONST_DOUBLE:
2409 case CONST:
2410 case SYMBOL_REF:
2411 case LABEL_REF:
2412 case REG:
2413 return;
2415 case SET:
2416 /* Short cut for very common case. */
2417 replace_call_address (XEXP (x, 1), reg, addr);
2418 return;
2420 case CALL:
2421 /* Short cut for very common case. */
2422 replace_call_address (XEXP (x, 0), reg, addr);
2423 return;
2425 case MEM:
2426 /* If this MEM uses a reg other than the one we expected,
2427 something is wrong. */
2428 if (XEXP (x, 0) != reg)
2429 abort ();
2430 XEXP (x, 0) = addr;
2431 return;
2433 default:
2434 break;
2437 fmt = GET_RTX_FORMAT (code);
2438 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2440 if (fmt[i] == 'e')
2441 replace_call_address (XEXP (x, i), reg, addr);
2442 else if (fmt[i] == 'E')
2444 int j;
2445 for (j = 0; j < XVECLEN (x, i); j++)
2446 replace_call_address (XVECEXP (x, i, j), reg, addr);
2450 #endif
2452 /* Return the number of memory refs to addresses that vary
2453 in the rtx X. */
2455 static int
2456 count_nonfixed_reads (const struct loop *loop, rtx x)
2458 enum rtx_code code;
2459 int i;
2460 const char *fmt;
2461 int value;
2463 if (x == 0)
2464 return 0;
2466 code = GET_CODE (x);
2467 switch (code)
2469 case PC:
2470 case CC0:
2471 case CONST_INT:
2472 case CONST_DOUBLE:
2473 case CONST:
2474 case SYMBOL_REF:
2475 case LABEL_REF:
2476 case REG:
2477 return 0;
2479 case MEM:
2480 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2481 + count_nonfixed_reads (loop, XEXP (x, 0)));
2483 default:
2484 break;
2487 value = 0;
2488 fmt = GET_RTX_FORMAT (code);
2489 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2491 if (fmt[i] == 'e')
2492 value += count_nonfixed_reads (loop, XEXP (x, i));
2493 if (fmt[i] == 'E')
2495 int j;
2496 for (j = 0; j < XVECLEN (x, i); j++)
2497 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2500 return value;
2503 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2504 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2505 `unknown_address_altered', `unknown_constant_address_altered', and
2506 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2507 list `store_mems' in LOOP. */
2509 static void
2510 prescan_loop (struct loop *loop)
2512 int level = 1;
2513 rtx insn;
2514 struct loop_info *loop_info = LOOP_INFO (loop);
2515 rtx start = loop->start;
2516 rtx end = loop->end;
2517 /* The label after END. Jumping here is just like falling off the
2518 end of the loop. We use next_nonnote_insn instead of next_label
2519 as a hedge against the (pathological) case where some actual insn
2520 might end up between the two. */
2521 rtx exit_target = next_nonnote_insn (end);
2523 loop_info->has_indirect_jump = indirect_jump_in_function;
2524 loop_info->pre_header_has_call = 0;
2525 loop_info->has_call = 0;
2526 loop_info->has_nonconst_call = 0;
2527 loop_info->has_prefetch = 0;
2528 loop_info->has_volatile = 0;
2529 loop_info->has_tablejump = 0;
2530 loop_info->has_multiple_exit_targets = 0;
2531 loop->level = 1;
2533 loop_info->unknown_address_altered = 0;
2534 loop_info->unknown_constant_address_altered = 0;
2535 loop_info->store_mems = NULL_RTX;
2536 loop_info->first_loop_store_insn = NULL_RTX;
2537 loop_info->mems_idx = 0;
2538 loop_info->num_mem_sets = 0;
2539 /* If loop opts run twice, this was set on 1st pass for 2nd. */
2540 loop_info->preconditioned = NOTE_PRECONDITIONED (end);
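/* Scan backward from the loop start to the previous CODE_LABEL to see whether the pre-header contains a call.  */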
2542 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2543 insn = PREV_INSN (insn))
2545 if (GET_CODE (insn) == CALL_INSN)
2547 loop_info->pre_header_has_call = 1;
2548 break;
2552 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2553 insn = NEXT_INSN (insn))
2555 switch (GET_CODE (insn))
2557 case NOTE:
2558 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2560 ++level;
2561 /* Count number of loops contained in this one. */
2562 loop->level++;
2564 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2565 --level;
2566 break;
2568 case CALL_INSN:
2569 if (! CONST_OR_PURE_CALL_P (insn))
2571 loop_info->unknown_address_altered = 1;
2572 loop_info->has_nonconst_call = 1;
2574 else if (pure_call_p (insn))
2575 loop_info->has_nonconst_call = 1;
2576 loop_info->has_call = 1;
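/* A call that can throw internally may leave the loop through an exception edge, so count that as another exit target.  */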
2577 if (can_throw_internal (insn))
2578 loop_info->has_multiple_exit_targets = 1;
2580 /* Calls initializing constant objects have CLOBBER of MEM /u in the
2581 attached FUNCTION_USAGE expression list, not accounted for by the
2582 code above. We should note these to avoid missing dependencies in
2583 later references. */
2585 rtx fusage_entry;
2587 for (fusage_entry = CALL_INSN_FUNCTION_USAGE (insn);
2588 fusage_entry; fusage_entry = XEXP (fusage_entry, 1))
2590 rtx fusage = XEXP (fusage_entry, 0);
2592 if (GET_CODE (fusage) == CLOBBER
2593 && GET_CODE (XEXP (fusage, 0)) == MEM
2594 && RTX_UNCHANGING_P (XEXP (fusage, 0)))
2596 note_stores (fusage, note_addr_stored, loop_info);
2597 if (! loop_info->first_loop_store_insn
2598 && loop_info->store_mems)
2599 loop_info->first_loop_store_insn = insn;
2603 break;
2605 case JUMP_INSN:
2606 if (! loop_info->has_multiple_exit_targets)
2608 rtx set = pc_set (insn);
2610 if (set)
2612 rtx src = SET_SRC (set);
2613 rtx label1, label2;
2615 if (GET_CODE (src) == IF_THEN_ELSE)
2617 label1 = XEXP (src, 1);
2618 label2 = XEXP (src, 2);
2620 else
2622 label1 = src;
2623 label2 = NULL_RTX;
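/* Examine each candidate label in turn (at most two: the arms of an IF_THEN_ELSE).  */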
2628 if (label1 && label1 != pc_rtx)
2630 if (GET_CODE (label1) != LABEL_REF)
2632 /* Something tricky. */
2633 loop_info->has_multiple_exit_targets = 1;
2634 break;
2636 else if (XEXP (label1, 0) != exit_target
2637 && LABEL_OUTSIDE_LOOP_P (label1))
2639 /* A jump outside the current loop. */
2640 loop_info->has_multiple_exit_targets = 1;
2641 break;
2645 label1 = label2;
2646 label2 = NULL_RTX;
2648 while (label1);
2650 else
2652 /* A return, or something tricky. */
2653 loop_info->has_multiple_exit_targets = 1;
2656 /* Fall through. */
2658 case INSN:
2659 if (volatile_refs_p (PATTERN (insn)))
2660 loop_info->has_volatile = 1;
2662 if (GET_CODE (insn) == JUMP_INSN
2663 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2664 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2665 loop_info->has_tablejump = 1;
2667 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2668 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2669 loop_info->first_loop_store_insn = insn;
2671 if (flag_non_call_exceptions && can_throw_internal (insn))
2672 loop_info->has_multiple_exit_targets = 1;
2673 break;
2675 default:
2676 break;
2680 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2681 if (/* An exception thrown by a called function might land us
2682 anywhere. */
2683 ! loop_info->has_nonconst_call
2684 /* We don't want loads for MEMs moved to a location before the
2685 one at which their stack memory becomes allocated. (Note
2686 that this is not a problem for malloc, etc., since those
2687 require actual function calls.) */
2688 && ! current_function_calls_alloca
2689 /* There are ways to leave the loop other than falling off the
2690 end. */
2691 && ! loop_info->has_multiple_exit_targets)
2692 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2693 insn = NEXT_INSN (insn))
2694 for_each_rtx (&insn, insert_loop_mem, loop_info);
2696 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2697 that loop_invariant_p and load_mems can use true_dependence
2698 to determine what is really clobbered. */
2699 if (loop_info->unknown_address_altered)
2701 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2703 loop_info->store_mems
2704 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2706 if (loop_info->unknown_constant_address_altered)
2708 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2710 RTX_UNCHANGING_P (mem) = 1;
2711 loop_info->store_mems
2712 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2716 /* Invalidate all loops containing LABEL. */
2718 static void
2719 invalidate_loops_containing_label (rtx label)
2721 struct loop *loop;
2722 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2723 loop->invalid = 1;
2726 /* Scan the function looking for loops. Record the start and end of each loop.
2727 Also mark as invalid loops any loops that contain a setjmp or are branched
2728 to from outside the loop. */
2730 static void
2731 find_and_verify_loops (rtx f, struct loops *loops)
2733 rtx insn;
2734 rtx label;
2735 int num_loops;
2736 struct loop *current_loop;
2737 struct loop *next_loop;
2738 struct loop *loop;
2740 num_loops = loops->num;
2742 compute_luids (f, NULL_RTX, 0);
2744 /* If there are jumps to undefined labels,
2745 treat them as jumps out of any/all loops.
2746 This also avoids writing past end of tables when there are no loops. */
2747 uid_loop[0] = NULL;
2749 /* Find boundaries of loops, mark which loops are contained within
2750 loops, and invalidate loops that have setjmp. */
2752 num_loops = 0;
2753 current_loop = NULL;
2754 for (insn = f; insn; insn = NEXT_INSN (insn))
2756 if (GET_CODE (insn) == NOTE)
2757 switch (NOTE_LINE_NUMBER (insn))
2759 case NOTE_INSN_LOOP_BEG:
2760 next_loop = loops->array + num_loops;
2761 next_loop->num = num_loops;
2762 num_loops++;
2763 next_loop->start = insn;
2764 next_loop->outer = current_loop;
2765 current_loop = next_loop;
2766 break;
2768 case NOTE_INSN_LOOP_CONT:
2769 current_loop->cont = insn;
2770 break;
2772 case NOTE_INSN_LOOP_VTOP:
2773 current_loop->vtop = insn;
2774 break;
2776 case NOTE_INSN_LOOP_END:
2777 if (! current_loop)
2778 abort ();
2780 current_loop->end = insn;
2781 current_loop = current_loop->outer;
2782 break;
2784 default:
2785 break;
2788 if (GET_CODE (insn) == CALL_INSN
2789 && find_reg_note (insn, REG_SETJMP, NULL))
2791 /* In this case, we must invalidate our current loop and any
2792 enclosing loop. */
2793 for (loop = current_loop; loop; loop = loop->outer)
2795 loop->invalid = 1;
2796 if (loop_dump_stream)
2797 fprintf (loop_dump_stream,
2798 "\nLoop at %d ignored due to setjmp.\n",
2799 INSN_UID (loop->start));
2803 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2804 enclosing loop, but this doesn't matter. */
2805 uid_loop[INSN_UID (insn)] = current_loop;
2808 /* Any loop containing a label used in an initializer must be invalidated,
2809 because it can be jumped into from anywhere. */
2810 for (label = forced_labels; label; label = XEXP (label, 1))
2811 invalidate_loops_containing_label (XEXP (label, 0));
2813 /* Any loop containing a label used for an exception handler must be
2814 invalidated, because it can be jumped into from anywhere. */
2815 for_each_eh_label (invalidate_loops_containing_label);
2817 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2818 loop that it is not contained within, that loop is marked invalid.
2819 If any INSN or CALL_INSN uses a label's address, then the loop containing
2820 that label is marked invalid, because it could be jumped into from
2821 anywhere.
2823 Also look for blocks of code ending in an unconditional branch that
2824 exits the loop. If such a block is surrounded by a conditional
2825 branch around the block, move the block elsewhere (see below) and
2826 invert the jump to point to the code block. This may eliminate a
2827 label in our loop and will simplify processing by both us and a
2828 possible second cse pass. */
2830 for (insn = f; insn; insn = NEXT_INSN (insn))
2831 if (INSN_P (insn))
2833 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2835 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2837 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2838 if (note)
2839 invalidate_loops_containing_label (XEXP (note, 0));
2842 if (GET_CODE (insn) != JUMP_INSN)
2843 continue;
2845 mark_loop_jump (PATTERN (insn), this_loop);
2847 /* See if this is an unconditional branch outside the loop. */
2848 if (this_loop
2849 && (GET_CODE (PATTERN (insn)) == RETURN
2850 || (any_uncondjump_p (insn)
2851 && onlyjump_p (insn)
2852 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2853 != this_loop)))
2854 && get_max_uid () < max_uid_for_loop)
2856 rtx p;
2857 rtx our_next = next_real_insn (insn);
2858 rtx last_insn_to_move = NEXT_INSN (insn);
2859 struct loop *dest_loop;
2860 struct loop *outer_loop = NULL;
2862 /* Go backwards until we reach the start of the loop, a label,
2863 or a JUMP_INSN. */
2864 for (p = PREV_INSN (insn);
2865 GET_CODE (p) != CODE_LABEL
2866 && ! (GET_CODE (p) == NOTE
2867 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2868 && GET_CODE (p) != JUMP_INSN;
2869 p = PREV_INSN (p))
2872 /* Check for the case where we have a jump to an inner nested
2873 loop, and do not perform the optimization in that case. */
2875 if (JUMP_LABEL (insn))
2877 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2878 if (dest_loop)
2880 for (outer_loop = dest_loop; outer_loop;
2881 outer_loop = outer_loop->outer)
2882 if (outer_loop == this_loop)
2883 break;
2887 /* Make sure that the target of P is within the current loop. */
2889 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2890 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2891 outer_loop = this_loop;
2893 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2894 we have a block of code to try to move.
2896 We look backward and then forward from the target of INSN
2897 to find a BARRIER at the same loop depth as the target.
2898 If we find such a BARRIER, we make a new label for the start
2899 of the block, invert the jump in P and point it to that label,
2900 and move the block of code to the spot we found. */
2902 if (! outer_loop
2903 && GET_CODE (p) == JUMP_INSN
2904 && JUMP_LABEL (p) != 0
2905 /* Just ignore jumps to labels that were never emitted.
2906 These always indicate compilation errors. */
2907 && INSN_UID (JUMP_LABEL (p)) != 0
2908 && any_condjump_p (p) && onlyjump_p (p)
2909 && next_real_insn (JUMP_LABEL (p)) == our_next
2910 /* If it's not safe to move the sequence, then we
2911 mustn't try. */
2912 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2913 &last_insn_to_move))
2915 rtx target
2916 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2917 struct loop *target_loop = uid_loop[INSN_UID (target)];
2918 rtx loc, loc2;
2919 rtx tmp;
2921 /* Search for possible garbage past the conditional jumps
2922 and look for the last barrier. */
2923 for (tmp = last_insn_to_move;
2924 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2925 if (GET_CODE (tmp) == BARRIER)
2926 last_insn_to_move = tmp;
2928 for (loc = target; loc; loc = PREV_INSN (loc))
2929 if (GET_CODE (loc) == BARRIER
2930 /* Don't move things inside a tablejump. */
2931 && ((loc2 = next_nonnote_insn (loc)) == 0
2932 || GET_CODE (loc2) != CODE_LABEL
2933 || (loc2 = next_nonnote_insn (loc2)) == 0
2934 || GET_CODE (loc2) != JUMP_INSN
2935 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2936 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2937 && uid_loop[INSN_UID (loc)] == target_loop)
2938 break;
2940 if (loc == 0)
2941 for (loc = target; loc; loc = NEXT_INSN (loc))
2942 if (GET_CODE (loc) == BARRIER
2943 /* Don't move things inside a tablejump. */
2944 && ((loc2 = next_nonnote_insn (loc)) == 0
2945 || GET_CODE (loc2) != CODE_LABEL
2946 || (loc2 = next_nonnote_insn (loc2)) == 0
2947 || GET_CODE (loc2) != JUMP_INSN
2948 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2949 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2950 && uid_loop[INSN_UID (loc)] == target_loop)
2951 break;
2953 if (loc)
2955 rtx cond_label = JUMP_LABEL (p);
2956 rtx new_label = get_label_after (p);
2958 /* Ensure our label doesn't go away. */
2959 LABEL_NUSES (cond_label)++;
2961 /* Verify that uid_loop is large enough and that
2962 we can invert P. */
2963 if (invert_jump (p, new_label, 1))
2965 rtx q, r;
2967 /* If no suitable BARRIER was found, create a suitable
2968 one before TARGET. Since TARGET is a fall through
2969 path, we'll need to insert a jump around our block
2970 and add a BARRIER before TARGET.
2972 This creates an extra unconditional jump outside
2973 the loop. However, the benefits of removing rarely
2974 executed instructions from inside the loop usually
2975 outweigh the cost of the extra unconditional jump
2976 outside the loop. */
2977 if (loc == 0)
2979 rtx temp;
2981 temp = gen_jump (JUMP_LABEL (insn));
2982 temp = emit_jump_insn_before (temp, target);
2983 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2984 LABEL_NUSES (JUMP_LABEL (insn))++;
2985 loc = emit_barrier_before (target);
2988 /* Include the BARRIER after INSN and copy the
2989 block after LOC. */
2990 if (squeeze_notes (&new_label, &last_insn_to_move))
2991 abort ();
2992 reorder_insns (new_label, last_insn_to_move, loc);
2994 /* All those insns are now in TARGET_LOOP. */
2995 for (q = new_label;
2996 q != NEXT_INSN (last_insn_to_move);
2997 q = NEXT_INSN (q))
2998 uid_loop[INSN_UID (q)] = target_loop;
3000 /* The label jumped to by INSN is no longer a loop
3001 exit. Unless INSN does not have a label (e.g.,
3002 it is a RETURN insn), search loop->exit_labels
3003 to find its label_ref, and remove it. Also turn
3004 off the LABEL_OUTSIDE_LOOP_P bit. */
3005 if (JUMP_LABEL (insn))
3007 for (q = 0, r = this_loop->exit_labels;
3009 q = r, r = LABEL_NEXTREF (r))
3010 if (XEXP (r, 0) == JUMP_LABEL (insn))
3012 LABEL_OUTSIDE_LOOP_P (r) = 0;
3013 if (q)
3014 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
3015 else
3016 this_loop->exit_labels = LABEL_NEXTREF (r);
3017 break;
3020 for (loop = this_loop; loop && loop != target_loop;
3021 loop = loop->outer)
3022 loop->exit_count--;
3024 /* If we didn't find it, then something is
3025 wrong. */
3026 if (! r)
3027 abort ();
3030 /* P is now a jump outside the loop, so it must be put
3031 in loop->exit_labels, and marked as such.
3032 The easiest way to do this is to just call
3033 mark_loop_jump again for P. */
3034 mark_loop_jump (PATTERN (p), this_loop);
3036 /* If INSN now jumps to the insn after it,
3037 delete INSN. */
3038 if (JUMP_LABEL (insn) != 0
3039 && (next_real_insn (JUMP_LABEL (insn))
3040 == next_real_insn (insn)))
3041 delete_related_insns (insn);
3044 /* Continue the loop after where the conditional
3045 branch used to jump, since the only branch insn
3046 in the block (if it still remains) is an inter-loop
3047 branch and hence needs no processing. */
3048 insn = NEXT_INSN (cond_label);
3050 if (--LABEL_NUSES (cond_label) == 0)
3051 delete_related_insns (cond_label);
3053 /* This loop will be continued with NEXT_INSN (insn). */
3054 insn = PREV_INSN (insn);
3061 /* If any label in X jumps to a loop different from LOOP and any of the
3062 loops it is contained in, mark the target loop invalid.
3064 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3066 static void
3067 mark_loop_jump (rtx x, struct loop *loop)
3069 struct loop *dest_loop;
3070 struct loop *outer_loop;
3071 int i;
3073 switch (GET_CODE (x))
3075 case PC:
3076 case USE:
3077 case CLOBBER:
3078 case REG:
3079 case MEM:
3080 case CONST_INT:
3081 case CONST_DOUBLE:
3082 case RETURN:
3083 return;
3085 case CONST:
3086 /* There could be a label reference in here. */
3087 mark_loop_jump (XEXP (x, 0), loop);
3088 return;
3090 case PLUS:
3091 case MINUS:
3092 case MULT:
3093 mark_loop_jump (XEXP (x, 0), loop);
3094 mark_loop_jump (XEXP (x, 1), loop);
3095 return;
3097 case LO_SUM:
3098 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3099 mark_loop_jump (XEXP (x, 1), loop);
3100 return;
3102 case SIGN_EXTEND:
3103 case ZERO_EXTEND:
3104 mark_loop_jump (XEXP (x, 0), loop);
3105 return;
3107 case LABEL_REF:
3108 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3110 /* Link together all labels that branch outside the loop. This
3111 is used by final_[bg]iv_value and the loop unrolling code. Also
3112 mark this LABEL_REF so we know that this branch should predict
3113 false. */
3115 /* A check to make sure the label is not in an inner nested loop,
3116 since this does not count as a loop exit. */
3117 if (dest_loop)
3119 for (outer_loop = dest_loop; outer_loop;
3120 outer_loop = outer_loop->outer)
3121 if (outer_loop == loop)
3122 break;
3124 else
3125 outer_loop = NULL;
3127 if (loop && ! outer_loop)
3129 LABEL_OUTSIDE_LOOP_P (x) = 1;
3130 LABEL_NEXTREF (x) = loop->exit_labels;
3131 loop->exit_labels = x;
3133 for (outer_loop = loop;
3134 outer_loop && outer_loop != dest_loop;
3135 outer_loop = outer_loop->outer)
3136 outer_loop->exit_count++;
3139 /* If this is inside a loop, but not in the current loop or one enclosed
3140 by it, it invalidates at least one loop. */
3142 if (! dest_loop)
3143 return;
3145 /* We must invalidate every nested loop containing the target of this
3146 label, except those that also contain the jump insn. */
3148 for (; dest_loop; dest_loop = dest_loop->outer)
3150 /* Stop when we reach a loop that also contains the jump insn. */
3151 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3152 if (dest_loop == outer_loop)
3153 return;
3155 /* If we get here, we know we need to invalidate a loop. */
3156 if (loop_dump_stream && ! dest_loop->invalid)
3157 fprintf (loop_dump_stream,
3158 "\nLoop at %d ignored due to multiple entry points.\n",
3159 INSN_UID (dest_loop->start));
3161 dest_loop->invalid = 1;
3163 return;
3165 case SET:
3166 /* If this is not setting pc, ignore. */
3167 if (SET_DEST (x) == pc_rtx)
3168 mark_loop_jump (SET_SRC (x), loop);
3169 return;
3171 case IF_THEN_ELSE:
3172 mark_loop_jump (XEXP (x, 1), loop);
3173 mark_loop_jump (XEXP (x, 2), loop);
3174 return;
3176 case PARALLEL:
3177 case ADDR_VEC:
3178 for (i = 0; i < XVECLEN (x, 0); i++)
3179 mark_loop_jump (XVECEXP (x, 0, i), loop);
3180 return;
3182 case ADDR_DIFF_VEC:
3183 for (i = 0; i < XVECLEN (x, 1); i++)
3184 mark_loop_jump (XVECEXP (x, 1, i), loop);
3185 return;
3187 default:
3188 /* Strictly speaking this is not a jump into the loop, only a possible
3189 jump out of the loop. However, we have no way to link the destination
3190 of this jump onto the list of exit labels. To be safe we mark this
3191 loop and any containing loops as invalid. */
3192 if (loop)
3194 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3196 if (loop_dump_stream && ! outer_loop->invalid)
3197 fprintf (loop_dump_stream,
3198 "\nLoop at %d ignored due to unknown exit jump.\n",
3199 INSN_UID (outer_loop->start));
3200 outer_loop->invalid = 1;
3203 return;
3207 /* Return nonzero if there is a label in the range from
3208 insn INSN to and including the insn whose luid is END.
3209 INSN must have an assigned luid (i.e., it must not have
3210 been previously created by loop.c). */
3212 static int
3213 labels_in_range_p (rtx insn, int end)
3215 while (insn && INSN_LUID (insn) <= end)
3217 if (GET_CODE (insn) == CODE_LABEL)
3218 return 1;
3219 insn = NEXT_INSN (insn);
3222 return 0;
3225 /* Record that a memory reference X is being set. */
3227 static void
3228 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3229 void *data ATTRIBUTE_UNUSED)
3231 struct loop_info *loop_info = data;
3233 if (x == 0 || GET_CODE (x) != MEM)
3234 return;
3236 /* Count number of memory writes.
3237 This affects heuristics in strength_reduce. */
3238 loop_info->num_mem_sets++;
3240 /* BLKmode MEM means all memory is clobbered. */
3241 if (GET_MODE (x) == BLKmode)
3243 if (RTX_UNCHANGING_P (x))
3244 loop_info->unknown_constant_address_altered = 1;
3245 else
3246 loop_info->unknown_address_altered = 1;
3248 return;
3251 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3252 loop_info->store_mems);
3255 /* X is a value modified by an INSN that references a biv inside a loop
3256 exit test (i.e., X is somehow related to the value of the biv). If X
3257 is a pseudo that is used more than once, then the biv is (effectively)
3258 used more than once. DATA is a pointer to a loop_regs structure. */
3260 static void
3261 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3263 struct loop_regs *regs = (struct loop_regs *) data;
3265 if (x == 0)
3266 return;
3268 while (GET_CODE (x) == STRICT_LOW_PART
3269 || GET_CODE (x) == SIGN_EXTRACT
3270 || GET_CODE (x) == ZERO_EXTRACT
3271 || GET_CODE (x) == SUBREG)
3272 x = XEXP (x, 0);
3274 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3275 return;
3277 /* If we do not have usage information, or if we know the register
3278 is used more than once, note that fact for check_dbra_loop. */
3279 if (REGNO (x) >= max_reg_before_loop
3280 || ! regs->array[REGNO (x)].single_usage
3281 || regs->array[REGNO (x)].single_usage == const0_rtx)
3282 regs->multiple_uses = 1;
3285 /* Return nonzero if the rtx X is invariant over the current loop.
3287 The value is 2 if we refer to something only conditionally invariant.
3289 A memory ref is invariant if it is not volatile and does not conflict
3290 with anything stored in `loop_info->store_mems'. */
3293 loop_invariant_p (const struct loop *loop, rtx x)
3295 struct loop_info *loop_info = LOOP_INFO (loop);
3296 struct loop_regs *regs = LOOP_REGS (loop);
3297 int i;
3298 enum rtx_code code;
3299 const char *fmt;
3300 int conditional = 0;
3301 rtx mem_list_entry;
3303 if (x == 0)
3304 return 1;
3305 code = GET_CODE (x);
3306 switch (code)
3308 case CONST_INT:
3309 case CONST_DOUBLE:
3310 case SYMBOL_REF:
3311 case CONST:
3312 return 1;
3314 case LABEL_REF:
3315 /* A LABEL_REF is normally invariant, however, if we are unrolling
3316 loops, and this label is inside the loop, then it isn't invariant.
3317 This is because each unrolled copy of the loop body will have
3318 a copy of this label. If this was invariant, then an insn loading
3319 the address of this label into a register might get moved outside
3320 the loop, and then each loop body would end up using the same label.
3322 We don't know the loop bounds here though, so just fail for all
3323 labels. */
3324 if (flag_old_unroll_loops)
3325 return 0;
3326 else
3327 return 1;
3329 case PC:
3330 case CC0:
3331 case UNSPEC_VOLATILE:
3332 return 0;
3334 case REG:
3335 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3336 since the reg might be set by initialization within the loop. */
3338 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3339 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3340 && ! current_function_has_nonlocal_goto)
3341 return 1;
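/* A call-clobbered hard register cannot be considered invariant in a loop that contains a call.  */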
3343 if (LOOP_INFO (loop)->has_call
3344 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3345 return 0;
3347 /* Out-of-range regs can occur when we are called from unrolling.
3348 These registers created by the unroller are set in the loop,
3349 hence are never invariant.
3350 Other out-of-range regs can be generated by load_mems; those that
3351 are written to in the loop are not invariant, while those that are
3352 not written to are invariant. It would be easy for load_mems
3353 to set n_times_set correctly for these registers, however, there
3354 is no easy way to distinguish them from registers created by the
3355 unroller. */
3357 if (REGNO (x) >= (unsigned) regs->num)
3358 return 0;
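/* A negative set_in_loop means the register is set only by an insn we expect to move out of the loop, so it is only conditionally invariant.  */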
3360 if (regs->array[REGNO (x)].set_in_loop < 0)
3361 return 2;
3363 return regs->array[REGNO (x)].set_in_loop == 0;
3365 case MEM:
3366 /* Volatile memory references must be rejected. Do this before
3367 checking for read-only items, so that volatile read-only items
3368 will be rejected also. */
3369 if (MEM_VOLATILE_P (x))
3370 return 0;
3372 /* See if there is any dependence between a store and this load. */
3373 mem_list_entry = loop_info->store_mems;
3374 while (mem_list_entry)
3376 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3377 x, rtx_varies_p))
3378 return 0;
3380 mem_list_entry = XEXP (mem_list_entry, 1);
3383 /* It's not invalidated by a store in memory
3384 but we must still verify the address is invariant. */
3385 break;
3387 case ASM_OPERANDS:
3388 /* Don't mess with insns declared volatile. */
3389 if (MEM_VOLATILE_P (x))
3390 return 0;
3391 break;
3393 default:
3394 break;
3397 fmt = GET_RTX_FORMAT (code);
3398 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3400 if (fmt[i] == 'e')
3402 int tem = loop_invariant_p (loop, XEXP (x, i));
3403 if (tem == 0)
3404 return 0;
3405 if (tem == 2)
3406 conditional = 1;
3408 else if (fmt[i] == 'E')
3410 int j;
3411 for (j = 0; j < XVECLEN (x, i); j++)
3413 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3414 if (tem == 0)
3415 return 0;
3416 if (tem == 2)
3417 conditional = 1;
3423 return 1 + conditional;
3426 /* Return nonzero if all the insns in the loop that set REG
3427 are INSN and the immediately following insns,
3428 and if each of those insns sets REG in an invariant way
3429 (not counting uses of REG in them).
3431 The value is 2 if some of these insns are only conditionally invariant.
3433 We assume that INSN itself is the first set of REG
3434 and that its source is invariant. */
3436 static int
3437 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3438 rtx insn)
3440 struct loop_regs *regs = LOOP_REGS (loop);
3441 rtx p = insn;
3442 unsigned int regno = REGNO (reg);
3443 rtx temp;
3444 /* Number of sets we have to insist on finding after INSN. */
3445 int count = n_sets - 1;
3446 int old = regs->array[regno].set_in_loop;
3447 int value = 0;
3448 int this;
3450 /* If N_SETS hit the limit, we can't rely on its value. */
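/* (set_in_loop saturates at 127; see count_one_set.)  */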
3451 if (n_sets == 127)
3452 return 0;
3454 regs->array[regno].set_in_loop = 0;
3456 while (count > 0)
3458 enum rtx_code code;
3459 rtx set;
3461 p = NEXT_INSN (p);
3462 code = GET_CODE (p);
3464 /* If library call, skip to end of it. */
3465 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3466 p = XEXP (temp, 0);
3468 this = 0;
3469 if (code == INSN
3470 && (set = single_set (p))
3471 && GET_CODE (SET_DEST (set)) == REG
3472 && REGNO (SET_DEST (set)) == regno)
3474 this = loop_invariant_p (loop, SET_SRC (set));
3475 if (this != 0)
3476 value |= this;
3477 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3479 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3480 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3481 notes are OK. */
3482 this = (CONSTANT_P (XEXP (temp, 0))
3483 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3484 && loop_invariant_p (loop, XEXP (temp, 0))));
3485 if (this != 0)
3486 value |= this;
3489 if (this != 0)
3490 count--;
3491 else if (code != NOTE)
3493 regs->array[regno].set_in_loop = old;
3494 return 0;
3498 regs->array[regno].set_in_loop = old;
3499 /* If loop_invariant_p ever returned 2, we return 2. */
3500 return 1 + (value & 2);
3503 /* Look at all uses (not sets) of registers in X. For each, if it is
3504 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3505 a different insn, set USAGE[REGNO] to const0_rtx. */
3507 static void
3508 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3510 enum rtx_code code = GET_CODE (x);
3511 const char *fmt = GET_RTX_FORMAT (code);
3512 int i, j;
3514 if (code == REG)
3515 regs->array[REGNO (x)].single_usage
3516 = (regs->array[REGNO (x)].single_usage != 0
3517 && regs->array[REGNO (x)].single_usage != insn)
3518 ? const0_rtx : insn;
3520 else if (code == SET)
3522 /* Don't count SET_DEST if it is a REG; otherwise count things
3523 in SET_DEST because if a register is partially modified, it won't
3524 show up as a potential movable so we don't care how USAGE is set
3525 for it. */
3526 if (GET_CODE (SET_DEST (x)) != REG)
3527 find_single_use_in_loop (regs, insn, SET_DEST (x));
3528 find_single_use_in_loop (regs, insn, SET_SRC (x));
3530 else
3531 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3533 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3534 find_single_use_in_loop (regs, insn, XEXP (x, i));
3535 else if (fmt[i] == 'E')
3536 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3537 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3541 /* Count and record any set in X which is contained in INSN. Update
3542 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3543 in X. */
3545 static void
3546 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3548 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3549 /* Don't move a reg that has an explicit clobber.
3550 It's not worth the pain to try to do it correctly. */
3551 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3553 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3555 rtx dest = SET_DEST (x);
3556 while (GET_CODE (dest) == SUBREG
3557 || GET_CODE (dest) == ZERO_EXTRACT
3558 || GET_CODE (dest) == SIGN_EXTRACT
3559 || GET_CODE (dest) == STRICT_LOW_PART)
3560 dest = XEXP (dest, 0);
3561 if (GET_CODE (dest) == REG)
3563 int i;
3564 int regno = REGNO (dest);
3565 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3567 /* If this is the first setting of this reg
3568 in the current basic block, and it was set before,
3569 it must be set in two basic blocks, so it cannot
3570 be moved out of the loop. */
3571 if (regs->array[regno].set_in_loop > 0
3572 && last_set[regno] == 0)
3573 regs->array[regno+i].may_not_optimize = 1;
3574 /* If this is not the first setting in the current basic block,
3575 see if reg was used in between previous one and this.
3576 If so, neither one can be moved. */
3577 if (last_set[regno] != 0
3578 && reg_used_between_p (dest, last_set[regno], insn))
3579 regs->array[regno+i].may_not_optimize = 1;
3580 if (regs->array[regno+i].set_in_loop < 127)
3581 ++regs->array[regno+i].set_in_loop;
3582 last_set[regno+i] = insn;
3588 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3589 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3590 contained in insn INSN is used by any insn that precedes INSN in
3591 cyclic order starting from the loop entry point.
3593 We don't want to use INSN_LUID here because if we restrict INSN to those
3594 that have a valid INSN_LUID, it means we cannot move an invariant out
3595 from an inner loop past two loops. */
3597 static int
3598 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3600 rtx reg = SET_DEST (set);
3601 rtx p;
3603 /* Scan forward checking for register usage. If we hit INSN, we
3604 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3605 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3607 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3608 return 1;
3610 if (p == loop->end)
3611 p = loop->start;
3614 return 0;
3618 /* Information we collect about arrays that we might want to prefetch. */
3619 struct prefetch_info
3621 struct iv_class *class; /* Class this prefetch is based on. */
3622 struct induction *giv; /* GIV this prefetch is based on. */
3623 rtx base_address; /* Start prefetching from this address plus
3624 index. */
3625 HOST_WIDE_INT index;
3626 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3627 iteration. */
3628 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3629 prefetch area in one iteration. */
3630 unsigned int total_bytes; /* Total bytes loop will access in this block.
3631 This is set only for loops with known
3632 iteration counts and is 0xffffffff
3633 otherwise. */
3634 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3635 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3636 unsigned int write : 1; /* 1 for read/write prefetches. */
3639 /* Data used by check_store function. */
3640 struct check_store_data
3642 rtx mem_address;
3643 int mem_write;
3646 static void check_store (rtx, rtx, void *);
3647 static void emit_prefetch_instructions (struct loop *);
3648 static int rtx_equal_for_prefetch_p (rtx, rtx);
3650 /* Set mem_write when mem_address is found. Used as callback to
3651 note_stores. */
3652 static void
3653 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3655 struct check_store_data *d = (struct check_store_data *) data;
3657 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3658 d->mem_write = 1;
3661 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3662 important to get some addresses combined. Later more sophisticated
3663 transformations can be added when necessary.
3665 ??? The same trick of swapping operands is done at several other places.
3666 It would be nice to develop some common way of handling this. */
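/* For instance, (plus (reg A) (reg B)) and (plus (reg B) (reg A)) compare
equal here, whereas plain rtx_equal_p would treat them as different. */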
3668 static int
3669 rtx_equal_for_prefetch_p (rtx x, rtx y)
3671 int i;
3672 int j;
3673 enum rtx_code code = GET_CODE (x);
3674 const char *fmt;
3676 if (x == y)
3677 return 1;
3678 if (code != GET_CODE (y))
3679 return 0;
3681 code = GET_CODE (x);
3683 if (GET_RTX_CLASS (code) == 'c')
3685 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3686 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3687 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3688 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3690 /* Compare the elements. If any pair of corresponding elements fails to
3691 match, return 0 for the whole thing. */
3693 fmt = GET_RTX_FORMAT (code);
3694 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3696 switch (fmt[i])
3698 case 'w':
3699 if (XWINT (x, i) != XWINT (y, i))
3700 return 0;
3701 break;
3703 case 'i':
3704 if (XINT (x, i) != XINT (y, i))
3705 return 0;
3706 break;
3708 case 'E':
3709 /* Two vectors must have the same length. */
3710 if (XVECLEN (x, i) != XVECLEN (y, i))
3711 return 0;
3713 /* And the corresponding elements must match. */
3714 for (j = 0; j < XVECLEN (x, i); j++)
3715 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3716 XVECEXP (y, i, j)) == 0)
3717 return 0;
3718 break;
3720 case 'e':
3721 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3722 return 0;
3723 break;
3725 case 's':
3726 if (strcmp (XSTR (x, i), XSTR (y, i)))
3727 return 0;
3728 break;
3730 case 'u':
3731 /* These are just backpointers, so they don't matter. */
3732 break;
3734 case '0':
3735 break;
3737 /* It is believed that rtx's at this level will never
3738 contain anything but integers and other rtx's,
3739 except for within LABEL_REFs and SYMBOL_REFs. */
3740 default:
3741 abort ();
3744 return 1;
3747 /* Remove constant addition value from the expression X (when present)
3748 and return it. */
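/* For example, (plus (reg) (const_int 16)) returns 16 and leaves just (reg)
in *X; a bare (const_int 8) is replaced by const0_rtx and 8 is returned. */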
3750 static HOST_WIDE_INT
3751 remove_constant_addition (rtx *x)
3753 HOST_WIDE_INT addval = 0;
3754 rtx exp = *x;
3756 /* Avoid clobbering a shared CONST expression. */
3757 if (GET_CODE (exp) == CONST)
3759 if (GET_CODE (XEXP (exp, 0)) == PLUS
3760 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3761 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3763 *x = XEXP (XEXP (exp, 0), 0);
3764 return INTVAL (XEXP (XEXP (exp, 0), 1));
3766 return 0;
3769 if (GET_CODE (exp) == CONST_INT)
3771 addval = INTVAL (exp);
3772 *x = const0_rtx;
3775 /* For a PLUS expression, recurse on both operands. */
3776 else if (GET_CODE (exp) == PLUS)
3778 addval += remove_constant_addition (&XEXP (exp, 0));
3779 addval += remove_constant_addition (&XEXP (exp, 1));
3781 /* If either operand was reduced to a constant zero, remove the extra
3782 zero from the expression. */
3783 if (XEXP (exp, 0) == const0_rtx)
3784 *x = XEXP (exp, 1);
3785 else if (XEXP (exp, 1) == const0_rtx)
3786 *x = XEXP (exp, 0);
3789 return addval;
3792 /* Attempt to identify accesses to arrays that are most likely to cause cache
3793 misses, and emit prefetch instructions a few prefetch blocks forward.
3795 To detect the arrays we use the GIV information that was collected by the
3796 strength reduction pass.
3798 The prefetch instructions are generated after the GIV information is done
3799 and before the strength reduction process. The new GIVs are injected into
3800 the strength reduction tables, so the prefetch addresses are optimized as
3801 well.
3803 GIVs are split into base address, stride, and constant addition values.
3804 GIVs with the same address, stride and close addition values are combined
3805 into a single prefetch. Writes to GIVs are also detected, so that
3806 prefetch-for-write instructions can be used for the blocks we write to,
3807 on machines that support write prefetches.
3809 Several heuristics are used to determine when to prefetch. They are
3810 controlled by defined symbols that can be overridden for each target. */
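/* Roughly: loops containing calls or with a small known iteration count can
be skipped, sparse access patterns can be rejected (PREFETCH_ONLY_DENSE_MEM),
and in-loop prefetching is abandoned entirely if it would take more than
SIMULTANEOUS_PREFETCHES insns per iteration. */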
3812 static void
3813 emit_prefetch_instructions (struct loop *loop)
3815 int num_prefetches = 0;
3816 int num_real_prefetches = 0;
3817 int num_real_write_prefetches = 0;
3818 int num_prefetches_before = 0;
3819 int num_write_prefetches_before = 0;
3820 int ahead = 0;
3821 int i;
3822 struct iv_class *bl;
3823 struct induction *iv;
3824 struct prefetch_info info[MAX_PREFETCHES];
3825 struct loop_ivs *ivs = LOOP_IVS (loop);
3827 if (!HAVE_prefetch)
3828 return;
3830 /* Consider only loops without calls. A loop containing a call is probably
3831 slow enough already to hide the memory latency. */
3832 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3834 if (loop_dump_stream)
3835 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3837 return;
3840 /* Don't prefetch in loops known to have few iterations. */
3841 if (PREFETCH_NO_LOW_LOOPCNT
3842 && LOOP_INFO (loop)->n_iterations
3843 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3845 if (loop_dump_stream)
3846 fprintf (loop_dump_stream,
3847 "Prefetch: ignoring loop: not enough iterations.\n");
3848 return;
3851 /* Search all induction variables and pick those interesting for the prefetch
3852 machinery. */
3853 for (bl = ivs->list; bl; bl = bl->next)
3855 struct induction *biv = bl->biv, *biv1;
3856 int basestride = 0;
3858 biv1 = biv;
3860 /* Expect all BIVs to be executed in each iteration. This makes our
3861 analysis more conservative. */
3862 while (biv1)
3864 /* Discard non-constant additions that we can't handle well yet, and
3865 BIVs that are executed multiple times; such BIVs ought to be
3866 handled in the nested loop. We accept not_every_iteration BIVs,
3867 since these only result in larger strides and make our
3868 heuristics more conservative. */
3869 if (GET_CODE (biv->add_val) != CONST_INT)
3871 if (loop_dump_stream)
3873 fprintf (loop_dump_stream,
3874 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3875 REGNO (biv->src_reg), INSN_UID (biv->insn));
3876 print_rtl (loop_dump_stream, biv->add_val);
3877 fprintf (loop_dump_stream, "\n");
3879 break;
3882 if (biv->maybe_multiple)
3884 if (loop_dump_stream)
3886 fprintf (loop_dump_stream,
3887 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3888 REGNO (biv->src_reg), INSN_UID (biv->insn));
3889 print_rtl (loop_dump_stream, biv->add_val);
3890 fprintf (loop_dump_stream, "\n");
3892 break;
3895 basestride += INTVAL (biv1->add_val);
3896 biv1 = biv1->next_iv;
3899 if (biv1 || !basestride)
3900 continue;
3902 for (iv = bl->giv; iv; iv = iv->next_iv)
3904 rtx address;
3905 rtx temp;
3906 HOST_WIDE_INT index = 0;
3907 int add = 1;
3908 HOST_WIDE_INT stride = 0;
3909 int stride_sign = 1;
3910 struct check_store_data d;
3911 const char *ignore_reason = NULL;
3912 int size = GET_MODE_SIZE (GET_MODE (iv));
3914 /* See whether an induction variable is interesting to us and if
3915 not, report the reason. */
3916 if (iv->giv_type != DEST_ADDR)
3917 ignore_reason = "giv is not a destination address";
3919 /* We are interested only in constant stride memory references
3920 in order to be able to compute density easily. */
3921 else if (GET_CODE (iv->mult_val) != CONST_INT)
3922 ignore_reason = "stride is not constant";
3924 else
3926 stride = INTVAL (iv->mult_val) * basestride;
3927 if (stride < 0)
3929 stride = -stride;
3930 stride_sign = -1;
3933 /* On some targets, reversed order prefetches are not
3934 worthwhile. */
3935 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3936 ignore_reason = "reversed order stride";
3938 /* Prefetch of accesses with an extreme stride might not be
3939 worthwhile, either. */
3940 else if (PREFETCH_NO_EXTREME_STRIDE
3941 && stride > PREFETCH_EXTREME_STRIDE)
3942 ignore_reason = "extreme stride";
3944 /* Ignore GIVs with varying add values; we can't predict the
3945 value for the next iteration. */
3946 else if (!loop_invariant_p (loop, iv->add_val))
3947 ignore_reason = "giv has varying add value";
3949 /* Ignore GIVs in the nested loops; they ought to have been
3950 handled already. */
3951 else if (iv->maybe_multiple)
3952 ignore_reason = "giv is in nested loop";
3955 if (ignore_reason != NULL)
3957 if (loop_dump_stream)
3958 fprintf (loop_dump_stream,
3959 "Prefetch: ignoring giv at %d: %s.\n",
3960 INSN_UID (iv->insn), ignore_reason);
3961 continue;
3964 /* Determine the pointer to the basic array we are examining. It is
3965 the sum of the BIV's initial value and the GIV's add_val. */
3966 address = copy_rtx (iv->add_val);
3967 temp = copy_rtx (bl->initial_value);
3969 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3970 index = remove_constant_addition (&address);
3972 d.mem_write = 0;
3973 d.mem_address = *iv->location;
3975 /* When the GIV is not always executed, we might be better off
3976 not dirtying the cache pages. */
3977 if (PREFETCH_CONDITIONAL || iv->always_executed)
3978 note_stores (PATTERN (iv->insn), check_store, &d);
3979 else
3981 if (loop_dump_stream)
3982 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3983 INSN_UID (iv->insn), "in conditional code.");
3984 continue;
3987 /* Attempt to find another prefetch to the same array and see if we
3988 can merge this one. */
3989 for (i = 0; i < num_prefetches; i++)
3990 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3991 && stride == info[i].stride)
3993 /* If both accesses are to the same array (the same location, differing
3994 only by a small constant index), merge the prefetches. Emit only the
3995 later one; the earlier address will already have been prefetched
3996 on a previous iteration.
3997 The artificial threshold should not be too small,
3998 but also not larger than the small portion of memory usually
3999 traversed by a single loop. */
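/* E.g. two DEST_ADDR givs walking the same array with the same stride, whose
constant offsets differ by less than PREFETCH_EXTREME_DIFFERENCE, collapse
into a single prefetch stream. */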
4000 if (index >= info[i].index
4001 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
4003 info[i].write |= d.mem_write;
4004 info[i].bytes_accessed += size;
4005 info[i].index = index;
4006 info[i].giv = iv;
4007 info[i].class = bl;
4008 info[num_prefetches].base_address = address;
4009 add = 0;
4010 break;
4013 if (index < info[i].index
4014 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
4016 info[i].write |= d.mem_write;
4017 info[i].bytes_accessed += size;
4018 add = 0;
4019 break;
4023 /* Merging failed. */
4024 if (add)
4026 info[num_prefetches].giv = iv;
4027 info[num_prefetches].class = bl;
4028 info[num_prefetches].index = index;
4029 info[num_prefetches].stride = stride;
4030 info[num_prefetches].base_address = address;
4031 info[num_prefetches].write = d.mem_write;
4032 info[num_prefetches].bytes_accessed = size;
4033 num_prefetches++;
4034 if (num_prefetches >= MAX_PREFETCHES)
4036 if (loop_dump_stream)
4037 fprintf (loop_dump_stream,
4038 "Maximal number of prefetches exceeded.\n");
4039 return;
4045 for (i = 0; i < num_prefetches; i++)
4047 int density;
4049 /* Attempt to calculate the total number of bytes fetched by all
4050 iterations of the loop. Avoid overflow. */
4051 if (LOOP_INFO (loop)->n_iterations
4052 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4053 >= LOOP_INFO (loop)->n_iterations))
4054 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4055 else
4056 info[i].total_bytes = 0xffffffff;
4058 density = info[i].bytes_accessed * 100 / info[i].stride;
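/* E.g. accessing 4 bytes per iteration with a 16-byte stride gives a density
of 4 * 100 / 16 = 25: only a quarter of each stride is actually touched.
The check below scales DENSITY by 256 and PREFETCH_DENSE_MEM by 100,
apparently so the two ratios can be compared without floating point. */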
4060 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4061 if (PREFETCH_ONLY_DENSE_MEM)
4062 if (density * 256 > PREFETCH_DENSE_MEM * 100
4063 && (info[i].total_bytes / PREFETCH_BLOCK
4064 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4066 info[i].prefetch_before_loop = 1;
4067 info[i].prefetch_in_loop
4068 = (info[i].total_bytes / PREFETCH_BLOCK
4069 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4071 else
4073 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4074 if (loop_dump_stream)
4075 fprintf (loop_dump_stream,
4076 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4077 INSN_UID (info[i].giv->insn), density);
4079 else
4080 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4082 /* Find how many prefetch instructions we'll use within the loop. */
4083 if (info[i].prefetch_in_loop != 0)
4085 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4086 / PREFETCH_BLOCK);
4087 num_real_prefetches += info[i].prefetch_in_loop;
4088 if (info[i].write)
4089 num_real_write_prefetches += info[i].prefetch_in_loop;
4093 /* Determine how many iterations ahead to prefetch within the loop, based
4094 on how many prefetches we currently expect to do within the loop. */
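/* E.g. with the default SIMULTANEOUS_PREFETCHES of 3 and two prefetch insns
in the loop body, AHEAD becomes 1; with four or more it would be 0, and
in-loop prefetching is dropped below. */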
4095 if (num_real_prefetches != 0)
4097 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4099 if (loop_dump_stream)
4100 fprintf (loop_dump_stream,
4101 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4102 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4103 num_real_prefetches = 0, num_real_write_prefetches = 0;
4106 /* We'll also use AHEAD to determine how many prefetch instructions to
4107 emit before a loop, so don't leave it zero. */
4108 if (ahead == 0)
4109 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4111 for (i = 0; i < num_prefetches; i++)
4113 /* Update if we've decided not to prefetch anything within the loop. */
4114 if (num_real_prefetches == 0)
4115 info[i].prefetch_in_loop = 0;
4117 /* Find how many prefetch instructions we'll use before the loop. */
4118 if (info[i].prefetch_before_loop != 0)
4120 int n = info[i].total_bytes / PREFETCH_BLOCK;
4121 if (n > ahead)
4122 n = ahead;
4123 info[i].prefetch_before_loop = n;
4124 num_prefetches_before += n;
4125 if (info[i].write)
4126 num_write_prefetches_before += n;
4129 if (loop_dump_stream)
4131 if (info[i].prefetch_in_loop == 0
4132 && info[i].prefetch_before_loop == 0)
4133 continue;
4134 fprintf (loop_dump_stream, "Prefetch insn: %d",
4135 INSN_UID (info[i].giv->insn));
4136 fprintf (loop_dump_stream,
4137 "; in loop: %d; before: %d; %s\n",
4138 info[i].prefetch_in_loop,
4139 info[i].prefetch_before_loop,
4140 info[i].write ? "read/write" : "read only");
4141 fprintf (loop_dump_stream,
4142 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4143 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4144 info[i].bytes_accessed, info[i].total_bytes);
4145 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4146 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4147 info[i].index, info[i].stride);
4148 print_rtl (loop_dump_stream, info[i].base_address);
4149 fprintf (loop_dump_stream, "\n");
4153 if (num_real_prefetches + num_prefetches_before > 0)
4155 /* Record that this loop uses prefetch instructions. */
4156 LOOP_INFO (loop)->has_prefetch = 1;
4158 if (loop_dump_stream)
4160 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4161 num_real_prefetches, num_real_write_prefetches);
4162 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4163 num_prefetches_before, num_write_prefetches_before);
4167 for (i = 0; i < num_prefetches; i++)
4169 int y;
4171 for (y = 0; y < info[i].prefetch_in_loop; y++)
4173 rtx loc = copy_rtx (*info[i].giv->location);
4174 rtx insn;
4175 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4176 rtx before_insn = info[i].giv->insn;
4177 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4178 rtx seq;
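/* For example, with the default PREFETCH_BLOCK of 32 and AHEAD of 3, the
first (Y == 0) prefetch targets the address 32 * 3 = 96 bytes beyond the
current access. */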
4180 /* We can save some effort by offsetting the address on
4181 architectures with offsettable memory references. */
4182 if (offsettable_address_p (0, VOIDmode, loc))
4183 loc = plus_constant (loc, bytes_ahead);
4184 else
4186 rtx reg = gen_reg_rtx (Pmode);
4187 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4188 GEN_INT (bytes_ahead), reg,
4189 0, before_insn);
4190 loc = reg;
4193 start_sequence ();
4194 /* Make sure the address operand is valid for prefetch. */
4195 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4196 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4197 loc = force_reg (Pmode, loc);
4198 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4199 GEN_INT (3)));
4200 seq = get_insns ();
4201 end_sequence ();
4202 emit_insn_before (seq, before_insn);
4204 /* Check all insns emitted and record the new GIV
4205 information. */
4206 insn = NEXT_INSN (prev_insn);
4207 while (insn != before_insn)
4209 insn = check_insn_for_givs (loop, insn,
4210 info[i].giv->always_executed,
4211 info[i].giv->maybe_multiple);
4212 insn = NEXT_INSN (insn);
4216 if (PREFETCH_BEFORE_LOOP)
4218 /* Emit insns before the loop to fetch the first cache lines or,
4219 if we're not prefetching within the loop, everything we expect
4220 to need. */
4221 for (y = 0; y < info[i].prefetch_before_loop; y++)
4223 rtx reg = gen_reg_rtx (Pmode);
4224 rtx loop_start = loop->start;
4225 rtx init_val = info[i].class->initial_value;
4226 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4227 info[i].giv->add_val,
4228 GEN_INT (y * PREFETCH_BLOCK));
4230 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4231 non-constant INIT_VAL to have the same mode as REG, which
4232 in this case we know to be Pmode. */
4233 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4235 rtx seq;
4237 start_sequence ();
4238 init_val = convert_to_mode (Pmode, init_val, 0);
4239 seq = get_insns ();
4240 end_sequence ();
4241 loop_insn_emit_before (loop, 0, loop_start, seq);
4243 loop_iv_add_mult_emit_before (loop, init_val,
4244 info[i].giv->mult_val,
4245 add_val, reg, 0, loop_start);
4246 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4247 GEN_INT (3)),
4248 loop_start);
4253 return;
4256 /* Communication with routines called via `note_stores'. */
4258 static rtx note_insn;
4260 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4262 static rtx addr_placeholder;
4264 /* ??? Unfinished optimizations, and possible future optimizations,
4265 for the strength reduction code. */
4267 /* ??? The interaction of biv elimination, and recognition of 'constant'
4268 bivs, may cause problems. */
4270 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4271 performance problems.
4273 Perhaps don't eliminate things that can be combined with an addressing
4274 mode. Find all givs that have the same biv, mult_val, and add_val;
4275 then for each giv, check to see if its only use dies in a following
4276 memory address. If so, generate a new memory address and check to see
4277 if it is valid. If it is valid, then store the modified memory address,
4278 otherwise, mark the giv as not done so that it will get its own iv. */
4280 /* ??? Could try to optimize branches when it is known that a biv is always
4281 positive. */
4283 /* ??? When replace a biv in a compare insn, we should replace with closest
4284 giv so that an optimized branch can still be recognized by the combiner,
4285 e.g. the VAX acb insn. */
4287 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4288 was rerun in loop_optimize whenever a register was added or moved.
4289 Also, some of the optimizations could be a little less conservative. */
4291 /* Scan the loop body and call FNCALL for each insn. In addition to the
4292 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4293 the callback.
4295 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed
4296 at least once in every loop iteration except the last one.
4298 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once in
4299 a single loop iteration. */
4301 void
4302 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4304 int not_every_iteration = 0;
4305 int maybe_multiple = 0;
4306 int past_loop_latch = 0;
4307 int loop_depth = 0;
4308 rtx p;
4310 /* If loop_scan_start points to the loop exit test, we have to be wary of
4311 subversive use of gotos inside expression statements. */
4312 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4313 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4315 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4316 for (p = next_insn_in_loop (loop, loop->scan_start);
4317 p != NULL_RTX;
4318 p = next_insn_in_loop (loop, p))
4320 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4322 /* Past CODE_LABEL, we get to insns that may be executed multiple
4323 times. The only way we can be sure that they can't is if every
4324 jump insn between here and the end of the loop either
4325 returns, exits the loop, is a jump to a location that is still
4326 behind the label, or is a jump to the loop start. */
4328 if (GET_CODE (p) == CODE_LABEL)
4330 rtx insn = p;
4332 maybe_multiple = 0;
4334 while (1)
4336 insn = NEXT_INSN (insn);
4337 if (insn == loop->scan_start)
4338 break;
4339 if (insn == loop->end)
4341 if (loop->top != 0)
4342 insn = loop->top;
4343 else
4344 break;
4345 if (insn == loop->scan_start)
4346 break;
4349 if (GET_CODE (insn) == JUMP_INSN
4350 && GET_CODE (PATTERN (insn)) != RETURN
4351 && (!any_condjump_p (insn)
4352 || (JUMP_LABEL (insn) != 0
4353 && JUMP_LABEL (insn) != loop->scan_start
4354 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4356 maybe_multiple = 1;
4357 break;
4362 /* Past a jump, we get to insns for which we can't count
4363 on whether they will be executed during each iteration. */
4364 /* This code appears twice in strength_reduce. There is also similar
4365 code in scan_loop. */
4366 if (GET_CODE (p) == JUMP_INSN
4367 /* If we enter the loop in the middle, and scan around to the
4368 beginning, don't set not_every_iteration for that.
4369 This can be any kind of jump, since we want to know if insns
4370 will be executed if the loop is executed. */
4371 && !(JUMP_LABEL (p) == loop->top
4372 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4373 && any_uncondjump_p (p))
4374 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4376 rtx label = 0;
4378 /* If this is a jump outside the loop, then it also doesn't
4379 matter. Check to see if the target of this branch is on the
4380 loop->exits_labels list. */
4382 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4383 if (XEXP (label, 0) == JUMP_LABEL (p))
4384 break;
4386 if (!label)
4387 not_every_iteration = 1;
4390 else if (GET_CODE (p) == NOTE)
4392 /* At the virtual top of a converted loop, insns are again known to
4393 be executed each iteration: logically, the loop begins here
4394 even though the exit code has been duplicated.
4396 Insns are also again known to be executed each iteration at
4397 the LOOP_CONT note. */
4398 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4399 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4400 && loop_depth == 0)
4401 not_every_iteration = 0;
4402 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4403 loop_depth++;
4404 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4405 loop_depth--;
4408 /* Note if we pass a loop latch. If we do, then we can not clear
4409 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4410 a loop since a jump before the last CODE_LABEL may have started
4411 a new loop iteration.
4413 Note that LOOP_TOP is only set for rotated loops and we need
4414 this check for all loops, so compare against the CODE_LABEL
4415 which immediately follows LOOP_START. */
4416 if (GET_CODE (p) == JUMP_INSN
4417 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4418 past_loop_latch = 1;
4420 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4421 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4422 or not an insn is known to be executed each iteration of the
4423 loop, whether or not any iterations are known to occur.
4425 Therefore, if we have just passed a label and have no more labels
4426 between here and the test insn of the loop, and we have not passed
4427 a jump to the top of the loop, then we know these insns will be
4428 executed each iteration. */
4430 if (not_every_iteration
4431 && !past_loop_latch
4432 && GET_CODE (p) == CODE_LABEL
4433 && no_labels_between_p (p, loop->end)
4434 && loop_insn_first_p (p, loop->cont))
4435 not_every_iteration = 0;
4439 static void
4440 loop_bivs_find (struct loop *loop)
4442 struct loop_regs *regs = LOOP_REGS (loop);
4443 struct loop_ivs *ivs = LOOP_IVS (loop);
4444 /* Temporary list pointers for traversing ivs->list. */
4445 struct iv_class *bl, **backbl;
4447 ivs->list = 0;
4449 for_each_insn_in_loop (loop, check_insn_for_bivs);
4451 /* Scan ivs->list to remove all regs that proved not to be bivs.
4452 Make a sanity check against regs->n_times_set. */
4453 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4455 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4456 /* The above happens if the register was modified by a subreg, etc. */
4457 /* Make sure it is not recognized as a basic induction var: */
4458 || regs->array[bl->regno].n_times_set != bl->biv_count
4459 /* If never incremented, it is an invariant that we decided not to
4460 move. So leave it alone. */
4461 || ! bl->incremented)
4463 if (loop_dump_stream)
4464 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4465 bl->regno,
4466 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4467 ? "not induction variable"
4468 : (! bl->incremented ? "never incremented"
4469 : "count error")));
4471 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4472 *backbl = bl->next;
4474 else
4476 backbl = &bl->next;
4478 if (loop_dump_stream)
4479 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4485 /* Determine how BIVs are initialized by looking through the pre-header
4486 extended basic block. */
4487 static void
4488 loop_bivs_init_find (struct loop *loop)
4490 struct loop_ivs *ivs = LOOP_IVS (loop);
4491 /* Temporary list pointers for traversing ivs->list. */
4492 struct iv_class *bl;
4493 int call_seen;
4494 rtx p;
4496 /* Find initial value for each biv by searching backwards from loop_start,
4497 halting at first label. Also record any test condition. */
4499 call_seen = 0;
4500 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4502 rtx test;
4504 note_insn = p;
4506 if (GET_CODE (p) == CALL_INSN)
4507 call_seen = 1;
4509 if (INSN_P (p))
4510 note_stores (PATTERN (p), record_initial, ivs);
4512 /* Record any test of a biv that branches around the loop if no store
4513 between it and the start of loop. We only care about tests with
4514 constants and registers and only certain of those. */
4515 if (GET_CODE (p) == JUMP_INSN
4516 && JUMP_LABEL (p) != 0
4517 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4518 && (test = get_condition_for_loop (loop, p)) != 0
4519 && GET_CODE (XEXP (test, 0)) == REG
4520 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4521 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4522 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4523 && bl->init_insn == 0)
4525 /* If an NE test, we have an initial value! */
4526 if (GET_CODE (test) == NE)
4528 bl->init_insn = p;
4529 bl->init_set = gen_rtx_SET (VOIDmode,
4530 XEXP (test, 0), XEXP (test, 1));
4532 else
4533 bl->initial_test = test;
4539 /* Look at each biv and see if we can say anything better about its
4540 initial value from any initializing insns set up above. (This is done
4541 in two passes to avoid missing SETs in a PARALLEL.) */
4542 static void
4543 loop_bivs_check (struct loop *loop)
4545 struct loop_ivs *ivs = LOOP_IVS (loop);
4546 /* Temporary list pointers for traversing ivs->list. */
4547 struct iv_class *bl;
4548 struct iv_class **backbl;
4550 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4552 rtx src;
4553 rtx note;
4555 if (! bl->init_insn)
4556 continue;
4558 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4559 is a constant, use that value. */
4560 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4561 && CONSTANT_P (XEXP (note, 0)))
4562 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4563 && CONSTANT_P (XEXP (note, 0))))
4564 src = XEXP (note, 0);
4565 else
4566 src = SET_SRC (bl->init_set);
4568 if (loop_dump_stream)
4569 fprintf (loop_dump_stream,
4570 "Biv %d: initialized at insn %d: initial value ",
4571 bl->regno, INSN_UID (bl->init_insn));
4573 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4574 || GET_MODE (src) == VOIDmode)
4575 && valid_initial_value_p (src, bl->init_insn,
4576 LOOP_INFO (loop)->pre_header_has_call,
4577 loop->start))
4579 bl->initial_value = src;
4581 if (loop_dump_stream)
4583 print_simple_rtl (loop_dump_stream, src);
4584 fputc ('\n', loop_dump_stream);
4587 /* If we can't make it a giv,
4588 let biv keep initial value of "itself". */
4589 else if (loop_dump_stream)
4590 fprintf (loop_dump_stream, "is complex\n");
4595 /* Search the loop for general induction variables. */
4597 static void
4598 loop_givs_find (struct loop* loop)
4600 for_each_insn_in_loop (loop, check_insn_for_givs);
4604 /* For each giv for which we still don't know whether or not it is
4605 replaceable, check to see if it is replaceable because its final value
4606 can be calculated. */
4608 static void
4609 loop_givs_check (struct loop *loop)
4611 struct loop_ivs *ivs = LOOP_IVS (loop);
4612 struct iv_class *bl;
4614 for (bl = ivs->list; bl; bl = bl->next)
4616 struct induction *v;
4618 for (v = bl->giv; v; v = v->next_iv)
4619 if (! v->replaceable && ! v->not_replaceable)
4620 check_final_value (loop, v);
4625 /* Return nonzero if it is possible to eliminate the biv BL provided
4626 all givs are reduced. This is possible if either the reg is not
4627 used outside the loop, or we can compute what its final value will
4628 be. */
4630 static int
4631 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
4632 int threshold, int insn_count)
4634 /* For architectures with a decrement_and_branch_until_zero insn,
4635 don't do this if we put a REG_NONNEG note on the endtest for this
4636 biv. */
4638 #ifdef HAVE_decrement_and_branch_until_zero
4639 if (bl->nonneg)
4641 if (loop_dump_stream)
4642 fprintf (loop_dump_stream,
4643 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4644 return 0;
4646 #endif
4648 /* Check that the biv is not used outside the loop, or that it has a final value.
4649 Compare against bl->init_insn rather than loop->start. We aren't
4650 concerned with any uses of the biv between init_insn and
4651 loop->start since these won't be affected by the value of the biv
4652 elsewhere in the function, so long as init_insn doesn't use the
4653 biv itself. */
4655 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4656 && bl->init_insn
4657 && INSN_UID (bl->init_insn) < max_uid_for_loop
4658 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4659 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4660 || (bl->final_value = final_biv_value (loop, bl)))
4661 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4663 if (loop_dump_stream)
4665 fprintf (loop_dump_stream,
4666 "Cannot eliminate biv %d.\n",
4667 bl->regno);
4668 fprintf (loop_dump_stream,
4669 "First use: insn %d, last use: insn %d.\n",
4670 REGNO_FIRST_UID (bl->regno),
4671 REGNO_LAST_UID (bl->regno));
4673 return 0;
4677 /* Reduce each giv of BL that we have decided to reduce. */
4679 static void
4680 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
4682 struct induction *v;
4684 for (v = bl->giv; v; v = v->next_iv)
4686 struct induction *tv;
4687 if (! v->ignore && v->same == 0)
4689 int auto_inc_opt = 0;
4691 /* If the code for derived givs immediately below has already
4692 allocated a new_reg, we must keep it. */
4693 if (! v->new_reg)
4694 v->new_reg = gen_reg_rtx (v->mode);
4696 #ifdef AUTO_INC_DEC
4697 /* If the target has auto-increment addressing modes, and
4698 this is an address giv, then try to put the increment
4699 immediately after its use, so that flow can create an
4700 auto-increment addressing mode. */
4701 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4702 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4703 /* We don't handle reversed biv's because bl->biv->insn
4704 does not have a valid INSN_LUID. */
4705 && ! bl->reversed
4706 && v->always_executed && ! v->maybe_multiple
4707 && INSN_UID (v->insn) < max_uid_for_loop)
4709 /* If other giv's have been combined with this one, then
4710 this will work only if all uses of the other giv's occur
4711 before this giv's insn. This is difficult to check.
4713 We simplify this by looking for the common case where
4714 there is one DEST_REG giv, and this giv's insn is the
4715 last use of the dest_reg of that DEST_REG giv. If the
4716 increment occurs after the address giv, then we can
4717 perform the optimization. (Otherwise, the increment
4718 would have to go before other_giv, and we would not be
4719 able to combine it with the address giv to get an
4720 auto-inc address.) */
4721 if (v->combined_with)
4723 struct induction *other_giv = 0;
4725 for (tv = bl->giv; tv; tv = tv->next_iv)
4726 if (tv->same == v)
4728 if (other_giv)
4729 break;
4730 else
4731 other_giv = tv;
4733 if (! tv && other_giv
4734 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4735 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4736 == INSN_UID (v->insn))
4737 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4738 auto_inc_opt = 1;
4740 /* Check for case where increment is before the address
4741 giv. Do this test in "loop order". */
4742 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4743 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4744 || (INSN_LUID (bl->biv->insn)
4745 > INSN_LUID (loop->scan_start))))
4746 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4747 && (INSN_LUID (loop->scan_start)
4748 < INSN_LUID (bl->biv->insn))))
4749 auto_inc_opt = -1;
4750 else
4751 auto_inc_opt = 1;
4753 #ifdef HAVE_cc0
4755 rtx prev;
4757 /* We can't put an insn immediately after one setting
4758 cc0, or immediately before one using cc0. */
4759 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4760 || (auto_inc_opt == -1
4761 && (prev = prev_nonnote_insn (v->insn)) != 0
4762 && INSN_P (prev)
4763 && sets_cc0_p (PATTERN (prev))))
4764 auto_inc_opt = 0;
4766 #endif
4768 if (auto_inc_opt)
4769 v->auto_inc_opt = 1;
4771 #endif
4773 /* For each place where the biv is incremented, add an insn
4774 to increment the new, reduced reg for the giv. */
4775 for (tv = bl->biv; tv; tv = tv->next_iv)
4777 rtx insert_before;
4779 /* Skip if location is the same as a previous one. */
4780 if (tv->same)
4781 continue;
4782 if (! auto_inc_opt)
4783 insert_before = NEXT_INSN (tv->insn);
4784 else if (auto_inc_opt == 1)
4785 insert_before = NEXT_INSN (v->insn);
4786 else
4787 insert_before = v->insn;
4789 if (tv->mult_val == const1_rtx)
4790 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4791 v->new_reg, v->new_reg,
4792 0, insert_before);
4793 else /* tv->mult_val == const0_rtx */
4794 /* A multiply is acceptable here
4795 since this is presumed to be seldom executed. */
4796 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4797 v->add_val, v->new_reg,
4798 0, insert_before);
4801 /* Add code at loop start to initialize giv's reduced reg. */
4803 loop_iv_add_mult_hoist (loop,
4804 extend_value_for_giv (v, bl->initial_value),
4805 v->mult_val, v->add_val, v->new_reg);
4811 /* Check for givs whose first use is their definition and whose
4812 last use is the definition of another giv. If so, it is likely
4813 dead and should not be used to derive another giv nor to
4814 eliminate a biv. */
4816 static void
4817 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
4819 struct induction *v;
4821 for (v = bl->giv; v; v = v->next_iv)
4823 if (v->ignore
4824 || (v->same && v->same->ignore))
4825 continue;
4827 if (v->giv_type == DEST_REG
4828 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4830 struct induction *v1;
4832 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4833 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4834 v->maybe_dead = 1;
4840 static void
4841 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
4843 struct induction *v;
4845 for (v = bl->giv; v; v = v->next_iv)
4847 if (v->same && v->same->ignore)
4848 v->ignore = 1;
4850 if (v->ignore)
4851 continue;
4853 /* Update expression if this was combined, in case other giv was
4854 replaced. */
4855 if (v->same)
4856 v->new_reg = replace_rtx (v->new_reg,
4857 v->same->dest_reg, v->same->new_reg);
4859 /* See if this register is known to be a pointer to something. If
4860 so, see if we can find the alignment. First see if there is a
4861 destination register that is a pointer. If so, this shares the
4862 alignment too. Next see if we can deduce anything from the
4863 computational information. If not, and this is a DEST_ADDR
4864 giv, at least we know that it's a pointer, though we don't know
4865 the alignment. */
4866 if (GET_CODE (v->new_reg) == REG
4867 && v->giv_type == DEST_REG
4868 && REG_POINTER (v->dest_reg))
4869 mark_reg_pointer (v->new_reg,
4870 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4871 else if (GET_CODE (v->new_reg) == REG
4872 && REG_POINTER (v->src_reg))
4874 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4876 if (align == 0
4877 || GET_CODE (v->add_val) != CONST_INT
4878 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4879 align = 0;
4881 mark_reg_pointer (v->new_reg, align);
4883 else if (GET_CODE (v->new_reg) == REG
4884 && GET_CODE (v->add_val) == REG
4885 && REG_POINTER (v->add_val))
4887 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4889 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4890 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4891 align = 0;
4893 mark_reg_pointer (v->new_reg, align);
4895 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4896 mark_reg_pointer (v->new_reg, 0);
4898 if (v->giv_type == DEST_ADDR)
4899 /* Store reduced reg as the address in the memref where we found
4900 this giv. */
4901 validate_change (v->insn, v->location, v->new_reg, 0);
4902 else if (v->replaceable)
4904 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4906 else
4908 rtx original_insn = v->insn;
4909 rtx note;
4911 /* Not replaceable; emit an insn to set the original giv reg from
4912 the reduced giv, same as above. */
4913 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4914 gen_move_insn (v->dest_reg,
4915 v->new_reg));
4917 /* The original insn may have a REG_EQUAL note. This note is
4918 now incorrect and may result in invalid substitutions later.
4919 The original insn is dead, but may be part of a libcall
4920 sequence, which doesn't seem worth the bother of handling. */
4921 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4922 if (note)
4923 remove_note (original_insn, note);
4926 /* When a loop is reversed, givs which depend on the reversed
4927 biv, and which are live outside the loop, must be set to their
4928 correct final value. This insn is only needed if the giv is
4929 not replaceable. The correct final value is the same as the
4930 value that the giv starts the reversed loop with. */
4931 if (bl->reversed && ! v->replaceable)
4932 loop_iv_add_mult_sink (loop,
4933 extend_value_for_giv (v, bl->initial_value),
4934 v->mult_val, v->add_val, v->dest_reg);
4935 else if (v->final_value)
4936 loop_insn_sink_or_swim (loop,
4937 gen_load_of_final_value (v->dest_reg,
4938 v->final_value));
4940 if (loop_dump_stream)
4942 fprintf (loop_dump_stream, "giv at %d reduced to ",
4943 INSN_UID (v->insn));
4944 print_simple_rtl (loop_dump_stream, v->new_reg);
4945 fprintf (loop_dump_stream, "\n");
4951 static int
4952 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
4953 struct iv_class *bl, struct induction *v,
4954 rtx test_reg)
4956 int add_cost;
4957 int benefit;
4959 benefit = v->benefit;
4960 PUT_MODE (test_reg, v->mode);
4961 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4962 test_reg, test_reg);
4964 /* Reduce benefit if not replaceable, since we will insert a
4965 move-insn to replace the insn that calculates this giv. Don't do
4966 this unless the giv is a user variable, since it will often be
4967 marked non-replaceable because of the duplication of the exit
4968 code outside the loop. In such a case, the copies we insert are
4969 dead and will be deleted. So they don't have a cost. Similar
4970 situations exist. */
4971 /* ??? The new final_[bg]iv_value code does a much better job of
4972 finding replaceable giv's, and hence this code may no longer be
4973 necessary. */
4974 if (! v->replaceable && ! bl->eliminable
4975 && REG_USERVAR_P (v->dest_reg))
4976 benefit -= copy_cost;
4978 /* Decrease the benefit to count the add-insns that we will insert
4979 to increment the reduced reg for the giv. ??? This can
4980 overestimate the run-time cost of the additional insns, e.g. if
4981 there are multiple basic blocks that increment the biv, but only
4982 one of these blocks is executed during each iteration. There is
4983 no good way to detect cases like this with the current structure
4984 of the loop optimizer. This code is more accurate for
4985 determining code size than run-time benefits. */
4986 benefit -= add_cost * bl->biv_count;
4988 /* Decide whether to strength-reduce this giv or to leave the code
4989 unchanged (recompute it from the biv each time it is used). This
4990 decision can be made independently for each giv. */
4992 #ifdef AUTO_INC_DEC
4993 /* Attempt to guess whether autoincrement will handle some of the
4994 new add insns; if so, increase BENEFIT (undo the subtraction of
4995 add_cost that was done above). */
4996 if (v->giv_type == DEST_ADDR
4997 /* Increasing the benefit is risky, since this is only a guess.
4998 Avoid increasing register pressure in cases where there would
4999 be no other benefit from reducing this giv. */
5000 && benefit > 0
5001 && GET_CODE (v->mult_val) == CONST_INT)
5003 int size = GET_MODE_SIZE (GET_MODE (v->mem));
5005 if (HAVE_POST_INCREMENT
5006 && INTVAL (v->mult_val) == size)
5007 benefit += add_cost * bl->biv_count;
5008 else if (HAVE_PRE_INCREMENT
5009 && INTVAL (v->mult_val) == size)
5010 benefit += add_cost * bl->biv_count;
5011 else if (HAVE_POST_DECREMENT
5012 && -INTVAL (v->mult_val) == size)
5013 benefit += add_cost * bl->biv_count;
5014 else if (HAVE_PRE_DECREMENT
5015 && -INTVAL (v->mult_val) == size)
5016 benefit += add_cost * bl->biv_count;
5018 #endif
5020 return benefit;
5024 /* Free IV structures for LOOP. */
5026 static void
5027 loop_ivs_free (struct loop *loop)
5029 struct loop_ivs *ivs = LOOP_IVS (loop);
5030 struct iv_class *iv = ivs->list;
5032 free (ivs->regs);
5034 while (iv)
5036 struct iv_class *next = iv->next;
5037 struct induction *induction;
5038 struct induction *next_induction;
5040 for (induction = iv->biv; induction; induction = next_induction)
5042 next_induction = induction->next_iv;
5043 free (induction);
5045 for (induction = iv->giv; induction; induction = next_induction)
5047 next_induction = induction->next_iv;
5048 free (induction);
5051 free (iv);
5052 iv = next;
5057 /* Perform strength reduction and induction variable elimination.
5059 Pseudo registers created during this function will be beyond the
5060 last valid index in several tables including
5061 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5062 problem here, because the added registers cannot be givs outside of
5063 their loop, and hence will never be reconsidered. But scan_loop
5064 must check regnos to make sure they are in bounds. */
5066 static void
5067 strength_reduce (struct loop *loop, int flags)
5069 struct loop_info *loop_info = LOOP_INFO (loop);
5070 struct loop_regs *regs = LOOP_REGS (loop);
5071 struct loop_ivs *ivs = LOOP_IVS (loop);
5072 rtx p;
5073 /* Temporary list pointer for traversing ivs->list. */
5074 struct iv_class *bl;
5075 /* Ratio of extra register life span we can justify
5076 for saving an instruction. More if loop doesn't call subroutines
5077 since in that case saving an insn makes more difference
5078 and more registers are available. */
5079 /* ??? could set this to last value of threshold in move_movables */
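/* Note that loops containing a call get only half the threshold of call-free
loops, since fewer registers are effectively free there. */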
5080 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
5081 /* Map of pseudo-register replacements. */
5082 rtx *reg_map = NULL;
5083 int reg_map_size;
5084 int unrolled_insn_copies = 0;
5085 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5086 int insn_count = count_insns_in_loop (loop);
5088 addr_placeholder = gen_reg_rtx (Pmode);
5090 ivs->n_regs = max_reg_before_loop;
5091 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
5093 /* Find all BIVs in loop. */
5094 loop_bivs_find (loop);
5096 /* Exit if there are no bivs. */
5097 if (! ivs->list)
5099 /* We can still unroll the loop anyway, but indicate that there is no
5100 strength reduction info available. */
5101 if (flags & LOOP_UNROLL)
5102 unroll_loop (loop, insn_count, 0);
5104 loop_ivs_free (loop);
5105 return;
5108 /* Determine how BIVS are initialized by looking through pre-header
5109 extended basic block. */
5110 loop_bivs_init_find (loop);
5112 /* Look at each biv and see if we can say anything better about its
5113 initial value from any initializing insns set up above. */
5114 loop_bivs_check (loop);
5116 /* Search the loop for general induction variables. */
5117 loop_givs_find (loop);
5119 /* Try to calculate and save the number of loop iterations. This is
5120 set to zero if the actual number can not be calculated. This must
5121 be called after all giv's have been identified, since otherwise it may
5122 fail if the iteration variable is a giv. */
5123 loop_iterations (loop);
5125 #ifdef HAVE_prefetch
5126 if (flags & LOOP_PREFETCH)
5127 emit_prefetch_instructions (loop);
5128 #endif
5130 /* Now for each giv for which we still don't know whether or not it is
5131 replaceable, check to see if it is replaceable because its final value
5132 can be calculated. This must be done after loop_iterations is called,
5133 so that final_giv_value will work correctly. */
5134 loop_givs_check (loop);
5136 /* Try to prove that the loop counter variable (if any) is always
5137 nonnegative; if so, record that fact with a REG_NONNEG note
5138 so that "decrement and branch until zero" insn can be used. */
5139 check_dbra_loop (loop, insn_count);
5141 /* Create reg_map to hold substitutions for replaceable giv regs.
5142 Some givs might have been made from biv increments, so look at
5143 ivs->reg_iv_type for a suitable size. */
5144 reg_map_size = ivs->n_regs;
5145 reg_map = xcalloc (reg_map_size, sizeof (rtx));
5147 /* Examine each iv class for feasibility of strength reduction/induction
5148 variable elimination. */
5150 for (bl = ivs->list; bl; bl = bl->next)
5152 struct induction *v;
5153 int benefit;
5155 /* Test whether it will be possible to eliminate this biv
5156 provided all givs are reduced. */
5157 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5159 /* This will be true at the end, if all givs which depend on this
5160 biv have been strength reduced.
5161 We can't (currently) eliminate the biv unless this is so. */
5162 bl->all_reduced = 1;
5164 /* Check each extension dependent giv in this class to see if its
5165 root biv is safe from wrapping in the interior mode. */
5166 check_ext_dependent_givs (loop, bl);
5168 /* Combine all giv's for this iv_class. */
5169 combine_givs (regs, bl);
5171 for (v = bl->giv; v; v = v->next_iv)
5173 struct induction *tv;
5175 if (v->ignore || v->same)
5176 continue;
5178 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5180 /* If an insn is not to be strength reduced, then set its ignore
5181 flag, and clear bl->all_reduced. */
5183 /* A giv that depends on a reversed biv must be reduced if it is
5184 used after the loop exit, otherwise, it would have the wrong
5185 value after the loop exit. To make it simple, just reduce all
5186 of such giv's whether or not we know they are used after the loop
5187 exit. */
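/* The test below keeps a giv only when its BENEFIT, scaled by its lifetime
and the register-pressure THRESHOLD, is at least the size of the loop in
insns (or when flag_reduce_all_givs is set or the biv was reversed). */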
5189 if (! flag_reduce_all_givs
5190 && v->lifetime * threshold * benefit < insn_count
5191 && ! bl->reversed)
5193 if (loop_dump_stream)
5194 fprintf (loop_dump_stream,
5195 "giv of insn %d not worth while, %d vs %d.\n",
5196 INSN_UID (v->insn),
5197 v->lifetime * threshold * benefit, insn_count);
5198 v->ignore = 1;
5199 bl->all_reduced = 0;
5201 else
5203 /* Check that we can increment the reduced giv without a
5204 multiply insn. If not, reject it. */
5206 for (tv = bl->biv; tv; tv = tv->next_iv)
5207 if (tv->mult_val == const1_rtx
5208 && ! product_cheap_p (tv->add_val, v->mult_val))
5210 if (loop_dump_stream)
5211 fprintf (loop_dump_stream,
5212 "giv of insn %d: would need a multiply.\n",
5213 INSN_UID (v->insn));
5214 v->ignore = 1;
5215 bl->all_reduced = 0;
5216 break;
5221 /* Check for givs whose first use is their definition and whose
5222 last use is the definition of another giv. If so, it is likely
5223 dead and should not be used to derive another giv nor to
5224 eliminate a biv. */
5225 loop_givs_dead_check (loop, bl);
5227 /* Reduce each giv that we decided to reduce. */
5228 loop_givs_reduce (loop, bl);
5230 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5231 as not reduced.
5233 For each giv register that can be reduced now: if replaceable,
5234 substitute reduced reg wherever the old giv occurs;
5235 else add new move insn "giv_reg = reduced_reg". */
5236 loop_givs_rescan (loop, bl, reg_map);
5238 /* All the givs based on the biv bl have been reduced if they
5239 merit it. */
5241 /* For each giv not marked as maybe dead that has been combined with a
5242 second giv, clear any "maybe dead" mark on that second giv.
5243 v->new_reg will either be or refer to the register of the giv it
5244 combined with.
5246 Doing this clearing avoids problems in biv elimination where
5247 a giv's new_reg is a complex value that can't be put in the
5248 insn but the giv combined with (with a reg as new_reg) is
5249 marked maybe_dead. Since the register will be used in either
5250 case, we'd prefer it be used from the simpler giv. */
5252 for (v = bl->giv; v; v = v->next_iv)
5253 if (! v->maybe_dead && v->same)
5254 v->same->maybe_dead = 0;
5256 /* Try to eliminate the biv, if it is a candidate.
5257 This won't work if ! bl->all_reduced,
5258 since the givs we planned to use might not have been reduced.
5260 We have to be careful that we didn't initially think we could
5261 eliminate this biv because of a giv that we now think may be
5262 dead and shouldn't be used as a biv replacement.
5264 Also, there is the possibility that we may have a giv that looks
5265 like it can be used to eliminate a biv, but the resulting insn
5266 isn't valid. This can happen, for example, on the 88k, where a
5267 JUMP_INSN can compare a register only with zero. Attempts to
5268 replace it with a compare with a constant will fail.
5270 Note that in cases where this call fails, we may have replaced some
5271 of the occurrences of the biv with a giv, but no harm was done in
5272 doing so in the rare cases where it can occur. */
5274 if (bl->all_reduced == 1 && bl->eliminable
5275 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5277 /* ?? If we created a new test to bypass the loop entirely,
5278 or otherwise drop straight in, based on this test, then
5279 we might want to rewrite it also. This way some later
5280 pass has more hope of removing the initialization of this
5281 biv entirely. */
5283 /* If final_value != 0, then the biv may be used after loop end
5284 and we must emit an insn to set it just in case.
5286 Reversed bivs already have an insn after the loop setting their
5287 value, so we don't need another one. We can't calculate the
5288 proper final value for such a biv here anyways. */
5289 if (bl->final_value && ! bl->reversed)
5290 loop_insn_sink_or_swim (loop,
5291 gen_load_of_final_value (bl->biv->dest_reg,
5292 bl->final_value));
5294 if (loop_dump_stream)
5295 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5296 bl->regno);
5298 /* See above note wrt final_value. But since we couldn't eliminate
5299 the biv, we must set the value after the loop instead of before. */
5300 else if (bl->final_value && ! bl->reversed)
5301 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5302 bl->final_value));
5305 /* Go through all the instructions in the loop, making all the
5306 register substitutions scheduled in REG_MAP. */
5308 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5309 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5310 || GET_CODE (p) == CALL_INSN)
5312 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5313 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5314 INSN_CODE (p) = -1;
5317 if (loop_info->n_iterations > 0)
5319 /* When we completely unroll a loop we will likely not need the increment
5320 of the loop BIV and we will not need the conditional branch at the
5321 end of the loop. */
5322 unrolled_insn_copies = insn_count - 2;
5324 #ifdef HAVE_cc0
5325 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5326 need the comparison before the conditional branch at the end of the
5327 loop. */
5328 unrolled_insn_copies -= 1;
5329 #endif
5331 /* We'll need one copy for each loop iteration. */
5332 unrolled_insn_copies *= loop_info->n_iterations;
5334 /* A little slop to account for the ability to remove initialization
5335 code, better CSE, and other secondary benefits of completely
5336 unrolling some loops. */
5337 unrolled_insn_copies -= 1;
5339 /* Clamp the value. */
5340 if (unrolled_insn_copies < 0)
5341 unrolled_insn_copies = 0;
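/* For example, a 10-insn loop known to iterate 4 times gives 8 * 4 - 1 = 31
copies (one insn per iteration fewer on a HAVE_cc0 target), so under
LOOP_AUTO_UNROLL alone the loop below would be left rolled, since 31 exceeds
the original 10 insns. */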
5344 /* Unroll loops from within strength reduction so that we can use the
5345 induction variable information that strength_reduce has already
5346 collected. Always unroll loops that would be as small or smaller
5347 unrolled than when rolled. */
5348 if ((flags & LOOP_UNROLL)
5349 || ((flags & LOOP_AUTO_UNROLL)
5350 && loop_info->n_iterations > 0
5351 && unrolled_insn_copies <= insn_count))
5352 unroll_loop (loop, insn_count, 1);
5354 #ifdef HAVE_doloop_end
5355 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5356 doloop_optimize (loop);
5357 #endif /* HAVE_doloop_end */
5359 /* In case number of iterations is known, drop branch prediction note
5360 in the branch. Do that only in second loop pass, as loop unrolling
5361 may change the number of iterations performed. */
5362 if (flags & LOOP_BCT)
5364 unsigned HOST_WIDE_INT n
5365 = loop_info->n_iterations / loop_info->unroll_number;
5366 if (n > 1)
5367 predict_insn (prev_nonnote_insn (loop->end), PRED_LOOP_ITERATIONS,
5368 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5371 if (loop_dump_stream)
5372 fprintf (loop_dump_stream, "\n");
5374 loop_ivs_free (loop);
5375 if (reg_map)
5376 free (reg_map);
5379 /* Record all basic induction variables calculated in the insn. */
5380 static rtx
5381 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
5382 int maybe_multiple)
5384 struct loop_ivs *ivs = LOOP_IVS (loop);
5385 rtx set;
5386 rtx dest_reg;
5387 rtx inc_val;
5388 rtx mult_val;
5389 rtx *location;
5391 if (GET_CODE (p) == INSN
5392 && (set = single_set (p))
5393 && GET_CODE (SET_DEST (set)) == REG)
5395 dest_reg = SET_DEST (set);
5396 if (REGNO (dest_reg) < max_reg_before_loop
5397 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5398 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5400 if (basic_induction_var (loop, SET_SRC (set),
5401 GET_MODE (SET_SRC (set)),
5402 dest_reg, p, &inc_val, &mult_val,
5403 &location))
5405 /* It is a possible basic induction variable.
5406 Create and initialize an induction structure for it. */
5408 struct induction *v = xmalloc (sizeof (struct induction));
5410 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5411 not_every_iteration, maybe_multiple);
5412 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5414 else if (REGNO (dest_reg) < ivs->n_regs)
5415 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5418 return p;
5421 /* Record all givs calculated in the insn.
5422 A register is a giv if: it is only set once, it is a function of a
5423 biv and a constant (or invariant), and it is not a biv. */
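/* For instance, a register set only to I * 8 + BASE, where I is a biv and
BASE is loop-invariant, is recorded as a giv with mult_val 8 and
add_val BASE. */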
5424 static rtx
5425 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
5426 int maybe_multiple)
5428 struct loop_regs *regs = LOOP_REGS (loop);
5430 rtx set;
5431 /* Look for a general induction variable in a register. */
5432 if (GET_CODE (p) == INSN
5433 && (set = single_set (p))
5434 && GET_CODE (SET_DEST (set)) == REG
5435 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5437 rtx src_reg;
5438 rtx dest_reg;
5439 rtx add_val;
5440 rtx mult_val;
5441 rtx ext_val;
5442 int benefit;
5443 rtx regnote = 0;
5444 rtx last_consec_insn;
5446 dest_reg = SET_DEST (set);
5447 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5448 return p;
5450 if (/* SET_SRC is a giv. */
5451 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5452 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5453 /* Equivalent expression is a giv. */
5454 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5455 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5456 &add_val, &mult_val, &ext_val, 0,
5457 &benefit, VOIDmode)))
5458 /* Don't try to handle any regs made by loop optimization.
5459 We have nothing on them in regno_first_uid, etc. */
5460 && REGNO (dest_reg) < max_reg_before_loop
5461 /* Don't recognize a BASIC_INDUCT_VAR here. */
5462 && dest_reg != src_reg
5463 /* This must be the only place where the register is set. */
5464 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5465 /* or all sets must be consecutive and make a giv. */
5466 || (benefit = consec_sets_giv (loop, benefit, p,
5467 src_reg, dest_reg,
5468 &add_val, &mult_val, &ext_val,
5469 &last_consec_insn))))
5471 struct induction *v = xmalloc (sizeof (struct induction));
5473 /* If this is a library call, increase benefit. */
5474 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5475 benefit += libcall_benefit (p);
5477 /* Skip the consecutive insns, if there are any. */
5478 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5479 p = last_consec_insn;
5481 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5482 ext_val, benefit, DEST_REG, not_every_iteration,
5483 maybe_multiple, (rtx*) 0);
5488 /* Look for givs which are memory addresses. */
5489 if (GET_CODE (p) == INSN)
5490 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5491 maybe_multiple);
5493 /* Update the status of whether giv can derive other givs. This can
5494 change when we pass a label or an insn that updates a biv. */
5495 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5496 || GET_CODE (p) == CODE_LABEL)
5497 update_giv_derive (loop, p);
5498 return p;
5501 /* Return 1 if X is a valid source for an initial value (or as value being
5502 compared against in an initial test).
5504 X must be either a register or constant and must not be clobbered between
5505 the current insn and the start of the loop.
5507 INSN is the insn containing X. */
5509 static int
5510 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
5512 if (CONSTANT_P (x))
5513 return 1;
5515 /* Only consider pseudos we know about initialized in insns whose luids
5516 we know. */
5517 if (GET_CODE (x) != REG
5518 || REGNO (x) >= max_reg_before_loop)
5519 return 0;
5521 /* Don't use call-clobbered registers across a call which clobbers them. On
5522 some machines, don't use any hard registers at all. */
5523 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5524 && (SMALL_REGISTER_CLASSES
5525 || (call_used_regs[REGNO (x)] && call_seen)))
5526 return 0;
5528 /* Don't use registers that have been clobbered before the start of the
5529 loop. */
5530 if (reg_set_between_p (x, insn, loop_start))
5531 return 0;
5533 return 1;
5536 /* Scan X for memory refs and check each memory address
5537 as a possible giv. INSN is the insn whose pattern X comes from.
5538 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5539 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5540 more than once in each loop iteration. */
5542 static void
5543 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
5544 int not_every_iteration, int maybe_multiple)
5546 int i, j;
5547 enum rtx_code code;
5548 const char *fmt;
5550 if (x == 0)
5551 return;
5553 code = GET_CODE (x);
5554 switch (code)
5556 case REG:
5557 case CONST_INT:
5558 case CONST:
5559 case CONST_DOUBLE:
5560 case SYMBOL_REF:
5561 case LABEL_REF:
5562 case PC:
5563 case CC0:
5564 case ADDR_VEC:
5565 case ADDR_DIFF_VEC:
5566 case USE:
5567 case CLOBBER:
5568 return;
5570 case MEM:
5572 rtx src_reg;
5573 rtx add_val;
5574 rtx mult_val;
5575 rtx ext_val;
5576 int benefit;
5578 /* This code used to disable creating GIVs with mult_val == 1 and
5579 add_val == 0. However, this leads to lost optimizations when
5580 it comes time to combine a set of related DEST_ADDR GIVs, since
5581 this one would not be seen. */
5583 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5584 &mult_val, &ext_val, 1, &benefit,
5585 GET_MODE (x)))
5587 /* Found one; record it. */
5588 struct induction *v = xmalloc (sizeof (struct induction));
5590 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5591 add_val, ext_val, benefit, DEST_ADDR,
5592 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5594 v->mem = x;
5597 return;
5599 default:
5600 break;
5603 /* Recursively scan the subexpressions for other mem refs. */
5605 fmt = GET_RTX_FORMAT (code);
5606 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5607 if (fmt[i] == 'e')
5608 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5609 maybe_multiple);
5610 else if (fmt[i] == 'E')
5611 for (j = 0; j < XVECLEN (x, i); j++)
5612 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5613 maybe_multiple);
5616 /* Fill in the data about one biv update.
5617 V is the `struct induction' in which we record the biv. (It is
5618 allocated by the caller, with xmalloc.)
5619 INSN is the insn that sets it.
5620 DEST_REG is the biv's reg.
5622 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5623 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5624 being set to INC_VAL.
5626 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5627 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5628 can be executed more than once per iteration. If MAYBE_MULTIPLE
5629 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5630 executed exactly once per iteration. */
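/* Illustrative examples (made-up values, not from this file):

     i = i + 4   =>  MULT_VAL = const1_rtx, INC_VAL = (const_int 4)
     i = n       =>  MULT_VAL = const0_rtx, INC_VAL = (reg n),
                     assuming `n' is loop-invariant.  */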
5632 static void
5633 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
5634 rtx inc_val, rtx mult_val, rtx *location,
5635 int not_every_iteration, int maybe_multiple)
5637 struct loop_ivs *ivs = LOOP_IVS (loop);
5638 struct iv_class *bl;
5640 v->insn = insn;
5641 v->src_reg = dest_reg;
5642 v->dest_reg = dest_reg;
5643 v->mult_val = mult_val;
5644 v->add_val = inc_val;
5645 v->ext_dependent = NULL_RTX;
5646 v->location = location;
5647 v->mode = GET_MODE (dest_reg);
5648 v->always_computable = ! not_every_iteration;
5649 v->always_executed = ! not_every_iteration;
5650 v->maybe_multiple = maybe_multiple;
5651 v->same = 0;
5653 /* Add this to the reg's iv_class, creating a class
5654 if this is the first incrementation of the reg. */
5656 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5657 if (bl == 0)
5659 /* Create and initialize new iv_class. */
5661 bl = xmalloc (sizeof (struct iv_class));
5663 bl->regno = REGNO (dest_reg);
5664 bl->biv = 0;
5665 bl->giv = 0;
5666 bl->biv_count = 0;
5667 bl->giv_count = 0;
5669 /* Set initial value to the reg itself. */
5670 bl->initial_value = dest_reg;
5671 bl->final_value = 0;
5672 /* We haven't seen the initializing insn yet. */
5673 bl->init_insn = 0;
5674 bl->init_set = 0;
5675 bl->initial_test = 0;
5676 bl->incremented = 0;
5677 bl->eliminable = 0;
5678 bl->nonneg = 0;
5679 bl->reversed = 0;
5680 bl->total_benefit = 0;
5682 /* Add this class to ivs->list. */
5683 bl->next = ivs->list;
5684 ivs->list = bl;
5686 /* Put it in the array of biv register classes. */
5687 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5689 else
5691 /* Check if location is the same as a previous one. */
5692 struct induction *induction;
5693 for (induction = bl->biv; induction; induction = induction->next_iv)
5694 if (location == induction->location)
5696 v->same = induction;
5697 break;
5701 /* Update IV_CLASS entry for this biv. */
5702 v->next_iv = bl->biv;
5703 bl->biv = v;
5704 bl->biv_count++;
5705 if (mult_val == const1_rtx)
5706 bl->incremented = 1;
5708 if (loop_dump_stream)
5709 loop_biv_dump (v, loop_dump_stream, 0);
5712 /* Fill in the data about one giv.
5713 V is the `struct induction' in which we record the giv. (It is
5714 allocated by the caller, with xmalloc.)
5715 INSN is the insn that sets it.
5716 BENEFIT estimates the savings from deleting this insn.
5717 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5718 into a register or is used as a memory address.
5720 SRC_REG is the biv reg which the giv is computed from.
5721 DEST_REG is the giv's reg (if the giv is stored in a reg).
5722 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5723 LOCATION points to the place where this giv's value appears in INSN. */
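/* Illustrative examples (made-up code, not from this file): an insn
   computing `q = i * 4' into register `q' is recorded as a DEST_REG giv
   with DEST_REG = q and LOCATION = 0, while a load from `a[i]' yields a
   DEST_ADDR giv whose LOCATION points at the address inside the MEM and
   whose DEST_REG is the shared addr_placeholder.  */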
5725 static void
5726 record_giv (const struct loop *loop, struct induction *v, rtx insn,
5727 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
5728 rtx ext_val, int benefit, enum g_types type,
5729 int not_every_iteration, int maybe_multiple, rtx *location)
5731 struct loop_ivs *ivs = LOOP_IVS (loop);
5732 struct induction *b;
5733 struct iv_class *bl;
5734 rtx set = single_set (insn);
5735 rtx temp;
5737 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5738 undo the MULT canonicalization that we performed earlier. */
5739 temp = simplify_rtx (add_val);
5740 if (temp
5741 && ! (GET_CODE (add_val) == MULT
5742 && GET_CODE (temp) == ASHIFT))
5743 add_val = temp;
5745 v->insn = insn;
5746 v->src_reg = src_reg;
5747 v->giv_type = type;
5748 v->dest_reg = dest_reg;
5749 v->mult_val = mult_val;
5750 v->add_val = add_val;
5751 v->ext_dependent = ext_val;
5752 v->benefit = benefit;
5753 v->location = location;
5754 v->cant_derive = 0;
5755 v->combined_with = 0;
5756 v->maybe_multiple = maybe_multiple;
5757 v->maybe_dead = 0;
5758 v->derive_adjustment = 0;
5759 v->same = 0;
5760 v->ignore = 0;
5761 v->new_reg = 0;
5762 v->final_value = 0;
5763 v->same_insn = 0;
5764 v->auto_inc_opt = 0;
5765 v->unrolled = 0;
5766 v->shared = 0;
5768 /* The v->always_computable field is used in update_giv_derive, to
5769 determine whether a giv can be used to derive another giv. For a
5770 DEST_REG giv, INSN computes a new value for the giv, so its value
5771 isn't computable if INSN isn't executed every iteration.
5772 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5773 it does not compute a new value. Hence the value is always computable
5774 regardless of whether INSN is executed each iteration. */
5776 if (type == DEST_ADDR)
5777 v->always_computable = 1;
5778 else
5779 v->always_computable = ! not_every_iteration;
5781 v->always_executed = ! not_every_iteration;
5783 if (type == DEST_ADDR)
5785 v->mode = GET_MODE (*location);
5786 v->lifetime = 1;
5788 else /* type == DEST_REG */
5790 v->mode = GET_MODE (SET_DEST (set));
5792 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5794 /* If the lifetime is zero, it means that this register is
5795 really a dead store. So mark this as a giv that can be
5796 ignored. This will not prevent the biv from being eliminated. */
5797 if (v->lifetime == 0)
5798 v->ignore = 1;
5800 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5801 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5804 /* Add the giv to the class of givs computed from one biv. */
5806 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5807 if (bl)
5809 v->next_iv = bl->giv;
5810 bl->giv = v;
5811 /* Don't count DEST_ADDR. This is supposed to count the number of
5812 insns that calculate givs. */
5813 if (type == DEST_REG)
5814 bl->giv_count++;
5815 bl->total_benefit += benefit;
5817 else
5818 /* Fatal error, biv missing for this giv? */
5819 abort ();
5821 if (type == DEST_ADDR)
5823 v->replaceable = 1;
5824 v->not_replaceable = 0;
5826 else
5828 /* The giv can be replaced outright by the reduced register only if all
5829 of the following conditions are true:
5830 - the insn that sets the giv is always executed on any iteration
5831 on which the giv is used at all
5832 (there are two ways to deduce this:
5833 either the insn is executed on every iteration,
5834 or all uses follow that insn in the same basic block),
5835 - the giv is not used outside the loop
5836 - no assignments to the biv occur during the giv's lifetime. */
5838 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5839 /* Previous line always fails if INSN was moved by loop opt. */
5840 && REGNO_LAST_LUID (REGNO (dest_reg))
5841 < INSN_LUID (loop->end)
5842 && (! not_every_iteration
5843 || last_use_this_basic_block (dest_reg, insn)))
5845 /* Now check that there are no assignments to the biv within the
5846 giv's lifetime. This requires two separate checks. */
5848 /* Check each biv update, and fail if any are between the first
5849 and last use of the giv.
5851 If this loop contains an inner loop that was unrolled, then
5852 the insn modifying the biv may have been emitted by the loop
5853 unrolling code, and hence does not have a valid luid. Just
5854 mark the giv as not replaceable in this case. It is not very
5855 useful as a biv, because it is used in two different loops.
5856 It is very unlikely that we would be able to optimize the giv
5857 using this biv anyway. */
5859 v->replaceable = 1;
5860 v->not_replaceable = 0;
5861 for (b = bl->biv; b; b = b->next_iv)
5863 if (INSN_UID (b->insn) >= max_uid_for_loop
5864 || ((INSN_LUID (b->insn)
5865 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5866 && (INSN_LUID (b->insn)
5867 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5869 v->replaceable = 0;
5870 v->not_replaceable = 1;
5871 break;
5875 /* If there are any backwards branches that go from after the
5876 biv update to before it, then this giv is not replaceable. */
5877 if (v->replaceable)
5878 for (b = bl->biv; b; b = b->next_iv)
5879 if (back_branch_in_range_p (loop, b->insn))
5881 v->replaceable = 0;
5882 v->not_replaceable = 1;
5883 break;
5886 else
5888 /* May still be replaceable; we don't have enough info here to
5889 decide. */
5890 v->replaceable = 0;
5891 v->not_replaceable = 0;
5895 /* Record whether the add_val contains a const_int, for later use by
5896 combine_givs. */
5898 rtx tem = add_val;
5900 v->no_const_addval = 1;
5901 if (tem == const0_rtx)
5903 else if (CONSTANT_P (add_val))
5904 v->no_const_addval = 0;
5905 if (GET_CODE (tem) == PLUS)
5907 while (1)
5909 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5910 tem = XEXP (tem, 0);
5911 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5912 tem = XEXP (tem, 1);
5913 else
5914 break;
5916 if (CONSTANT_P (XEXP (tem, 1)))
5917 v->no_const_addval = 0;
5921 if (loop_dump_stream)
5922 loop_giv_dump (v, loop_dump_stream, 0);
5925 /* All this does is determine whether a giv can be made replaceable because
5926 its final value can be calculated. This code can not be part of record_giv
5927 above, because final_giv_value requires that the number of loop iterations
5928 be known, and that can not be accurately calculated until after all givs
5929 have been identified. */
5931 static void
5932 check_final_value (const struct loop *loop, struct induction *v)
5934 rtx final_value = 0;
5936 /* DEST_ADDR givs will never reach here, because they are always marked
5937 replaceable above in record_giv. */
5939 /* The giv can be replaced outright by the reduced register only if all
5940 of the following conditions are true:
5941 - the insn that sets the giv is always executed on any iteration
5942 on which the giv is used at all
5943 (there are two ways to deduce this:
5944 either the insn is executed on every iteration,
5945 or all uses follow that insn in the same basic block),
5946 - its final value can be calculated (this condition is different
5947 than the one above in record_giv)
5948 - it is not used before it is set
5949 - no assignments to the biv occur during the giv's lifetime. */
5951 #if 0
5952 /* This is only called now when replaceable is known to be false. */
5953 /* Clear replaceable, so that it won't confuse final_giv_value. */
5954 v->replaceable = 0;
5955 #endif
5957 if ((final_value = final_giv_value (loop, v))
5958 && (v->always_executed
5959 || last_use_this_basic_block (v->dest_reg, v->insn)))
5961 int biv_increment_seen = 0, before_giv_insn = 0;
5962 rtx p = v->insn;
5963 rtx last_giv_use;
5965 v->replaceable = 1;
5966 v->not_replaceable = 0;
5968 /* When trying to determine whether or not a biv increment occurs
5969 during the lifetime of the giv, we can ignore uses of the variable
5970 outside the loop because final_value is true. Hence we can not
5971 use regno_last_uid and regno_first_uid as above in record_giv. */
5973 /* Search the loop to determine whether any assignments to the
5974 biv occur during the giv's lifetime. Start with the insn
5975 that sets the giv, and search around the loop until we come
5976 back to that insn again.
5978 Also fail if there is a jump within the giv's lifetime that jumps
5979 to somewhere outside the lifetime but still within the loop. This
5980 catches spaghetti code where the execution order is not linear, and
5981 hence the above test fails. Here we assume that the giv lifetime
5982 does not extend from one iteration of the loop to the next, so as
5983 to make the test easier. Since the lifetime isn't known yet,
5984 this requires two loops. See also record_giv above. */
5986 last_giv_use = v->insn;
5988 while (1)
5990 p = NEXT_INSN (p);
5991 if (p == loop->end)
5993 before_giv_insn = 1;
5994 p = NEXT_INSN (loop->start);
5996 if (p == v->insn)
5997 break;
5999 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
6000 || GET_CODE (p) == CALL_INSN)
6002 /* It is possible for the BIV increment to use the GIV if we
6003 have a cycle. Thus we must be sure to check each insn for
6004 both BIV and GIV uses, and we must check for BIV uses
6005 first. */
6007 if (! biv_increment_seen
6008 && reg_set_p (v->src_reg, PATTERN (p)))
6009 biv_increment_seen = 1;
6011 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
6013 if (biv_increment_seen || before_giv_insn)
6015 v->replaceable = 0;
6016 v->not_replaceable = 1;
6017 break;
6019 last_giv_use = p;
6024 /* Now that the lifetime of the giv is known, check for branches
6025 from within the lifetime to outside the lifetime if it is still
6026 replaceable. */
6028 if (v->replaceable)
6030 p = v->insn;
6031 while (1)
6033 p = NEXT_INSN (p);
6034 if (p == loop->end)
6035 p = NEXT_INSN (loop->start);
6036 if (p == last_giv_use)
6037 break;
6039 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6040 && LABEL_NAME (JUMP_LABEL (p))
6041 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6042 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6043 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6044 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6046 v->replaceable = 0;
6047 v->not_replaceable = 1;
6049 if (loop_dump_stream)
6050 fprintf (loop_dump_stream,
6051 "Found branch outside giv lifetime.\n");
6053 break;
6058 /* If it is replaceable, then save the final value. */
6059 if (v->replaceable)
6060 v->final_value = final_value;
6063 if (loop_dump_stream && v->replaceable)
6064 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6065 INSN_UID (v->insn), REGNO (v->dest_reg));
6068 /* Update the status of whether a giv can derive other givs.
6070 We need to do something special if there is or may be an update to the biv
6071 between the time the giv is defined and the time it is used to derive
6072 another giv.
6074 In addition, a giv that is only conditionally set is not allowed to
6075 derive another giv once a label has been passed.
6077 The cases we look at are when a label or an update to a biv is passed. */
6079 static void
6080 update_giv_derive (const struct loop *loop, rtx p)
6082 struct loop_ivs *ivs = LOOP_IVS (loop);
6083 struct iv_class *bl;
6084 struct induction *biv, *giv;
6085 rtx tem;
6086 int dummy;
6088 /* Search all IV classes, then all bivs, and finally all givs.
6090 There are three cases we are concerned with. First we have the situation
6091 of a giv that is only updated conditionally. In that case, it may not
6092 derive any givs after a label is passed.
6094 The second case is when a biv update occurs, or may occur, after the
6095 definition of a giv. For certain biv updates (see below) that are
6096 known to occur between the giv definition and use, we can adjust the
6097 giv definition. For others, or when the biv update is conditional,
6098 we must prevent the giv from deriving any other givs. There are two
6099 sub-cases within this case.
6101 If this is a label, we are concerned with any biv update that is done
6102 conditionally, since it may be done after the giv is defined followed by
6103 a branch here (actually, we need to pass both a jump and a label, but
6104 this extra tracking doesn't seem worth it).
6106 If this is a jump, we are concerned about any biv update that may be
6107 executed multiple times. We are actually only concerned about
6108 backward jumps, but it is probably not worth performing the test
6109 on the jump again here.
6111 If this is a biv update, we must adjust the giv status to show that a
6112 subsequent biv update was performed. If this adjustment cannot be done,
6113 the giv cannot derive further givs. */
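/* Illustrative example (made-up values, not from this file): if a giv
   was recorded as g = i * 4 (mult_val 4) and P is the biv update
   i = i + 1 (add_val 1), the product 1 * 4 is simplified to (const_int 4)
   and stored in the giv's derive_adjustment, so a giv later derived from
   g can be compensated; if the product cannot be simplified, cant_derive
   is set instead.  */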
6115 for (bl = ivs->list; bl; bl = bl->next)
6116 for (biv = bl->biv; biv; biv = biv->next_iv)
6117 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6118 || biv->insn == p)
6120 /* Skip if location is the same as a previous one. */
6121 if (biv->same)
6122 continue;
6124 for (giv = bl->giv; giv; giv = giv->next_iv)
6126 /* If cant_derive is already true, there is no point in
6127 checking all of these conditions again. */
6128 if (giv->cant_derive)
6129 continue;
6131 /* If this giv is conditionally set and we have passed a label,
6132 it cannot derive anything. */
6133 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6134 giv->cant_derive = 1;
6136 /* Skip givs that have mult_val == 0, since
6137 they are really invariants. Also skip those that are
6138 replaceable, since we know their lifetime doesn't contain
6139 any biv update. */
6140 else if (giv->mult_val == const0_rtx || giv->replaceable)
6141 continue;
6143 /* The only way we can allow this giv to derive another
6144 is if this is a biv increment and we can form the product
6145 of biv->add_val and giv->mult_val. In this case, we will
6146 be able to compute a compensation. */
6147 else if (biv->insn == p)
6149 rtx ext_val_dummy;
6151 tem = 0;
6152 if (biv->mult_val == const1_rtx)
6153 tem = simplify_giv_expr (loop,
6154 gen_rtx_MULT (giv->mode,
6155 biv->add_val,
6156 giv->mult_val),
6157 &ext_val_dummy, &dummy);
6159 if (tem && giv->derive_adjustment)
6160 tem = simplify_giv_expr
6161 (loop,
6162 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6163 &ext_val_dummy, &dummy);
6165 if (tem)
6166 giv->derive_adjustment = tem;
6167 else
6168 giv->cant_derive = 1;
6170 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6171 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6172 giv->cant_derive = 1;
6177 /* Check whether an insn is an increment legitimate for a basic induction var.
6178 X is the source of insn P, or a part of it.
6179 MODE is the mode in which X should be interpreted.
6181 DEST_REG is the putative biv, also the destination of the insn.
6182 We accept patterns of these forms:
6183 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6184 REG = INVARIANT + REG
6186 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6187 store the additive term into *INC_VAL, and store the place where
6188 we found the additive term into *LOCATION.
6190 If X is an assignment of an invariant into DEST_REG, we set
6191 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6193 We also want to detect a BIV when it corresponds to a variable
6194 whose mode was promoted. In that case, an increment
6195 of the variable may be a PLUS that adds a SUBREG of that variable to
6196 an invariant and then sign- or zero-extends the result of the PLUS
6197 into the variable.
6199 Most GIVs in such cases will be in the promoted mode, since that is
6200 probably the natural computation mode (and almost certainly the mode
6201 used for addresses) on the machine. So we view the pseudo-reg containing
6202 the variable as the BIV, as if it were simply incremented.
6204 Note that treating the entire pseudo as a BIV will result in making
6205 simple increments to any GIVs based on it. However, if the variable
6206 overflows in its declared mode but not its promoted mode, the result will
6207 be incorrect. This is acceptable if the variable is signed, since
6208 overflows in such cases are undefined, but not if it is unsigned, since
6209 those overflows are defined. So we only check for SIGN_EXTEND and
6210 not ZERO_EXTEND.
6212 If we cannot find a biv, we return 0. */
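/* Illustrative examples (made-up register numbers, not from this file):

     X = (plus (reg 100) (const_int 4)), DEST_REG = (reg 100)
       => return 1, *MULT_VAL = const1_rtx, *INC_VAL = (const_int 4),
          *LOCATION = address of the (const_int 4) operand;

     X = (const_int 0), in the innermost loop
       => return 1, *MULT_VAL = const0_rtx, *INC_VAL = (const_int 0).  */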
6214 static int
6215 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
6216 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
6217 rtx **location)
6219 enum rtx_code code;
6220 rtx *argp, arg;
6221 rtx insn, set = 0, last, inc;
6223 code = GET_CODE (x);
6224 *location = NULL;
6225 switch (code)
6227 case PLUS:
6228 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6229 || (GET_CODE (XEXP (x, 0)) == SUBREG
6230 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6231 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6233 argp = &XEXP (x, 1);
6235 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6236 || (GET_CODE (XEXP (x, 1)) == SUBREG
6237 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6238 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6240 argp = &XEXP (x, 0);
6242 else
6243 return 0;
6245 arg = *argp;
6246 if (loop_invariant_p (loop, arg) != 1)
6247 return 0;
6249 /* convert_modes can emit new instructions, e.g. when arg is a loop
6250 invariant MEM and dest_reg has a different mode.
6251 These instructions would be emitted after the end of the function
6252 and then *inc_val would be an uninitialized pseudo.
6253 Detect this and bail in this case.
6254 Other ways to solve this would be to introduce a convert_modes
6255 variant that is allowed to fail but not allowed to emit new
6256 instructions, to emit these instructions before the loop start and
6257 let them be garbage collected if *inc_val is never used, or to save
6258 the *inc_val initialization sequence generated here and emit it at
6259 some suitable place once *inc_val is actually used. */
6260 last = get_last_insn ();
6261 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6262 if (get_last_insn () != last)
6264 delete_insns_since (last);
6265 return 0;
6268 *inc_val = inc;
6269 *mult_val = const1_rtx;
6270 *location = argp;
6271 return 1;
6273 case SUBREG:
6274 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
6275 handle addition of promoted variables.
6276 ??? The comment at the start of this function is wrong: promoted
6277 variable increments don't look like it says they do. */
6278 return basic_induction_var (loop, SUBREG_REG (x),
6279 GET_MODE (SUBREG_REG (x)),
6280 dest_reg, p, inc_val, mult_val, location);
6282 case REG:
6283 /* If this register is assigned in a previous insn, look at its
6284 source, but don't go outside the loop or past a label. */
6286 /* If this sets a register to itself, we would repeat any previous
6287 biv increment if we applied this strategy blindly. */
6288 if (rtx_equal_p (dest_reg, x))
6289 return 0;
6291 insn = p;
6292 while (1)
6294 rtx dest;
6297 insn = PREV_INSN (insn);
6299 while (insn && GET_CODE (insn) == NOTE
6300 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6302 if (!insn)
6303 break;
6304 set = single_set (insn);
6305 if (set == 0)
6306 break;
6307 dest = SET_DEST (set);
6308 if (dest == x
6309 || (GET_CODE (dest) == SUBREG
6310 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6311 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6312 && SUBREG_REG (dest) == x))
6313 return basic_induction_var (loop, SET_SRC (set),
6314 (GET_MODE (SET_SRC (set)) == VOIDmode
6315 ? GET_MODE (x)
6316 : GET_MODE (SET_SRC (set))),
6317 dest_reg, insn,
6318 inc_val, mult_val, location);
6320 while (GET_CODE (dest) == SIGN_EXTRACT
6321 || GET_CODE (dest) == ZERO_EXTRACT
6322 || GET_CODE (dest) == SUBREG
6323 || GET_CODE (dest) == STRICT_LOW_PART)
6324 dest = XEXP (dest, 0);
6325 if (dest == x)
6326 break;
6328 /* Fall through. */
6330 /* Can accept a constant setting of the biv only when inside the innermost loop.
6331 Otherwise, a biv of an inner loop may be incorrectly recognized
6332 as a biv of the outer loop,
6333 causing code to be moved INTO the inner loop. */
6334 case MEM:
6335 if (loop_invariant_p (loop, x) != 1)
6336 return 0;
6337 case CONST_INT:
6338 case SYMBOL_REF:
6339 case CONST:
6340 /* convert_modes aborts if we try to convert to or from CCmode, so just
6341 exclude that case. It is very unlikely that a condition code value
6342 would be a useful iterator anyway. convert_modes aborts if we try to
6343 convert a float mode to non-float or vice versa too. */
6344 if (loop->level == 1
6345 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6346 && GET_MODE_CLASS (mode) != MODE_CC)
6348 /* Possible bug here? Perhaps we don't know the mode of X. */
6349 last = get_last_insn ();
6350 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6351 if (get_last_insn () != last)
6353 delete_insns_since (last);
6354 return 0;
6357 *inc_val = inc;
6358 *mult_val = const0_rtx;
6359 return 1;
6361 else
6362 return 0;
6364 case SIGN_EXTEND:
6365 /* Ignore this BIV if signed arithmetic overflow is defined. */
6366 if (flag_wrapv)
6367 return 0;
6368 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6369 dest_reg, p, inc_val, mult_val, location);
6371 case ASHIFTRT:
6372 /* Similar, since this can be a sign extension. */
6373 for (insn = PREV_INSN (p);
6374 (insn && GET_CODE (insn) == NOTE
6375 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6376 insn = PREV_INSN (insn))
6379 if (insn)
6380 set = single_set (insn);
6382 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6383 && set && SET_DEST (set) == XEXP (x, 0)
6384 && GET_CODE (XEXP (x, 1)) == CONST_INT
6385 && INTVAL (XEXP (x, 1)) >= 0
6386 && GET_CODE (SET_SRC (set)) == ASHIFT
6387 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6388 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6389 GET_MODE (XEXP (x, 0)),
6390 dest_reg, insn, inc_val, mult_val,
6391 location);
6392 return 0;
6394 default:
6395 return 0;
6399 /* A general induction variable (giv) is any quantity that is a linear
6400 function of a basic induction variable,
6401 i.e. giv = biv * mult_val + add_val.
6402 The coefficients can be any loop invariant quantity.
6403 A giv need not be computed directly from the biv;
6404 it can be computed by way of other givs. */
6406 /* Determine whether X computes a giv.
6407 If it does, return a nonzero value
6408 which is the benefit from eliminating the computation of X;
6409 set *SRC_REG to the register of the biv that it is computed from;
6410 set *ADD_VAL and *MULT_VAL to the coefficients,
6411 such that the value of X is biv * mult + add; */
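/* Illustrative example (made-up registers, not from this file): if
   (reg 100) is a biv and (reg 101) is loop-invariant, then for
   X = (plus (mult (reg 100) (const_int 4)) (reg 101)) this returns
   nonzero with *SRC_REG = (reg 100), *MULT_VAL = (const_int 4) and
   *ADD_VAL = (reg 101).  */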
6413 static int
6414 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
6415 rtx *add_val, rtx *mult_val, rtx *ext_val,
6416 int is_addr, int *pbenefit,
6417 enum machine_mode addr_mode)
6419 struct loop_ivs *ivs = LOOP_IVS (loop);
6420 rtx orig_x = x;
6422 /* If this is an invariant, forget it, it isn't a giv. */
6423 if (loop_invariant_p (loop, x) == 1)
6424 return 0;
6426 *pbenefit = 0;
6427 *ext_val = NULL_RTX;
6428 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6429 if (x == 0)
6430 return 0;
6432 switch (GET_CODE (x))
6434 case USE:
6435 case CONST_INT:
6436 /* Since this is now an invariant and wasn't before, it must be a giv
6437 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6438 with. */
6439 *src_reg = ivs->list->biv->dest_reg;
6440 *mult_val = const0_rtx;
6441 *add_val = x;
6442 break;
6444 case REG:
6445 /* This is equivalent to a BIV. */
6446 *src_reg = x;
6447 *mult_val = const1_rtx;
6448 *add_val = const0_rtx;
6449 break;
6451 case PLUS:
6452 /* Either (plus (biv) (invar)) or
6453 (plus (mult (biv) (invar_1)) (invar_2)). */
6454 if (GET_CODE (XEXP (x, 0)) == MULT)
6456 *src_reg = XEXP (XEXP (x, 0), 0);
6457 *mult_val = XEXP (XEXP (x, 0), 1);
6459 else
6461 *src_reg = XEXP (x, 0);
6462 *mult_val = const1_rtx;
6464 *add_val = XEXP (x, 1);
6465 break;
6467 case MULT:
6468 /* ADD_VAL is zero. */
6469 *src_reg = XEXP (x, 0);
6470 *mult_val = XEXP (x, 1);
6471 *add_val = const0_rtx;
6472 break;
6474 default:
6475 abort ();
6478 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6479 unless they are CONST_INT). */
6480 if (GET_CODE (*add_val) == USE)
6481 *add_val = XEXP (*add_val, 0);
6482 if (GET_CODE (*mult_val) == USE)
6483 *mult_val = XEXP (*mult_val, 0);
6485 if (is_addr)
6486 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6487 else
6488 *pbenefit += rtx_cost (orig_x, SET);
6490 /* Always return true if this is a giv so it will be detected as such,
6491 even if the benefit is zero or negative. This allows elimination
6492 of bivs that might otherwise not be eliminated. */
6493 return 1;
6496 /* Given an expression, X, try to form it as a linear function of a biv.
6497 We will canonicalize it to be of the form
6498 (plus (mult (BIV) (invar_1))
6499 (invar_2))
6500 with possible degeneracies.
6502 The invariant expressions must each be of a form that can be used as a
6503 machine operand. We surround them with a USE rtx (a hack, but localized
6504 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6505 routine; it is the caller's responsibility to strip them.
6507 If no such canonicalization is possible (i.e., two biv's are used or an
6508 expression that is neither invariant nor a biv or giv), this routine
6509 returns 0.
6511 For a nonzero return, the result will have a code of CONST_INT, USE,
6512 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6514 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
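/* Illustrative example (made-up operands, not from this file): with
   (reg 100) a biv, the expression (mult (plus (reg 100) (const_int 3))
   (const_int 4)) is canonicalized by distribution to
   (plus (mult (reg 100) (const_int 4)) (const_int 12)).  */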
6516 static rtx sge_plus (enum machine_mode, rtx, rtx);
6517 static rtx sge_plus_constant (rtx, rtx);
6519 static rtx
6520 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
6522 struct loop_ivs *ivs = LOOP_IVS (loop);
6523 struct loop_regs *regs = LOOP_REGS (loop);
6524 enum machine_mode mode = GET_MODE (x);
6525 rtx arg0, arg1;
6526 rtx tem;
6528 /* If this is not an integer mode, or if we cannot do arithmetic in this
6529 mode, this can't be a giv. */
6530 if (mode != VOIDmode
6531 && (GET_MODE_CLASS (mode) != MODE_INT
6532 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6533 return NULL_RTX;
6535 switch (GET_CODE (x))
6537 case PLUS:
6538 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6539 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6540 if (arg0 == 0 || arg1 == 0)
6541 return NULL_RTX;
6543 /* Put constant last, CONST_INT last if both constant. */
6544 if ((GET_CODE (arg0) == USE
6545 || GET_CODE (arg0) == CONST_INT)
6546 && ! ((GET_CODE (arg0) == USE
6547 && GET_CODE (arg1) == USE)
6548 || GET_CODE (arg1) == CONST_INT))
6549 tem = arg0, arg0 = arg1, arg1 = tem;
6551 /* Handle addition of zero, then addition of an invariant. */
6552 if (arg1 == const0_rtx)
6553 return arg0;
6554 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6555 switch (GET_CODE (arg0))
6557 case CONST_INT:
6558 case USE:
6559 /* Adding two invariants must result in an invariant, so enclose
6560 addition operation inside a USE and return it. */
6561 if (GET_CODE (arg0) == USE)
6562 arg0 = XEXP (arg0, 0);
6563 if (GET_CODE (arg1) == USE)
6564 arg1 = XEXP (arg1, 0);
6566 if (GET_CODE (arg0) == CONST_INT)
6567 tem = arg0, arg0 = arg1, arg1 = tem;
6568 if (GET_CODE (arg1) == CONST_INT)
6569 tem = sge_plus_constant (arg0, arg1);
6570 else
6571 tem = sge_plus (mode, arg0, arg1);
6573 if (GET_CODE (tem) != CONST_INT)
6574 tem = gen_rtx_USE (mode, tem);
6575 return tem;
6577 case REG:
6578 case MULT:
6579 /* biv + invar or mult + invar. Return sum. */
6580 return gen_rtx_PLUS (mode, arg0, arg1);
6582 case PLUS:
6583 /* (a + invar_1) + invar_2. Associate. */
6584 return
6585 simplify_giv_expr (loop,
6586 gen_rtx_PLUS (mode,
6587 XEXP (arg0, 0),
6588 gen_rtx_PLUS (mode,
6589 XEXP (arg0, 1),
6590 arg1)),
6591 ext_val, benefit);
6593 default:
6594 abort ();
6597 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6598 MULT to reduce cases. */
6599 if (GET_CODE (arg0) == REG)
6600 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6601 if (GET_CODE (arg1) == REG)
6602 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6604 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6605 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6606 Recurse to associate the second PLUS. */
6607 if (GET_CODE (arg1) == MULT)
6608 tem = arg0, arg0 = arg1, arg1 = tem;
6610 if (GET_CODE (arg1) == PLUS)
6611 return
6612 simplify_giv_expr (loop,
6613 gen_rtx_PLUS (mode,
6614 gen_rtx_PLUS (mode, arg0,
6615 XEXP (arg1, 0)),
6616 XEXP (arg1, 1)),
6617 ext_val, benefit);
6619 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6620 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6621 return NULL_RTX;
6623 if (!rtx_equal_p (arg0, arg1))
6624 return NULL_RTX;
6626 return simplify_giv_expr (loop,
6627 gen_rtx_MULT (mode,
6628 XEXP (arg0, 0),
6629 gen_rtx_PLUS (mode,
6630 XEXP (arg0, 1),
6631 XEXP (arg1, 1))),
6632 ext_val, benefit);
6634 case MINUS:
6635 /* Handle "a - b" as "a + b * (-1)". */
6636 return simplify_giv_expr (loop,
6637 gen_rtx_PLUS (mode,
6638 XEXP (x, 0),
6639 gen_rtx_MULT (mode,
6640 XEXP (x, 1),
6641 constm1_rtx)),
6642 ext_val, benefit);
6644 case MULT:
6645 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6646 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6647 if (arg0 == 0 || arg1 == 0)
6648 return NULL_RTX;
6650 /* Put constant last, CONST_INT last if both constant. */
6651 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6652 && GET_CODE (arg1) != CONST_INT)
6653 tem = arg0, arg0 = arg1, arg1 = tem;
6655 /* If second argument is not now constant, not giv. */
6656 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6657 return NULL_RTX;
6659 /* Handle multiply by 0 or 1. */
6660 if (arg1 == const0_rtx)
6661 return const0_rtx;
6663 else if (arg1 == const1_rtx)
6664 return arg0;
6666 switch (GET_CODE (arg0))
6668 case REG:
6669 /* biv * invar. Done. */
6670 return gen_rtx_MULT (mode, arg0, arg1);
6672 case CONST_INT:
6673 /* Product of two constants. */
6674 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6676 case USE:
6677 /* invar * invar is a giv, but attempt to simplify it somehow. */
6678 if (GET_CODE (arg1) != CONST_INT)
6679 return NULL_RTX;
6681 arg0 = XEXP (arg0, 0);
6682 if (GET_CODE (arg0) == MULT)
6684 /* (invar_0 * invar_1) * invar_2. Associate. */
6685 return simplify_giv_expr (loop,
6686 gen_rtx_MULT (mode,
6687 XEXP (arg0, 0),
6688 gen_rtx_MULT (mode,
6689 XEXP (arg0, 1),
6691 arg1)),
6692 ext_val, benefit);
6694 /* Propagate the MULT expressions to the innermost nodes. */
6695 else if (GET_CODE (arg0) == PLUS)
6697 /* (invar_0 + invar_1) * invar_2. Distribute. */
6698 return simplify_giv_expr (loop,
6699 gen_rtx_PLUS (mode,
6700 gen_rtx_MULT (mode,
6701 XEXP (arg0, 0),
6703 arg1),
6704 gen_rtx_MULT (mode,
6705 XEXP (arg0, 1),
6707 arg1)),
6708 ext_val, benefit);
6710 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6712 case MULT:
6713 /* (a * invar_1) * invar_2. Associate. */
6714 return simplify_giv_expr (loop,
6715 gen_rtx_MULT (mode,
6716 XEXP (arg0, 0),
6717 gen_rtx_MULT (mode,
6718 XEXP (arg0, 1),
6719 arg1)),
6720 ext_val, benefit);
6722 case PLUS:
6723 /* (a + invar_1) * invar_2. Distribute. */
6724 return simplify_giv_expr (loop,
6725 gen_rtx_PLUS (mode,
6726 gen_rtx_MULT (mode,
6727 XEXP (arg0, 0),
6728 arg1),
6729 gen_rtx_MULT (mode,
6730 XEXP (arg0, 1),
6731 arg1)),
6732 ext_val, benefit);
6734 default:
6735 abort ();
6738 case ASHIFT:
6739 /* Shift by constant is multiply by power of two. */
6740 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6741 return 0;
6743 return
6744 simplify_giv_expr (loop,
6745 gen_rtx_MULT (mode,
6746 XEXP (x, 0),
6747 GEN_INT ((HOST_WIDE_INT) 1
6748 << INTVAL (XEXP (x, 1)))),
6749 ext_val, benefit);
6751 case NEG:
6752 /* "-a" is "a * (-1)" */
6753 return simplify_giv_expr (loop,
6754 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6755 ext_val, benefit);
6757 case NOT:
6758 /* "~a" is "-a - 1". Silly, but easy. */
6759 return simplify_giv_expr (loop,
6760 gen_rtx_MINUS (mode,
6761 gen_rtx_NEG (mode, XEXP (x, 0)),
6762 const1_rtx),
6763 ext_val, benefit);
6765 case USE:
6766 /* Already in proper form for invariant. */
6767 return x;
6769 case SIGN_EXTEND:
6770 case ZERO_EXTEND:
6771 case TRUNCATE:
6772 /* Conditionally recognize extensions of simple IVs. After we've
6773 computed loop traversal counts and verified the range of the
6774 source IV, we'll reevaluate this as a GIV. */
6775 if (*ext_val == NULL_RTX)
6777 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6778 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6780 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6781 return arg0;
6784 goto do_default;
6786 case REG:
6787 /* If this is a new register, we can't deal with it. */
6788 if (REGNO (x) >= max_reg_before_loop)
6789 return 0;
6791 /* Check for biv or giv. */
6792 switch (REG_IV_TYPE (ivs, REGNO (x)))
6794 case BASIC_INDUCT:
6795 return x;
6796 case GENERAL_INDUCT:
6798 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6800 /* Form expression from giv and add benefit. Ensure this giv
6801 can derive another and subtract any needed adjustment if so. */
6803 /* Increasing the benefit here is risky. The only case in which it
6804 is arguably correct is if this is the only use of V. In other
6805 cases, this will artificially inflate the benefit of the current
6806 giv, and lead to suboptimal code. Thus, it is disabled, since
6807 potentially not reducing an only marginally beneficial giv is
6808 less harmful than reducing many givs that are not really
6809 beneficial. */
6811 rtx single_use = regs->array[REGNO (x)].single_usage;
6812 if (single_use && single_use != const0_rtx)
6813 *benefit += v->benefit;
6816 if (v->cant_derive)
6817 return 0;
6819 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6820 v->src_reg, v->mult_val),
6821 v->add_val);
6823 if (v->derive_adjustment)
6824 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6825 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6826 if (*ext_val)
6828 if (!v->ext_dependent)
6829 return arg0;
6831 else
6833 *ext_val = v->ext_dependent;
6834 return arg0;
6836 return 0;
6839 default:
6840 do_default:
6841 /* If it isn't an induction variable, and it is invariant, we
6842 may be able to simplify things further by looking through
6843 the bits we just moved outside the loop. */
6844 if (loop_invariant_p (loop, x) == 1)
6846 struct movable *m;
6847 struct loop_movables *movables = LOOP_MOVABLES (loop);
6849 for (m = movables->head; m; m = m->next)
6850 if (rtx_equal_p (x, m->set_dest))
6852 /* Ok, we found a match. Substitute and simplify. */
6854 /* If we match another movable, we must use that, as
6855 this one is going away. */
6856 if (m->match)
6857 return simplify_giv_expr (loop, m->match->set_dest,
6858 ext_val, benefit);
6860 /* If consec is nonzero, this is a member of a group of
6861 instructions that were moved together. We handle this
6862 case only to the point of seeking to the last insn and
6863 looking for a REG_EQUAL. Fail if we don't find one. */
6864 if (m->consec != 0)
6866 int i = m->consec;
6867 tem = m->insn;
6870 tem = NEXT_INSN (tem);
6872 while (--i > 0);
6874 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6875 if (tem)
6876 tem = XEXP (tem, 0);
6878 else
6880 tem = single_set (m->insn);
6881 if (tem)
6882 tem = SET_SRC (tem);
6885 if (tem)
6887 /* What we are most interested in is pointer
6888 arithmetic on invariants -- only take
6889 patterns we may be able to do something with. */
6890 if (GET_CODE (tem) == PLUS
6891 || GET_CODE (tem) == MULT
6892 || GET_CODE (tem) == ASHIFT
6893 || GET_CODE (tem) == CONST_INT
6894 || GET_CODE (tem) == SYMBOL_REF)
6896 tem = simplify_giv_expr (loop, tem, ext_val,
6897 benefit);
6898 if (tem)
6899 return tem;
6901 else if (GET_CODE (tem) == CONST
6902 && GET_CODE (XEXP (tem, 0)) == PLUS
6903 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6904 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6906 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6907 ext_val, benefit);
6908 if (tem)
6909 return tem;
6912 break;
6915 break;
6918 /* Fall through to general case. */
6919 default:
6920 /* If invariant, return as USE (unless CONST_INT).
6921 Otherwise, not giv. */
6922 if (GET_CODE (x) == USE)
6923 x = XEXP (x, 0);
6925 if (loop_invariant_p (loop, x) == 1)
6927 if (GET_CODE (x) == CONST_INT)
6928 return x;
6929 if (GET_CODE (x) == CONST
6930 && GET_CODE (XEXP (x, 0)) == PLUS
6931 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6932 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6933 x = XEXP (x, 0);
6934 return gen_rtx_USE (mode, x);
6936 else
6937 return 0;
6941 /* This routine folds invariants such that there is only ever one
6942 CONST_INT in the summation. It is only used by simplify_giv_expr. */
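/* Illustrative example (made-up operands, not from this file):
   sge_plus_constant ((plus (reg 100) (const_int 3)), (const_int 4))
   yields (plus (reg 100) (const_int 7)).  */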
6944 static rtx
6945 sge_plus_constant (rtx x, rtx c)
6947 if (GET_CODE (x) == CONST_INT)
6948 return GEN_INT (INTVAL (x) + INTVAL (c));
6949 else if (GET_CODE (x) != PLUS)
6950 return gen_rtx_PLUS (GET_MODE (x), x, c);
6951 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6953 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6954 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6956 else if (GET_CODE (XEXP (x, 0)) == PLUS
6957 || GET_CODE (XEXP (x, 1)) != PLUS)
6959 return gen_rtx_PLUS (GET_MODE (x),
6960 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6962 else
6964 return gen_rtx_PLUS (GET_MODE (x),
6965 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6969 static rtx
6970 sge_plus (enum machine_mode mode, rtx x, rtx y)
6972 while (GET_CODE (y) == PLUS)
6974 rtx a = XEXP (y, 0);
6975 if (GET_CODE (a) == CONST_INT)
6976 x = sge_plus_constant (x, a);
6977 else
6978 x = gen_rtx_PLUS (mode, x, a);
6979 y = XEXP (y, 1);
6981 if (GET_CODE (y) == CONST_INT)
6982 x = sge_plus_constant (x, y);
6983 else
6984 x = gen_rtx_PLUS (mode, x, y);
6985 return x;
6988 /* Help detect a giv that is calculated by several consecutive insns;
6989 for example,
6990 giv = biv * M
6991 giv = giv + A
6992 The caller has already identified the first insn P as having a giv as dest;
6993 we check that all other insns that set the same register follow
6994 immediately after P, that they alter nothing else,
6995 and that the result of the last is still a giv.
6997 The value is 0 if the reg set in P is not really a giv.
6998 Otherwise, the value is the amount gained by eliminating
6999 all the consecutive insns that compute the value.
7001 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
7002 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
7004 The coefficients of the ultimate giv value are stored in
7005 *MULT_VAL and *ADD_VAL. */
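/* Illustrative example (made-up code, not from this file): for the pair

     giv = i * 4        (the insn P, with benefit FIRST_BENEFIT)
     giv = giv + base   (`base' loop-invariant)

   the final value is i * 4 + base, so on success *MULT_VAL = (const_int 4),
   *ADD_VAL = base, *LAST_CONSEC_INSN is the second insn, and the return
   value is the combined benefit of both insns.  */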
7007 static int
7008 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
7009 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
7010 rtx *ext_val, rtx *last_consec_insn)
7012 struct loop_ivs *ivs = LOOP_IVS (loop);
7013 struct loop_regs *regs = LOOP_REGS (loop);
7014 int count;
7015 enum rtx_code code;
7016 int benefit;
7017 rtx temp;
7018 rtx set;
7020 /* Indicate that this is a giv so that we can update the value produced in
7021 each insn of the multi-insn sequence.
7023 This induction structure will be used only by the call to
7024 general_induction_var below, so we can allocate it on our stack.
7025 If this is a giv, our caller will replace the induct var entry with
7026 a new induction structure. */
7027 struct induction *v;
7029 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7030 return 0;
7032 v = alloca (sizeof (struct induction));
7033 v->src_reg = src_reg;
7034 v->mult_val = *mult_val;
7035 v->add_val = *add_val;
7036 v->benefit = first_benefit;
7037 v->cant_derive = 0;
7038 v->derive_adjustment = 0;
7039 v->ext_dependent = NULL_RTX;
7041 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7042 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7044 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7046 while (count > 0)
7048 p = NEXT_INSN (p);
7049 code = GET_CODE (p);
7051 /* If libcall, skip to end of call sequence. */
7052 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7053 p = XEXP (temp, 0);
7055 if (code == INSN
7056 && (set = single_set (p))
7057 && GET_CODE (SET_DEST (set)) == REG
7058 && SET_DEST (set) == dest_reg
7059 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7060 add_val, mult_val, ext_val, 0,
7061 &benefit, VOIDmode)
7062 /* Giv created by equivalent expression. */
7063 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7064 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7065 add_val, mult_val, ext_val, 0,
7066 &benefit, VOIDmode)))
7067 && src_reg == v->src_reg)
7069 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7070 benefit += libcall_benefit (p);
7072 count--;
7073 v->mult_val = *mult_val;
7074 v->add_val = *add_val;
7075 v->benefit += benefit;
7077 else if (code != NOTE)
7079 /* Allow insns that set something other than this giv to a
7080 constant. Such insns are needed on machines which cannot
7081 include long constants and should not disqualify a giv. */
7082 if (code == INSN
7083 && (set = single_set (p))
7084 && SET_DEST (set) != dest_reg
7085 && CONSTANT_P (SET_SRC (set)))
7086 continue;
7088 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7089 return 0;
7093 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7094 *last_consec_insn = p;
7095 return v->benefit;
7098 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7099 represented by G1. If no such expression can be found, or it is clear that
7100 it cannot possibly be a valid address, 0 is returned.
7102 To perform the computation, we note that
7103 G1 = x * v + a and
7104 G2 = y * v + b
7105 where `v' is the biv.
7107 So G2 = (y/x) * G1 + (b - a*y/x).
7109 Note that MULT = y/x.
7111 Update: A and B are now allowed to be additive expressions such that
7112 B contains all variables in A. That is, computing B-A will not require
7113 subtracting variables. */
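/* Illustrative example (made-up coefficients, not from this file): with
   G1 = 2 * v + 0 and G2 = 6 * v + 8 we get MULT = 6/2 = 3, and the result
   expresses G2 as (plus (mult G1 3) (const_int 8)), i.e. 3 * G1 + 8.  */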
7115 static rtx
7116 express_from_1 (rtx a, rtx b, rtx mult)
7118 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7120 if (mult == const0_rtx)
7121 return b;
7123 /* If MULT is not 1, we cannot handle A with non-constants, since we
7124 would then be required to subtract multiples of the registers in A.
7125 This is theoretically possible, and may even apply to some Fortran
7126 constructs, but it is a lot of work and we do not attempt it here. */
7128 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7129 return NULL_RTX;
7131 /* In general these structures are sorted top to bottom (down the PLUS
7132 chain), but not left to right across the PLUS. If B is a higher
7133 order giv than A, we can strip one level and recurse. If A is higher
7134 order, we'll eventually bail out, but won't know that until the end.
7135 If they are the same, we'll strip one level around this loop. */
7137 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7139 rtx ra, rb, oa, ob, tmp;
7141 ra = XEXP (a, 0), oa = XEXP (a, 1);
7142 if (GET_CODE (ra) == PLUS)
7143 tmp = ra, ra = oa, oa = tmp;
7145 rb = XEXP (b, 0), ob = XEXP (b, 1);
7146 if (GET_CODE (rb) == PLUS)
7147 tmp = rb, rb = ob, ob = tmp;
7149 if (rtx_equal_p (ra, rb))
7150 /* We matched: remove one reg completely. */
7151 a = oa, b = ob;
7152 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7153 /* An alternate match. */
7154 a = oa, b = rb;
7155 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7156 /* An alternate match. */
7157 a = ra, b = ob;
7158 else
7160 /* Indicates an extra register in B. Strip one level from B and
7161 recurse, hoping B was the higher order expression. */
7162 ob = express_from_1 (a, ob, mult);
7163 if (ob == NULL_RTX)
7164 return NULL_RTX;
7165 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7169 /* Here we are at the last level of A, go through the cases hoping to
7170 get rid of everything but a constant. */
7172 if (GET_CODE (a) == PLUS)
7174 rtx ra, oa;
7176 ra = XEXP (a, 0), oa = XEXP (a, 1);
7177 if (rtx_equal_p (oa, b))
7178 oa = ra;
7179 else if (!rtx_equal_p (ra, b))
7180 return NULL_RTX;
7182 if (GET_CODE (oa) != CONST_INT)
7183 return NULL_RTX;
7185 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7187 else if (GET_CODE (a) == CONST_INT)
7189 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7191 else if (CONSTANT_P (a))
7193 enum machine_mode mode_a = GET_MODE (a);
7194 enum machine_mode mode_b = GET_MODE (b);
7195 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7196 return simplify_gen_binary (MINUS, mode, b, a);
7198 else if (GET_CODE (b) == PLUS)
7200 if (rtx_equal_p (a, XEXP (b, 0)))
7201 return XEXP (b, 1);
7202 else if (rtx_equal_p (a, XEXP (b, 1)))
7203 return XEXP (b, 0);
7204 else
7205 return NULL_RTX;
7207 else if (rtx_equal_p (a, b))
7208 return const0_rtx;
7210 return NULL_RTX;
7213 static rtx
7214 express_from (struct induction *g1, struct induction *g2)
7216 rtx mult, add;
7218 /* The value that G1 will be multiplied by must be a constant integer. Also,
7219 the only chance we have of getting a valid address is if a*y/x (see above
7220 for notation) is also an integer. */
7221 if (GET_CODE (g1->mult_val) == CONST_INT
7222 && GET_CODE (g2->mult_val) == CONST_INT)
7224 if (g1->mult_val == const0_rtx
7225 || (g1->mult_val == constm1_rtx
7226 && INTVAL (g2->mult_val)
7227 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
7228 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7229 return NULL_RTX;
7230 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7232 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7233 mult = const1_rtx;
7234 else
7236 /* ??? Find out if the one is a multiple of the other? */
7237 return NULL_RTX;
7240 add = express_from_1 (g1->add_val, g2->add_val, mult);
7241 if (add == NULL_RTX)
7243 /* Failed. If we've got a multiplication factor between G1 and G2,
7244 scale G1's addend and try again. */
7245 if (INTVAL (mult) > 1)
7247 rtx g1_add_val = g1->add_val;
7248 if (GET_CODE (g1_add_val) == MULT
7249 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7251 HOST_WIDE_INT m;
7252 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7253 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7254 XEXP (g1_add_val, 0), GEN_INT (m));
7256 else
7258 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7259 mult);
7262 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7265 if (add == NULL_RTX)
7266 return NULL_RTX;
7268 /* Form simplified final result. */
7269 if (mult == const0_rtx)
7270 return add;
7271 else if (mult == const1_rtx)
7272 mult = g1->dest_reg;
7273 else
7274 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7276 if (add == const0_rtx)
7277 return mult;
7278 else
7280 if (GET_CODE (add) == PLUS
7281 && CONSTANT_P (XEXP (add, 1)))
7283 rtx tem = XEXP (add, 1);
7284 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7285 add = tem;
7288 return gen_rtx_PLUS (g2->mode, mult, add);
7292 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7293 represented by G1. This indicates that G2 should be combined with G1 and
7294 that G2 can use (either directly or via an address expression) a register
7295 used to represent G1. */
7297 static rtx
7298 combine_givs_p (struct induction *g1, struct induction *g2)
7300 rtx comb, ret;
7302 /* With the introduction of ext dependent givs, we must care about modes.
7303 G2 must not use a wider mode than G1. */
7304 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7305 return NULL_RTX;
7307 ret = comb = express_from (g1, g2);
7308 if (comb == NULL_RTX)
7309 return NULL_RTX;
7310 if (g1->mode != g2->mode)
7311 ret = gen_lowpart (g2->mode, comb);
7313 /* If these givs are identical, they can be combined. We use the results
7314 of express_from because the addends are not in a canonical form, so
7315 rtx_equal_p is a weaker test. */
7316 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7317 combination to be the other way round. */
7318 if (comb == g1->dest_reg
7319 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7321 return ret;
7324 /* If G2 can be expressed as a function of G1 and that function is valid
7325 as an address and no more expensive than using a register for G2,
7326 the expression of G2 in terms of G1 can be used. */
7327 if (ret != NULL_RTX
7328 && g2->giv_type == DEST_ADDR
7329 && memory_address_p (GET_MODE (g2->mem), ret))
7330 return ret;
7332 return NULL_RTX;
7335 /* Check each extension dependent giv in this class to see if its
7336 root biv is safe from wrapping in the interior mode, which would
7337 make the giv illegal. */
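/* An illustrative failure (hypothetical numbers): with a biv of mode HImode
starting at 0 and stepping by 1 for 70000 iterations, the 16-bit value wraps
past 65535, so a giv of the form (zero_extend:SI (reg:HI biv)) is not a
linear function of the iteration count and must be marked ignored below.
When the known bounds show that no wrap occurs, the extension dependent giv
survives and is reduced in the wider mode. */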
7339 static void
7340 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
7342 struct loop_info *loop_info = LOOP_INFO (loop);
7343 int ze_ok = 0, se_ok = 0, info_ok = 0;
7344 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7345 HOST_WIDE_INT start_val;
7346 unsigned HOST_WIDE_INT u_end_val = 0;
7347 unsigned HOST_WIDE_INT u_start_val = 0;
7348 rtx incr = pc_rtx;
7349 struct induction *v;
7351 /* Make sure the iteration data is available. We must have
7352 constants in order to be certain of no overflow. */
7353 if (loop_info->n_iterations > 0
7354 && bl->initial_value
7355 && GET_CODE (bl->initial_value) == CONST_INT
7356 && (incr = biv_total_increment (bl))
7357 && GET_CODE (incr) == CONST_INT
7358 /* Make sure the host can represent the arithmetic. */
7359 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7361 unsigned HOST_WIDE_INT abs_incr, total_incr;
7362 HOST_WIDE_INT s_end_val;
7363 int neg_incr;
7365 info_ok = 1;
7366 start_val = INTVAL (bl->initial_value);
7367 u_start_val = start_val;
7369 neg_incr = 0, abs_incr = INTVAL (incr);
7370 if (INTVAL (incr) < 0)
7371 neg_incr = 1, abs_incr = -abs_incr;
7372 total_incr = abs_incr * loop_info->n_iterations;
7374 /* Check for host arithmetic overflow. */
7375 if (total_incr / loop_info->n_iterations == abs_incr)
7377 unsigned HOST_WIDE_INT u_max;
7378 HOST_WIDE_INT s_max;
7380 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7381 s_end_val = u_end_val;
7382 u_max = GET_MODE_MASK (biv_mode);
7383 s_max = u_max >> 1;
7385 /* Check zero extension of biv ok. */
7386 if (start_val >= 0
7387 /* Check for host arithmetic overflow. */
7388 && (neg_incr
7389 ? u_end_val < u_start_val
7390 : u_end_val > u_start_val)
7391 /* Check for target arithmetic overflow. */
7392 && (neg_incr
7393 ? 1 /* taken care of with host overflow */
7394 : u_end_val <= u_max))
7396 ze_ok = 1;
7399 /* Check sign extension of biv ok. */
7400 /* ??? While it is true that overflow with signed and pointer
7401 arithmetic is undefined, I fear too many programmers don't
7402 keep this fact in mind -- myself included on occasion.
7403 So be conservative with the signed overflow optimizations. */
7404 if (start_val >= -s_max - 1
7405 /* Check for host arithmetic overflow. */
7406 && (neg_incr
7407 ? s_end_val < start_val
7408 : s_end_val > start_val)
7409 /* Check for target arithmetic overflow. */
7410 && (neg_incr
7411 ? s_end_val >= -s_max - 1
7412 : s_end_val <= s_max))
7414 se_ok = 1;
7419 /* If we know the BIV is compared at run-time against an
7420 invariant value, and the increment is +/- 1, we may also
7421 be able to prove that the BIV cannot overflow. */
7422 else if (bl->biv->src_reg == loop_info->iteration_var
7423 && loop_info->comparison_value
7424 && loop_invariant_p (loop, loop_info->comparison_value)
7425 && (incr = biv_total_increment (bl))
7426 && GET_CODE (incr) == CONST_INT)
7428 /* If the increment is +1, and the exit test is a <,
7429 the BIV cannot overflow. (For <=, we have the
7430 problematic case that the comparison value might
7431 be the maximum value of the range.) */
7432 if (INTVAL (incr) == 1)
7434 if (loop_info->comparison_code == LT)
7435 se_ok = ze_ok = 1;
7436 else if (loop_info->comparison_code == LTU)
7437 ze_ok = 1;
7440 /* Likewise for increment -1 and exit test >. */
7441 if (INTVAL (incr) == -1)
7443 if (loop_info->comparison_code == GT)
7444 se_ok = ze_ok = 1;
7445 else if (loop_info->comparison_code == GTU)
7446 ze_ok = 1;
7450 /* Invalidate givs that fail the tests. */
7451 for (v = bl->giv; v; v = v->next_iv)
7452 if (v->ext_dependent)
7454 enum rtx_code code = GET_CODE (v->ext_dependent);
7455 int ok = 0;
7457 switch (code)
7459 case SIGN_EXTEND:
7460 ok = se_ok;
7461 break;
7462 case ZERO_EXTEND:
7463 ok = ze_ok;
7464 break;
7466 case TRUNCATE:
7467 /* We don't know whether this value is being used as either
7468 signed or unsigned, so to safely truncate we must satisfy
7469 both. The initial check here verifies the BIV itself;
7470 once that is successful we may check its range wrt the
7471 derived GIV. This works only if we were able to determine
7472 constant start and end values above. */
7473 if (se_ok && ze_ok && info_ok)
7475 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7476 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7478 /* We know from the above that both endpoints are nonnegative,
7479 and that there is no wrapping. Verify that both endpoints
7480 are within the (signed) range of the outer mode. */
7481 if (u_start_val <= max && u_end_val <= max)
7482 ok = 1;
7484 break;
7486 default:
7487 abort ();
7490 if (ok)
7492 if (loop_dump_stream)
7494 fprintf (loop_dump_stream,
7495 "Verified ext dependent giv at %d of reg %d\n",
7496 INSN_UID (v->insn), bl->regno);
7499 else
7501 if (loop_dump_stream)
7503 const char *why;
7505 if (info_ok)
7506 why = "biv iteration values overflowed";
7507 else
7509 if (incr == pc_rtx)
7510 incr = biv_total_increment (bl);
7511 if (incr == const1_rtx)
7512 why = "biv iteration info incomplete; incr by 1";
7513 else
7514 why = "biv iteration info incomplete";
7517 fprintf (loop_dump_stream,
7518 "Failed ext dependent giv at %d, %s\n",
7519 INSN_UID (v->insn), why);
7521 v->ignore = 1;
7522 bl->all_reduced = 0;
7527 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7530 extend_value_for_giv (struct induction *v, rtx value)
7532 rtx ext_dep = v->ext_dependent;
7534 if (! ext_dep)
7535 return value;
7537 /* Recall that check_ext_dependent_givs verified that the known bounds
7538 of a biv did not overflow or wrap with respect to the extension for
7539 the giv. Therefore, constants need no additional adjustment. */
7540 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7541 return value;
7543 /* Otherwise, we must adjust the value to compensate for the
7544 differing modes of the biv and the giv. */
7545 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7548 struct combine_givs_stats
7550 int giv_number;
7551 int total_benefit;
7554 static int
7555 cmp_combine_givs_stats (const void *xp, const void *yp)
7557 const struct combine_givs_stats * const x =
7558 (const struct combine_givs_stats *) xp;
7559 const struct combine_givs_stats * const y =
7560 (const struct combine_givs_stats *) yp;
7561 int d;
7562 d = y->total_benefit - x->total_benefit;
7563 /* Stabilize the sort. */
7564 if (!d)
7565 d = x->giv_number - y->giv_number;
7566 return d;
7569 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7570 any other. If so, point SAME to the giv combined with and set NEW_REG to
7571 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7572 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
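/* The search below is greedy. Roughly, as a sketch rather than the code
itself:

compute can_combine[i][j] = combine_givs_p (giv[i], giv[j]);
score each giv by its own benefit plus that of every giv it could absorb;
repeat:
sort by score and walk the givs from best to worst;
fold every still-free giv that can combine with the current one into it;
subtract the absorbed benefits from the remaining candidates' scores;
until a whole pass makes no new combination.

Restarting after each successful combination keeps the scores of the
remaining candidates honest. */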
7574 static void
7575 combine_givs (struct loop_regs *regs, struct iv_class *bl)
7577 /* Additional benefit to add for being combined multiple times. */
7578 const int extra_benefit = 3;
7580 struct induction *g1, *g2, **giv_array;
7581 int i, j, k, giv_count;
7582 struct combine_givs_stats *stats;
7583 rtx *can_combine;
7585 /* Count givs, because bl->giv_count is incorrect here. */
7586 giv_count = 0;
7587 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7588 if (!g1->ignore)
7589 giv_count++;
7591 giv_array = alloca (giv_count * sizeof (struct induction *));
7592 i = 0;
7593 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7594 if (!g1->ignore)
7595 giv_array[i++] = g1;
7597 stats = xcalloc (giv_count, sizeof (*stats));
7598 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
7600 for (i = 0; i < giv_count; i++)
7602 int this_benefit;
7603 rtx single_use;
7605 g1 = giv_array[i];
7606 stats[i].giv_number = i;
7608 /* If a DEST_REG GIV is used only once, do not allow it to combine
7609 with anything, for in doing so we will gain nothing that cannot
7610 be had by simply letting the GIV with which we would have combined
7611 to be reduced on its own. The losage shows up in particular with
7612 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7613 be seen elsewhere as well. */
7614 if (g1->giv_type == DEST_REG
7615 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7616 && single_use != const0_rtx)
7617 continue;
7619 this_benefit = g1->benefit;
7620 /* Add an additional weight for zero addends. */
7621 if (g1->no_const_addval)
7622 this_benefit += 1;
7624 for (j = 0; j < giv_count; j++)
7626 rtx this_combine;
7628 g2 = giv_array[j];
7629 if (g1 != g2
7630 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7632 can_combine[i * giv_count + j] = this_combine;
7633 this_benefit += g2->benefit + extra_benefit;
7636 stats[i].total_benefit = this_benefit;
7639 /* Iterate, combining until we can't. */
7640 restart:
7641 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7643 if (loop_dump_stream)
7645 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7646 for (k = 0; k < giv_count; k++)
7648 g1 = giv_array[stats[k].giv_number];
7649 if (!g1->combined_with && !g1->same)
7650 fprintf (loop_dump_stream, " {%d, %d}",
7651 INSN_UID (giv_array[stats[k].giv_number]->insn),
7652 stats[k].total_benefit);
7654 putc ('\n', loop_dump_stream);
7657 for (k = 0; k < giv_count; k++)
7659 int g1_add_benefit = 0;
7661 i = stats[k].giv_number;
7662 g1 = giv_array[i];
7664 /* If it has already been combined, skip. */
7665 if (g1->combined_with || g1->same)
7666 continue;
7668 for (j = 0; j < giv_count; j++)
7670 g2 = giv_array[j];
7671 if (g1 != g2 && can_combine[i * giv_count + j]
7672 /* If it has already been combined, skip. */
7673 && ! g2->same && ! g2->combined_with)
7675 int l;
7677 g2->new_reg = can_combine[i * giv_count + j];
7678 g2->same = g1;
7679 /* For a DEST_ADDR giv, we may now replace the register with a memory
7680 expression. This changes the costs considerably, so add the
7681 compensation. */
7682 if (g2->giv_type == DEST_ADDR)
7683 g2->benefit = (g2->benefit + reg_address_cost
7684 - address_cost (g2->new_reg,
7685 GET_MODE (g2->mem)));
7686 g1->combined_with++;
7687 g1->lifetime += g2->lifetime;
7689 g1_add_benefit += g2->benefit;
7691 /* ??? The new final_[bg]iv_value code does a much better job
7692 of finding replaceable giv's, and hence this code may no
7693 longer be necessary. */
7694 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7695 g1_add_benefit -= copy_cost;
7697 /* To help optimize the next set of combinations, remove
7698 this giv from the benefits of other potential mates. */
7699 for (l = 0; l < giv_count; ++l)
7701 int m = stats[l].giv_number;
7702 if (can_combine[m * giv_count + j])
7703 stats[l].total_benefit -= g2->benefit + extra_benefit;
7706 if (loop_dump_stream)
7707 fprintf (loop_dump_stream,
7708 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7709 INSN_UID (g2->insn), INSN_UID (g1->insn),
7710 g1->benefit, g1_add_benefit, g1->lifetime);
7714 /* To help optimize the next set of combinations, remove
7715 this giv from the benefits of other potential mates. */
7716 if (g1->combined_with)
7718 for (j = 0; j < giv_count; ++j)
7720 int m = stats[j].giv_number;
7721 if (can_combine[m * giv_count + i])
7722 stats[j].total_benefit -= g1->benefit + extra_benefit;
7725 g1->benefit += g1_add_benefit;
7727 /* We've finished with this giv, and everything it touched.
7728 Restart the combination so that the weights for the
7729 rest of the givs are properly taken into account. */
7730 /* ??? Ideally we would compact the arrays at this point, so
7731 as to not cover old ground. But sanely compacting
7732 can_combine is tricky. */
7733 goto restart;
7737 /* Clean up. */
7738 free (stats);
7739 free (can_combine);
7742 /* Generate sequence for REG = B * M + A. B is the initial value of
7743 the basic induction variable, M a multiplicative constant, A an
7744 additive constant and REG the destination register. */
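/* For example (hypothetical operands): with B = (reg 100),
M = (const_int 4) and A = (const_int 32), the returned sequence leaves
(reg 100) * 4 + 32 in REG, typically as a shift followed by an add,
courtesy of expand_mult_add. */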
7746 static rtx
7747 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
7749 rtx seq;
7750 rtx result;
7752 start_sequence ();
7753 /* Use unsigned arithmetic. */
7754 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7755 if (reg != result)
7756 emit_move_insn (reg, result);
7757 seq = get_insns ();
7758 end_sequence ();
7760 return seq;
7764 /* Update registers created in insn sequence SEQ. */
7766 static void
7767 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
7769 rtx insn;
7771 /* Update register info for alias analysis. */
7773 insn = seq;
7774 while (insn != NULL_RTX)
7776 rtx set = single_set (insn);
7778 if (set && GET_CODE (SET_DEST (set)) == REG)
7779 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7781 insn = NEXT_INSN (insn);
7786 /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
7787 is the initial value of the basic induction variable, M a
7788 multiplicative constant, A an additive constant and REG the
7789 destination register. */
7791 void
7792 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
7793 rtx reg, basic_block before_bb, rtx before_insn)
7795 rtx seq;
7797 if (! before_insn)
7799 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7800 return;
7803 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7804 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7806 /* Increase the lifetime of any invariants moved further in code. */
7807 update_reg_last_use (a, before_insn);
7808 update_reg_last_use (b, before_insn);
7809 update_reg_last_use (m, before_insn);
7811 /* It is possible that the expansion created lots of new registers.
7812 Iterate over the sequence we just created and record them all. We
7813 must do this before inserting the sequence. */
7814 loop_regs_update (loop, seq);
7816 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7820 /* Emit insns in loop pre-header to set REG = B * M + A. B is the
7821 initial value of the basic induction variable, M a multiplicative
7822 constant, A an additive constant and REG the destination
7823 register. */
7825 void
7826 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7828 rtx seq;
7830 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7831 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7833 /* Increase the lifetime of any invariants moved further in code.
7834 ???? Is this really necessary? */
7835 update_reg_last_use (a, loop->sink);
7836 update_reg_last_use (b, loop->sink);
7837 update_reg_last_use (m, loop->sink);
7839 /* It is possible that the expansion created lots of new registers.
7840 Iterate over the sequence we just created and record them all. We
7841 must do this before inserting the sequence. */
7842 loop_regs_update (loop, seq);
7844 loop_insn_sink (loop, seq);
7848 /* Emit insns after loop to set REG = B * M + A. B is the initial
7849 value of the basic induction variable, M a multiplicative constant,
7850 A an additive constant and REG the destination register. */
7852 void
7853 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7855 rtx seq;
7857 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7858 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7860 /* It is possible that the expansion created lots of new registers.
7861 Iterate over the sequence we just created and record them all. We
7862 must do this before inserting the sequence. */
7863 loop_regs_update (loop, seq);
7865 loop_insn_hoist (loop, seq);
7870 /* Similar to gen_add_mult, but compute cost rather than generating
7871 sequence. */
7873 static int
7874 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
7876 int cost = 0;
7877 rtx last, result;
7879 start_sequence ();
7880 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7881 if (reg != result)
7882 emit_move_insn (reg, result);
7883 last = get_last_insn ();
7884 while (last)
7886 rtx t = single_set (last);
7887 if (t)
7888 cost += rtx_cost (SET_SRC (t), SET);
7889 last = PREV_INSN (last);
7891 end_sequence ();
7892 return cost;
7895 /* Test whether A * B can be computed without
7896 an actual multiply insn. Value is 1 if so.
7898 ??? This function stinks because it generates a ton of wasted RTL
7899 ??? and as a result fragments GC memory to no end. There are other
7900 ??? places in the compiler which are invoked a lot and do the same
7901 ??? thing, generate wasted RTL just to see if something is possible. */
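/* For example (hypothetical target): multiplying a register by 8 usually
expands to a single shift, so the product is considered cheap; multiplying
by a large prime typically needs a real mult insn or a long shift/add
sequence, and the function returns 0. */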
7903 static int
7904 product_cheap_p (rtx a, rtx b)
7906 rtx tmp;
7907 int win, n_insns;
7909 /* If only one is constant, make it B. */
7910 if (GET_CODE (a) == CONST_INT)
7911 tmp = a, a = b, b = tmp;
7913 /* If first constant, both constant, so don't need multiply. */
7914 if (GET_CODE (a) == CONST_INT)
7915 return 1;
7917 /* If second not constant, neither is constant, so would need multiply. */
7918 if (GET_CODE (b) != CONST_INT)
7919 return 0;
7921 /* One operand is constant, so might not need multiply insn. Generate the
7922 code for the multiply and see if a call or multiply, or long sequence
7923 of insns is generated. */
7925 start_sequence ();
7926 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7927 tmp = get_insns ();
7928 end_sequence ();
7930 win = 1;
7931 if (INSN_P (tmp))
7933 n_insns = 0;
7934 while (tmp != NULL_RTX)
7936 rtx next = NEXT_INSN (tmp);
7938 if (++n_insns > 3
7939 || GET_CODE (tmp) != INSN
7940 || (GET_CODE (PATTERN (tmp)) == SET
7941 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7942 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7943 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7944 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7946 win = 0;
7947 break;
7950 tmp = next;
7953 else if (GET_CODE (tmp) == SET
7954 && GET_CODE (SET_SRC (tmp)) == MULT)
7955 win = 0;
7956 else if (GET_CODE (tmp) == PARALLEL
7957 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7958 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7959 win = 0;
7961 return win;
7964 /* Check to see if loop can be terminated by a "decrement and branch until
7965 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7966 Also try reversing an increment loop to a decrement loop
7967 to see if the optimization can be performed.
7968 Value is nonzero if optimization was performed. */
7970 /* This is useful even if the architecture doesn't have such an insn,
7971 because it might change a loop which increments from 0 to n to a loop
7972 which decrements from n to 0. A loop that decrements to zero is usually
7973 faster than one that increments from zero. */
7975 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7976 such as approx_final_value, biv_total_increment, loop_iterations, and
7977 final_[bg]iv_value. */
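/* The transformation aimed for, in source terms (illustrative only):

for (i = 0; i < n; i++) body-not-using-i;

becomes, in effect,

for (i = n; i != 0; i--) body-not-using-i;

so the exit test compares against zero and a decrement-and-branch insn, or
at least a cheaper compare, can be used. */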
7979 static int
7980 check_dbra_loop (struct loop *loop, int insn_count)
7982 struct loop_info *loop_info = LOOP_INFO (loop);
7983 struct loop_regs *regs = LOOP_REGS (loop);
7984 struct loop_ivs *ivs = LOOP_IVS (loop);
7985 struct iv_class *bl;
7986 rtx reg;
7987 enum machine_mode mode;
7988 rtx jump_label;
7989 rtx final_value;
7990 rtx start_value;
7991 rtx new_add_val;
7992 rtx comparison;
7993 rtx before_comparison;
7994 rtx p;
7995 rtx jump;
7996 rtx first_compare;
7997 int compare_and_branch;
7998 rtx loop_start = loop->start;
7999 rtx loop_end = loop->end;
8001 /* If last insn is a conditional branch, and the insn before tests a
8002 register value, try to optimize it. Otherwise, we can't do anything. */
8004 jump = PREV_INSN (loop_end);
8005 comparison = get_condition_for_loop (loop, jump);
8006 if (comparison == 0)
8007 return 0;
8008 if (!onlyjump_p (jump))
8009 return 0;
8011 /* Try to compute whether the compare/branch at the loop end is one or
8012 two instructions. */
8013 get_condition (jump, &first_compare, false);
8014 if (first_compare == jump)
8015 compare_and_branch = 1;
8016 else if (first_compare == prev_nonnote_insn (jump))
8017 compare_and_branch = 2;
8018 else
8019 return 0;
8022 /* If more than one condition is present to control the loop, then
8023 do not proceed, as this function does not know how to rewrite
8024 loop tests with more than one condition.
8026 Look backwards from the first insn in the last comparison
8027 sequence and see if we've got another comparison sequence. */
8029 rtx jump1;
8030 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8031 if (GET_CODE (jump1) == JUMP_INSN)
8032 return 0;
8035 /* Check all of the bivs to see if the compare uses one of them.
8036 Skip biv's set more than once because we can't guarantee that
8037 it will be zero on the last iteration. Also skip if the biv is
8038 used between its update and the test insn. */
8040 for (bl = ivs->list; bl; bl = bl->next)
8042 if (bl->biv_count == 1
8043 && ! bl->biv->maybe_multiple
8044 && bl->biv->dest_reg == XEXP (comparison, 0)
8045 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8046 first_compare))
8047 break;
8050 /* Try swapping the comparison to identify a suitable biv. */
8051 if (!bl)
8052 for (bl = ivs->list; bl; bl = bl->next)
8053 if (bl->biv_count == 1
8054 && ! bl->biv->maybe_multiple
8055 && bl->biv->dest_reg == XEXP (comparison, 1)
8056 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8057 first_compare))
8059 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
8060 VOIDmode,
8061 XEXP (comparison, 1),
8062 XEXP (comparison, 0));
8063 break;
8066 if (! bl)
8067 return 0;
8069 /* Look for the case where the basic induction variable is always
8070 nonnegative, and equals zero on the last iteration.
8071 In this case, add a reg_note REG_NONNEG, which allows the
8072 m68k DBRA instruction to be used. */
8074 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
8075 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8076 && GET_CODE (bl->biv->add_val) == CONST_INT
8077 && INTVAL (bl->biv->add_val) < 0)
8079 /* The initial value must be greater than 0, and
8080 init_val % -dec_value == 0 must hold, to ensure that the biv
8081 equals zero on the last iteration. */
8083 if (GET_CODE (bl->initial_value) == CONST_INT
8084 && INTVAL (bl->initial_value) > 0
8085 && (INTVAL (bl->initial_value)
8086 % (-INTVAL (bl->biv->add_val))) == 0)
8088 /* Register always nonnegative, add REG_NOTE to branch. */
8089 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8090 REG_NOTES (jump)
8091 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8092 REG_NOTES (jump));
8093 bl->nonneg = 1;
8095 return 1;
8098 /* If the decrement is 1 and the value was tested as >= 0 before
8099 the loop, then we can safely optimize. */
8100 for (p = loop_start; p; p = PREV_INSN (p))
8102 if (GET_CODE (p) == CODE_LABEL)
8103 break;
8104 if (GET_CODE (p) != JUMP_INSN)
8105 continue;
8107 before_comparison = get_condition_for_loop (loop, p);
8108 if (before_comparison
8109 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8110 && (GET_CODE (before_comparison) == LT
8111 || GET_CODE (before_comparison) == LTU)
8112 && XEXP (before_comparison, 1) == const0_rtx
8113 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8114 && INTVAL (bl->biv->add_val) == -1)
8116 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8117 REG_NOTES (jump)
8118 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8119 REG_NOTES (jump));
8120 bl->nonneg = 1;
8122 return 1;
8126 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8127 && INTVAL (bl->biv->add_val) > 0)
8129 /* Try to change inc to dec, so can apply above optimization. */
8130 /* Can do this if:
8131 all registers modified are induction variables or invariant,
8132 all memory references have non-overlapping addresses
8133 (obviously true if only one write)
8134 allow 2 insns for the compare/jump at the end of the loop. */
8135 /* Also, we must avoid any instructions which use both the reversed
8136 biv and another biv. Such instructions will fail if the loop is
8137 reversed. We meet this condition by requiring that either
8138 no_use_except_counting is true, or else that there is only
8139 one biv. */
8140 int num_nonfixed_reads = 0;
8141 /* 1 if the iteration var is used only to count iterations. */
8142 int no_use_except_counting = 0;
8143 /* 1 if the loop has no memory store, or it has a single memory store
8144 which is reversible. */
8145 int reversible_mem_store = 1;
8147 if (bl->giv_count == 0
8148 && !loop->exit_count
8149 && !loop_info->has_multiple_exit_targets)
8151 rtx bivreg = regno_reg_rtx[bl->regno];
8152 struct iv_class *blt;
8154 /* If there are no givs for this biv, and the only exit is the
8155 fall through at the end of the loop, then
8156 see if perhaps there are no uses except to count. */
8157 no_use_except_counting = 1;
8158 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8159 if (INSN_P (p))
8161 rtx set = single_set (p);
8163 if (set && GET_CODE (SET_DEST (set)) == REG
8164 && REGNO (SET_DEST (set)) == bl->regno)
8165 /* An insn that sets the biv is okay. */
8167 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
8168 /* An insn that doesn't mention the biv is okay. */
8170 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8171 || p == prev_nonnote_insn (loop_end))
8173 /* If either of these insns uses the biv and sets a pseudo
8174 that has more than one usage, then the biv has uses
8175 other than counting since it's used to derive a value
8176 that is used more than one time. */
8177 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8178 regs);
8179 if (regs->multiple_uses)
8181 no_use_except_counting = 0;
8182 break;
8185 else
8187 no_use_except_counting = 0;
8188 break;
8192 /* A biv has uses besides counting if it is used to set
8193 another biv. */
8194 for (blt = ivs->list; blt; blt = blt->next)
8195 if (blt->init_set
8196 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8198 no_use_except_counting = 0;
8199 break;
8203 if (no_use_except_counting)
8204 /* No need to worry about MEMs. */
8206 else if (loop_info->num_mem_sets <= 1)
8208 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8209 if (INSN_P (p))
8210 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8212 /* If the loop has a single store, and the destination address is
8213 invariant, then we can't reverse the loop, because this address
8214 might then have the wrong value at loop exit.
8215 This would work if the source was invariant also, however, in that
8216 case, the insn should have been moved out of the loop. */
8218 if (loop_info->num_mem_sets == 1)
8220 struct induction *v;
8222 /* If we could prove that each of the memory locations
8223 written to was different, then we could reverse the
8224 store -- but we don't presently have any way of
8225 knowing that. */
8226 reversible_mem_store = 0;
8228 /* If the store depends on a register that is set after the
8229 store, it depends on the initial value, and is thus not
8230 reversible. */
8231 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8233 if (v->giv_type == DEST_REG
8234 && reg_mentioned_p (v->dest_reg,
8235 PATTERN (loop_info->first_loop_store_insn))
8236 && loop_insn_first_p (loop_info->first_loop_store_insn,
8237 v->insn))
8238 reversible_mem_store = 0;
8242 else
8243 return 0;
8245 /* This code only acts for innermost loops. Also it simplifies
8246 the memory address check by only reversing loops with
8247 zero or one memory access.
8248 Two memory accesses could involve parts of the same array,
8249 and that can't be reversed.
8250 If the biv is used only for counting, then we don't need to worry
8251 about all these things. */
8253 if ((num_nonfixed_reads <= 1
8254 && ! loop_info->has_nonconst_call
8255 && ! loop_info->has_prefetch
8256 && ! loop_info->has_volatile
8257 && reversible_mem_store
8258 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8259 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8260 && (bl == ivs->list && bl->next == 0))
8261 || (no_use_except_counting && ! loop_info->has_prefetch))
8263 rtx tem;
8265 /* Loop can be reversed. */
8266 if (loop_dump_stream)
8267 fprintf (loop_dump_stream, "Can reverse loop\n");
8269 /* Now check other conditions:
8271 The increment must be a constant, as must the initial value,
8272 and the comparison code must be LT.
8274 This test can probably be improved since +/- 1 in the constant
8275 can be obtained by changing LT to LE and vice versa; this is
8276 confusing. */
8278 if (comparison
8279 /* for constants, LE gets turned into LT */
8280 && (GET_CODE (comparison) == LT
8281 || (GET_CODE (comparison) == LE
8282 && no_use_except_counting)
8283 || GET_CODE (comparison) == LTU))
8285 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8286 rtx initial_value, comparison_value;
8287 int nonneg = 0;
8288 enum rtx_code cmp_code;
8289 int comparison_const_width;
8290 unsigned HOST_WIDE_INT comparison_sign_mask;
8292 add_val = INTVAL (bl->biv->add_val);
8293 comparison_value = XEXP (comparison, 1);
8294 if (GET_MODE (comparison_value) == VOIDmode)
8295 comparison_const_width
8296 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8297 else
8298 comparison_const_width
8299 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8300 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8301 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8302 comparison_sign_mask
8303 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8305 /* If the comparison value is not a loop invariant, then we
8306 can not reverse this loop.
8308 ??? If the insns which initialize the comparison value as
8309 a whole compute an invariant result, then we could move
8310 them out of the loop and proceed with loop reversal. */
8311 if (! loop_invariant_p (loop, comparison_value))
8312 return 0;
8314 if (GET_CODE (comparison_value) == CONST_INT)
8315 comparison_val = INTVAL (comparison_value);
8316 initial_value = bl->initial_value;
8318 /* Normalize the initial value if it is an integer and
8319 has no other use except as a counter. This will allow
8320 a few more loops to be reversed. */
8321 if (no_use_except_counting
8322 && GET_CODE (comparison_value) == CONST_INT
8323 && GET_CODE (initial_value) == CONST_INT)
8325 comparison_val = comparison_val - INTVAL (bl->initial_value);
8326 /* The code below requires comparison_val to be a multiple
8327 of add_val in order to do the loop reversal, so
8328 round up comparison_val to a multiple of add_val.
8329 Since comparison_value is constant, we know that the
8330 current comparison code is LT. */
8331 comparison_val = comparison_val + add_val - 1;
8332 comparison_val
8333 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8334 /* We postpone overflow checks for COMPARISON_VAL here;
8335 even if there is an overflow, we might still be able to
8336 reverse the loop, if converting the loop exit test to
8337 NE is possible. */
8338 initial_value = const0_rtx;
8341 /* First check if we can do a vanilla loop reversal. */
8342 if (initial_value == const0_rtx
8343 /* If we have a decrement_and_branch_on_count,
8344 prefer the NE test, since this will allow that
8345 instruction to be generated. Note that we must
8346 use a vanilla loop reversal if the biv is used to
8347 calculate a giv or has a non-counting use. */
8348 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8349 && defined (HAVE_decrement_and_branch_on_count)
8350 && (! (add_val == 1 && loop->vtop
8351 && (bl->biv_count == 0
8352 || no_use_except_counting)))
8353 #endif
8354 && GET_CODE (comparison_value) == CONST_INT
8355 /* Now do postponed overflow checks on COMPARISON_VAL. */
8356 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8357 & comparison_sign_mask))
8359 /* Register will always be nonnegative, with value
8360 0 on last iteration */
8361 add_adjust = add_val;
8362 nonneg = 1;
8363 cmp_code = GE;
8365 else if (add_val == 1 && loop->vtop
8366 && (bl->biv_count == 0
8367 || no_use_except_counting))
8369 add_adjust = 0;
8370 cmp_code = NE;
8372 else
8373 return 0;
8375 if (GET_CODE (comparison) == LE)
8376 add_adjust -= add_val;
8378 /* If the initial value is not zero, or if the comparison
8379 value is not an exact multiple of the increment, then we
8380 can not reverse this loop. */
8381 if (initial_value == const0_rtx
8382 && GET_CODE (comparison_value) == CONST_INT)
8384 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8385 return 0;
8387 else
8389 if (! no_use_except_counting || add_val != 1)
8390 return 0;
8393 final_value = comparison_value;
8395 /* Reset these in case we normalized the initial value
8396 and comparison value above. */
8397 if (GET_CODE (comparison_value) == CONST_INT
8398 && GET_CODE (initial_value) == CONST_INT)
8400 comparison_value = GEN_INT (comparison_val);
8401 final_value
8402 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8404 bl->initial_value = initial_value;
8406 /* Save some info needed to produce the new insns. */
8407 reg = bl->biv->dest_reg;
8408 mode = GET_MODE (reg);
8409 jump_label = condjump_label (PREV_INSN (loop_end));
8410 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8412 /* Set start_value; if this is not a CONST_INT, we need
8413 to generate a SUB.
8414 Initialize biv to start_value before loop start.
8415 The old initializing insn will be deleted as a
8416 dead store by flow.c. */
8417 if (initial_value == const0_rtx
8418 && GET_CODE (comparison_value) == CONST_INT)
8420 start_value
8421 = gen_int_mode (comparison_val - add_adjust, mode);
8422 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8424 else if (GET_CODE (initial_value) == CONST_INT)
8426 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8427 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8429 if (add_insn == 0)
8430 return 0;
8432 start_value
8433 = gen_rtx_PLUS (mode, comparison_value, offset);
8434 loop_insn_hoist (loop, add_insn);
8435 if (GET_CODE (comparison) == LE)
8436 final_value = gen_rtx_PLUS (mode, comparison_value,
8437 GEN_INT (add_val));
8439 else if (! add_adjust)
8441 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8442 initial_value);
8444 if (sub_insn == 0)
8445 return 0;
8446 start_value
8447 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8448 loop_insn_hoist (loop, sub_insn);
8450 else
8451 /* We could handle the other cases too, but it'll be
8452 better to have a testcase first. */
8453 return 0;
8455 /* We may not have a single insn which can increment a reg, so
8456 create a sequence to hold all the insns from expand_inc. */
8457 start_sequence ();
8458 expand_inc (reg, new_add_val);
8459 tem = get_insns ();
8460 end_sequence ();
8462 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8463 delete_insn (bl->biv->insn);
8465 /* Update biv info to reflect its new status. */
8466 bl->biv->insn = p;
8467 bl->initial_value = start_value;
8468 bl->biv->add_val = new_add_val;
8470 /* Update loop info. */
8471 loop_info->initial_value = reg;
8472 loop_info->initial_equiv_value = reg;
8473 loop_info->final_value = const0_rtx;
8474 loop_info->final_equiv_value = const0_rtx;
8475 loop_info->comparison_value = const0_rtx;
8476 loop_info->comparison_code = cmp_code;
8477 loop_info->increment = new_add_val;
8479 /* Inc LABEL_NUSES so that delete_insn will
8480 not delete the label. */
8481 LABEL_NUSES (XEXP (jump_label, 0))++;
8483 /* Emit an insn after the end of the loop to set the biv's
8484 proper exit value if it is used anywhere outside the loop. */
8485 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8486 || ! bl->init_insn
8487 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8488 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8490 /* Delete compare/branch at end of loop. */
8491 delete_related_insns (PREV_INSN (loop_end));
8492 if (compare_and_branch == 2)
8493 delete_related_insns (first_compare);
8495 /* Add new compare/branch insn at end of loop. */
8496 start_sequence ();
8497 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8498 mode, 0,
8499 XEXP (jump_label, 0));
8500 tem = get_insns ();
8501 end_sequence ();
8502 emit_jump_insn_before (tem, loop_end);
8504 for (tem = PREV_INSN (loop_end);
8505 tem && GET_CODE (tem) != JUMP_INSN;
8506 tem = PREV_INSN (tem))
8509 if (tem)
8510 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8512 if (nonneg)
8514 if (tem)
8516 /* Increment of LABEL_NUSES done above. */
8517 /* Register is now always nonnegative,
8518 so add REG_NONNEG note to the branch. */
8519 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8520 REG_NOTES (tem));
8522 bl->nonneg = 1;
8525 /* No insn may reference both the reversed and another biv or it
8526 will fail (see comment near the top of the loop reversal
8527 code).
8528 Earlier on, we have verified that the biv has no use except
8529 counting, or it is the only biv in this function.
8530 However, the code that computes no_use_except_counting does
8531 not verify reg notes. It's possible to have an insn that
8532 references another biv, and has a REG_EQUAL note with an
8533 expression based on the reversed biv. To avoid this case,
8534 remove all REG_EQUAL notes based on the reversed biv
8535 here. */
8536 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8537 if (INSN_P (p))
8539 rtx *pnote;
8540 rtx set = single_set (p);
8541 /* If this is a set of a GIV based on the reversed biv, any
8542 REG_EQUAL notes should still be correct. */
8543 if (! set
8544 || GET_CODE (SET_DEST (set)) != REG
8545 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8546 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8547 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8548 for (pnote = &REG_NOTES (p); *pnote;)
8550 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8551 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8552 XEXP (*pnote, 0)))
8553 *pnote = XEXP (*pnote, 1);
8554 else
8555 pnote = &XEXP (*pnote, 1);
8559 /* Mark that this biv has been reversed. Each giv which depends
8560 on this biv, and which is also live past the end of the loop
8561 will have to be fixed up. */
8563 bl->reversed = 1;
8565 if (loop_dump_stream)
8567 fprintf (loop_dump_stream, "Reversed loop");
8568 if (bl->nonneg)
8569 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8570 else
8571 fprintf (loop_dump_stream, "\n");
8574 return 1;
8579 return 0;
8582 /* Verify whether the biv BL appears to be eliminable,
8583 based on the insns in the loop that refer to it.
8585 If ELIMINATE_P is nonzero, actually do the elimination.
8587 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8588 determine whether invariant insns should be placed inside or at the
8589 start of the loop. */
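/* A typical eliminable case (hypothetical): a biv I whose only remaining
uses are comparisons such as (lt (reg I) (const_int N)), while a pointer
giv P = BASE + 4*I is being strength reduced. The comparison can then be
rewritten as P against BASE + 4*N, and I itself need never be updated
inside the loop. */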
8591 static int
8592 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
8593 int eliminate_p, int threshold, int insn_count)
8595 struct loop_ivs *ivs = LOOP_IVS (loop);
8596 rtx reg = bl->biv->dest_reg;
8597 rtx p;
8599 /* Scan all insns in the loop, stopping if we find one that uses the
8600 biv in a way that we cannot eliminate. */
8602 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8604 enum rtx_code code = GET_CODE (p);
8605 basic_block where_bb = 0;
8606 rtx where_insn = threshold >= insn_count ? 0 : p;
8607 rtx note;
8609 /* If this is a libcall that sets a giv, skip ahead to its end. */
8610 if (GET_RTX_CLASS (code) == 'i')
8612 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8614 if (note)
8616 rtx last = XEXP (note, 0);
8617 rtx set = single_set (last);
8619 if (set && GET_CODE (SET_DEST (set)) == REG)
8621 unsigned int regno = REGNO (SET_DEST (set));
8623 if (regno < ivs->n_regs
8624 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8625 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8626 p = last;
8631 /* Closely examine the insn if the biv is mentioned. */
8632 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8633 && reg_mentioned_p (reg, PATTERN (p))
8634 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8635 eliminate_p, where_bb, where_insn))
8637 if (loop_dump_stream)
8638 fprintf (loop_dump_stream,
8639 "Cannot eliminate biv %d: biv used in insn %d.\n",
8640 bl->regno, INSN_UID (p));
8641 break;
8644 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
8645 if (eliminate_p
8646 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
8647 && reg_mentioned_p (reg, XEXP (note, 0)))
8648 remove_note (p, note);
8651 if (p == loop->end)
8653 if (loop_dump_stream)
8654 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8655 bl->regno, eliminate_p ? "was" : "can be");
8656 return 1;
8659 return 0;
8662 /* INSN and REFERENCE are instructions in the same insn chain.
8663 Return nonzero if INSN is first. */
8666 loop_insn_first_p (rtx insn, rtx reference)
8668 rtx p, q;
8670 for (p = insn, q = reference;;)
8672 /* Start with test for not first so that INSN == REFERENCE yields not
8673 first. */
8674 if (q == insn || ! p)
8675 return 0;
8676 if (p == reference || ! q)
8677 return 1;
8679 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8680 previous insn, hence the <= comparison below does not work if
8681 P is a note. */
8682 if (INSN_UID (p) < max_uid_for_loop
8683 && INSN_UID (q) < max_uid_for_loop
8684 && GET_CODE (p) != NOTE)
8685 return INSN_LUID (p) <= INSN_LUID (q);
8687 if (INSN_UID (p) >= max_uid_for_loop
8688 || GET_CODE (p) == NOTE)
8689 p = NEXT_INSN (p);
8690 if (INSN_UID (q) >= max_uid_for_loop)
8691 q = NEXT_INSN (q);
8695 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
8696 the offset that we have to take into account due to auto-increment /
8697 giv derivation is zero. */
8698 static int
8699 biv_elimination_giv_has_0_offset (struct induction *biv,
8700 struct induction *giv, rtx insn)
8702 /* If the giv V had the auto-inc address optimization applied
8703 to it, and INSN occurs between the giv insn and the biv
8704 insn, then we'd have to adjust the value used here.
8705 This is rare, so we don't bother to make this possible. */
8706 if (giv->auto_inc_opt
8707 && ((loop_insn_first_p (giv->insn, insn)
8708 && loop_insn_first_p (insn, biv->insn))
8709 || (loop_insn_first_p (biv->insn, insn)
8710 && loop_insn_first_p (insn, giv->insn))))
8711 return 0;
8713 return 1;
8716 /* If BL appears in X (part of the pattern of INSN), see if we can
8717 eliminate its use. If so, return 1. If not, return 0.
8719 If BIV does not appear in X, return 1.
8721 If ELIMINATE_P is nonzero, actually do the elimination.
8722 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8723 Depending on how many items have been moved out of the loop, it
8724 will either be before INSN (when WHERE_INSN is nonzero) or at the
8725 start of the loop (when WHERE_INSN is zero). */
8727 static int
8728 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
8729 struct iv_class *bl, int eliminate_p,
8730 basic_block where_bb, rtx where_insn)
8732 enum rtx_code code = GET_CODE (x);
8733 rtx reg = bl->biv->dest_reg;
8734 enum machine_mode mode = GET_MODE (reg);
8735 struct induction *v;
8736 rtx arg, tem;
8737 #ifdef HAVE_cc0
8738 rtx new;
8739 #endif
8740 int arg_operand;
8741 const char *fmt;
8742 int i, j;
8744 switch (code)
8746 case REG:
8747 /* If we haven't already been able to do something with this BIV,
8748 we can't eliminate it. */
8749 if (x == reg)
8750 return 0;
8751 return 1;
8753 case SET:
8754 /* If this sets the BIV, it is not a problem. */
8755 if (SET_DEST (x) == reg)
8756 return 1;
8758 /* If this is an insn that defines a giv, it is also ok because
8759 it will go away when the giv is reduced. */
8760 for (v = bl->giv; v; v = v->next_iv)
8761 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8762 return 1;
8764 #ifdef HAVE_cc0
8765 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8767 /* Can replace with any giv that was reduced and
8768 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8769 Require a constant for MULT_VAL, so we know it's nonzero.
8770 ??? We disable this optimization to avoid potential
8771 overflows. */
8773 for (v = bl->giv; v; v = v->next_iv)
8774 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8775 && v->add_val == const0_rtx
8776 && ! v->ignore && ! v->maybe_dead && v->always_computable
8777 && v->mode == mode
8778 && 0)
8780 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8781 continue;
8783 if (! eliminate_p)
8784 return 1;
8786 /* If the giv has the opposite direction of change,
8787 then reverse the comparison. */
8788 if (INTVAL (v->mult_val) < 0)
8789 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8790 const0_rtx, v->new_reg);
8791 else
8792 new = v->new_reg;
8794 /* We can probably test that giv's reduced reg. */
8795 if (validate_change (insn, &SET_SRC (x), new, 0))
8796 return 1;
8799 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8800 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8801 Require a constant for MULT_VAL, so we know it's nonzero.
8802 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8803 overflow problem. */
8805 for (v = bl->giv; v; v = v->next_iv)
8806 if (GET_CODE (v->mult_val) == CONST_INT
8807 && v->mult_val != const0_rtx
8808 && ! v->ignore && ! v->maybe_dead && v->always_computable
8809 && v->mode == mode
8810 && (GET_CODE (v->add_val) == SYMBOL_REF
8811 || GET_CODE (v->add_val) == LABEL_REF
8812 || GET_CODE (v->add_val) == CONST
8813 || (GET_CODE (v->add_val) == REG
8814 && REG_POINTER (v->add_val))))
8816 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8817 continue;
8819 if (! eliminate_p)
8820 return 1;
8822 /* If the giv has the opposite direction of change,
8823 then reverse the comparison. */
8824 if (INTVAL (v->mult_val) < 0)
8825 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8826 v->new_reg);
8827 else
8828 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8829 copy_rtx (v->add_val));
8831 /* Replace biv with the giv's reduced register. */
8832 update_reg_last_use (v->add_val, insn);
8833 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8834 return 1;
8836 /* Insn doesn't support that constant or invariant. Copy it
8837 into a register (it will be a loop invariant.) */
8838 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8840 loop_insn_emit_before (loop, 0, where_insn,
8841 gen_move_insn (tem,
8842 copy_rtx (v->add_val)));
8844 /* Substitute the new register for its invariant value in
8845 the compare expression. */
8846 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8847 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8848 return 1;
8851 #endif
8852 break;
8854 case COMPARE:
8855 case EQ: case NE:
8856 case GT: case GE: case GTU: case GEU:
8857 case LT: case LE: case LTU: case LEU:
8858 /* See if either argument is the biv. */
8859 if (XEXP (x, 0) == reg)
8860 arg = XEXP (x, 1), arg_operand = 1;
8861 else if (XEXP (x, 1) == reg)
8862 arg = XEXP (x, 0), arg_operand = 0;
8863 else
8864 break;
8866 if (CONSTANT_P (arg))
8868 /* First try to replace with any giv that has constant positive
8869 mult_val and constant add_val. We might be able to support
8870 negative mult_val, but it seems complex to do it in general. */
8872 for (v = bl->giv; v; v = v->next_iv)
8873 if (GET_CODE (v->mult_val) == CONST_INT
8874 && INTVAL (v->mult_val) > 0
8875 && (GET_CODE (v->add_val) == SYMBOL_REF
8876 || GET_CODE (v->add_val) == LABEL_REF
8877 || GET_CODE (v->add_val) == CONST
8878 || (GET_CODE (v->add_val) == REG
8879 && REG_POINTER (v->add_val)))
8880 && ! v->ignore && ! v->maybe_dead && v->always_computable
8881 && v->mode == mode)
8883 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8884 continue;
8886 /* Don't eliminate if the linear combination that makes up
8887 the giv overflows when it is applied to ARG. */
8888 if (GET_CODE (arg) == CONST_INT)
8890 rtx add_val;
8892 if (GET_CODE (v->add_val) == CONST_INT)
8893 add_val = v->add_val;
8894 else
8895 add_val = const0_rtx;
8897 if (const_mult_add_overflow_p (arg, v->mult_val,
8898 add_val, mode, 1))
8899 continue;
8902 if (! eliminate_p)
8903 return 1;
8905 /* Replace biv with the giv's reduced reg. */
8906 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8908 /* If all constants are actually constant integers and
8909 the derived constant can be directly placed in the COMPARE,
8910 do so. */
8911 if (GET_CODE (arg) == CONST_INT
8912 && GET_CODE (v->add_val) == CONST_INT)
8914 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8915 v->add_val, mode, 1);
8917 else
8919 /* Otherwise, load it into a register. */
8920 tem = gen_reg_rtx (mode);
8921 loop_iv_add_mult_emit_before (loop, arg,
8922 v->mult_val, v->add_val,
8923 tem, where_bb, where_insn);
8926 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8928 if (apply_change_group ())
8929 return 1;
8932 /* Look for giv with positive constant mult_val and nonconst add_val.
8933 Insert insns to calculate new compare value.
8934 ??? Turn this off due to possible overflow. */
8936 for (v = bl->giv; v; v = v->next_iv)
8937 if (GET_CODE (v->mult_val) == CONST_INT
8938 && INTVAL (v->mult_val) > 0
8939 && ! v->ignore && ! v->maybe_dead && v->always_computable
8940 && v->mode == mode
8941 && 0)
8943 rtx tem;
8945 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8946 continue;
8948 if (! eliminate_p)
8949 return 1;
8951 tem = gen_reg_rtx (mode);
8953 /* Replace biv with giv's reduced register. */
8954 validate_change (insn, &XEXP (x, 1 - arg_operand),
8955 v->new_reg, 1);
8957 /* Compute value to compare against. */
8958 loop_iv_add_mult_emit_before (loop, arg,
8959 v->mult_val, v->add_val,
8960 tem, where_bb, where_insn);
8961 /* Use it in this insn. */
8962 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8963 if (apply_change_group ())
8964 return 1;
8967 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8969 if (loop_invariant_p (loop, arg) == 1)
8971 /* Look for giv with constant positive mult_val and nonconst
8972 add_val. Insert insns to compute new compare value.
8973 ??? Turn this off due to possible overflow. */
8975 for (v = bl->giv; v; v = v->next_iv)
8976 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8977 && ! v->ignore && ! v->maybe_dead && v->always_computable
8978 && v->mode == mode
8979 && 0)
8981 rtx tem;
8983 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8984 continue;
8986 if (! eliminate_p)
8987 return 1;
8989 tem = gen_reg_rtx (mode);
8991 /* Replace biv with giv's reduced register. */
8992 validate_change (insn, &XEXP (x, 1 - arg_operand),
8993 v->new_reg, 1);
8995 /* Compute value to compare against. */
8996 loop_iv_add_mult_emit_before (loop, arg,
8997 v->mult_val, v->add_val,
8998 tem, where_bb, where_insn);
8999 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
9000 if (apply_change_group ())
9001 return 1;
9005 /* This code has problems. Basically, when deciding whether
9006 we will eliminate BL, you can't know whether a particular giv
9007 of ARG will be reduced. If it isn't going to be reduced,
9008 we can't eliminate BL. We can try forcing it to be reduced,
9009 but that can generate poor code.
9011 The problem is that the benefit of reducing TV, below, should
9012 be increased if BL can actually be eliminated, but this means
9013 we might have to do a topological sort of the order in which
9014 we try to process bivs. It doesn't seem worthwhile to do
9015 this sort of thing now. */
9017 #if 0
9018 /* Otherwise the reg compared with had better be a biv. */
9019 if (GET_CODE (arg) != REG
9020 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
9021 return 0;
9023 /* Look for a pair of givs, one for each biv,
9024 with identical coefficients. */
9025 for (v = bl->giv; v; v = v->next_iv)
9027 struct induction *tv;
9029 if (v->ignore || v->maybe_dead || v->mode != mode)
9030 continue;
9032 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9033 tv = tv->next_iv)
9034 if (! tv->ignore && ! tv->maybe_dead
9035 && rtx_equal_p (tv->mult_val, v->mult_val)
9036 && rtx_equal_p (tv->add_val, v->add_val)
9037 && tv->mode == mode)
9039 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9040 continue;
9042 if (! eliminate_p)
9043 return 1;
9045 /* Replace biv with its giv's reduced reg. */
9046 XEXP (x, 1 - arg_operand) = v->new_reg;
9047 /* Replace other operand with the other giv's
9048 reduced reg. */
9049 XEXP (x, arg_operand) = tv->new_reg;
9050 return 1;
9053 #endif
9056 /* If we get here, the biv can't be eliminated. */
9057 return 0;
9059 case MEM:
9060 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9061 biv is used in it, since it will be replaced. */
9062 for (v = bl->giv; v; v = v->next_iv)
9063 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9064 return 1;
9065 break;
9067 default:
9068 break;
9071 /* See if any subexpression fails elimination. */
9072 fmt = GET_RTX_FORMAT (code);
9073 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9075 switch (fmt[i])
9077 case 'e':
9078 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9079 eliminate_p, where_bb, where_insn))
9080 return 0;
9081 break;
9083 case 'E':
9084 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9085 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9086 eliminate_p, where_bb, where_insn))
9087 return 0;
9088 break;
9092 return 1;
9095 /* Return nonzero if the last use of REG
9096 is in an insn following INSN in the same basic block. */
9098 static int
9099 last_use_this_basic_block (rtx reg, rtx insn)
9101 rtx n;
9102 for (n = insn;
9103 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9104 n = NEXT_INSN (n))
9106 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9107 return 1;
9109 return 0;
9112 /* Called via `note_stores' to record the initial value of a biv. Here we
9113 just record the location of the set and process it later. */
9115 static void
9116 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
9118 struct loop_ivs *ivs = (struct loop_ivs *) data;
9119 struct iv_class *bl;
9121 if (GET_CODE (dest) != REG
9122 || REGNO (dest) >= ivs->n_regs
9123 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9124 return;
9126 bl = REG_IV_CLASS (ivs, REGNO (dest));
9128 /* If this is the first set found, record it. */
9129 if (bl->init_insn == 0)
9131 bl->init_insn = note_insn;
9132 bl->init_set = set;
9136 /* If any of the registers in X are "old" and currently have a last use earlier
9137 than INSN, update them to have a last use of INSN. Their actual last use
9138 will be the previous insn but it will not have a valid uid_luid so we can't
9139 use it. X must be a source expression only. */
9141 static void
9142 update_reg_last_use (rtx x, rtx insn)
9144 /* Check for the case where INSN does not have a valid luid. In this case,
9145 there is no need to modify the regno_last_uid, as this can only happen
9146 when code is inserted after the loop_end to set a pseudo's final value,
9147 and hence this insn will never be the last use of x.
9148 ???? This comment is not correct. See for example loop_givs_reduce.
9149 This may insert an insn before another new insn. */
9150 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9151 && INSN_UID (insn) < max_uid_for_loop
9152 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9154 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9156 else
9158 int i, j;
9159 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9160 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9162 if (fmt[i] == 'e')
9163 update_reg_last_use (XEXP (x, i), insn);
9164 else if (fmt[i] == 'E')
9165 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9166 update_reg_last_use (XVECEXP (x, i, j), insn);
9171 /* Given an insn INSN and condition COND, return the condition in a
9172 canonical form to simplify testing by callers. Specifically:
9174 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9175 (2) Both operands will be machine operands; (cc0) will have been replaced.
9176 (3) If an operand is a constant, it will be the second operand.
9177 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9178 for GE, GEU, and LEU.
9180 If the condition cannot be understood, or is an inequality floating-point
9181 comparison which needs to be reversed, 0 will be returned.
9183 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
9185 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9186 insn used in locating the condition was found. If a replacement test
9187 of the condition is desired, it should be placed in front of that
9188 insn and we will be sure that the inputs are still valid.
9190 If WANT_REG is nonzero, we wish the condition to be relative to that
9191 register, if possible. Therefore, do not canonicalize the condition
9192 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
9193 to be a compare to a CC mode register. */
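/* For instance, given (le (reg 60) (const_int 4)) on an SImode register,
the canonical form returned is (lt (reg 60) (const_int 5)); given
(gt (const_int 0) (reg 60)), the constant is moved last and the code
swapped, yielding (lt (reg 60) (const_int 0)). The register numbers
here are purely illustrative. */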
9195 rtx
9196 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
9197 rtx want_reg, int allow_cc_mode)
9199 enum rtx_code code;
9200 rtx prev = insn;
9201 rtx set;
9202 rtx tem;
9203 rtx op0, op1;
9204 int reverse_code = 0;
9205 enum machine_mode mode;
9207 code = GET_CODE (cond);
9208 mode = GET_MODE (cond);
9209 op0 = XEXP (cond, 0);
9210 op1 = XEXP (cond, 1);
9212 if (reverse)
9213 code = reversed_comparison_code (cond, insn);
9214 if (code == UNKNOWN)
9215 return 0;
9217 if (earliest)
9218 *earliest = insn;
9220 /* If we are comparing a register with zero, see if the register is set
9221 in the previous insn to a COMPARE or a comparison operation. Perform
9222 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9223 in cse.c */
9225 while (GET_RTX_CLASS (code) == '<'
9226 && op1 == CONST0_RTX (GET_MODE (op0))
9227 && op0 != want_reg)
9229 /* Set nonzero when we find something of interest. */
9230 rtx x = 0;
9232 #ifdef HAVE_cc0
9233 /* If comparison with cc0, import actual comparison from compare
9234 insn. */
9235 if (op0 == cc0_rtx)
9237 if ((prev = prev_nonnote_insn (prev)) == 0
9238 || GET_CODE (prev) != INSN
9239 || (set = single_set (prev)) == 0
9240 || SET_DEST (set) != cc0_rtx)
9241 return 0;
9243 op0 = SET_SRC (set);
9244 op1 = CONST0_RTX (GET_MODE (op0));
9245 if (earliest)
9246 *earliest = prev;
9248 #endif
9250 /* If this is a COMPARE, pick up the two things being compared. */
9251 if (GET_CODE (op0) == COMPARE)
9253 op1 = XEXP (op0, 1);
9254 op0 = XEXP (op0, 0);
9255 continue;
9257 else if (GET_CODE (op0) != REG)
9258 break;
9260 /* Go back to the previous insn. Stop if it is not an INSN. We also
9261 stop if it isn't a single set or if it has a REG_INC note because
9262 we don't want to bother dealing with it. */
9264 if ((prev = prev_nonnote_insn (prev)) == 0
9265 || GET_CODE (prev) != INSN
9266 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9267 break;
9269 set = set_of (op0, prev);
9271 if (set
9272 && (GET_CODE (set) != SET
9273 || !rtx_equal_p (SET_DEST (set), op0)))
9274 break;
9276 /* If this is setting OP0, get what it sets it to if it looks
9277 relevant. */
9278 if (set)
9280 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9281 #ifdef FLOAT_STORE_FLAG_VALUE
9282 REAL_VALUE_TYPE fsfv;
9283 #endif
9285 /* ??? We may not combine comparisons done in a CCmode with
9286 comparisons not done in a CCmode. This is to aid targets
9287 like Alpha that have an IEEE compliant EQ instruction, and
9288 a non-IEEE compliant BEQ instruction. The use of CCmode is
9289 actually artificial, simply to prevent the combination, but
9290 should not affect other platforms.
9292 However, we must allow VOIDmode comparisons to match either
9293 CCmode or non-CCmode comparison, because some ports have
9294 modeless comparisons inside branch patterns.
9296 ??? This mode check should perhaps look more like the mode check
9297 in simplify_comparison in combine. */
9299 if ((GET_CODE (SET_SRC (set)) == COMPARE
9300 || (((code == NE
9301 || (code == LT
9302 && GET_MODE_CLASS (inner_mode) == MODE_INT
9303 && (GET_MODE_BITSIZE (inner_mode)
9304 <= HOST_BITS_PER_WIDE_INT)
9305 && (STORE_FLAG_VALUE
9306 & ((HOST_WIDE_INT) 1
9307 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9308 #ifdef FLOAT_STORE_FLAG_VALUE
9309 || (code == LT
9310 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9311 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9312 REAL_VALUE_NEGATIVE (fsfv)))
9313 #endif
9315 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9316 && (((GET_MODE_CLASS (mode) == MODE_CC)
9317 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9318 || mode == VOIDmode || inner_mode == VOIDmode))
9319 x = SET_SRC (set);
9320 else if (((code == EQ
9321 || (code == GE
9322 && (GET_MODE_BITSIZE (inner_mode)
9323 <= HOST_BITS_PER_WIDE_INT)
9324 && GET_MODE_CLASS (inner_mode) == MODE_INT
9325 && (STORE_FLAG_VALUE
9326 & ((HOST_WIDE_INT) 1
9327 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9328 #ifdef FLOAT_STORE_FLAG_VALUE
9329 || (code == GE
9330 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9331 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9332 REAL_VALUE_NEGATIVE (fsfv)))
9333 #endif
9335 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9336 && (((GET_MODE_CLASS (mode) == MODE_CC)
9337 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9338 || mode == VOIDmode || inner_mode == VOIDmode))
9341 reverse_code = 1;
9342 x = SET_SRC (set);
9344 else
9345 break;
9348 else if (reg_set_p (op0, prev))
9349 /* If this sets OP0, but not directly, we have to give up. */
9350 break;
9352 if (x)
9354 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9355 code = GET_CODE (x);
9356 if (reverse_code)
9358 code = reversed_comparison_code (x, prev);
9359 if (code == UNKNOWN)
9360 return 0;
9361 reverse_code = 0;
9364 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9365 if (earliest)
9366 *earliest = prev;
9370 /* If constant is first, put it last. */
9371 if (CONSTANT_P (op0))
9372 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9374 /* If OP0 is the result of a comparison, we weren't able to find what
9375 was really being compared, so fail. */
9376 if (!allow_cc_mode
9377 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9378 return 0;
9380 /* Canonicalize any ordered comparison with integers involving equality
9381 if we can do computations in the relevant mode and we do not
9382 overflow. */
9384 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
9385 && GET_CODE (op1) == CONST_INT
9386 && GET_MODE (op0) != VOIDmode
9387 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9389 HOST_WIDE_INT const_val = INTVAL (op1);
9390 unsigned HOST_WIDE_INT uconst_val = const_val;
9391 unsigned HOST_WIDE_INT max_val
9392 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9394 switch (code)
9396 case LE:
9397 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9398 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9399 break;
9401 /* When cross-compiling, const_val might be sign-extended from
9402 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
9403 case GE:
9404 if ((HOST_WIDE_INT) (const_val & max_val)
9405 != (((HOST_WIDE_INT) 1
9406 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9407 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9408 break;
9410 case LEU:
9411 if (uconst_val < max_val)
9412 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9413 break;
9415 case GEU:
9416 if (uconst_val != 0)
9417 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9418 break;
9420 default:
9421 break;
9425 /* Never return CC0; return zero instead. */
9426 if (CC0_P (op0))
9427 return 0;
9429 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
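/* Worked examples of the integer adjustment in canonicalize_condition,
assuming OP0 is an SImode register and the constants fit the mode:
(le x (const_int 4)) becomes (lt x (const_int 5))
(ge x (const_int 7)) becomes (gt x (const_int 6))
(leu x (const_int 7)) becomes (ltu x (const_int 8))
(geu x (const_int 7)) becomes (gtu x (const_int 6))
The guards above skip the rewrite whenever adding or subtracting one
would overflow the mode. */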
9432 /* Given a jump insn JUMP, return the condition that will cause it to branch
9433 to its JUMP_LABEL. If the condition cannot be understood, or is an
9434 inequality floating-point comparison which needs to be reversed, 0 will
9435 be returned.
9437 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9438 insn used in locating the condition was found. If a replacement test
9439 of the condition is desired, it should be placed in front of that
9440 insn and we will be sure that the inputs are still valid.
9442 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
9443 compare to a CC mode register. */
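/* For example, a conditional jump whose pattern is
(set (pc) (if_then_else (ne (reg 60) (const_int 0)) (label_ref 23) (pc)))
typically yields (ne (reg 60) (const_int 0)); if the label_ref instead
appears in the else arm, the jump branches to its label when the
condition is false, so the condition is reversed before being
canonicalized. The register and label numbers are illustrative. */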
9445 rtx
9446 get_condition (rtx jump, rtx *earliest, int allow_cc_mode)
9448 rtx cond;
9449 int reverse;
9450 rtx set;
9452 /* If this is not a standard conditional jump, we can't parse it. */
9453 if (GET_CODE (jump) != JUMP_INSN
9454 || ! any_condjump_p (jump))
9455 return 0;
9456 set = pc_set (jump);
9458 cond = XEXP (SET_SRC (set), 0);
9460 /* If this branches to JUMP_LABEL when the condition is false, reverse
9461 the condition. */
9462 reverse
9463 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9464 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9466 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
9467 allow_cc_mode);
9470 /* Similar to the above routine, except that we also put an invariant last
9471 unless both operands are invariants. */
9473 rtx
9474 get_condition_for_loop (const struct loop *loop, rtx x)
9476 rtx comparison = get_condition (x, (rtx*) 0, false);
9478 if (comparison == 0
9479 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9480 || loop_invariant_p (loop, XEXP (comparison, 1)))
9481 return comparison;
9483 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9484 XEXP (comparison, 1), XEXP (comparison, 0));
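/* Example for get_condition_for_loop: if get_condition returns
(gt (reg 70) (reg 71)) where (reg 70) is loop-invariant but (reg 71)
is not, the comparison is rewritten as (lt (reg 71) (reg 70)) so the
invariant operand ends up second. Register numbers are illustrative. */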
9487 /* Scan the function and determine whether it has indirect (computed) jumps.
9489 This is taken mostly from flow.c; similar code exists elsewhere
9490 in the compiler. It may be useful to put this into rtlanal.c. */
9491 static int
9492 indirect_jump_in_function_p (rtx start)
9494 rtx insn;
9496 for (insn = start; insn; insn = NEXT_INSN (insn))
9497 if (computed_jump_p (insn))
9498 return 1;
9500 return 0;
9503 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9504 documentation for LOOP_MEMS for the definition of `appropriate'.
9505 This function is called from prescan_loop via for_each_rtx. */
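/* As a for_each_rtx callback, the return values below follow the usual
convention: 0 continues the walk into sub-rtxes, -1 skips the sub-rtxes
of the current expression, and any other nonzero value would terminate
the walk (not used here). */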
9507 static int
9508 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
9510 struct loop_info *loop_info = data;
9511 int i;
9512 rtx m = *mem;
9514 if (m == NULL_RTX)
9515 return 0;
9517 switch (GET_CODE (m))
9519 case MEM:
9520 break;
9522 case CLOBBER:
9523 /* We're not interested in MEMs that are only clobbered. */
9524 return -1;
9526 case CONST_DOUBLE:
9527 /* We're not interested in the MEM associated with a
9528 CONST_DOUBLE, so there's no need to traverse into this. */
9529 return -1;
9531 case EXPR_LIST:
9532 /* We're not interested in any MEMs that only appear in notes. */
9533 return -1;
9535 default:
9536 /* This is not a MEM. */
9537 return 0;
9540 /* See if we've already seen this MEM. */
9541 for (i = 0; i < loop_info->mems_idx; ++i)
9542 if (rtx_equal_p (m, loop_info->mems[i].mem))
9544 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
9545 loop_info->mems[i].mem = m;
9546 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9547 /* The modes of the two memory accesses are different. If
9548 this happens, something tricky is going on, and we just
9549 don't optimize accesses to this MEM. */
9550 loop_info->mems[i].optimize = 0;
9552 return 0;
9555 /* Resize the array, if necessary. */
9556 if (loop_info->mems_idx == loop_info->mems_allocated)
9558 if (loop_info->mems_allocated != 0)
9559 loop_info->mems_allocated *= 2;
9560 else
9561 loop_info->mems_allocated = 32;
9563 loop_info->mems = xrealloc (loop_info->mems,
9564 loop_info->mems_allocated * sizeof (loop_mem_info));
9567 /* Actually insert the MEM. */
9568 loop_info->mems[loop_info->mems_idx].mem = m;
9569 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9570 because we can't put it in a register. We still store it in the
9571 table, though, so that if we see the same address later, but in a
9572 non-BLK mode, we'll not think we can optimize it at that point. */
9573 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9574 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9575 ++loop_info->mems_idx;
9577 return 0;
9581 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9583 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9584 register that is modified by an insn between FROM and TO. If the
9585 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9586 more, stop incrementing it, to avoid overflow.
9588 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9589 register I is used, if it is only used once. Otherwise, it is set
9590 to 0 (for no uses) or const0_rtx for more than one use. This
9591 parameter may be zero, in which case this processing is not done.
9593 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9594 optimize register I. */
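/* Illustration: a pseudo assigned twice in the loop body ends up with
SET_IN_LOOP == 2; a pseudo used in exactly one insn has SINGLE_USAGE
pointing at that insn, while one used in several insns gets const0_rtx,
as described above. */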
9596 static void
9597 loop_regs_scan (const struct loop *loop, int extra_size)
9599 struct loop_regs *regs = LOOP_REGS (loop);
9600 int old_nregs;
9601 /* last_set[n] is nonzero iff reg n has been set in the current
9602 basic block. In that case, it is the insn that last set reg n. */
9603 rtx *last_set;
9604 rtx insn;
9605 int i;
9607 old_nregs = regs->num;
9608 regs->num = max_reg_num ();
9610 /* Grow the regs array if not allocated or too small. */
9611 if (regs->num >= regs->size)
9613 regs->size = regs->num + extra_size;
9615 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
9617 /* Zero the new elements. */
9618 memset (regs->array + old_nregs, 0,
9619 (regs->size - old_nregs) * sizeof (*regs->array));
9622 /* Clear previously scanned fields but do not clear n_times_set. */
9623 for (i = 0; i < old_nregs; i++)
9625 regs->array[i].set_in_loop = 0;
9626 regs->array[i].may_not_optimize = 0;
9627 regs->array[i].single_usage = NULL_RTX;
9630 last_set = xcalloc (regs->num, sizeof (rtx));
9632 /* Scan the loop, recording register usage. */
9633 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9634 insn = NEXT_INSN (insn))
9636 if (INSN_P (insn))
9638 /* Record registers that have exactly one use. */
9639 find_single_use_in_loop (regs, insn, PATTERN (insn));
9641 /* Include uses in REG_EQUAL notes. */
9642 if (REG_NOTES (insn))
9643 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9645 if (GET_CODE (PATTERN (insn)) == SET
9646 || GET_CODE (PATTERN (insn)) == CLOBBER)
9647 count_one_set (regs, insn, PATTERN (insn), last_set);
9648 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9650 int i;
9651 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9652 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9653 last_set);
9657 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9658 memset (last_set, 0, regs->num * sizeof (rtx));
9660 /* Invalidate all registers used for function argument passing.
9661 We check rtx_varies_p for the same reason as below, to allow
9662 optimizing PIC calculations. */
9663 if (GET_CODE (insn) == CALL_INSN)
9665 rtx link;
9666 for (link = CALL_INSN_FUNCTION_USAGE (insn);
9667 link;
9668 link = XEXP (link, 1))
9670 rtx op, reg;
9672 if (GET_CODE (op = XEXP (link, 0)) == USE
9673 && GET_CODE (reg = XEXP (op, 0)) == REG
9674 && rtx_varies_p (reg, 1))
9675 regs->array[REGNO (reg)].may_not_optimize = 1;
9680 /* Invalidate all hard registers clobbered by calls. With one exception:
9681 a call-clobbered PIC register is still function-invariant for our
9682 purposes, since we can hoist any PIC calculations out of the loop.
9683 Thus the call to rtx_varies_p. */
9684 if (LOOP_INFO (loop)->has_call)
9685 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9686 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9687 && rtx_varies_p (regno_reg_rtx[i], 1))
9689 regs->array[i].may_not_optimize = 1;
9690 regs->array[i].set_in_loop = 1;
9693 #ifdef AVOID_CCMODE_COPIES
9694 /* Don't try to move insns which set CC registers if we should not
9695 create CCmode register copies. */
9696 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9697 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9698 regs->array[i].may_not_optimize = 1;
9699 #endif
9701 /* Set regs->array[I].n_times_set for the new registers. */
9702 for (i = old_nregs; i < regs->num; i++)
9703 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9705 free (last_set);
9708 /* Returns the number of real INSNs in the LOOP. */
9710 static int
9711 count_insns_in_loop (const struct loop *loop)
9713 int count = 0;
9714 rtx insn;
9716 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9717 insn = NEXT_INSN (insn))
9718 if (INSN_P (insn))
9719 ++count;
9721 return count;
9724 /* Move MEMs into registers for the duration of the loop. */
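/* For example, if every iteration reads the same loop-invariant location
(mem:SI (reg 65)), a new pseudo is loaded from it once in the loop
pre-header, references inside the loop are rewritten to use the pseudo,
and, if the location is also written, its value is stored back after the
loop. The register number is illustrative. */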
9726 static void
9727 load_mems (const struct loop *loop)
9729 struct loop_info *loop_info = LOOP_INFO (loop);
9730 struct loop_regs *regs = LOOP_REGS (loop);
9731 int maybe_never = 0;
9732 int i;
9733 rtx p, prev_ebb_head;
9734 rtx label = NULL_RTX;
9735 rtx end_label;
9736 /* Nonzero if the next instruction may never be executed. */
9737 int next_maybe_never = 0;
9738 unsigned int last_max_reg = max_reg_num ();
9740 if (loop_info->mems_idx == 0)
9741 return;
9743 /* We cannot use next_label here because it skips over normal insns. */
9744 end_label = next_nonnote_insn (loop->end);
9745 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9746 end_label = NULL_RTX;
9748 /* Check to see if it's possible that some instructions in the loop are
9749 never executed. Also check if there is a goto out of the loop other
9750 than right after the end of the loop. */
9751 for (p = next_insn_in_loop (loop, loop->scan_start);
9752 p != NULL_RTX;
9753 p = next_insn_in_loop (loop, p))
9755 if (GET_CODE (p) == CODE_LABEL)
9756 maybe_never = 1;
9757 else if (GET_CODE (p) == JUMP_INSN
9758 /* If we enter the loop in the middle, and scan
9759 around to the beginning, don't set maybe_never
9760 for that. This must be an unconditional jump,
9761 otherwise the code at the top of the loop might
9762 never be executed. Unconditional jumps are
9763 followed by a barrier then the loop end. */
9764 && ! (GET_CODE (p) == JUMP_INSN
9765 && JUMP_LABEL (p) == loop->top
9766 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9767 && any_uncondjump_p (p)))
9769 /* If this is a jump outside of the loop but not right
9770 after the end of the loop, we would have to emit new fixup
9771 sequences for each such label. */
9772 if (/* If we can't tell where control might go when this
9773 JUMP_INSN is executed, we must be conservative. */
9774 !JUMP_LABEL (p)
9775 || (JUMP_LABEL (p) != end_label
9776 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9777 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9778 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9779 return;
9781 if (!any_condjump_p (p))
9782 /* Something complicated. */
9783 maybe_never = 1;
9784 else
9785 /* If there are any more instructions in the loop, they
9786 might not be reached. */
9787 next_maybe_never = 1;
9789 else if (next_maybe_never)
9790 maybe_never = 1;
9793 /* Find start of the extended basic block that enters the loop. */
9794 for (p = loop->start;
9795 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9796 p = PREV_INSN (p))
9798 prev_ebb_head = p;
9800 cselib_init ();
9802 /* Build table of mems that get set to constant values before the
9803 loop. */
9804 for (; p != loop->start; p = NEXT_INSN (p))
9805 cselib_process_insn (p);
9807 /* Actually move the MEMs. */
9808 for (i = 0; i < loop_info->mems_idx; ++i)
9810 regset_head load_copies;
9811 regset_head store_copies;
9812 int written = 0;
9813 rtx reg;
9814 rtx mem = loop_info->mems[i].mem;
9815 rtx mem_list_entry;
9817 if (MEM_VOLATILE_P (mem)
9818 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9819 /* There's no telling whether or not MEM is modified. */
9820 loop_info->mems[i].optimize = 0;
9822 /* Go through the MEMs written to in the loop to see if this
9823 one is aliased by one of them. */
9824 mem_list_entry = loop_info->store_mems;
9825 while (mem_list_entry)
9827 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9828 written = 1;
9829 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9830 mem, rtx_varies_p))
9832 /* MEM is indeed aliased by this store. */
9833 loop_info->mems[i].optimize = 0;
9834 break;
9836 mem_list_entry = XEXP (mem_list_entry, 1);
9839 if (flag_float_store && written
9840 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9841 loop_info->mems[i].optimize = 0;
9843 /* If this MEM is written to, we must be sure that there
9844 are no reads from another MEM that aliases this one. */
9845 if (loop_info->mems[i].optimize && written)
9847 int j;
9849 for (j = 0; j < loop_info->mems_idx; ++j)
9851 if (j == i)
9852 continue;
9853 else if (true_dependence (mem,
9854 VOIDmode,
9855 loop_info->mems[j].mem,
9856 rtx_varies_p))
9858 /* It's not safe to hoist loop_info->mems[i] out of
9859 the loop because writes to it might not be
9860 seen by reads from loop_info->mems[j]. */
9861 loop_info->mems[i].optimize = 0;
9862 break;
9867 if (maybe_never && may_trap_p (mem))
9868 /* We can't access the MEM outside the loop; it might
9869 cause a trap that wouldn't have happened otherwise. */
9870 loop_info->mems[i].optimize = 0;
9872 if (!loop_info->mems[i].optimize)
9873 /* We thought we were going to lift this MEM out of the
9874 loop, but later discovered that we could not. */
9875 continue;
9877 INIT_REG_SET (&load_copies);
9878 INIT_REG_SET (&store_copies);
9880 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9881 order to keep scan_loop from moving stores to this MEM
9882 out of the loop just because this REG is neither a
9883 user-variable nor used in the loop test. */
9884 reg = gen_reg_rtx (GET_MODE (mem));
9885 REG_USERVAR_P (reg) = 1;
9886 loop_info->mems[i].reg = reg;
9888 /* Now, replace all references to the MEM with the
9889 corresponding pseudos. */
9890 maybe_never = 0;
9891 for (p = next_insn_in_loop (loop, loop->scan_start);
9892 p != NULL_RTX;
9893 p = next_insn_in_loop (loop, p))
9895 if (INSN_P (p))
9897 rtx set;
9899 set = single_set (p);
9901 /* See if this copies the mem into a register that isn't
9902 modified afterwards. We'll try to do copy propagation
9903 a little further on. */
9904 if (set
9905 /* @@@ This test is _way_ too conservative. */
9906 && ! maybe_never
9907 && GET_CODE (SET_DEST (set)) == REG
9908 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9909 && REGNO (SET_DEST (set)) < last_max_reg
9910 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9911 && rtx_equal_p (SET_SRC (set), mem))
9912 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9914 /* See if this copies the mem from a register that isn't
9915 modified afterwards. We'll try to remove the
9916 redundant copy later on by doing a little register
9917 renaming and copy propagation. This will help
9918 to untangle things for the BIV detection code. */
9919 if (set
9920 && ! maybe_never
9921 && GET_CODE (SET_SRC (set)) == REG
9922 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9923 && REGNO (SET_SRC (set)) < last_max_reg
9924 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9925 && rtx_equal_p (SET_DEST (set), mem))
9926 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9928 /* If this is a call which uses / clobbers this memory
9929 location, we must not change the interface here. */
9930 if (GET_CODE (p) == CALL_INSN
9931 && reg_mentioned_p (loop_info->mems[i].mem,
9932 CALL_INSN_FUNCTION_USAGE (p)))
9934 cancel_changes (0);
9935 loop_info->mems[i].optimize = 0;
9936 break;
9938 else
9939 /* Replace the memory reference with the shadow register. */
9940 replace_loop_mems (p, loop_info->mems[i].mem,
9941 loop_info->mems[i].reg, written);
9944 if (GET_CODE (p) == CODE_LABEL
9945 || GET_CODE (p) == JUMP_INSN)
9946 maybe_never = 1;
9949 if (! loop_info->mems[i].optimize)
9950 ; /* We found we couldn't do the replacement, so do nothing. */
9951 else if (! apply_change_group ())
9952 /* We couldn't replace all occurrences of the MEM. */
9953 loop_info->mems[i].optimize = 0;
9954 else
9956 /* Load the memory immediately before LOOP->START, which is
9957 the NOTE_LOOP_BEG. */
9958 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9959 rtx set;
9960 rtx best = mem;
9961 int j;
9962 struct elt_loc_list *const_equiv = 0;
9964 if (e)
9966 struct elt_loc_list *equiv;
9967 struct elt_loc_list *best_equiv = 0;
9968 for (equiv = e->locs; equiv; equiv = equiv->next)
9970 if (CONSTANT_P (equiv->loc))
9971 const_equiv = equiv;
9972 else if (GET_CODE (equiv->loc) == REG
9973 /* Extending hard register lifetimes causes a crash
9974 on SRC targets. Doing so on non-SRC targets is
9975 probably also not a good idea, since we most
9976 probably have a pseudoregister equivalence as
9977 well. */
9978 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9979 best_equiv = equiv;
9981 /* Use the constant equivalence if that is cheap enough. */
9982 if (! best_equiv)
9983 best_equiv = const_equiv;
9984 else if (const_equiv
9985 && (rtx_cost (const_equiv->loc, SET)
9986 <= rtx_cost (best_equiv->loc, SET)))
9988 best_equiv = const_equiv;
9989 const_equiv = 0;
9992 /* If best_equiv is nonzero, we know that MEM is set to a
9993 constant or register before the loop. We will use this
9994 knowledge to initialize the shadow register with that
9995 constant or reg rather than by loading from MEM. */
9996 if (best_equiv)
9997 best = copy_rtx (best_equiv->loc);
10000 set = gen_move_insn (reg, best);
10001 set = loop_insn_hoist (loop, set);
10002 if (REG_P (best))
10004 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
10005 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
10007 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
10008 break;
10012 if (const_equiv)
10013 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
10015 if (written)
10017 if (label == NULL_RTX)
10019 label = gen_label_rtx ();
10020 emit_label_after (label, loop->end);
10023 /* Store the memory immediately after END, which is
10024 the NOTE_LOOP_END. */
10025 set = gen_move_insn (copy_rtx (mem), reg);
10026 loop_insn_emit_after (loop, 0, label, set);
10029 if (loop_dump_stream)
10031 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10032 REGNO (reg), (written ? "r/w" : "r/o"));
10033 print_rtl (loop_dump_stream, mem);
10034 fputc ('\n', loop_dump_stream);
10037 /* Attempt a bit of copy propagation. This helps untangle the
10038 data flow, and enables {basic,general}_induction_var to find
10039 more bivs/givs. */
10040 EXECUTE_IF_SET_IN_REG_SET
10041 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10043 try_copy_prop (loop, reg, j);
10045 CLEAR_REG_SET (&load_copies);
10047 EXECUTE_IF_SET_IN_REG_SET
10048 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10050 try_swap_copy_prop (loop, reg, j);
10052 CLEAR_REG_SET (&store_copies);
10056 /* Now, we need to replace all references to the previous exit
10057 label with the new one. */
10058 if (label != NULL_RTX && end_label != NULL_RTX)
10059 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10060 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10061 redirect_jump (p, label, false);
10063 cselib_finish ();
10066 /* For communication between note_reg_stored and its caller. */
10067 struct note_reg_stored_arg
10069 int set_seen;
10070 rtx reg;
10073 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10074 is equal to ARG. */
10075 static void
10076 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
10078 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10079 if (t->reg == x)
10080 t->set_seen = 1;
10083 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10084 There must be exactly one insn that sets this pseudo; it will be
10085 deleted if all replacements succeed and we can prove that the register
10086 is not used after the loop. */
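/* A typical use, from load_mems above: once the shadow pseudo REPLACEMENT
has taken the place of a hoisted MEM, an insn such as
(set (reg 68) (reg 70)) that is the only set of reg 68 lets us rewrite
later uses of reg 68 as reg 70 and then delete the copy. Register
numbers are illustrative. */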
10088 static void
10089 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
10091 /* This is the reg that we are copying from. */
10092 rtx reg_rtx = regno_reg_rtx[regno];
10093 rtx init_insn = 0;
10094 rtx insn;
10095 /* These help keep track of whether we replaced all uses of the reg. */
10096 int replaced_last = 0;
10097 int store_is_first = 0;
10099 for (insn = next_insn_in_loop (loop, loop->scan_start);
10100 insn != NULL_RTX;
10101 insn = next_insn_in_loop (loop, insn))
10103 rtx set;
10105 /* Only substitute within one extended basic block from the initializing
10106 insn. */
10107 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10108 break;
10110 if (! INSN_P (insn))
10111 continue;
10113 /* Is this the initializing insn? */
10114 set = single_set (insn);
10115 if (set
10116 && GET_CODE (SET_DEST (set)) == REG
10117 && REGNO (SET_DEST (set)) == regno)
10119 if (init_insn)
10120 abort ();
10122 init_insn = insn;
10123 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10124 store_is_first = 1;
10127 /* Only substitute after seeing the initializing insn. */
10128 if (init_insn && insn != init_insn)
10130 struct note_reg_stored_arg arg;
10132 replace_loop_regs (insn, reg_rtx, replacement);
10133 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10134 replaced_last = 1;
10136 /* Stop replacing when REPLACEMENT is modified. */
10137 arg.reg = replacement;
10138 arg.set_seen = 0;
10139 note_stores (PATTERN (insn), note_reg_stored, &arg);
10140 if (arg.set_seen)
10142 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10144 /* It is possible that we've turned previously valid REG_EQUAL to
10145 invalid, as we change the REGNO to REPLACEMENT and unlike REGNO,
10146 REPLACEMENT is modified, we get different meaning. */
10147 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10148 remove_note (insn, note);
10149 break;
10153 if (! init_insn)
10154 abort ();
10155 if (apply_change_group ())
10157 if (loop_dump_stream)
10158 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10159 if (store_is_first && replaced_last)
10161 rtx first;
10162 rtx retval_note;
10164 /* Assume we're just deleting INIT_INSN. */
10165 first = init_insn;
10166 /* Look for REG_RETVAL note. If we're deleting the end of
10167 the libcall sequence, the whole sequence can go. */
10168 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10169 /* If we found a REG_RETVAL note, find the first instruction
10170 in the sequence. */
10171 if (retval_note)
10172 first = XEXP (retval_note, 0);
10174 /* Delete the instructions. */
10175 loop_delete_insns (first, init_insn);
10177 if (loop_dump_stream)
10178 fprintf (loop_dump_stream, ".\n");
10182 /* Replace all the instructions from FIRST up to and including LAST
10183 with NOTE_INSN_DELETED notes. */
10185 static void
10186 loop_delete_insns (rtx first, rtx last)
10188 while (1)
10190 if (loop_dump_stream)
10191 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10192 INSN_UID (first));
10193 delete_insn (first);
10195 /* If this was the LAST instructions we're supposed to delete,
10196 we're done. */
10197 if (first == last)
10198 break;
10200 first = NEXT_INSN (first);
10204 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10205 loop LOOP if the order of the sets of these registers can be
10206 swapped. There must be exactly one insn within the loop that sets
10207 this pseudo followed immediately by a move insn that sets
10208 REPLACEMENT with REGNO. */
10209 static void
10210 try_swap_copy_prop (const struct loop *loop, rtx replacement,
10211 unsigned int regno)
10213 rtx insn;
10214 rtx set = NULL_RTX;
10215 unsigned int new_regno;
10217 new_regno = REGNO (replacement);
10219 for (insn = next_insn_in_loop (loop, loop->scan_start);
10220 insn != NULL_RTX;
10221 insn = next_insn_in_loop (loop, insn))
10223 /* Search for the insn that copies REGNO to NEW_REGNO. */
10224 if (INSN_P (insn)
10225 && (set = single_set (insn))
10226 && GET_CODE (SET_DEST (set)) == REG
10227 && REGNO (SET_DEST (set)) == new_regno
10228 && GET_CODE (SET_SRC (set)) == REG
10229 && REGNO (SET_SRC (set)) == regno)
10230 break;
10233 if (insn != NULL_RTX)
10235 rtx prev_insn;
10236 rtx prev_set;
10238 /* Some DEF-USE info would come in handy here to make this
10239 function more general. For now, just check the previous insn
10240 which is the most likely candidate for setting REGNO. */
10242 prev_insn = PREV_INSN (insn);
10244 if (INSN_P (insn)
10245 && (prev_set = single_set (prev_insn))
10246 && GET_CODE (SET_DEST (prev_set)) == REG
10247 && REGNO (SET_DEST (prev_set)) == regno)
10249 /* We have:
10250 (set (reg regno) (expr))
10251 (set (reg new_regno) (reg regno))
10253 so try converting this to:
10254 (set (reg new_regno) (expr))
10255 (set (reg regno) (reg new_regno))
10257 The former construct is often generated when a global
10258 variable used for an induction variable is shadowed by a
10259 register (NEW_REGNO). The latter construct improves the
10260 chances of GIV replacement and BIV elimination. */
10262 validate_change (prev_insn, &SET_DEST (prev_set),
10263 replacement, 1);
10264 validate_change (insn, &SET_DEST (set),
10265 SET_SRC (set), 1);
10266 validate_change (insn, &SET_SRC (set),
10267 replacement, 1);
10269 if (apply_change_group ())
10271 if (loop_dump_stream)
10272 fprintf (loop_dump_stream,
10273 " Swapped set of reg %d at %d with reg %d at %d.\n",
10274 regno, INSN_UID (insn),
10275 new_regno, INSN_UID (prev_insn));
10277 /* Update first use of REGNO. */
10278 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10279 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10281 /* Now perform copy propagation to hopefully
10282 remove all uses of REGNO within the loop. */
10283 try_copy_prop (loop, replacement, regno);
10289 /* Worker function for find_mem_in_note, called via for_each_rtx. */
10291 static int
10292 find_mem_in_note_1 (rtx *x, void *data)
10294 if (*x != NULL_RTX && GET_CODE (*x) == MEM)
10296 rtx *res = (rtx *) data;
10297 *res = *x;
10298 return 1;
10300 return 0;
10303 /* Returns the first MEM found in NOTE by depth-first search. */
10305 static rtx
10306 find_mem_in_note (rtx note)
10308 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
10309 return note;
10310 return NULL_RTX;
10313 /* Replace MEM with its associated pseudo register. This function is
10314 called from load_mems via for_each_rtx. DATA is actually a pointer
10315 to a structure describing the instruction currently being scanned
10316 and the MEM we are currently replacing. */
10318 static int
10319 replace_loop_mem (rtx *mem, void *data)
10321 loop_replace_args *args = (loop_replace_args *) data;
10322 rtx m = *mem;
10324 if (m == NULL_RTX)
10325 return 0;
10327 switch (GET_CODE (m))
10329 case MEM:
10330 break;
10332 case CONST_DOUBLE:
10333 /* We're not interested in the MEM associated with a
10334 CONST_DOUBLE, so there's no need to traverse into one. */
10335 return -1;
10337 default:
10338 /* This is not a MEM. */
10339 return 0;
10342 if (!rtx_equal_p (args->match, m))
10343 /* This is not the MEM we are currently replacing. */
10344 return 0;
10346 /* Actually replace the MEM. */
10347 validate_change (args->insn, mem, args->replacement, 1);
10349 return 0;
10352 static void
10353 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
10355 loop_replace_args args;
10357 args.insn = insn;
10358 args.match = mem;
10359 args.replacement = reg;
10361 for_each_rtx (&insn, replace_loop_mem, &args);
10363 /* If we hoist a mem write out of the loop, then REG_EQUAL
10364 notes referring to the mem are no longer valid. */
10365 if (written)
10367 rtx note, sub;
10368 rtx *link;
10370 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
10372 if (REG_NOTE_KIND (note) == REG_EQUAL
10373 && (sub = find_mem_in_note (note))
10374 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
10376 /* Remove the note. */
10377 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
10378 break;
10384 /* Replace one register with another. Called through for_each_rtx; PX points
10385 to the rtx being scanned. DATA is actually a pointer to
10386 a structure of arguments. */
10388 static int
10389 replace_loop_reg (rtx *px, void *data)
10391 rtx x = *px;
10392 loop_replace_args *args = (loop_replace_args *) data;
10394 if (x == NULL_RTX)
10395 return 0;
10397 if (x == args->match)
10398 validate_change (args->insn, px, args->replacement, 1);
10400 return 0;
10403 static void
10404 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
10406 loop_replace_args args;
10408 args.insn = insn;
10409 args.match = reg;
10410 args.replacement = replacement;
10412 for_each_rtx (&insn, replace_loop_reg, &args);
10415 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10416 (ignored in the interim). */
10418 static rtx
10419 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
10420 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
10421 rtx pattern)
10423 return emit_insn_after (pattern, where_insn);
10427 /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
10428 in basic block WHERE_BB (ignored in the interim) within the loop
10429 otherwise hoist PATTERN into the loop pre-header. */
10431 rtx
10432 loop_insn_emit_before (const struct loop *loop,
10433 basic_block where_bb ATTRIBUTE_UNUSED,
10434 rtx where_insn, rtx pattern)
10436 if (! where_insn)
10437 return loop_insn_hoist (loop, pattern);
10438 return emit_insn_before (pattern, where_insn);
10442 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10443 WHERE_BB (ignored in the interim) within the loop. */
10445 static rtx
10446 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
10447 basic_block where_bb ATTRIBUTE_UNUSED,
10448 rtx where_insn, rtx pattern)
10450 return emit_call_insn_before (pattern, where_insn);
10454 /* Hoist insn for PATTERN into the loop pre-header. */
10456 rtx
10457 loop_insn_hoist (const struct loop *loop, rtx pattern)
10459 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10463 /* Hoist call insn for PATTERN into the loop pre-header. */
10465 static rtx
10466 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
10468 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10472 /* Sink insn for PATTERN after the loop end. */
10474 rtx
10475 loop_insn_sink (const struct loop *loop, rtx pattern)
10477 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10480 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
10481 and a constant. Emit a sequence of instructions to load it into REG. */
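/* For instance, with FINAL_VALUE of (plus (reg 65) (const_int 40)),
force_operand emits whatever insns are needed to compute the sum,
preferably straight into REG; the trailing move is emitted only when
the result lands somewhere else. The register number is illustrative. */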
10482 static rtx
10483 gen_load_of_final_value (rtx reg, rtx final_value)
10485 rtx seq;
10486 start_sequence ();
10487 final_value = force_operand (final_value, reg);
10488 if (final_value != reg)
10489 emit_move_insn (reg, final_value);
10490 seq = get_insns ();
10491 end_sequence ();
10492 return seq;
10495 /* If the loop has multiple exits, emit insn for PATTERN before the
10496 loop to ensure that it will always be executed no matter how the
10497 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10498 since this is slightly more efficient. */
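/* Illustration: with no recorded exit labels the insn can be sunk after
NOTE_INSN_LOOP_END, where it is still executed when the loop terminates
normally; with extra exits it must be hoisted before the loop, since an
early exit would skip anything placed after the loop end. */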
10500 static rtx
10501 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
10503 if (loop->exit_count)
10504 return loop_insn_hoist (loop, pattern);
10505 else
10506 return loop_insn_sink (loop, pattern);
10509 static void
10510 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
10512 struct iv_class *bl;
10513 int iv_num = 0;
10515 if (! loop || ! file)
10516 return;
10518 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10519 iv_num++;
10521 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10523 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10525 loop_iv_class_dump (bl, file, verbose);
10526 fputc ('\n', file);
10531 static void
10532 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
10533 int verbose ATTRIBUTE_UNUSED)
10535 struct induction *v;
10536 rtx incr;
10537 int i;
10539 if (! bl || ! file)
10540 return;
10542 fprintf (file, "IV class for reg %d, benefit %d\n",
10543 bl->regno, bl->total_benefit);
10545 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10546 if (bl->initial_value)
10548 fprintf (file, ", init val: ");
10549 print_simple_rtl (file, bl->initial_value);
10551 if (bl->initial_test)
10553 fprintf (file, ", init test: ");
10554 print_simple_rtl (file, bl->initial_test);
10556 fputc ('\n', file);
10558 if (bl->final_value)
10560 fprintf (file, " Final val: ");
10561 print_simple_rtl (file, bl->final_value);
10562 fputc ('\n', file);
10565 if ((incr = biv_total_increment (bl)))
10567 fprintf (file, " Total increment: ");
10568 print_simple_rtl (file, incr);
10569 fputc ('\n', file);
10572 /* List the increments. */
10573 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10575 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10576 print_simple_rtl (file, v->add_val);
10577 fputc ('\n', file);
10580 /* List the givs. */
10581 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10583 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10584 i, INSN_UID (v->insn), v->benefit);
10585 if (v->giv_type == DEST_ADDR)
10586 print_simple_rtl (file, v->mem);
10587 else
10588 print_simple_rtl (file, single_set (v->insn));
10589 fputc ('\n', file);
10594 static void
10595 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
10597 if (! v || ! file)
10598 return;
10600 fprintf (file,
10601 "Biv %d: insn %d",
10602 REGNO (v->dest_reg), INSN_UID (v->insn));
10603 fprintf (file, " const ");
10604 print_simple_rtl (file, v->add_val);
10606 if (verbose && v->final_value)
10608 fputc ('\n', file);
10609 fprintf (file, " final ");
10610 print_simple_rtl (file, v->final_value);
10613 fputc ('\n', file);
10617 static void
10618 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
10620 if (! v || ! file)
10621 return;
10623 if (v->giv_type == DEST_REG)
10624 fprintf (file, "Giv %d: insn %d",
10625 REGNO (v->dest_reg), INSN_UID (v->insn));
10626 else
10627 fprintf (file, "Dest address: insn %d",
10628 INSN_UID (v->insn));
10630 fprintf (file, " src reg %d benefit %d",
10631 REGNO (v->src_reg), v->benefit);
10632 fprintf (file, " lifetime %d",
10633 v->lifetime);
10635 if (v->replaceable)
10636 fprintf (file, " replaceable");
10638 if (v->no_const_addval)
10639 fprintf (file, " ncav");
10641 if (v->ext_dependent)
10643 switch (GET_CODE (v->ext_dependent))
10645 case SIGN_EXTEND:
10646 fprintf (file, " ext se");
10647 break;
10648 case ZERO_EXTEND:
10649 fprintf (file, " ext ze");
10650 break;
10651 case TRUNCATE:
10652 fprintf (file, " ext tr");
10653 break;
10654 default:
10655 abort ();
10659 fputc ('\n', file);
10660 fprintf (file, " mult ");
10661 print_simple_rtl (file, v->mult_val);
10663 fputc ('\n', file);
10664 fprintf (file, " add ");
10665 print_simple_rtl (file, v->add_val);
10667 if (verbose && v->final_value)
10669 fputc ('\n', file);
10670 fprintf (file, " final ");
10671 print_simple_rtl (file, v->final_value);
10674 fputc ('\n', file);
10678 void
10679 debug_ivs (const struct loop *loop)
10681 loop_ivs_dump (loop, stderr, 1);
10685 void
10686 debug_iv_class (const struct iv_class *bl)
10688 loop_iv_class_dump (bl, stderr, 1);
10692 void
10693 debug_biv (const struct induction *v)
10695 loop_biv_dump (v, stderr, 1);
10699 void
10700 debug_giv (const struct induction *v)
10702 loop_giv_dump (v, stderr, 1);
10706 #define LOOP_BLOCK_NUM_1(INSN) \
10707 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10709 /* The notes do not have an assigned block, so look at the next insn. */
10710 #define LOOP_BLOCK_NUM(INSN) \
10711 ((INSN) ? (GET_CODE (INSN) == NOTE \
10712 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10713 : LOOP_BLOCK_NUM_1 (INSN)) \
10714 : -1)
10716 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10718 static void
10719 loop_dump_aux (const struct loop *loop, FILE *file,
10720 int verbose ATTRIBUTE_UNUSED)
10722 rtx label;
10724 if (! loop || ! file)
10725 return;
10727 /* Print diagnostics to compare our concept of a loop with
10728 what the loop notes say. */
10729 if (! PREV_INSN (BB_HEAD (loop->first))
10730 || GET_CODE (PREV_INSN (BB_HEAD (loop->first))) != NOTE
10731 || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
10732 != NOTE_INSN_LOOP_BEG)
10733 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10734 INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
10735 if (! NEXT_INSN (BB_END (loop->last))
10736 || GET_CODE (NEXT_INSN (BB_END (loop->last))) != NOTE
10737 || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
10738 != NOTE_INSN_LOOP_END)
10739 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10740 INSN_UID (NEXT_INSN (BB_END (loop->last))));
10742 if (loop->start)
10744 fprintf (file,
10745 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10746 LOOP_BLOCK_NUM (loop->start),
10747 LOOP_INSN_UID (loop->start),
10748 LOOP_BLOCK_NUM (loop->cont),
10749 LOOP_INSN_UID (loop->cont),
10750 LOOP_BLOCK_NUM (loop->cont),
10751 LOOP_INSN_UID (loop->cont),
10752 LOOP_BLOCK_NUM (loop->vtop),
10753 LOOP_INSN_UID (loop->vtop),
10754 LOOP_BLOCK_NUM (loop->end),
10755 LOOP_INSN_UID (loop->end));
10756 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10757 LOOP_BLOCK_NUM (loop->top),
10758 LOOP_INSN_UID (loop->top),
10759 LOOP_BLOCK_NUM (loop->scan_start),
10760 LOOP_INSN_UID (loop->scan_start));
10761 fprintf (file, ";; exit_count %d", loop->exit_count);
10762 if (loop->exit_count)
10764 fputs (", labels:", file);
10765 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10767 fprintf (file, " %d ",
10768 LOOP_INSN_UID (XEXP (label, 0)));
10771 fputs ("\n", file);
10773 /* This can happen when a marked loop appears as two nested loops,
10774 say from while (a || b) {}. The inner loop won't match
10775 the loop markers but the outer one will. */
10776 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10777 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10781 /* Call this function from the debugger to dump LOOP. */
10783 void
10784 debug_loop (const struct loop *loop)
10786 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10789 /* Call this function from the debugger to dump LOOPS. */
10791 void
10792 debug_loops (const struct loops *loops)
10794 flow_loops_dump (loops, stderr, loop_dump_aux, 1);