[official-gcc.git] / gcc / loop.c
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
34 Most of the complexity is in heuristics to decide when it is worth
35 while to do these things. */
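/* Illustrative sketch (added for exposition, not part of the original
   source): given source code such as

       for (i = 0; i < n; i++)
         a[i] = x * y;

   invariant code motion hoists the computation of x * y in front of the
   loop, and strength reduction replaces the per-iteration address
   computation a + i * sizeof (*a) with a pointer that is bumped by
   sizeof (*a) each time around:

       t = x * y;
       for (p = a; p < a + n; p++)
         *p = t;  */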
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "obstack.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
46 #include "insn-config.h"
47 #include "regs.h"
48 #include "recog.h"
49 #include "flags.h"
50 #include "real.h"
51 #include "loop.h"
52 #include "cselib.h"
53 #include "except.h"
54 #include "toplev.h"
55 #include "predict.h"
56 #include "insn-flags.h"
57 #include "optabs.h"
59 /* Not really meaningful values, but at least something. */
60 #ifndef SIMULTANEOUS_PREFETCHES
61 #define SIMULTANEOUS_PREFETCHES 3
62 #endif
63 #ifndef PREFETCH_BLOCK
64 #define PREFETCH_BLOCK 32
65 #endif
66 #ifndef HAVE_prefetch
67 #define HAVE_prefetch 0
68 #define CODE_FOR_prefetch 0
69 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
70 #endif
72 /* Give up the prefetch optimizations once we exceed a given threshold.
73 It is unlikely that we would be able to optimize something in a loop
74 with so many detected prefetches. */
75 #define MAX_PREFETCHES 100
76 /* The number of prefetch blocks that are beneficial to fetch at once before
77 a loop with a known (and low) iteration count. */
78 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
79 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
80 since it is likely that the data are already in the cache. */
81 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
82 /* The minimal number of prefetch blocks that a loop must consume to make
83 the emitting of prefetch instructions in the body of the loop worthwhile. */
84 #define PREFETCH_BLOCKS_IN_LOOP_MIN 6
86 /* Parameterize some prefetch heuristics so they can be turned on and off
87 easily for performance testing on new architectures. These can be
88 defined in target-dependent files. */
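/* For example (a hypothetical target definition, added for exposition and
   not taken from any real back end), a target header could override one of
   the defaults below with something like

       #define PREFETCH_LOW_LOOPCNT 64

   and the #ifndef guards here would then leave that value alone.  */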
90 /* Prefetch is worthwhile only when loads/stores are dense. */
91 #ifndef PREFETCH_ONLY_DENSE_MEM
92 #define PREFETCH_ONLY_DENSE_MEM 1
93 #endif
95 /* Define what we mean by "dense" loads and stores; this value divided by 256
96 is the minimum percentage of memory references that are worth prefetching. */
97 #ifndef PREFETCH_DENSE_MEM
98 #define PREFETCH_DENSE_MEM 220
99 #endif
101 /* Do not prefetch for a loop whose iteration count is known to be low. */
102 #ifndef PREFETCH_NO_LOW_LOOPCNT
103 #define PREFETCH_NO_LOW_LOOPCNT 1
104 #endif
106 /* Define what we mean by a "low" iteration count. */
107 #ifndef PREFETCH_LOW_LOOPCNT
108 #define PREFETCH_LOW_LOOPCNT 32
109 #endif
111 /* Do not prefetch for a loop that contains a function call; such a loop is
112 probably not an internal loop. */
113 #ifndef PREFETCH_NO_CALL
114 #define PREFETCH_NO_CALL 1
115 #endif
117 /* Do not prefetch accesses with an extreme stride. */
118 #ifndef PREFETCH_NO_EXTREME_STRIDE
119 #define PREFETCH_NO_EXTREME_STRIDE 1
120 #endif
122 /* Define what we mean by an "extreme" stride. */
123 #ifndef PREFETCH_EXTREME_STRIDE
124 #define PREFETCH_EXTREME_STRIDE 4096
125 #endif
127 /* Do not handle reversed order prefetches (negative stride). */
128 #ifndef PREFETCH_NO_REVERSE_ORDER
129 #define PREFETCH_NO_REVERSE_ORDER 1
130 #endif
132 /* Prefetch even if the GIV is not always executed. */
133 #ifndef PREFETCH_NOT_ALWAYS
134 #define PREFETCH_NOT_ALWAYS 0
135 #endif
137 /* If the loop requires more prefetches than the target can process in
138 parallel then don't prefetch anything in that loop. */
139 #ifndef PREFETCH_LIMIT_TO_SIMULTANEOUS
140 #define PREFETCH_LIMIT_TO_SIMULTANEOUS 1
141 #endif
143 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
144 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
146 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
147 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
148 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
150 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
151 ((REGNO) < FIRST_PSEUDO_REGISTER \
152 ? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
155 /* Vector mapping INSN_UIDs to luids.
156 The luids are like uids but always increase monotonically.
157 We use them to see whether a jump comes from outside a given loop. */
159 int *uid_luid;
161 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
162 number the insn is contained in. */
164 struct loop **uid_loop;
166 /* 1 + largest uid of any insn. */
168 int max_uid_for_loop;
170 /* 1 + luid of last insn. */
172 static int max_luid;
174 /* Number of loops detected in current function. Used as index to the
175 next few tables. */
177 static int max_loop_num;
179 /* Bound on pseudo register number before loop optimization.
180 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
181 unsigned int max_reg_before_loop;
183 /* The value to pass to the next call of reg_scan_update. */
184 static int loop_max_reg;
186 #define obstack_chunk_alloc xmalloc
187 #define obstack_chunk_free free
189 /* During the analysis of a loop, a chain of `struct movable's
190 is made to record all the movable insns found.
191 Then the entire chain can be scanned to decide which to move. */
193 struct movable
195 rtx insn; /* A movable insn */
196 rtx set_src; /* The expression this reg is set from. */
197 rtx set_dest; /* The destination of this SET. */
198 rtx dependencies; /* When INSN is a libcall, this is an EXPR_LIST
199 of any registers used within the LIBCALL. */
200 int consec; /* Number of consecutive following insns
201 that must be moved with this one. */
202 unsigned int regno; /* The register it sets */
203 short lifetime; /* lifetime of that register;
204 may be adjusted when matching movables
205 that load the same value are found. */
206 short savings; /* Number of insns we can move for this reg,
207 including other movables that force this
208 or match this one. */
209 unsigned int cond : 1; /* 1 if only conditionally movable */
210 unsigned int force : 1; /* 1 means MUST move this insn */
211 unsigned int global : 1; /* 1 means reg is live outside this loop */
212 /* If PARTIAL is 1, GLOBAL means something different:
213 that the reg is live outside the range from where it is set
214 to the following label. */
215 unsigned int done : 1; /* 1 inhibits further processing of this */
217 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
218 In particular, moving it does not make it
219 invariant. */
220 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
221 load SRC, rather than copying INSN. */
222 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
223 first insn of a consecutive sets group. */
224 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
225 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
226 that we should avoid changing when clearing
227 the rest of the reg. */
228 struct movable *match; /* First entry for same value */
229 struct movable *forces; /* An insn that must be moved if this is */
230 struct movable *next;
234 FILE *loop_dump_stream;
236 /* Forward declarations. */
238 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
239 static void mark_loop_jump PARAMS ((rtx, struct loop *));
240 static void prescan_loop PARAMS ((struct loop *));
241 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
242 static int consec_sets_invariant_p PARAMS ((const struct loop *,
243 rtx, int, rtx));
244 static int labels_in_range_p PARAMS ((rtx, int));
245 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
246 static void note_addr_stored PARAMS ((rtx, rtx, void *));
247 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
248 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
249 static void scan_loop PARAMS ((struct loop*, int));
250 #if 0
251 static void replace_call_address PARAMS ((rtx, rtx, rtx));
252 #endif
253 static rtx skip_consec_insns PARAMS ((rtx, int));
254 static int libcall_benefit PARAMS ((rtx));
255 static void ignore_some_movables PARAMS ((struct loop_movables *));
256 static void force_movables PARAMS ((struct loop_movables *));
257 static void combine_movables PARAMS ((struct loop_movables *,
258 struct loop_regs *));
259 static int num_unmoved_movables PARAMS ((const struct loop *));
260 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
261 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
262 struct loop_regs *));
263 static void add_label_notes PARAMS ((rtx, rtx));
264 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
265 int, int));
266 static void loop_movables_add PARAMS((struct loop_movables *,
267 struct movable *));
268 static void loop_movables_free PARAMS((struct loop_movables *));
269 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
270 static void loop_bivs_find PARAMS((struct loop *));
271 static void loop_bivs_init_find PARAMS((struct loop *));
272 static void loop_bivs_check PARAMS((struct loop *));
273 static void loop_givs_find PARAMS((struct loop *));
274 static void loop_givs_check PARAMS((struct loop *));
275 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
276 int, int));
277 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
278 struct induction *, rtx));
279 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
280 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
281 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
282 rtx *));
283 static void loop_ivs_free PARAMS((struct loop *));
284 static void strength_reduce PARAMS ((struct loop *, int));
285 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
286 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
287 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
288 static void record_biv PARAMS ((struct loop *, struct induction *,
289 rtx, rtx, rtx, rtx, rtx *,
290 int, int));
291 static void check_final_value PARAMS ((const struct loop *,
292 struct induction *));
293 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
294 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
295 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
296 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
297 static void record_giv PARAMS ((const struct loop *, struct induction *,
298 rtx, rtx, rtx, rtx, rtx, rtx, int,
299 enum g_types, int, int, rtx *));
300 static void update_giv_derive PARAMS ((const struct loop *, rtx));
301 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
302 struct loop_info *));
303 static int basic_induction_var PARAMS ((const struct loop *, rtx,
304 enum machine_mode, rtx, rtx,
305 rtx *, rtx *, rtx **));
306 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
307 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
308 rtx *, rtx *, rtx *, int, int *,
309 enum machine_mode));
310 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
311 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
312 static int check_dbra_loop PARAMS ((struct loop *, int));
313 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
314 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
315 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
316 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
317 static int product_cheap_p PARAMS ((rtx, rtx));
318 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
319 int, int, int));
320 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
321 struct iv_class *, int,
322 basic_block, rtx));
323 static int last_use_this_basic_block PARAMS ((rtx, rtx));
324 static void record_initial PARAMS ((rtx, rtx, void *));
325 static void update_reg_last_use PARAMS ((rtx, rtx));
326 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
327 static void loop_regs_scan PARAMS ((const struct loop *, int));
328 static int count_insns_in_loop PARAMS ((const struct loop *));
329 static void load_mems PARAMS ((const struct loop *));
330 static int insert_loop_mem PARAMS ((rtx *, void *));
331 static int replace_loop_mem PARAMS ((rtx *, void *));
332 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
333 static int replace_loop_reg PARAMS ((rtx *, void *));
334 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
335 static void note_reg_stored PARAMS ((rtx, rtx, void *));
336 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
337 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
338 unsigned int));
339 static int replace_label PARAMS ((rtx *, void *));
340 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
341 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
342 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
343 static void loop_regs_update PARAMS ((const struct loop *, rtx));
344 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
346 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
347 rtx, rtx));
348 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
349 basic_block, rtx, rtx));
350 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
351 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
353 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
354 static void loop_delete_insns PARAMS ((rtx, rtx));
355 static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
356 void debug_ivs PARAMS ((const struct loop *));
357 void debug_iv_class PARAMS ((const struct iv_class *));
358 void debug_biv PARAMS ((const struct induction *));
359 void debug_giv PARAMS ((const struct induction *));
360 void debug_loop PARAMS ((const struct loop *));
361 void debug_loops PARAMS ((const struct loops *));
363 typedef struct rtx_pair
365 rtx r1;
366 rtx r2;
367 } rtx_pair;
369 typedef struct loop_replace_args
371 rtx match;
372 rtx replacement;
373 rtx insn;
374 } loop_replace_args;
376 /* Nonzero iff INSN is between START and END, inclusive. */
377 #define INSN_IN_RANGE_P(INSN, START, END) \
378 (INSN_UID (INSN) < max_uid_for_loop \
379 && INSN_LUID (INSN) >= INSN_LUID (START) \
380 && INSN_LUID (INSN) <= INSN_LUID (END))
382 /* Indirect_jump_in_function is computed once per function. */
383 static int indirect_jump_in_function;
384 static int indirect_jump_in_function_p PARAMS ((rtx));
386 static int compute_luids PARAMS ((rtx, rtx, int));
388 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
389 struct induction *,
390 rtx));
392 /* Benefit penalty, if a giv is not replaceable, i.e. we must emit an insn to
393 copy the value of the strength-reduced giv to its original register. */
394 static int copy_cost;
396 /* Cost of using a register, to normalize the benefits of a giv. */
397 static int reg_address_cost;
399 void
400 init_loop ()
402 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
404 reg_address_cost = address_cost (reg, SImode);
406 copy_cost = COSTS_N_INSNS (1);
409 /* Compute the mapping from uids to luids.
410 LUIDs are numbers assigned to insns, like uids,
411 except that luids increase monotonically through the code.
412 Start at insn START and stop just before END. Assign LUIDs
413 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
414 static int
415 compute_luids (start, end, prev_luid)
416 rtx start, end;
417 int prev_luid;
419 int i;
420 rtx insn;
422 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
424 if (INSN_UID (insn) >= max_uid_for_loop)
425 continue;
426 /* Don't assign luids to line-number NOTEs, so that the distance in
427 luids between two insns is not affected by -g. */
428 if (GET_CODE (insn) != NOTE
429 || NOTE_LINE_NUMBER (insn) <= 0)
430 uid_luid[INSN_UID (insn)] = ++i;
431 else
432 /* Give a line number note the same luid as the preceding insn. */
433 uid_luid[INSN_UID (insn)] = i;
435 return i + 1;
438 /* Entry point of this file. Perform loop optimization
439 on the current function. F is the first insn of the function
440 and DUMPFILE is a stream for output of a trace of actions taken
441 (or 0 if none should be output). */
443 void
444 loop_optimize (f, dumpfile, flags)
445 /* f is the first instruction of a chain of insns for one function */
446 rtx f;
447 FILE *dumpfile;
448 int flags;
450 rtx insn;
451 int i;
452 struct loops loops_data;
453 struct loops *loops = &loops_data;
454 struct loop_info *loops_info;
456 loop_dump_stream = dumpfile;
458 init_recog_no_volatile ();
460 max_reg_before_loop = max_reg_num ();
461 loop_max_reg = max_reg_before_loop;
463 regs_may_share = 0;
465 /* Count the number of loops. */
467 max_loop_num = 0;
468 for (insn = f; insn; insn = NEXT_INSN (insn))
470 if (GET_CODE (insn) == NOTE
471 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
472 max_loop_num++;
475 /* Don't waste time if no loops. */
476 if (max_loop_num == 0)
477 return;
479 loops->num = max_loop_num;
481 /* Get size to use for tables indexed by uids.
482 Leave some space for labels allocated by find_and_verify_loops. */
483 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
485 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
486 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
487 sizeof (struct loop *));
489 /* Allocate storage for array of loops. */
490 loops->array = (struct loop *)
491 xcalloc (loops->num, sizeof (struct loop));
493 /* Find and process each loop.
494 First, find them, and record them in order of their beginnings. */
495 find_and_verify_loops (f, loops);
497 /* Allocate and initialize auxiliary loop information. */
498 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
499 for (i = 0; i < loops->num; i++)
500 loops->array[i].aux = loops_info + i;
502 /* Now find all register lifetimes. This must be done after
503 find_and_verify_loops, because it might reorder the insns in the
504 function. */
505 reg_scan (f, max_reg_before_loop, 1);
507 /* This must occur after reg_scan so that registers created by gcse
508 will have entries in the register tables.
510 We could have added a call to reg_scan after gcse_main in toplev.c,
511 but moving this call to init_alias_analysis is more efficient. */
512 init_alias_analysis ();
514 /* See if we went too far. Note that get_max_uid already returns
515 one more than the maximum uid of all insns. */
516 if (get_max_uid () > max_uid_for_loop)
517 abort ();
518 /* Now reset it to the actual size we need. See above. */
519 max_uid_for_loop = get_max_uid ();
521 /* find_and_verify_loops has already called compute_luids, but it
522 might have rearranged code afterwards, so we need to recompute
523 the luids now. */
524 max_luid = compute_luids (f, NULL_RTX, 0);
526 /* Don't leave gaps in uid_luid for insns that have been
527 deleted. It is possible that the first or last insn
528 using some register has been deleted by cross-jumping.
529 Make sure that uid_luid for that former insn's uid
530 points to the general area where that insn used to be. */
531 for (i = 0; i < max_uid_for_loop; i++)
533 uid_luid[0] = uid_luid[i];
534 if (uid_luid[0] != 0)
535 break;
537 for (i = 0; i < max_uid_for_loop; i++)
538 if (uid_luid[i] == 0)
539 uid_luid[i] = uid_luid[i - 1];
541 /* Determine if the function has an indirect jump. On some systems
542 this prevents low overhead loop instructions from being used. */
543 indirect_jump_in_function = indirect_jump_in_function_p (f);
545 /* Now scan the loops, last ones first, since this means inner ones are done
546 before outer ones. */
547 for (i = max_loop_num - 1; i >= 0; i--)
549 struct loop *loop = &loops->array[i];
551 if (! loop->invalid && loop->end)
552 scan_loop (loop, flags);
555 /* If there were lexical blocks inside the loop, they have been
556 replicated. We will now have more than one NOTE_INSN_BLOCK_BEG
557 and NOTE_INSN_BLOCK_END for each such block. We must duplicate
558 the BLOCKs as well. */
559 if (write_symbols != NO_DEBUG)
560 reorder_blocks ();
562 end_alias_analysis ();
564 /* Clean up. */
565 free (uid_luid);
566 free (uid_loop);
567 free (loops_info);
568 free (loops->array);
571 /* Returns the next insn, in execution order, after INSN. START and
572 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
573 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
574 insn-stream; it is used with loops that are entered near the
575 bottom. */
577 static rtx
578 next_insn_in_loop (loop, insn)
579 const struct loop *loop;
580 rtx insn;
582 insn = NEXT_INSN (insn);
584 if (insn == loop->end)
586 if (loop->top)
587 /* Go to the top of the loop, and continue there. */
588 insn = loop->top;
589 else
590 /* We're done. */
591 insn = NULL_RTX;
594 if (insn == loop->scan_start)
595 /* We're done. */
596 insn = NULL_RTX;
598 return insn;
601 /* Optimize one loop described by LOOP. */
603 /* ??? Could also move memory writes out of loops if the destination address
604 is invariant, the source is invariant, the memory write is not volatile,
605 and if we can prove that no read inside the loop can read this address
606 before the write occurs. If there is a read of this address after the
607 write, then we can also mark the memory read as invariant. */
609 static void
610 scan_loop (loop, flags)
611 struct loop *loop;
612 int flags;
614 struct loop_info *loop_info = LOOP_INFO (loop);
615 struct loop_regs *regs = LOOP_REGS (loop);
616 int i;
617 rtx loop_start = loop->start;
618 rtx loop_end = loop->end;
619 rtx p;
620 /* 1 if we are scanning insns that could be executed zero times. */
621 int maybe_never = 0;
622 /* 1 if we are scanning insns that might never be executed
623 due to a subroutine call which might exit before they are reached. */
624 int call_passed = 0;
625 /* Jump insn that enters the loop, or 0 if control drops in. */
626 rtx loop_entry_jump = 0;
627 /* Number of insns in the loop. */
628 int insn_count;
629 int tem;
630 rtx temp, update_start, update_end;
631 /* The SET from an insn, if it is the only SET in the insn. */
632 rtx set, set1;
633 /* Chain describing insns movable in current loop. */
634 struct loop_movables *movables = LOOP_MOVABLES (loop);
635 /* Ratio of extra register life span we can justify
636 for saving an instruction. More if loop doesn't call subroutines
637 since in that case saving an insn makes more difference
638 and more registers are available. */
639 int threshold;
640 /* Nonzero if we are scanning instructions in a sub-loop. */
641 int loop_depth = 0;
643 loop->top = 0;
645 movables->head = 0;
646 movables->last = 0;
648 /* Determine whether this loop starts with a jump down to a test at
649 the end. This will occur for a small number of loops with a test
650 that is too complex to duplicate in front of the loop.
652 We search for the first insn or label in the loop, skipping NOTEs.
653 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
654 (because we might have a loop executed only once that contains a
655 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
656 (in case we have a degenerate loop).
658 Note that if we mistakenly think that a loop is entered at the top
659 when, in fact, it is entered at the exit test, the only effect will be
660 slightly poorer optimization. Making the opposite error can generate
661 incorrect code. Since very few loops now start with a jump to the
662 exit test, the code here to detect that case is very conservative. */
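/* Illustrative example (added for exposition, not part of the original
   source): when the exit test is not duplicated in front of the loop, a
   loop such as

       while (c)
         body;

   is emitted roughly as

       jump exit_test;  top: body;  exit_test: if (c) jump top;

   so the first real insn after NOTE_INSN_LOOP_BEG is a JUMP_INSN whose
   target is the exit test near loop_end.  That is the case detected
   here.  */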
664 for (p = NEXT_INSN (loop_start);
665 p != loop_end
666 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
667 && (GET_CODE (p) != NOTE
668 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
669 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
670 p = NEXT_INSN (p))
673 loop->scan_start = p;
675 /* If loop end is the end of the current function, then emit a
676 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
677 note insn. This is the position we use when sinking insns out of
678 the loop. */
679 if (NEXT_INSN (loop->end) != 0)
680 loop->sink = NEXT_INSN (loop->end);
681 else
682 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
684 /* Set up variables describing this loop. */
685 prescan_loop (loop);
686 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
688 /* If loop has a jump before the first label,
689 the true entry is the target of that jump.
690 Start scan from there.
691 But record in LOOP->TOP the place where the end-test jumps
692 back to so we can scan that after the end of the loop. */
693 if (GET_CODE (p) == JUMP_INSN)
695 loop_entry_jump = p;
697 /* Loop entry must be an unconditional jump (and not a RETURN) */
698 if (any_uncondjump_p (p)
699 && JUMP_LABEL (p) != 0
700 /* Check to see whether the jump actually
701 jumps out of the loop (meaning it's no loop).
702 This case can happen for things like
703 do {..} while (0). If this label was generated previously
704 by loop, we can't tell anything about it and have to reject
705 the loop. */
706 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
708 loop->top = next_label (loop->scan_start);
709 loop->scan_start = JUMP_LABEL (p);
713 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
714 as required by loop_reg_used_before_p. So skip such loops. (This
715 test may never be true, but it's best to play it safe.)
717 Also, skip loops where we do not start scanning at a label. This
718 test also rejects loops starting with a JUMP_INSN that failed the
719 test above. */
721 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
722 || GET_CODE (loop->scan_start) != CODE_LABEL)
724 if (loop_dump_stream)
725 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
726 INSN_UID (loop_start), INSN_UID (loop_end));
727 return;
730 /* Allocate extra space for REGs that might be created by load_mems.
731 We allocate a little extra slop as well, in the hopes that we
732 won't have to reallocate the regs array. */
733 loop_regs_scan (loop, loop_info->mems_idx + 16);
734 insn_count = count_insns_in_loop (loop);
736 if (loop_dump_stream)
738 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
739 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
740 if (loop->cont)
741 fprintf (loop_dump_stream, "Continue at insn %d.\n",
742 INSN_UID (loop->cont));
745 /* Scan through the loop finding insns that are safe to move.
746 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
747 this reg will be considered invariant for subsequent insns.
748 We consider whether subsequent insns use the reg
749 in deciding whether it is worth actually moving.
751 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
752 and therefore it is possible that the insns we are scanning
753 would never be executed. At such times, we must make sure
754 that it is safe to execute the insn once instead of zero times.
755 When MAYBE_NEVER is 0, all insns will be executed at least once
756 so that is not a problem. */
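/* Illustrative example (added for exposition, not part of the original
   source) of why MAYBE_NEVER matters: in

       for (i = 0; i < n; i++)
         if (d != 0)
           x = k / d;

   the division is loop-invariant, but executing it once before the loop
   could trap when d == 0 even though the original program never reaches
   it.  The may_trap_p test below refuses to move such insns when
   MAYBE_NEVER or CALL_PASSED is set.  */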
758 for (p = next_insn_in_loop (loop, loop->scan_start);
759 p != NULL_RTX;
760 p = next_insn_in_loop (loop, p))
762 if (GET_CODE (p) == INSN
763 && (set = single_set (p))
764 && GET_CODE (SET_DEST (set)) == REG
765 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
766 && SET_DEST (set) != pic_offset_table_rtx
767 #endif
768 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
770 int tem1 = 0;
771 int tem2 = 0;
772 int move_insn = 0;
773 rtx src = SET_SRC (set);
774 rtx dependencies = 0;
776 /* Figure out what to use as a source of this insn. If a REG_EQUIV
777 note is given or if a REG_EQUAL note with a constant operand is
778 specified, use it as the source and mark that we should move
779 this insn by calling emit_move_insn rather than duplicating the
780 insn.
782 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
783 is present. */
784 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
785 if (temp)
786 src = XEXP (temp, 0), move_insn = 1;
787 else
789 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
790 if (temp && CONSTANT_P (XEXP (temp, 0)))
791 src = XEXP (temp, 0), move_insn = 1;
792 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
794 src = XEXP (temp, 0);
795 /* A libcall block can use regs that don't appear in
796 the equivalent expression. To move the libcall,
797 we must move those regs too. */
798 dependencies = libcall_other_reg (p, src);
802 /* For parallels, add any possible uses to the dependencies, as we can't move
803 the insn without resolving them first. */
804 if (GET_CODE (PATTERN (p)) == PARALLEL)
806 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
808 rtx x = XVECEXP (PATTERN (p), 0, i);
809 if (GET_CODE (x) == USE)
810 dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies);
814 /* Don't try to optimize a register that was made
815 by loop-optimization for an inner loop.
816 We don't know its life-span, so we can't compute the benefit. */
817 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
819 else if (/* The register is used in basic blocks other
820 than the one where it is set (meaning that
821 something after this point in the loop might
822 depend on its value before the set). */
823 ! reg_in_basic_block_p (p, SET_DEST (set))
824 /* And the set is not guaranteed to be executed once
825 the loop starts, or the value before the set is
826 needed before the set occurs...
828 ??? Note we have quadratic behaviour here, mitigated
829 by the fact that the previous test will often fail for
830 large loops. Rather than re-scanning the entire loop
831 each time for register usage, we should build tables
832 of the register usage and use them here instead. */
833 && (maybe_never
834 || loop_reg_used_before_p (loop, set, p)))
835 /* It is unsafe to move the set.
837 This code used to consider it OK to move a set of a variable
838 which was not created by the user and not used in an exit test.
839 That behavior is incorrect and was removed. */
841 else if ((tem = loop_invariant_p (loop, src))
842 && (dependencies == 0
843 || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
844 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
845 || (tem1
846 = consec_sets_invariant_p
847 (loop, SET_DEST (set),
848 regs->array[REGNO (SET_DEST (set))].set_in_loop,
849 p)))
850 /* If the insn can cause a trap (such as divide by zero),
851 we can't move it unless it's guaranteed to be executed
852 once the loop is entered. Even a function call might
853 prevent the trap insn from being reached
854 (since it might exit!) */
855 && ! ((maybe_never || call_passed)
856 && may_trap_p (src)))
858 struct movable *m;
859 int regno = REGNO (SET_DEST (set));
861 /* A potential lossage is a case where two insns
862 can be combined as long as they are both in the loop, but
863 we move one of them outside the loop. For large loops,
864 this can lose. The most common case of this is the address
865 of a function being called.
867 Therefore, if this register is marked as being used exactly
868 once if we are in a loop with calls (a "large loop"), see if
869 we can replace the usage of this register with the source
870 of this SET. If we can, delete this insn.
872 Don't do this if P has a REG_RETVAL note or if we have
873 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
875 if (loop_info->has_call
876 && regs->array[regno].single_usage != 0
877 && regs->array[regno].single_usage != const0_rtx
878 && REGNO_FIRST_UID (regno) == INSN_UID (p)
879 && (REGNO_LAST_UID (regno)
880 == INSN_UID (regs->array[regno].single_usage))
881 && regs->array[regno].set_in_loop == 1
882 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
883 && ! side_effects_p (SET_SRC (set))
884 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
885 && (! SMALL_REGISTER_CLASSES
886 || (! (GET_CODE (SET_SRC (set)) == REG
887 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
888 /* This test is not redundant; SET_SRC (set) might be
889 a call-clobbered register and the life of REGNO
890 might span a call. */
891 && ! modified_between_p (SET_SRC (set), p,
892 regs->array[regno].single_usage)
893 && no_labels_between_p (p, regs->array[regno].single_usage)
894 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
895 regs->array[regno].single_usage))
897 /* Replace any usage in a REG_EQUAL note. Must copy the
898 new source, so that we don't get rtx sharing between the
899 SET_SOURCE and REG_NOTES of insn p. */
900 REG_NOTES (regs->array[regno].single_usage)
901 = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
902 SET_DEST (set), copy_rtx (SET_SRC (set)));
904 delete_insn (p);
905 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
906 regs->array[regno+i].set_in_loop = 0;
907 continue;
910 m = (struct movable *) xmalloc (sizeof (struct movable));
911 m->next = 0;
912 m->insn = p;
913 m->set_src = src;
914 m->dependencies = dependencies;
915 m->set_dest = SET_DEST (set);
916 m->force = 0;
917 m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
918 m->done = 0;
919 m->forces = 0;
920 m->partial = 0;
921 m->move_insn = move_insn;
922 m->move_insn_first = 0;
923 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
924 m->savemode = VOIDmode;
925 m->regno = regno;
926 /* Set M->cond if either loop_invariant_p
927 or consec_sets_invariant_p returned 2
928 (only conditionally invariant). */
929 m->cond = ((tem | tem1 | tem2) > 1);
930 m->global = LOOP_REG_GLOBAL_P (loop, regno);
931 m->match = 0;
932 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
933 m->savings = regs->array[regno].n_times_set;
934 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
935 m->savings += libcall_benefit (p);
936 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
937 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
938 /* Add M to the end of the chain MOVABLES. */
939 loop_movables_add (movables, m);
941 if (m->consec > 0)
943 /* It is possible for the first instruction to have a
944 REG_EQUAL note but a non-invariant SET_SRC, so we must
945 remember the status of the first instruction in case
946 the last instruction doesn't have a REG_EQUAL note. */
947 m->move_insn_first = m->move_insn;
949 /* Skip this insn, not checking REG_LIBCALL notes. */
950 p = next_nonnote_insn (p);
951 /* Skip the consecutive insns, if there are any. */
952 p = skip_consec_insns (p, m->consec);
953 /* Back up to the last insn of the consecutive group. */
954 p = prev_nonnote_insn (p);
956 /* We must now reset m->move_insn, m->is_equiv, and possibly
957 m->set_src to correspond to the effects of all the
958 insns. */
959 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
960 if (temp)
961 m->set_src = XEXP (temp, 0), m->move_insn = 1;
962 else
964 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
965 if (temp && CONSTANT_P (XEXP (temp, 0)))
966 m->set_src = XEXP (temp, 0), m->move_insn = 1;
967 else
968 m->move_insn = 0;
971 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
974 /* If this register is always set within a STRICT_LOW_PART
975 or set to zero, then its high bytes are constant.
976 So clear them outside the loop and within the loop
977 just load the low bytes.
978 We must check that the machine has an instruction to do so.
979 Also, if the value loaded into the register
980 depends on the same register, this cannot be done. */
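/* For illustration (added, not part of the original source), the insn
   pair matched below typically has the shape

       (set (reg:SI r) (const_int 0))
       (set (strict_low_part (subreg:HI (reg:SI r) 0)) (mem:HI ...))

   i.e. a full-width clear followed by a narrow store into the low part,
   which together perform a zero extension.  */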
981 else if (SET_SRC (set) == const0_rtx
982 && GET_CODE (NEXT_INSN (p)) == INSN
983 && (set1 = single_set (NEXT_INSN (p)))
984 && GET_CODE (set1) == SET
985 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
986 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
987 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
988 == SET_DEST (set))
989 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
991 int regno = REGNO (SET_DEST (set));
992 if (regs->array[regno].set_in_loop == 2)
994 struct movable *m;
995 m = (struct movable *) xmalloc (sizeof (struct movable));
996 m->next = 0;
997 m->insn = p;
998 m->set_dest = SET_DEST (set);
999 m->dependencies = 0;
1000 m->force = 0;
1001 m->consec = 0;
1002 m->done = 0;
1003 m->forces = 0;
1004 m->move_insn = 0;
1005 m->move_insn_first = 0;
1006 m->partial = 1;
1007 /* If the insn may not be executed on some cycles,
1008 we can't clear the whole reg; clear just high part.
1009 Not even if the reg is used only within this loop.
1010 Consider this:
1011 while (1)
1012 while (s != t) {
1013 if (foo ()) x = *s;
1014 use (x);
1016 Clearing x before the inner loop could clobber a value
1017 being saved from the last time around the outer loop.
1018 However, if the reg is not used outside this loop
1019 and all uses of the register are in the same
1020 basic block as the store, there is no problem.
1022 If this insn was made by loop, we don't know its
1023 INSN_LUID and hence must make a conservative
1024 assumption. */
1025 m->global = (INSN_UID (p) >= max_uid_for_loop
1026 || LOOP_REG_GLOBAL_P (loop, regno)
1027 || (labels_in_range_p
1028 (p, REGNO_FIRST_LUID (regno))));
1029 if (maybe_never && m->global)
1030 m->savemode = GET_MODE (SET_SRC (set1));
1031 else
1032 m->savemode = VOIDmode;
1033 m->regno = regno;
1034 m->cond = 0;
1035 m->match = 0;
1036 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1037 m->savings = 1;
1038 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1039 regs->array[regno+i].set_in_loop = -1;
1040 /* Add M to the end of the chain MOVABLES. */
1041 loop_movables_add (movables, m);
1045 /* Past a call insn, we get to insns which might not be executed
1046 because the call might exit. This matters for insns that trap.
1047 Constant and pure call insns always return, so they don't count. */
1048 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1049 call_passed = 1;
1050 /* Past a label or a jump, we get to insns for which we
1051 can't count on whether or how many times they will be
1052 executed during each iteration. Therefore, we can
1053 only move out sets of trivial variables
1054 (those not used after the loop). */
1055 /* Similar code appears twice in strength_reduce. */
1056 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1057 /* If we enter the loop in the middle, and scan around to the
1058 beginning, don't set maybe_never for that. This must be an
1059 unconditional jump, otherwise the code at the top of the
1060 loop might never be executed. Unconditional jumps are
1061 followed by a barrier then the loop_end. */
1062 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1063 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1064 && any_uncondjump_p (p)))
1065 maybe_never = 1;
1066 else if (GET_CODE (p) == NOTE)
1068 /* At the virtual top of a converted loop, insns are again known to
1069 be executed: logically, the loop begins here even though the exit
1070 code has been duplicated. */
1071 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1072 maybe_never = call_passed = 0;
1073 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1074 loop_depth++;
1075 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1076 loop_depth--;
1080 /* If one movable subsumes another, ignore that other. */
1082 ignore_some_movables (movables);
1084 /* For each movable insn, see if the reg that it loads, when it
1085 dies, leads right into another conditionally movable insn.
1086 If so, record that the second insn "forces" the first one,
1087 since the second can be moved only if the first is. */
1089 force_movables (movables);
1091 /* See if there are multiple movable insns that load the same value.
1092 If there are, make all but the first point at the first one
1093 through the `match' field, and add the priorities of them
1094 all together as the priority of the first. */
1096 combine_movables (movables, regs);
1098 /* Now consider each movable insn to decide whether it is worth moving.
1099 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1101 Generally this increases code size, so do not move movables when
1102 optimizing for code size. */
1104 if (! optimize_size)
1105 move_movables (loop, movables, threshold, insn_count);
1107 /* Now candidates that are still negative are those not moved.
1108 Change regs->array[I].set_in_loop to indicate that those are not actually
1109 invariant. */
1110 for (i = 0; i < regs->num; i++)
1111 if (regs->array[i].set_in_loop < 0)
1112 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1114 /* Now that we've moved some things out of the loop, we might be able to
1115 hoist even more memory references. */
1116 load_mems (loop);
1118 /* Recalculate regs->array if load_mems has created new registers. */
1119 if (max_reg_num () > regs->num)
1120 loop_regs_scan (loop, 0);
1122 for (update_start = loop_start;
1123 PREV_INSN (update_start)
1124 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1125 update_start = PREV_INSN (update_start))
1127 update_end = NEXT_INSN (loop_end);
1129 reg_scan_update (update_start, update_end, loop_max_reg);
1130 loop_max_reg = max_reg_num ();
1132 if (flag_strength_reduce)
1134 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1135 /* Ensure our label doesn't go away. */
1136 LABEL_NUSES (update_end)++;
1138 strength_reduce (loop, flags);
1140 reg_scan_update (update_start, update_end, loop_max_reg);
1141 loop_max_reg = max_reg_num ();
1143 if (update_end && GET_CODE (update_end) == CODE_LABEL
1144 && --LABEL_NUSES (update_end) == 0)
1145 delete_related_insns (update_end);
1149 /* The movable information is required for strength reduction. */
1150 loop_movables_free (movables);
1152 free (regs->array);
1153 regs->array = 0;
1154 regs->num = 0;
1157 /* Add elements to *OUTPUT to record all the pseudo-regs
1158 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1160 void
1161 record_excess_regs (in_this, not_in_this, output)
1162 rtx in_this, not_in_this;
1163 rtx *output;
1165 enum rtx_code code;
1166 const char *fmt;
1167 int i;
1169 code = GET_CODE (in_this);
1171 switch (code)
1173 case PC:
1174 case CC0:
1175 case CONST_INT:
1176 case CONST_DOUBLE:
1177 case CONST:
1178 case SYMBOL_REF:
1179 case LABEL_REF:
1180 return;
1182 case REG:
1183 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1184 && ! reg_mentioned_p (in_this, not_in_this))
1185 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1186 return;
1188 default:
1189 break;
1192 fmt = GET_RTX_FORMAT (code);
1193 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1195 int j;
1197 switch (fmt[i])
1199 case 'E':
1200 for (j = 0; j < XVECLEN (in_this, i); j++)
1201 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1202 break;
1204 case 'e':
1205 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1206 break;
1211 /* Check what regs are referred to in the libcall block ending with INSN,
1212 aside from those mentioned in the equivalent value.
1213 If there are none, return 0.
1214 If there are one or more, return an EXPR_LIST containing all of them. */
1217 libcall_other_reg (insn, equiv)
1218 rtx insn, equiv;
1220 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1221 rtx p = XEXP (note, 0);
1222 rtx output = 0;
1224 /* First, find all the regs used in the libcall block
1225 that are not mentioned as inputs to the result. */
1227 while (p != insn)
1229 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1230 || GET_CODE (p) == CALL_INSN)
1231 record_excess_regs (PATTERN (p), equiv, &output);
1232 p = NEXT_INSN (p);
1235 return output;
1238 /* Return 1 if all uses of REG
1239 are between INSN and the end of the basic block. */
1241 static int
1242 reg_in_basic_block_p (insn, reg)
1243 rtx insn, reg;
1245 int regno = REGNO (reg);
1246 rtx p;
1248 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1249 return 0;
1251 /* Search this basic block for the already recorded last use of the reg. */
1252 for (p = insn; p; p = NEXT_INSN (p))
1254 switch (GET_CODE (p))
1256 case NOTE:
1257 break;
1259 case INSN:
1260 case CALL_INSN:
1261 /* Ordinary insn: if this is the last use, we win. */
1262 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1263 return 1;
1264 break;
1266 case JUMP_INSN:
1267 /* Jump insn: if this is the last use, we win. */
1268 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1269 return 1;
1270 /* Otherwise, it's the end of the basic block, so we lose. */
1271 return 0;
1273 case CODE_LABEL:
1274 case BARRIER:
1275 /* It's the end of the basic block, so we lose. */
1276 return 0;
1278 default:
1279 break;
1283 /* The "last use" that was recorded can't be found after the first
1284 use. This can happen when the last use was deleted while
1285 processing an inner loop, this inner loop was then completely
1286 unrolled, and the outer loop is always exited after the inner loop,
1287 so that everything after the first use becomes a single basic block. */
1288 return 1;
1291 /* Compute the benefit of eliminating the insns in the block whose
1292 last insn is LAST. This may be a group of insns used to compute a
1293 value directly or can contain a library call. */
1295 static int
1296 libcall_benefit (last)
1297 rtx last;
1299 rtx insn;
1300 int benefit = 0;
1302 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1303 insn != last; insn = NEXT_INSN (insn))
1305 if (GET_CODE (insn) == CALL_INSN)
1306 benefit += 10; /* Assume at least this many insns in a library
1307 routine. */
1308 else if (GET_CODE (insn) == INSN
1309 && GET_CODE (PATTERN (insn)) != USE
1310 && GET_CODE (PATTERN (insn)) != CLOBBER)
1311 benefit++;
1314 return benefit;
1317 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1319 static rtx
1320 skip_consec_insns (insn, count)
1321 rtx insn;
1322 int count;
1324 for (; count > 0; count--)
1326 rtx temp;
1328 /* If first insn of libcall sequence, skip to end. */
1329 /* Do this at start of loop, since INSN is guaranteed to
1330 be an insn here. */
1331 if (GET_CODE (insn) != NOTE
1332 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1333 insn = XEXP (temp, 0);
1336 insn = NEXT_INSN (insn);
1337 while (GET_CODE (insn) == NOTE);
1340 return insn;
1343 /* Ignore any movable whose insn falls within a libcall
1344 which is part of another movable.
1345 We make use of the fact that the movable for the libcall value
1346 was made later and so appears later on the chain. */
1348 static void
1349 ignore_some_movables (movables)
1350 struct loop_movables *movables;
1352 struct movable *m, *m1;
1354 for (m = movables->head; m; m = m->next)
1356 /* Is this a movable for the value of a libcall? */
1357 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1358 if (note)
1360 rtx insn;
1361 /* Check for earlier movables inside that range,
1362 and mark them invalid. We cannot use LUIDs here because
1363 insns created by loop.c for prior loops don't have LUIDs.
1364 Rather than reject all such insns from movables, we just
1365 explicitly check each insn in the libcall (since invariant
1366 libcalls aren't that common). */
1367 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1368 for (m1 = movables->head; m1 != m; m1 = m1->next)
1369 if (m1->insn == insn)
1370 m1->done = 1;
1375 /* For each movable insn, see if the reg that it loads, when it
1376 dies, leads right into another conditionally movable insn.
1377 If so, record that the second insn "forces" the first one,
1378 since the second can be moved only if the first is. */
1380 static void
1381 force_movables (movables)
1382 struct loop_movables *movables;
1384 struct movable *m, *m1;
1386 for (m1 = movables->head; m1; m1 = m1->next)
1387 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1388 if (!m1->partial && !m1->done)
1390 int regno = m1->regno;
1391 for (m = m1->next; m; m = m->next)
1392 /* ??? Could this be a bug? What if CSE caused the
1393 register of M1 to be used after this insn?
1394 Since CSE does not update regno_last_uid,
1395 this insn M->insn might not be where it dies.
1396 But very likely this doesn't matter; what matters is
1397 that M's reg is computed from M1's reg. */
1398 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1399 && !m->done)
1400 break;
1401 if (m != 0 && m->set_src == m1->set_dest
1402 /* If m->consec, m->set_src isn't valid. */
1403 && m->consec == 0)
1404 m = 0;
1406 /* Increase the priority of moving the first insn
1407 since it permits the second to be moved as well. */
1408 if (m != 0)
1410 m->forces = m1;
1411 m1->lifetime += m->lifetime;
1412 m1->savings += m->savings;
1417 /* Find invariant expressions that are equal and can be combined into
1418 one register. */
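/* Illustrative example (added for exposition, not part of the original
   source): if a loop contains two invariant loads of the same value,

       (set (reg 100) (const_int 10))
       (set (reg 105) (const_int 10))

   the later movable can be marked as matching the first, their savings
   added together, and only one register set before the loop.  (Note that
   the general combination is currently disabled below; see the HACK
   comment.)  */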
1420 static void
1421 combine_movables (movables, regs)
1422 struct loop_movables *movables;
1423 struct loop_regs *regs;
1425 struct movable *m;
1426 char *matched_regs = (char *) xmalloc (regs->num);
1427 enum machine_mode mode;
1429 /* Regs that are set more than once are not allowed to match
1430 or be matched. I'm no longer sure why not. */
1431 /* Perhaps testing m->consec_sets would be more appropriate here? */
1433 for (m = movables->head; m; m = m->next)
1434 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1435 && !m->partial)
1437 struct movable *m1;
1438 int regno = m->regno;
1440 memset (matched_regs, 0, regs->num);
1441 matched_regs[regno] = 1;
1443 /* We want later insns to match the first one. Don't make the first
1444 one match any later ones. So start this loop at m->next. */
1445 for (m1 = m->next; m1; m1 = m1->next)
1446 /* ??? HACK! move_movables does not verify that the replacement
1447 is valid, which can have disastrous effects with hard regs
1448 and match_dup. Turn combination off for now. */
1449 if (0 && m != m1 && m1->match == 0
1450 && regs->array[m1->regno].n_times_set == 1
1451 /* A reg used outside the loop mustn't be eliminated. */
1452 && !m1->global
1453 /* A reg used for zero-extending mustn't be eliminated. */
1454 && !m1->partial
1455 && (matched_regs[m1->regno]
1458 /* Can combine regs with different modes loaded from the
1459 same constant only if the modes are the same or
1460 if both are integer modes with M wider or the same
1461 width as M1. The check for integer is redundant, but
1462 safe, since the only case of differing destination
1463 modes with equal sources is when both sources are
1464 VOIDmode, i.e., CONST_INT. */
1465 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1466 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1467 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1468 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1469 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1470 /* See if the source of M1 says it matches M. */
1471 && ((GET_CODE (m1->set_src) == REG
1472 && matched_regs[REGNO (m1->set_src)])
1473 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1474 movables, regs))))
1475 && ((m->dependencies == m1->dependencies)
1476 || rtx_equal_p (m->dependencies, m1->dependencies)))
1478 m->lifetime += m1->lifetime;
1479 m->savings += m1->savings;
1480 m1->done = 1;
1481 m1->match = m;
1482 matched_regs[m1->regno] = 1;
1486 /* Now combine the regs used for zero-extension.
1487 This can be done for those not marked `global'
1488 provided their lives don't overlap. */
1490 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1491 mode = GET_MODE_WIDER_MODE (mode))
1493 struct movable *m0 = 0;
1495 /* Combine all the registers for extension from mode MODE.
1496 Don't combine any that are used outside this loop. */
1497 for (m = movables->head; m; m = m->next)
1498 if (m->partial && ! m->global
1499 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1501 struct movable *m1;
1503 int first = REGNO_FIRST_LUID (m->regno);
1504 int last = REGNO_LAST_LUID (m->regno);
1506 if (m0 == 0)
1508 /* First one: don't check for overlap, just record it. */
1509 m0 = m;
1510 continue;
1513 /* Make sure they extend to the same mode.
1514 (Almost always true.) */
1515 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1516 continue;
1518 /* We already have one: check for overlap with those
1519 already combined together. */
1520 for (m1 = movables->head; m1 != m; m1 = m1->next)
1521 if (m1 == m0 || (m1->partial && m1->match == m0))
1522 if (! (REGNO_FIRST_LUID (m1->regno) > last
1523 || REGNO_LAST_LUID (m1->regno) < first))
1524 goto overlap;
1526 /* No overlap: we can combine this with the others. */
1527 m0->lifetime += m->lifetime;
1528 m0->savings += m->savings;
1529 m->done = 1;
1530 m->match = m0;
1532 overlap:
1537 /* Clean up. */
1538 free (matched_regs);
1541 /* Returns the number of movable instructions in LOOP that were not
1542 moved outside the loop. */
1544 static int
1545 num_unmoved_movables (loop)
1546 const struct loop *loop;
1548 int num = 0;
1549 struct movable *m;
1551 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1552 if (!m->done)
1553 ++num;
1555 return num;
1559 /* Return 1 if regs X and Y will become the same if moved. */
1561 static int
1562 regs_match_p (x, y, movables)
1563 rtx x, y;
1564 struct loop_movables *movables;
1566 unsigned int xn = REGNO (x);
1567 unsigned int yn = REGNO (y);
1568 struct movable *mx, *my;
1570 for (mx = movables->head; mx; mx = mx->next)
1571 if (mx->regno == xn)
1572 break;
1574 for (my = movables->head; my; my = my->next)
1575 if (my->regno == yn)
1576 break;
1578 return (mx && my
1579 && ((mx->match == my->match && mx->match != 0)
1580 || mx->match == my
1581 || mx == my->match));
1584 /* Return 1 if X and Y are identical-looking rtx's.
1585 This is the Lisp function EQUAL for rtx arguments.
1587 If two registers are matching movables or a movable register and an
1588 equivalent constant, consider them equal. */
1590 static int
1591 rtx_equal_for_loop_p (x, y, movables, regs)
1592 rtx x, y;
1593 struct loop_movables *movables;
1594 struct loop_regs *regs;
1596 int i;
1597 int j;
1598 struct movable *m;
1599 enum rtx_code code;
1600 const char *fmt;
1602 if (x == y)
1603 return 1;
1604 if (x == 0 || y == 0)
1605 return 0;
1607 code = GET_CODE (x);
1609 /* If we have a register and a constant, they may sometimes be
1610 equal. */
1611 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1612 && CONSTANT_P (y))
1614 for (m = movables->head; m; m = m->next)
1615 if (m->move_insn && m->regno == REGNO (x)
1616 && rtx_equal_p (m->set_src, y))
1617 return 1;
1619 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1620 && CONSTANT_P (x))
1622 for (m = movables->head; m; m = m->next)
1623 if (m->move_insn && m->regno == REGNO (y)
1624 && rtx_equal_p (m->set_src, x))
1625 return 1;
1628 /* Otherwise, rtx's of different codes cannot be equal. */
1629 if (code != GET_CODE (y))
1630 return 0;
1632 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1633 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1635 if (GET_MODE (x) != GET_MODE (y))
1636 return 0;
1638 /* These three types of rtx's can be compared nonrecursively. */
1639 if (code == REG)
1640 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1642 if (code == LABEL_REF)
1643 return XEXP (x, 0) == XEXP (y, 0);
1644 if (code == SYMBOL_REF)
1645 return XSTR (x, 0) == XSTR (y, 0);
1647 /* Compare the elements. If any pair of corresponding elements
1648 fails to match, return 0 for the whole thing. */
1650 fmt = GET_RTX_FORMAT (code);
1651 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1653 switch (fmt[i])
1655 case 'w':
1656 if (XWINT (x, i) != XWINT (y, i))
1657 return 0;
1658 break;
1660 case 'i':
1661 if (XINT (x, i) != XINT (y, i))
1662 return 0;
1663 break;
1665 case 'E':
1666 /* Two vectors must have the same length. */
1667 if (XVECLEN (x, i) != XVECLEN (y, i))
1668 return 0;
1670 /* And the corresponding elements must match. */
1671 for (j = 0; j < XVECLEN (x, i); j++)
1672 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1673 movables, regs) == 0)
1674 return 0;
1675 break;
1677 case 'e':
1678 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1679 == 0)
1680 return 0;
1681 break;
1683 case 's':
1684 if (strcmp (XSTR (x, i), XSTR (y, i)))
1685 return 0;
1686 break;
1688 case 'u':
1689 /* These are just backpointers, so they don't matter. */
1690 break;
1692 case '0':
1693 break;
1695 /* It is believed that rtx's at this level will never
1696 contain anything but integers and other rtx's,
1697 except for within LABEL_REFs and SYMBOL_REFs. */
1698 default:
1699 abort ();
1702 return 1;
1705 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1706 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1707 references is incremented once for each added note. */
1709 static void
1710 add_label_notes (x, insns)
1711 rtx x;
1712 rtx insns;
1714 enum rtx_code code = GET_CODE (x);
1715 int i, j;
1716 const char *fmt;
1717 rtx insn;
1719 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1721 /* This code used to ignore labels that referred to dispatch tables to
1722 avoid flow generating (slightly) worse code.
1724 We no longer ignore such label references (see LABEL_REF handling in
1725 mark_jump_label for additional information). */
1726 for (insn = insns; insn; insn = NEXT_INSN (insn))
1727 if (reg_mentioned_p (XEXP (x, 0), insn))
1729 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1730 REG_NOTES (insn));
1731 if (LABEL_P (XEXP (x, 0)))
1732 LABEL_NUSES (XEXP (x, 0))++;
1736 fmt = GET_RTX_FORMAT (code);
1737 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1739 if (fmt[i] == 'e')
1740 add_label_notes (XEXP (x, i), insns);
1741 else if (fmt[i] == 'E')
1742 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1743 add_label_notes (XVECEXP (x, i, j), insns);
1747 /* Scan MOVABLES, and move the insns that deserve to be moved.
1748 If two matching movables are combined, replace one reg with the
1749 other throughout. */
1751 static void
1752 move_movables (loop, movables, threshold, insn_count)
1753 struct loop *loop;
1754 struct loop_movables *movables;
1755 int threshold;
1756 int insn_count;
1758 struct loop_regs *regs = LOOP_REGS (loop);
1759 int nregs = regs->num;
1760 rtx new_start = 0;
1761 struct movable *m;
1762 rtx p;
1763 rtx loop_start = loop->start;
1764 rtx loop_end = loop->end;
1765 /* Map of pseudo-register replacements to handle combining
1766 when we move several insns that load the same value
1767 into different pseudo-registers. */
1768 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1769 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1771 for (m = movables->head; m; m = m->next)
1773 /* Describe this movable insn. */
1775 if (loop_dump_stream)
1777 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1778 INSN_UID (m->insn), m->regno, m->lifetime);
1779 if (m->consec > 0)
1780 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1781 if (m->cond)
1782 fprintf (loop_dump_stream, "cond ");
1783 if (m->force)
1784 fprintf (loop_dump_stream, "force ");
1785 if (m->global)
1786 fprintf (loop_dump_stream, "global ");
1787 if (m->done)
1788 fprintf (loop_dump_stream, "done ");
1789 if (m->move_insn)
1790 fprintf (loop_dump_stream, "move-insn ");
1791 if (m->match)
1792 fprintf (loop_dump_stream, "matches %d ",
1793 INSN_UID (m->match->insn));
1794 if (m->forces)
1795 fprintf (loop_dump_stream, "forces %d ",
1796 INSN_UID (m->forces->insn));
1799 /* Ignore the insn if it's already done (it matched something else).
1800 Otherwise, see if it is now safe to move. */
1802 if (!m->done
1803 && (! m->cond
1804 || (1 == loop_invariant_p (loop, m->set_src)
1805 && (m->dependencies == 0
1806 || 1 == loop_invariant_p (loop, m->dependencies))
1807 && (m->consec == 0
1808 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1809 m->consec + 1,
1810 m->insn))))
1811 && (! m->forces || m->forces->done))
1813 int regno;
1814 rtx p;
1815 int savings = m->savings;
1817 /* We have an insn that is safe to move.
1818 Compute its desirability. */
1820 p = m->insn;
1821 regno = m->regno;
1823 if (loop_dump_stream)
1824 fprintf (loop_dump_stream, "savings %d ", savings);
1826 if (regs->array[regno].moved_once && loop_dump_stream)
1827 fprintf (loop_dump_stream, "halved since already moved ");
1829 /* An insn MUST be moved if we already moved something else
1830 which is safe only if this one is moved too: that is,
1831 if already_moved[REGNO] is nonzero. */
1833 /* An insn is desirable to move if the new lifetime of the
1834 register is no more than THRESHOLD times the old lifetime.
1835 If it's not desirable, it means the loop is so big
1836 that moving won't speed things up much,
1837 and it is liable to make register usage worse. */
1839 /* It is also desirable to move if it can be moved at no
1840 extra cost because something else was already moved. */
1842 if (already_moved[regno]
1843 || flag_move_all_movables
1844 || (threshold * savings * m->lifetime) >=
1845 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1846 || (m->forces && m->forces->done
1847 && regs->array[m->forces->regno].n_times_set == 1))
1849 int count;
1850 struct movable *m1;
1851 rtx first = NULL_RTX;
1853 /* Now move the insns that set the reg. */
1855 if (m->partial && m->match)
1857 rtx newpat, i1;
1858 rtx r1, r2;
1859 /* Find the end of this chain of matching regs.
1860 Thus, we load each reg in the chain from that one reg.
1861 And that reg is loaded with 0 directly,
1862 since it has ->match == 0. */
1863 for (m1 = m; m1->match; m1 = m1->match);
1864 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1865 SET_DEST (PATTERN (m1->insn)));
1866 i1 = loop_insn_hoist (loop, newpat);
1868 /* Mark the moved, invariant reg as being allowed to
1869 share a hard reg with the other matching invariant. */
1870 REG_NOTES (i1) = REG_NOTES (m->insn);
1871 r1 = SET_DEST (PATTERN (m->insn));
1872 r2 = SET_DEST (PATTERN (m1->insn));
1873 regs_may_share
1874 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1875 gen_rtx_EXPR_LIST (VOIDmode, r2,
1876 regs_may_share));
1877 delete_insn (m->insn);
1879 if (new_start == 0)
1880 new_start = i1;
1882 if (loop_dump_stream)
1883 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1885 /* If we are to re-generate the item being moved with a
1886 new move insn, first delete what we have and then emit
1887 the move insn before the loop. */
1888 else if (m->move_insn)
1890 rtx i1, temp, seq;
1892 for (count = m->consec; count >= 0; count--)
1894 /* If this is the first insn of a library call sequence,
1895 skip to the end. */
1896 if (GET_CODE (p) != NOTE
1897 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1898 p = XEXP (temp, 0);
1900 /* If this is the last insn of a libcall sequence, then
1901 delete every insn in the sequence except the last.
1902 The last insn is handled in the normal manner. */
1903 if (GET_CODE (p) != NOTE
1904 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1906 temp = XEXP (temp, 0);
1907 while (temp != p)
1908 temp = delete_insn (temp);
1911 temp = p;
1912 p = delete_insn (p);
1914 /* simplify_giv_expr expects that it can walk the insns
1915 at m->insn forwards and see this old sequence we are
1916 tossing here. delete_insn does preserve the next
1917 pointers, but when we skip over a NOTE we must fix
1918 it up. Otherwise that code walks into the non-deleted
1919 insn stream. */
1920 while (p && GET_CODE (p) == NOTE)
1921 p = NEXT_INSN (temp) = NEXT_INSN (p);
1924 start_sequence ();
1925 emit_move_insn (m->set_dest, m->set_src);
1926 temp = get_insns ();
1927 seq = gen_sequence ();
1928 end_sequence ();
1930 add_label_notes (m->set_src, temp);
1932 i1 = loop_insn_hoist (loop, seq);
1933 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1934 set_unique_reg_note (i1,
1935 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1936 m->set_src);
1938 if (loop_dump_stream)
1939 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1941 /* The more regs we move, the less we like moving them. */
1942 threshold -= 3;
1944 else
1946 for (count = m->consec; count >= 0; count--)
1948 rtx i1, temp;
1950 /* If first insn of libcall sequence, skip to end. */
1951 /* Do this at start of loop, since p is guaranteed to
1952 be an insn here. */
1953 if (GET_CODE (p) != NOTE
1954 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1955 p = XEXP (temp, 0);
1957 /* If last insn of libcall sequence, move all
1958 insns except the last before the loop. The last
1959 insn is handled in the normal manner. */
1960 if (GET_CODE (p) != NOTE
1961 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1963 rtx fn_address = 0;
1964 rtx fn_reg = 0;
1965 rtx fn_address_insn = 0;
1967 first = 0;
1968 for (temp = XEXP (temp, 0); temp != p;
1969 temp = NEXT_INSN (temp))
1971 rtx body;
1972 rtx n;
1973 rtx next;
1975 if (GET_CODE (temp) == NOTE)
1976 continue;
1978 body = PATTERN (temp);
1980 /* Find the next insn after TEMP,
1981 not counting USE or NOTE insns. */
1982 for (next = NEXT_INSN (temp); next != p;
1983 next = NEXT_INSN (next))
1984 if (! (GET_CODE (next) == INSN
1985 && GET_CODE (PATTERN (next)) == USE)
1986 && GET_CODE (next) != NOTE)
1987 break;
1989 /* If that is the call, this may be the insn
1990 that loads the function address.
1992 Extract the function address from the insn
1993 that loads it into a register.
1994 If this insn was cse'd, we get incorrect code.
1996 So emit a new move insn that copies the
1997 function address into the register that the
1998 call insn will use. flow.c will delete any
1999 redundant stores that we have created. */
2000 if (GET_CODE (next) == CALL_INSN
2001 && GET_CODE (body) == SET
2002 && GET_CODE (SET_DEST (body)) == REG
2003 && (n = find_reg_note (temp, REG_EQUAL,
2004 NULL_RTX)))
2006 fn_reg = SET_SRC (body);
2007 if (GET_CODE (fn_reg) != REG)
2008 fn_reg = SET_DEST (body);
2009 fn_address = XEXP (n, 0);
2010 fn_address_insn = temp;
2012 /* We have the call insn.
2013 If it uses the register we suspect it might,
2014 load it with the correct address directly. */
2015 if (GET_CODE (temp) == CALL_INSN
2016 && fn_address != 0
2017 && reg_referenced_p (fn_reg, body))
2018 loop_insn_emit_after (loop, 0, fn_address_insn,
2019 gen_move_insn
2020 (fn_reg, fn_address));
2022 if (GET_CODE (temp) == CALL_INSN)
2024 i1 = loop_call_insn_hoist (loop, body);
2025 /* Because the USAGE information potentially
2026 contains objects other than hard registers
2027 we need to copy it. */
2028 if (CALL_INSN_FUNCTION_USAGE (temp))
2029 CALL_INSN_FUNCTION_USAGE (i1)
2030 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2032 else
2033 i1 = loop_insn_hoist (loop, body);
2034 if (first == 0)
2035 first = i1;
2036 if (temp == fn_address_insn)
2037 fn_address_insn = i1;
2038 REG_NOTES (i1) = REG_NOTES (temp);
2039 REG_NOTES (temp) = NULL;
2040 delete_insn (temp);
2042 if (new_start == 0)
2043 new_start = first;
2045 if (m->savemode != VOIDmode)
2047 /* P sets REG to zero; but we should clear only
2048 the bits that are not covered by the mode
2049 m->savemode. */
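/* ANDing REG with the mask ((HOST_WIDE_INT) 1 << GET_MODE_BITSIZE
   (m->savemode)) - 1 below keeps the low-order bits covered by
   SAVEMODE and clears everything above them.  */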
2050 rtx reg = m->set_dest;
2051 rtx sequence;
2052 rtx tem;
2054 start_sequence ();
2055 tem = expand_simple_binop
2056 (GET_MODE (reg), AND, reg,
2057 GEN_INT ((((HOST_WIDE_INT) 1
2058 << GET_MODE_BITSIZE (m->savemode)))
2059 - 1),
2060 reg, 1, OPTAB_LIB_WIDEN);
2061 if (tem == 0)
2062 abort ();
2063 if (tem != reg)
2064 emit_move_insn (reg, tem);
2065 sequence = gen_sequence ();
2066 end_sequence ();
2067 i1 = loop_insn_hoist (loop, sequence);
2069 else if (GET_CODE (p) == CALL_INSN)
2071 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2072 /* Because the USAGE information potentially
2073 contains objects other than hard registers
2074 we need to copy it. */
2075 if (CALL_INSN_FUNCTION_USAGE (p))
2076 CALL_INSN_FUNCTION_USAGE (i1)
2077 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2079 else if (count == m->consec && m->move_insn_first)
2081 rtx seq;
2082 /* The SET_SRC might not be invariant, so we must
2083 use the REG_EQUAL note. */
2084 start_sequence ();
2085 emit_move_insn (m->set_dest, m->set_src);
2086 temp = get_insns ();
2087 seq = gen_sequence ();
2088 end_sequence ();
2090 add_label_notes (m->set_src, temp);
2092 i1 = loop_insn_hoist (loop, seq);
2093 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2094 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2095 : REG_EQUAL, m->set_src);
2097 else
2098 i1 = loop_insn_hoist (loop, PATTERN (p));
2100 if (REG_NOTES (i1) == 0)
2102 REG_NOTES (i1) = REG_NOTES (p);
2103 REG_NOTES (p) = NULL;
2105 /* If there is a REG_EQUAL note present whose value
2106 is not loop invariant, then delete it, since it
2107 may cause problems with later optimization passes.
2108 It is possible for cse to create such notes
2109 like this as a result of record_jump_cond. */
2111 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2112 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2113 remove_note (i1, temp);
2116 if (new_start == 0)
2117 new_start = i1;
2119 if (loop_dump_stream)
2120 fprintf (loop_dump_stream, " moved to %d",
2121 INSN_UID (i1));
2123 /* If library call, now fix the REG_NOTES that contain
2124 insn pointers, namely REG_LIBCALL on FIRST
2125 and REG_RETVAL on I1. */
2126 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2128 XEXP (temp, 0) = first;
2129 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2130 XEXP (temp, 0) = i1;
2133 temp = p;
2134 delete_insn (p);
2135 p = NEXT_INSN (p);
2137 /* simplify_giv_expr expects that it can walk the insns
2138 at m->insn forwards and see this old sequence we are
2139 tossing here. delete_insn does preserve the next
2140 pointers, but when we skip over a NOTE we must fix
2141 it up. Otherwise that code walks into the non-deleted
2142 insn stream. */
2143 while (p && GET_CODE (p) == NOTE)
2144 p = NEXT_INSN (temp) = NEXT_INSN (p);
2147 /* The more regs we move, the less we like moving them. */
2148 threshold -= 3;
2151 /* Any other movable that loads the same register
2152 MUST be moved. */
2153 already_moved[regno] = 1;
2155 /* This reg has been moved out of one loop. */
2156 regs->array[regno].moved_once = 1;
2158 /* The reg set here is now invariant. */
2159 if (! m->partial)
2161 int i;
2162 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2163 regs->array[regno+i].set_in_loop = 0;
2166 m->done = 1;
2168 /* Change the length-of-life info for the register
2169 to say it lives at least the full length of this loop.
2170 This will help guide optimizations in outer loops. */
2172 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2173 /* This is the old insn before all the moved insns.
2174 We can't use the moved insn because it is out of range
2175 in uid_luid. Only the old insns have luids. */
2176 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2177 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2178 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2180 /* Combine with this moved insn any other matching movables. */
2182 if (! m->partial)
2183 for (m1 = movables->head; m1; m1 = m1->next)
2184 if (m1->match == m)
2186 rtx temp;
2188 /* Schedule the reg loaded by M1
2189 for replacement so that it shares the reg of M.
2190 If the modes differ (only possible in restricted
2191 circumstances), make a SUBREG.
2193 Note this assumes that the target dependent files
2194 treat REG and SUBREG equally, including within
2195 GO_IF_LEGITIMATE_ADDRESS and in all the
2196 predicates since we never verify that replacing the
2197 original register with a SUBREG results in a
2198 recognizable insn. */
2199 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2200 reg_map[m1->regno] = m->set_dest;
2201 else
2202 reg_map[m1->regno]
2203 = gen_lowpart_common (GET_MODE (m1->set_dest),
2204 m->set_dest);
2206 /* Get rid of the matching insn
2207 and prevent further processing of it. */
2208 m1->done = 1;
2210 /* if library call, delete all insns. */
2211 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2212 NULL_RTX)))
2213 delete_insn_chain (XEXP (temp, 0), m1->insn);
2214 else
2215 delete_insn (m1->insn);
2217 /* Any other movable that loads the same register
2218 MUST be moved. */
2219 already_moved[m1->regno] = 1;
2221 /* The reg merged here is now invariant,
2222 if the reg it matches is invariant. */
2223 if (! m->partial)
2225 int i;
2226 for (i = 0;
2227 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2228 i++)
2229 regs->array[m1->regno+i].set_in_loop = 0;
2233 else if (loop_dump_stream)
2234 fprintf (loop_dump_stream, "not desirable");
2236 else if (loop_dump_stream && !m->match)
2237 fprintf (loop_dump_stream, "not safe");
2239 if (loop_dump_stream)
2240 fprintf (loop_dump_stream, "\n");
2243 if (new_start == 0)
2244 new_start = loop_start;
2246 /* Go through all the instructions in the loop, making
2247 all the register substitutions scheduled in REG_MAP. */
2248 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2249 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2250 || GET_CODE (p) == CALL_INSN)
2252 replace_regs (PATTERN (p), reg_map, nregs, 0);
2253 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2254 INSN_CODE (p) = -1;
2257 /* Clean up. */
2258 free (reg_map);
2259 free (already_moved);
2263 static void
2264 loop_movables_add (movables, m)
2265 struct loop_movables *movables;
2266 struct movable *m;
2268 if (movables->head == 0)
2269 movables->head = m;
2270 else
2271 movables->last->next = m;
2272 movables->last = m;
2276 static void
2277 loop_movables_free (movables)
2278 struct loop_movables *movables;
2280 struct movable *m;
2281 struct movable *m_next;
2283 for (m = movables->head; m; m = m_next)
2285 m_next = m->next;
2286 free (m);
2290 #if 0
2291 /* Scan X and replace the address of any MEM in it with ADDR.
2292 REG is the address that MEM should have before the replacement. */
2294 static void
2295 replace_call_address (x, reg, addr)
2296 rtx x, reg, addr;
2298 enum rtx_code code;
2299 int i;
2300 const char *fmt;
2302 if (x == 0)
2303 return;
2304 code = GET_CODE (x);
2305 switch (code)
2307 case PC:
2308 case CC0:
2309 case CONST_INT:
2310 case CONST_DOUBLE:
2311 case CONST:
2312 case SYMBOL_REF:
2313 case LABEL_REF:
2314 case REG:
2315 return;
2317 case SET:
2318 /* Short cut for very common case. */
2319 replace_call_address (XEXP (x, 1), reg, addr);
2320 return;
2322 case CALL:
2323 /* Short cut for very common case. */
2324 replace_call_address (XEXP (x, 0), reg, addr);
2325 return;
2327 case MEM:
2328 /* If this MEM uses a reg other than the one we expected,
2329 something is wrong. */
2330 if (XEXP (x, 0) != reg)
2331 abort ();
2332 XEXP (x, 0) = addr;
2333 return;
2335 default:
2336 break;
2339 fmt = GET_RTX_FORMAT (code);
2340 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2342 if (fmt[i] == 'e')
2343 replace_call_address (XEXP (x, i), reg, addr);
2344 else if (fmt[i] == 'E')
2346 int j;
2347 for (j = 0; j < XVECLEN (x, i); j++)
2348 replace_call_address (XVECEXP (x, i, j), reg, addr);
2352 #endif
2354 /* Return the number of memory refs to addresses that vary
2355 in the rtx X. */
2357 static int
2358 count_nonfixed_reads (loop, x)
2359 const struct loop *loop;
2360 rtx x;
2362 enum rtx_code code;
2363 int i;
2364 const char *fmt;
2365 int value;
2367 if (x == 0)
2368 return 0;
2370 code = GET_CODE (x);
2371 switch (code)
2373 case PC:
2374 case CC0:
2375 case CONST_INT:
2376 case CONST_DOUBLE:
2377 case CONST:
2378 case SYMBOL_REF:
2379 case LABEL_REF:
2380 case REG:
2381 return 0;
2383 case MEM:
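/* A MEM counts as one nonfixed read when its address is not
   unconditionally loop invariant; also count any nonfixed reads
   inside the address itself.  */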
2384 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2385 + count_nonfixed_reads (loop, XEXP (x, 0)));
2387 default:
2388 break;
2391 value = 0;
2392 fmt = GET_RTX_FORMAT (code);
2393 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2395 if (fmt[i] == 'e')
2396 value += count_nonfixed_reads (loop, XEXP (x, i));
2397 if (fmt[i] == 'E')
2399 int j;
2400 for (j = 0; j < XVECLEN (x, i); j++)
2401 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2404 return value;
2407 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2408 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2409 `unknown_address_altered', `unknown_constant_address_altered', and
2410 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2411 list `store_mems' in LOOP. */
2413 static void
2414 prescan_loop (loop)
2415 struct loop *loop;
2417 int level = 1;
2418 rtx insn;
2419 struct loop_info *loop_info = LOOP_INFO (loop);
2420 rtx start = loop->start;
2421 rtx end = loop->end;
2422 /* The label after END. Jumping here is just like falling off the
2423 end of the loop. We use next_nonnote_insn instead of next_label
2424 as a hedge against the (pathological) case where some actual insn
2425 might end up between the two. */
2426 rtx exit_target = next_nonnote_insn (end);
2428 loop_info->has_indirect_jump = indirect_jump_in_function;
2429 loop_info->pre_header_has_call = 0;
2430 loop_info->has_call = 0;
2431 loop_info->has_nonconst_call = 0;
2432 loop_info->has_volatile = 0;
2433 loop_info->has_tablejump = 0;
2434 loop_info->has_multiple_exit_targets = 0;
2435 loop->level = 1;
2437 loop_info->unknown_address_altered = 0;
2438 loop_info->unknown_constant_address_altered = 0;
2439 loop_info->store_mems = NULL_RTX;
2440 loop_info->first_loop_store_insn = NULL_RTX;
2441 loop_info->mems_idx = 0;
2442 loop_info->num_mem_sets = 0;
2445 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2446 insn = PREV_INSN (insn))
2448 if (GET_CODE (insn) == CALL_INSN)
2450 loop_info->pre_header_has_call = 1;
2451 break;
2455 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2456 insn = NEXT_INSN (insn))
2458 switch (GET_CODE (insn))
2460 case NOTE:
2461 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2463 ++level;
2464 /* Count number of loops contained in this one. */
2465 loop->level++;
2467 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2468 --level;
2469 break;
2471 case CALL_INSN:
2472 if (! CONST_OR_PURE_CALL_P (insn))
2474 loop_info->unknown_address_altered = 1;
2475 loop_info->has_nonconst_call = 1;
2477 loop_info->has_call = 1;
2478 if (can_throw_internal (insn))
2479 loop_info->has_multiple_exit_targets = 1;
2480 break;
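/* For a jump insn, check whether it can transfer control out of the
   loop somewhere other than the single exit label just past END; if
   so, the loop has multiple exit targets.  */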
2482 case JUMP_INSN:
2483 if (! loop_info->has_multiple_exit_targets)
2485 rtx set = pc_set (insn);
2487 if (set)
2489 rtx label1, label2;
2491 if (GET_CODE (SET_SRC (set)) == IF_THEN_ELSE)
2493 label1 = XEXP (SET_SRC (set), 1);
2494 label2 = XEXP (SET_SRC (set), 2);
2496 else
2498 label1 = SET_SRC (PATTERN (insn));
2499 label2 = NULL_RTX;
2504 if (label1 && label1 != pc_rtx)
2506 if (GET_CODE (label1) != LABEL_REF)
2508 /* Something tricky. */
2509 loop_info->has_multiple_exit_targets = 1;
2510 break;
2512 else if (XEXP (label1, 0) != exit_target
2513 && LABEL_OUTSIDE_LOOP_P (label1))
2515 /* A jump outside the current loop. */
2516 loop_info->has_multiple_exit_targets = 1;
2517 break;
2521 label1 = label2;
2522 label2 = NULL_RTX;
2524 while (label1);
2526 else
2528 /* A return, or something tricky. */
2529 loop_info->has_multiple_exit_targets = 1;
2532 /* FALLTHRU */
2534 case INSN:
2535 if (volatile_refs_p (PATTERN (insn)))
2536 loop_info->has_volatile = 1;
2538 if (GET_CODE (insn) == JUMP_INSN
2539 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2540 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2541 loop_info->has_tablejump = 1;
2543 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2544 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2545 loop_info->first_loop_store_insn = insn;
2547 if (flag_non_call_exceptions && can_throw_internal (insn))
2548 loop_info->has_multiple_exit_targets = 1;
2549 break;
2551 default:
2552 break;
2556 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2557 if (/* An exception thrown by a called function might land us
2558 anywhere. */
2559 ! loop_info->has_nonconst_call
2560 /* We don't want loads for MEMs moved to a location before the
2561 one at which their stack memory becomes allocated. (Note
2562 that this is not a problem for malloc, etc., since those
2563 require actual function calls.) */
2564 && ! current_function_calls_alloca
2565 /* There are ways to leave the loop other than falling off the
2566 end. */
2567 && ! loop_info->has_multiple_exit_targets)
2568 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2569 insn = NEXT_INSN (insn))
2570 for_each_rtx (&insn, insert_loop_mem, loop_info);
2572 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2573 that loop_invariant_p and load_mems can use true_dependence
2574 to determine what is really clobbered. */
2575 if (loop_info->unknown_address_altered)
2577 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2579 loop_info->store_mems
2580 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2582 if (loop_info->unknown_constant_address_altered)
2584 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2586 RTX_UNCHANGING_P (mem) = 1;
2587 loop_info->store_mems
2588 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2592 /* Scan the function looking for loops. Record the start and end of each loop.
2593 Also mark as invalid loops any loops that contain a setjmp or are branched
2594 to from outside the loop. */
2596 static void
2597 find_and_verify_loops (f, loops)
2598 rtx f;
2599 struct loops *loops;
2601 rtx insn;
2602 rtx label;
2603 int num_loops;
2604 struct loop *current_loop;
2605 struct loop *next_loop;
2606 struct loop *loop;
2608 num_loops = loops->num;
2610 compute_luids (f, NULL_RTX, 0);
2612 /* If there are jumps to undefined labels,
2613 treat them as jumps out of any/all loops.
2614 This also avoids writing past end of tables when there are no loops. */
2615 uid_loop[0] = NULL;
2617 /* Find boundaries of loops, mark which loops are contained within
2618 loops, and invalidate loops that have setjmp. */
2620 num_loops = 0;
2621 current_loop = NULL;
2622 for (insn = f; insn; insn = NEXT_INSN (insn))
2624 if (GET_CODE (insn) == NOTE)
2625 switch (NOTE_LINE_NUMBER (insn))
2627 case NOTE_INSN_LOOP_BEG:
2628 next_loop = loops->array + num_loops;
2629 next_loop->num = num_loops;
2630 num_loops++;
2631 next_loop->start = insn;
2632 next_loop->outer = current_loop;
2633 current_loop = next_loop;
2634 break;
2636 case NOTE_INSN_LOOP_CONT:
2637 current_loop->cont = insn;
2638 break;
2640 case NOTE_INSN_LOOP_VTOP:
2641 current_loop->vtop = insn;
2642 break;
2644 case NOTE_INSN_LOOP_END:
2645 if (! current_loop)
2646 abort ();
2648 current_loop->end = insn;
2649 current_loop = current_loop->outer;
2650 break;
2652 default:
2653 break;
2656 if (GET_CODE (insn) == CALL_INSN
2657 && find_reg_note (insn, REG_SETJMP, NULL))
2659 /* In this case, we must invalidate our current loop and any
2660 enclosing loop. */
2661 for (loop = current_loop; loop; loop = loop->outer)
2663 loop->invalid = 1;
2664 if (loop_dump_stream)
2665 fprintf (loop_dump_stream,
2666 "\nLoop at %d ignored due to setjmp.\n",
2667 INSN_UID (loop->start));
2671 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2672 enclosing loop, but this doesn't matter. */
2673 uid_loop[INSN_UID (insn)] = current_loop;
2676 /* Any loop containing a label used in an initializer must be invalidated,
2677 because it can be jumped into from anywhere. */
2679 for (label = forced_labels; label; label = XEXP (label, 1))
2681 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2682 loop; loop = loop->outer)
2683 loop->invalid = 1;
2686 /* Any loop containing a label used for an exception handler must be
2687 invalidated, because it can be jumped into from anywhere. */
2689 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2691 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2692 loop; loop = loop->outer)
2693 loop->invalid = 1;
2696 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2697 loop that it is not contained within, that loop is marked invalid.
2698 If any INSN or CALL_INSN uses a label's address, then the loop containing
2699 that label is marked invalid, because it could be jumped into from
2700 anywhere.
2702 Also look for blocks of code ending in an unconditional branch that
2703 exits the loop. If such a block is surrounded by a conditional
2704 branch around the block, move the block elsewhere (see below) and
2705 invert the jump to point to the code block. This may eliminate a
2706 label in our loop and will simplify processing by both us and a
2707 possible second cse pass. */
2709 for (insn = f; insn; insn = NEXT_INSN (insn))
2710 if (INSN_P (insn))
2712 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2714 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2716 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2717 if (note)
2719 for (loop = uid_loop[INSN_UID (XEXP (note, 0))];
2720 loop; loop = loop->outer)
2721 loop->invalid = 1;
2725 if (GET_CODE (insn) != JUMP_INSN)
2726 continue;
2728 mark_loop_jump (PATTERN (insn), this_loop);
2730 /* See if this is an unconditional branch outside the loop. */
2731 if (this_loop
2732 && (GET_CODE (PATTERN (insn)) == RETURN
2733 || (any_uncondjump_p (insn)
2734 && onlyjump_p (insn)
2735 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2736 != this_loop)))
2737 && get_max_uid () < max_uid_for_loop)
2739 rtx p;
2740 rtx our_next = next_real_insn (insn);
2741 rtx last_insn_to_move = NEXT_INSN (insn);
2742 struct loop *dest_loop;
2743 struct loop *outer_loop = NULL;
2745 /* Go backwards until we reach the start of the loop, a label,
2746 or a JUMP_INSN. */
2747 for (p = PREV_INSN (insn);
2748 GET_CODE (p) != CODE_LABEL
2749 && ! (GET_CODE (p) == NOTE
2750 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2751 && GET_CODE (p) != JUMP_INSN;
2752 p = PREV_INSN (p))
2755 /* Check for the case where we have a jump to an inner nested
2756 loop, and do not perform the optimization in that case. */
2758 if (JUMP_LABEL (insn))
2760 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2761 if (dest_loop)
2763 for (outer_loop = dest_loop; outer_loop;
2764 outer_loop = outer_loop->outer)
2765 if (outer_loop == this_loop)
2766 break;
2770 /* Make sure that the target of P is within the current loop. */
2772 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2773 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2774 outer_loop = this_loop;
2776 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2777 we have a block of code to try to move.
2779 We look backward and then forward from the target of INSN
2780 to find a BARRIER at the same loop depth as the target.
2781 If we find such a BARRIER, we make a new label for the start
2782 of the block, invert the jump in P and point it to that label,
2783 and move the block of code to the spot we found. */
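/* A rough sketch of the transformation (labels invented for
   illustration):

     before (inside the loop):        after:
       p:  if (cond) goto L1            p:  if (! cond) goto NEW
           goto EXIT                    L1: ...
           barrier
       L1: ...

   where the block "goto EXIT; barrier" now lives outside the loop
   under the new label NEW.  */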
2785 if (! outer_loop
2786 && GET_CODE (p) == JUMP_INSN
2787 && JUMP_LABEL (p) != 0
2788 /* Just ignore jumps to labels that were never emitted.
2789 These always indicate compilation errors. */
2790 && INSN_UID (JUMP_LABEL (p)) != 0
2791 && any_condjump_p (p) && onlyjump_p (p)
2792 && next_real_insn (JUMP_LABEL (p)) == our_next
2793 /* If it's not safe to move the sequence, then we
2794 mustn't try. */
2795 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2796 &last_insn_to_move))
2798 rtx target
2799 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2800 struct loop *target_loop = uid_loop[INSN_UID (target)];
2801 rtx loc, loc2;
2802 rtx tmp;
2804 /* Search for possible garbage past the conditional jumps
2805 and look for the last barrier. */
2806 for (tmp = last_insn_to_move;
2807 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2808 if (GET_CODE (tmp) == BARRIER)
2809 last_insn_to_move = tmp;
2811 for (loc = target; loc; loc = PREV_INSN (loc))
2812 if (GET_CODE (loc) == BARRIER
2813 /* Don't move things inside a tablejump. */
2814 && ((loc2 = next_nonnote_insn (loc)) == 0
2815 || GET_CODE (loc2) != CODE_LABEL
2816 || (loc2 = next_nonnote_insn (loc2)) == 0
2817 || GET_CODE (loc2) != JUMP_INSN
2818 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2819 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2820 && uid_loop[INSN_UID (loc)] == target_loop)
2821 break;
2823 if (loc == 0)
2824 for (loc = target; loc; loc = NEXT_INSN (loc))
2825 if (GET_CODE (loc) == BARRIER
2826 /* Don't move things inside a tablejump. */
2827 && ((loc2 = next_nonnote_insn (loc)) == 0
2828 || GET_CODE (loc2) != CODE_LABEL
2829 || (loc2 = next_nonnote_insn (loc2)) == 0
2830 || GET_CODE (loc2) != JUMP_INSN
2831 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2832 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2833 && uid_loop[INSN_UID (loc)] == target_loop)
2834 break;
2836 if (loc)
2838 rtx cond_label = JUMP_LABEL (p);
2839 rtx new_label = get_label_after (p);
2841 /* Ensure our label doesn't go away. */
2842 LABEL_NUSES (cond_label)++;
2844 /* Verify that uid_loop is large enough and that
2845 we can invert P. */
2846 if (invert_jump (p, new_label, 1))
2848 rtx q, r;
2850 /* If no suitable BARRIER was found, create a suitable
2851 one before TARGET. Since TARGET is a fall through
2852 path, we'll need to insert a jump around our block
2853 and add a BARRIER before TARGET.
2855 This creates an extra unconditional jump outside
2856 the loop. However, the benefits of removing rarely
2857 executed instructions from inside the loop usually
2858 outweigh the cost of the extra unconditional jump
2859 outside the loop. */
2860 if (loc == 0)
2862 rtx temp;
2864 temp = gen_jump (JUMP_LABEL (insn));
2865 temp = emit_jump_insn_before (temp, target);
2866 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2867 LABEL_NUSES (JUMP_LABEL (insn))++;
2868 loc = emit_barrier_before (target);
2871 /* Include the BARRIER after INSN and copy the
2872 block after LOC. */
2873 if (squeeze_notes (&new_label, &last_insn_to_move))
2874 abort ();
2875 reorder_insns (new_label, last_insn_to_move, loc);
2877 /* All those insns are now in TARGET_LOOP. */
2878 for (q = new_label;
2879 q != NEXT_INSN (last_insn_to_move);
2880 q = NEXT_INSN (q))
2881 uid_loop[INSN_UID (q)] = target_loop;
2883 /* The label jumped to by INSN is no longer a loop
2884 exit. Unless INSN does not have a label (e.g.,
2885 it is a RETURN insn), search loop->exit_labels
2886 to find its label_ref, and remove it. Also turn
2887 off LABEL_OUTSIDE_LOOP_P bit. */
2888 if (JUMP_LABEL (insn))
2890 for (q = 0, r = this_loop->exit_labels;
2892 q = r, r = LABEL_NEXTREF (r))
2893 if (XEXP (r, 0) == JUMP_LABEL (insn))
2895 LABEL_OUTSIDE_LOOP_P (r) = 0;
2896 if (q)
2897 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2898 else
2899 this_loop->exit_labels = LABEL_NEXTREF (r);
2900 break;
2903 for (loop = this_loop; loop && loop != target_loop;
2904 loop = loop->outer)
2905 loop->exit_count--;
2907 /* If we didn't find it, then something is
2908 wrong. */
2909 if (! r)
2910 abort ();
2913 /* P is now a jump outside the loop, so it must be put
2914 in loop->exit_labels, and marked as such.
2915 The easiest way to do this is to just call
2916 mark_loop_jump again for P. */
2917 mark_loop_jump (PATTERN (p), this_loop);
2919 /* If INSN now jumps to the insn after it,
2920 delete INSN. */
2921 if (JUMP_LABEL (insn) != 0
2922 && (next_real_insn (JUMP_LABEL (insn))
2923 == next_real_insn (insn)))
2924 delete_related_insns (insn);
2927 /* Continue the loop after where the conditional
2928 branch used to jump, since the only branch insn
2929 in the block (if it still remains) is an inter-loop
2930 branch and hence needs no processing. */
2931 insn = NEXT_INSN (cond_label);
2933 if (--LABEL_NUSES (cond_label) == 0)
2934 delete_related_insns (cond_label);
2936 /* This loop will be continued with NEXT_INSN (insn). */
2937 insn = PREV_INSN (insn);
2944 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2945 loops it is contained in, mark the target loop invalid.
2947 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2949 static void
2950 mark_loop_jump (x, loop)
2951 rtx x;
2952 struct loop *loop;
2954 struct loop *dest_loop;
2955 struct loop *outer_loop;
2956 int i;
2958 switch (GET_CODE (x))
2960 case PC:
2961 case USE:
2962 case CLOBBER:
2963 case REG:
2964 case MEM:
2965 case CONST_INT:
2966 case CONST_DOUBLE:
2967 case RETURN:
2968 return;
2970 case CONST:
2971 /* There could be a label reference in here. */
2972 mark_loop_jump (XEXP (x, 0), loop);
2973 return;
2975 case PLUS:
2976 case MINUS:
2977 case MULT:
2978 mark_loop_jump (XEXP (x, 0), loop);
2979 mark_loop_jump (XEXP (x, 1), loop);
2980 return;
2982 case LO_SUM:
2983 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2984 mark_loop_jump (XEXP (x, 1), loop);
2985 return;
2987 case SIGN_EXTEND:
2988 case ZERO_EXTEND:
2989 mark_loop_jump (XEXP (x, 0), loop);
2990 return;
2992 case LABEL_REF:
2993 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
2995 /* Link together all labels that branch outside the loop. This
2996 is used by final_[bg]iv_value and the loop unrolling code. Also
2997 mark this LABEL_REF so we know that this branch should predict
2998 false. */
3000 /* A check to make sure the label is not in an inner nested loop,
3001 since this does not count as a loop exit. */
3002 if (dest_loop)
3004 for (outer_loop = dest_loop; outer_loop;
3005 outer_loop = outer_loop->outer)
3006 if (outer_loop == loop)
3007 break;
3009 else
3010 outer_loop = NULL;
3012 if (loop && ! outer_loop)
3014 LABEL_OUTSIDE_LOOP_P (x) = 1;
3015 LABEL_NEXTREF (x) = loop->exit_labels;
3016 loop->exit_labels = x;
3018 for (outer_loop = loop;
3019 outer_loop && outer_loop != dest_loop;
3020 outer_loop = outer_loop->outer)
3021 outer_loop->exit_count++;
3024 /* If this is inside a loop, but not in the current loop or one enclosed
3025 by it, it invalidates at least one loop. */
3027 if (! dest_loop)
3028 return;
3030 /* We must invalidate every nested loop containing the target of this
3031 label, except those that also contain the jump insn. */
3033 for (; dest_loop; dest_loop = dest_loop->outer)
3035 /* Stop when we reach a loop that also contains the jump insn. */
3036 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3037 if (dest_loop == outer_loop)
3038 return;
3040 /* If we get here, we know we need to invalidate a loop. */
3041 if (loop_dump_stream && ! dest_loop->invalid)
3042 fprintf (loop_dump_stream,
3043 "\nLoop at %d ignored due to multiple entry points.\n",
3044 INSN_UID (dest_loop->start));
3046 dest_loop->invalid = 1;
3048 return;
3050 case SET:
3051 /* If this is not setting pc, ignore. */
3052 if (SET_DEST (x) == pc_rtx)
3053 mark_loop_jump (SET_SRC (x), loop);
3054 return;
3056 case IF_THEN_ELSE:
3057 mark_loop_jump (XEXP (x, 1), loop);
3058 mark_loop_jump (XEXP (x, 2), loop);
3059 return;
3061 case PARALLEL:
3062 case ADDR_VEC:
3063 for (i = 0; i < XVECLEN (x, 0); i++)
3064 mark_loop_jump (XVECEXP (x, 0, i), loop);
3065 return;
3067 case ADDR_DIFF_VEC:
3068 for (i = 0; i < XVECLEN (x, 1); i++)
3069 mark_loop_jump (XVECEXP (x, 1, i), loop);
3070 return;
3072 default:
3073 /* Strictly speaking this is not a jump into the loop, only a possible
3074 jump out of the loop. However, we have no way to link the destination
3075 of this jump onto the list of exit labels. To be safe we mark this
3076 loop and any containing loops as invalid. */
3077 if (loop)
3079 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3081 if (loop_dump_stream && ! outer_loop->invalid)
3082 fprintf (loop_dump_stream,
3083 "\nLoop at %d ignored due to unknown exit jump.\n",
3084 INSN_UID (outer_loop->start));
3085 outer_loop->invalid = 1;
3088 return;
3092 /* Return nonzero if there is a label in the range from
3093 insn INSN to and including the insn whose luid is END.
3094 INSN must have an assigned luid (i.e., it must not have
3095 been previously created by loop.c). */
3097 static int
3098 labels_in_range_p (insn, end)
3099 rtx insn;
3100 int end;
3102 while (insn && INSN_LUID (insn) <= end)
3104 if (GET_CODE (insn) == CODE_LABEL)
3105 return 1;
3106 insn = NEXT_INSN (insn);
3109 return 0;
3112 /* Record that a memory reference X is being set. */
3114 static void
3115 note_addr_stored (x, y, data)
3116 rtx x;
3117 rtx y ATTRIBUTE_UNUSED;
3118 void *data ATTRIBUTE_UNUSED;
3120 struct loop_info *loop_info = data;
3122 if (x == 0 || GET_CODE (x) != MEM)
3123 return;
3125 /* Count number of memory writes.
3126 This affects heuristics in strength_reduce. */
3127 loop_info->num_mem_sets++;
3129 /* BLKmode MEM means all memory is clobbered. */
3130 if (GET_MODE (x) == BLKmode)
3132 if (RTX_UNCHANGING_P (x))
3133 loop_info->unknown_constant_address_altered = 1;
3134 else
3135 loop_info->unknown_address_altered = 1;
3137 return;
3140 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3141 loop_info->store_mems);
3144 /* X is a value modified by an INSN that references a biv inside a loop
3145 exit test (i.e., X is somehow related to the value of the biv). If X
3146 is a pseudo that is used more than once, then the biv is (effectively)
3147 used more than once. DATA is a pointer to a loop_regs structure. */
3149 static void
3150 note_set_pseudo_multiple_uses (x, y, data)
3151 rtx x;
3152 rtx y ATTRIBUTE_UNUSED;
3153 void *data;
3155 struct loop_regs *regs = (struct loop_regs *) data;
3157 if (x == 0)
3158 return;
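/* Strip wrappers such as STRICT_LOW_PART, SIGN_EXTRACT, ZERO_EXTRACT and
   SUBREG to get at the register actually being set.  */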
3160 while (GET_CODE (x) == STRICT_LOW_PART
3161 || GET_CODE (x) == SIGN_EXTRACT
3162 || GET_CODE (x) == ZERO_EXTRACT
3163 || GET_CODE (x) == SUBREG)
3164 x = XEXP (x, 0);
3166 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3167 return;
3169 /* If we do not have usage information, or if we know the register
3170 is used more than once, note that fact for check_dbra_loop. */
3171 if (REGNO (x) >= max_reg_before_loop
3172 || ! regs->array[REGNO (x)].single_usage
3173 || regs->array[REGNO (x)].single_usage == const0_rtx)
3174 regs->multiple_uses = 1;
3177 /* Return nonzero if the rtx X is invariant over the current loop.
3179 The value is 2 if we refer to something only conditionally invariant.
3181 A memory ref is invariant if it is not volatile and does not conflict
3182 with anything stored in `loop_info->store_mems'. */
3185 loop_invariant_p (loop, x)
3186 const struct loop *loop;
3187 rtx x;
3189 struct loop_info *loop_info = LOOP_INFO (loop);
3190 struct loop_regs *regs = LOOP_REGS (loop);
3191 int i;
3192 enum rtx_code code;
3193 const char *fmt;
3194 int conditional = 0;
3195 rtx mem_list_entry;
3197 if (x == 0)
3198 return 1;
3199 code = GET_CODE (x);
3200 switch (code)
3202 case CONST_INT:
3203 case CONST_DOUBLE:
3204 case SYMBOL_REF:
3205 case CONST:
3206 return 1;
3208 case LABEL_REF:
3209 /* A LABEL_REF is normally invariant, however, if we are unrolling
3210 loops, and this label is inside the loop, then it isn't invariant.
3211 This is because each unrolled copy of the loop body will have
3212 a copy of this label. If this was invariant, then an insn loading
3213 the address of this label into a register might get moved outside
3214 the loop, and then each loop body would end up using the same label.
3216 We don't know the loop bounds here though, so just fail for all
3217 labels. */
3218 if (flag_unroll_loops)
3219 return 0;
3220 else
3221 return 1;
3223 case PC:
3224 case CC0:
3225 case UNSPEC_VOLATILE:
3226 return 0;
3228 case REG:
3229 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3230 since the reg might be set by initialization within the loop. */
3232 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3233 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3234 && ! current_function_has_nonlocal_goto)
3235 return 1;
3237 if (LOOP_INFO (loop)->has_call
3238 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3239 return 0;
3241 if (regs->array[REGNO (x)].set_in_loop < 0)
3242 return 2;
3244 return regs->array[REGNO (x)].set_in_loop == 0;
3246 case MEM:
3247 /* Volatile memory references must be rejected. Do this before
3248 checking for read-only items, so that volatile read-only items
3249 will be rejected also. */
3250 if (MEM_VOLATILE_P (x))
3251 return 0;
3253 /* See if there is any dependence between a store and this load. */
3254 mem_list_entry = loop_info->store_mems;
3255 while (mem_list_entry)
3257 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3258 x, rtx_varies_p))
3259 return 0;
3261 mem_list_entry = XEXP (mem_list_entry, 1);
3264 /* It's not invalidated by a store in memory
3265 but we must still verify the address is invariant. */
3266 break;
3268 case ASM_OPERANDS:
3269 /* Don't mess with insns declared volatile. */
3270 if (MEM_VOLATILE_P (x))
3271 return 0;
3272 break;
3274 default:
3275 break;
3278 fmt = GET_RTX_FORMAT (code);
3279 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3281 if (fmt[i] == 'e')
3283 int tem = loop_invariant_p (loop, XEXP (x, i));
3284 if (tem == 0)
3285 return 0;
3286 if (tem == 2)
3287 conditional = 1;
3289 else if (fmt[i] == 'E')
3291 int j;
3292 for (j = 0; j < XVECLEN (x, i); j++)
3294 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3295 if (tem == 0)
3296 return 0;
3297 if (tem == 2)
3298 conditional = 1;
3304 return 1 + conditional;
3307 /* Return nonzero if all the insns in the loop that set REG
3308 are INSN and the immediately following insns,
3309 and if each of those insns sets REG in an invariant way
3310 (not counting uses of REG in them).
3312 The value is 2 if some of these insns are only conditionally invariant.
3314 We assume that INSN itself is the first set of REG
3315 and that its source is invariant. */
3317 static int
3318 consec_sets_invariant_p (loop, reg, n_sets, insn)
3319 const struct loop *loop;
3320 int n_sets;
3321 rtx reg, insn;
3323 struct loop_regs *regs = LOOP_REGS (loop);
3324 rtx p = insn;
3325 unsigned int regno = REGNO (reg);
3326 rtx temp;
3327 /* Number of sets we have to insist on finding after INSN. */
3328 int count = n_sets - 1;
3329 int old = regs->array[regno].set_in_loop;
3330 int value = 0;
3331 int this;
3333 /* If N_SETS hit the limit, we can't rely on its value. */
3334 if (n_sets == 127)
3335 return 0;
3337 regs->array[regno].set_in_loop = 0;
3339 while (count > 0)
3341 enum rtx_code code;
3342 rtx set;
3344 p = NEXT_INSN (p);
3345 code = GET_CODE (p);
3347 /* If library call, skip to end of it. */
3348 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3349 p = XEXP (temp, 0);
3351 this = 0;
3352 if (code == INSN
3353 && (set = single_set (p))
3354 && GET_CODE (SET_DEST (set)) == REG
3355 && REGNO (SET_DEST (set)) == regno)
3357 this = loop_invariant_p (loop, SET_SRC (set));
3358 if (this != 0)
3359 value |= this;
3360 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3362 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3363 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3364 notes are OK. */
3365 this = (CONSTANT_P (XEXP (temp, 0))
3366 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3367 && loop_invariant_p (loop, XEXP (temp, 0))));
3368 if (this != 0)
3369 value |= this;
3372 if (this != 0)
3373 count--;
3374 else if (code != NOTE)
3376 regs->array[regno].set_in_loop = old;
3377 return 0;
3381 regs->array[regno].set_in_loop = old;
3382 /* If loop_invariant_p ever returned 2, we return 2. */
3383 return 1 + (value & 2);
3386 #if 0
3387 /* I don't think this condition is sufficient to allow INSN
3388 to be moved, so we no longer test it. */
3390 /* Return 1 if all insns in the basic block of INSN and following INSN
3391 that set REG are invariant according to TABLE. */
3393 static int
3394 all_sets_invariant_p (reg, insn, table)
3395 rtx reg, insn;
3396 short *table;
3398 rtx p = insn;
3399 int regno = REGNO (reg);
3401 while (1)
3403 enum rtx_code code;
3404 p = NEXT_INSN (p);
3405 code = GET_CODE (p);
3406 if (code == CODE_LABEL || code == JUMP_INSN)
3407 return 1;
3408 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3409 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3410 && REGNO (SET_DEST (PATTERN (p))) == regno)
3412 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3413 return 0;
3417 #endif /* 0 */
3419 /* Look at all uses (not sets) of registers in X. For each, if it is
3420 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3421 a different insn, set USAGE[REGNO] to const0_rtx. */
3423 static void
3424 find_single_use_in_loop (regs, insn, x)
3425 struct loop_regs *regs;
3426 rtx insn;
3427 rtx x;
3429 enum rtx_code code = GET_CODE (x);
3430 const char *fmt = GET_RTX_FORMAT (code);
3431 int i, j;
3433 if (code == REG)
3434 regs->array[REGNO (x)].single_usage
3435 = (regs->array[REGNO (x)].single_usage != 0
3436 && regs->array[REGNO (x)].single_usage != insn)
3437 ? const0_rtx : insn;
3439 else if (code == SET)
3441 /* Don't count SET_DEST if it is a REG; otherwise count things
3442 in SET_DEST because if a register is partially modified, it won't
3443 show up as a potential movable so we don't care how USAGE is set
3444 for it. */
3445 if (GET_CODE (SET_DEST (x)) != REG)
3446 find_single_use_in_loop (regs, insn, SET_DEST (x));
3447 find_single_use_in_loop (regs, insn, SET_SRC (x));
3449 else
3450 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3452 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3453 find_single_use_in_loop (regs, insn, XEXP (x, i));
3454 else if (fmt[i] == 'E')
3455 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3456 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3460 /* Count and record any set in X which is contained in INSN. Update
3461 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3462 in X. */
3464 static void
3465 count_one_set (regs, insn, x, last_set)
3466 struct loop_regs *regs;
3467 rtx insn, x;
3468 rtx *last_set;
3470 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3471 /* Don't move a reg that has an explicit clobber.
3472 It's not worth the pain to try to do it correctly. */
3473 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3475 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3477 rtx dest = SET_DEST (x);
3478 while (GET_CODE (dest) == SUBREG
3479 || GET_CODE (dest) == ZERO_EXTRACT
3480 || GET_CODE (dest) == SIGN_EXTRACT
3481 || GET_CODE (dest) == STRICT_LOW_PART)
3482 dest = XEXP (dest, 0);
3483 if (GET_CODE (dest) == REG)
3485 int i;
3486 int regno = REGNO (dest);
3487 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3489 /* If this is the first setting of this reg
3490 in current basic block, and it was set before,
3491 it must be set in two basic blocks, so it cannot
3492 be moved out of the loop. */
3493 if (regs->array[regno].set_in_loop > 0
3494 && last_set == 0)
3495 regs->array[regno+i].may_not_optimize = 1;
3496 /* If this is not first setting in current basic block,
3497 see if reg was used in between previous one and this.
3498 If so, neither one can be moved. */
3499 if (last_set[regno] != 0
3500 && reg_used_between_p (dest, last_set[regno], insn))
3501 regs->array[regno+i].may_not_optimize = 1;
3502 if (regs->array[regno+i].set_in_loop < 127)
3503 ++regs->array[regno+i].set_in_loop;
3504 last_set[regno+i] = insn;
3510 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3511 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3512 contained in insn INSN is used by any insn that precedes INSN in
3513 cyclic order starting from the loop entry point.
3515 We don't want to use INSN_LUID here because if we restrict INSN to those
3516 that have a valid INSN_LUID, it means we cannot move an invariant out
3517 from an inner loop past two loops. */
3519 static int
3520 loop_reg_used_before_p (loop, set, insn)
3521 const struct loop *loop;
3522 rtx set, insn;
3524 rtx reg = SET_DEST (set);
3525 rtx p;
3527 /* Scan forward checking for register usage. If we hit INSN, we
3528 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3529 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3531 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3532 return 1;
3534 if (p == loop->end)
3535 p = loop->start;
3538 return 0;
3542 /* Information we collect about arrays that we might want to prefetch. */
3543 struct prefetch_info
3545 struct iv_class *class; /* Class this prefetch is based on. */
3546 struct induction *giv; /* GIV this prefetch is based on. */
3547 rtx base_address; /* Start prefetching from this address plus
3548 index. */
3549 HOST_WIDE_INT index;
3550 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3551 iteration. */
3552 unsigned int bytes_accesed; /* Sum of sizes of all accesses to this
3553 prefetch area in one iteration. */
3554 unsigned int total_bytes; /* Total bytes loop will access in this block.
3555 This is set only for loops with known
3556 iteration counts and is 0xffffffff
3557 otherwise. */
3558 unsigned int write : 1; /* 1 for read/write prefetches. */
3559 unsigned int prefetch_in_loop : 1;
3560 /* 1 if chosen for prefetching within the loop body. */
3561 unsigned int prefetch_before_loop : 1;
3562 /* 1 if chosen for prefetching before the loop. */
3565 /* Data used by check_store function. */
3566 struct check_store_data
3568 rtx mem_address;
3569 int mem_write;
3572 static void check_store PARAMS ((rtx, rtx, void *));
3573 static void emit_prefetch_instructions PARAMS ((struct loop *));
3574 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3576 /* Set mem_write when mem_address is found. Used as callback to
3577 note_stores. */
3578 static void
3579 check_store (x, pat, data)
3580 rtx x, pat ATTRIBUTE_UNUSED;
3581 void *data;
3583 struct check_store_data *d = (struct check_store_data *) data;
3585 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3586 d->mem_write = 1;
3589 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3590 important to get some addresses combined. Later more sophisticated
3591 transformations can be added when necessary.
3593 ??? The same trick of swapping operands is done in several other places;
3594 it would be nice to develop a common way to handle this. */
3596 static int
3597 rtx_equal_for_prefetch_p (x, y)
3598 rtx x, y;
3600 int i;
3601 int j;
3602 enum rtx_code code = GET_CODE (x);
3603 const char *fmt;
3605 if (x == y)
3606 return 1;
3607 if (code != GET_CODE (y))
3608 return 0;
3610 code = GET_CODE (x);
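/* For a commutative operation the operands may match in either order:
   e.g. (plus (reg A) (reg B)) compares equal to (plus (reg B) (reg A))
   here, where A and B are arbitrary illustrative registers.  */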
3612 if (GET_RTX_CLASS (code) == 'c')
3614 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3615 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3616 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3617 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3619 /* Compare the elements. If any pair of corresponding elements fails to
3620 match, return 0 for the whole thing. */
3622 fmt = GET_RTX_FORMAT (code);
3623 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3625 switch (fmt[i])
3627 case 'w':
3628 if (XWINT (x, i) != XWINT (y, i))
3629 return 0;
3630 break;
3632 case 'i':
3633 if (XINT (x, i) != XINT (y, i))
3634 return 0;
3635 break;
3637 case 'E':
3638 /* Two vectors must have the same length. */
3639 if (XVECLEN (x, i) != XVECLEN (y, i))
3640 return 0;
3642 /* And the corresponding elements must match. */
3643 for (j = 0; j < XVECLEN (x, i); j++)
3644 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3645 XVECEXP (y, i, j)) == 0)
3646 return 0;
3647 break;
3649 case 'e':
3650 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3651 return 0;
3652 break;
3654 case 's':
3655 if (strcmp (XSTR (x, i), XSTR (y, i)))
3656 return 0;
3657 break;
3659 case 'u':
3660 /* These are just backpointers, so they don't matter. */
3661 break;
3663 case '0':
3664 break;
3666 /* It is believed that rtx's at this level will never
3667 contain anything but integers and other rtx's,
3668 except for within LABEL_REFs and SYMBOL_REFs. */
3669 default:
3670 abort ();
3673 return 1;
3676 /* Remove constant addition value from the expression X (when present)
3677 and return it. */
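/* For example (register number purely illustrative), given *X of the
   form (plus (reg 100) (const_int 12)) this returns 12 and rewrites
   *X to (reg 100).  */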
3679 static HOST_WIDE_INT
3680 remove_constant_addition (x)
3681 rtx *x;
3683 HOST_WIDE_INT addval = 0;
3684 rtx exp = *x;
3686 /* Avoid clobbering a shared CONST expression. */
3687 if (GET_CODE (exp) == CONST)
3689 if (GET_CODE (XEXP (exp, 0)) == PLUS
3690 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3691 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3693 *x = XEXP (XEXP (exp, 0), 0);
3694 return INTVAL (XEXP (XEXP (exp, 0), 1));
3696 return 0;
3699 if (GET_CODE (exp) == CONST_INT)
3701 addval = INTVAL (exp);
3702 *x = const0_rtx;
3705   /* For a PLUS expression, recurse on both operands.  */
3706 else if (GET_CODE (exp) == PLUS)
3708 addval += remove_constant_addition (&XEXP (exp, 0));
3709 addval += remove_constant_addition (&XEXP (exp, 1));
3711       /* If our parameter was constant, remove the extra zero from the
3712          expression.  */
3713 if (XEXP (exp, 0) == const0_rtx)
3714 *x = XEXP (exp, 1);
3715 else if (XEXP (exp, 1) == const0_rtx)
3716 *x = XEXP (exp, 0);
3719 return addval;
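/* A minimal usage sketch, for illustration only (the RTL and the register
   number are hypothetical):

     rtx addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 100), GEN_INT (16));
     HOST_WIDE_INT off = remove_constant_addition (&addr);

   afterwards ADDR is (reg 100) and OFF is 16.  A bare CONST_INT argument
   is replaced by const0_rtx and its value is returned.  */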
3722 /* Attempt to identify accesses to arrays that are most likely to cause cache
3723 misses, and emit prefetch instructions a few prefetch blocks forward.
3725 To detect the arrays we use the GIV information that was collected by the
3726 strength reduction pass.
3728 The prefetch instructions are generated after the GIV information is done
3729 and before the strength reduction process. The new GIVs are injected into
3730 the strength reduction tables, so the prefetch addresses are optimized as
3731 well.
3733 GIVs are split into base address, stride, and constant addition values.
3734 GIVs with the same address, stride and close addition values are combined
3735 into a single prefetch. Also writes to GIVs are detected, so that prefetch
3736 for write instructions can be used for the block we write to, on machines
3737 that support write prefetches.
3739 Several heuristics are used to determine when to prefetch. They are
3740 controlled by defined symbols that can be overridden for each target. */
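/* As a rough illustration of the scheme described above (informal notation,
   not actual field names): each combined giv access in iteration I touches

     base_address + index + stride * I

   and the prefetch emitted inside the loop for that stream targets roughly

     current_address + PREFETCH_BLOCK * AHEAD

   with one prefetch per cache block covered by STRIDE, where AHEAD is
   computed below from SIMULTANEOUS_PREFETCHES and the number of distinct
   prefetch streams.  */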
3742 static void
3743 emit_prefetch_instructions (loop)
3744 struct loop *loop;
3746 int num_prefetches = 0;
3747 int num_real_prefetches = 0;
3748 int num_real_write_prefetches = 0;
3749 int ahead;
3750 int i;
3751 struct iv_class *bl;
3752 struct induction *iv;
3753 struct prefetch_info info[MAX_PREFETCHES];
3754 struct loop_ivs *ivs = LOOP_IVS (loop);
3756 if (!HAVE_prefetch)
3757 return;
3759   /* Consider only loops without calls.  When a call is made, the loop is
3760      probably already slow enough that prefetching the memory gains little.  */
3761 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3763 if (loop_dump_stream)
3764 fprintf (loop_dump_stream, "Prefetch: ignoring loop - has call.\n");
3766 return;
3769 if (PREFETCH_NO_LOW_LOOPCNT
3770 && LOOP_INFO (loop)->n_iterations
3771 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3773 if (loop_dump_stream)
3774 fprintf (loop_dump_stream,
3775 "Prefetch: ignoring loop - not enought iterations.\n");
3776 return;
3779 /* Search all induction variables and pick those interesting for the prefetch
3780 machinery. */
3781 for (bl = ivs->list; bl; bl = bl->next)
3783 struct induction *biv = bl->biv, *biv1;
3784 int basestride = 0;
3786 biv1 = biv;
3788 /* Expect all BIVs to be executed in each iteration. This makes our
3789 analysis more conservative. */
3790 while (biv1)
3792 /* Discard non-constant additions that we can't handle well yet, and
3793 BIVs that are executed multiple times; such BIVs ought to be
3794 handled in the nested loop. We accept not_every_iteration BIVs,
3795 since these only result in larger strides and make our
3796 heuristics more conservative.
3797 ??? What does the last sentence mean? */
3798 if (GET_CODE (biv->add_val) != CONST_INT)
3800 if (loop_dump_stream)
3802 fprintf (loop_dump_stream,
3803 "Prefetch: biv %i ignored: non-constant addition at insn %i:",
3804 REGNO (biv->src_reg), INSN_UID (biv->insn));
3805 print_rtl (loop_dump_stream, biv->add_val);
3806 fprintf (loop_dump_stream, "\n");
3808 break;
3811 if (biv->maybe_multiple)
3813 if (loop_dump_stream)
3815 fprintf (loop_dump_stream,
3816 "Prefetch: biv %i ignored: maybe_multiple at insn %i:",
3817 REGNO (biv->src_reg), INSN_UID (biv->insn));
3818 print_rtl (loop_dump_stream, biv->add_val);
3819 fprintf (loop_dump_stream, "\n");
3821 break;
3824 basestride += INTVAL (biv1->add_val);
3825 biv1 = biv1->next_iv;
3828 if (biv1 || !basestride)
3829 continue;
3831 for (iv = bl->giv; iv; iv = iv->next_iv)
3833 rtx address;
3834 rtx temp;
3835 HOST_WIDE_INT index = 0;
3836 int add = 1;
3837 HOST_WIDE_INT stride;
3838 struct check_store_data d;
3839 int size = GET_MODE_SIZE (GET_MODE (iv));
3841 /* There are several reasons why an induction variable is not
3842 interesting to us. */
3843 if (iv->giv_type != DEST_ADDR
3844 /* We are interested only in constant stride memory references
3845 in order to be able to compute density easily. */
3846 || GET_CODE (iv->mult_val) != CONST_INT
3847 /* Don't handle reversed order prefetches, since they are usually
3848 ineffective. Later we may be able to reverse such BIVs. */
3849 || (PREFETCH_NO_REVERSE_ORDER
3850 && (stride = INTVAL (iv->mult_val) * basestride) < 0)
3851 /* Prefetching of accesses with such an extreme stride is probably
3852 not worthwhile, either. */
3853 || (PREFETCH_NO_EXTREME_STRIDE
3854 && stride > PREFETCH_EXTREME_STRIDE)
3855 /* Ignore GIVs with varying add values; we can't predict the
3856 value for the next iteration. */
3857 || !loop_invariant_p (loop, iv->add_val)
3858 /* Ignore GIVs in the nested loops; they ought to have been
3859 handled already. */
3860 || iv->maybe_multiple)
3862 if (loop_dump_stream)
3863 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %i\n",
3864 INSN_UID (iv->insn));
3865 continue;
3868 /* Determine the pointer to the basic array we are examining. It is
3869 the sum of the BIV's initial value and the GIV's add_val. */
3870 index = 0;
3872 address = copy_rtx (iv->add_val);
3873 temp = copy_rtx (bl->initial_value);
3875 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3876 index = remove_constant_addition (&address);
3878 index += size;
3879 d.mem_write = 0;
3880 d.mem_address = *iv->location;
3882 /* When the GIV is not always executed, we might be better off by
3883 not dirtying the cache pages. */
3884 if (PREFETCH_NOT_ALWAYS || iv->always_executed)
3885 note_stores (PATTERN (iv->insn), check_store, &d);
3887 /* Attempt to find another prefetch to the same array and see if we
3888 can merge this one. */
3889 for (i = 0; i < num_prefetches; i++)
3890 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3891 && stride == info[i].stride)
3893 	      /* If both access the same array (the same location, differing
3894 		 only by a small constant index), merge the prefetches.  Just
3895 		 issue the later one; the earlier one will be prefetched by
3896 		 the previous iteration.
3897 		 4096 is an artificial threshold.  It should not be too small,
3898 		 but also not bigger than the small portion of memory usually
3899 		 traversed by a single loop.  */
3900 if (index >= info[i].index && index - info[i].index < 4096)
3902 info[i].write |= d.mem_write;
3903 info[i].bytes_accesed += size;
3904 info[i].index = index;
3905 info[i].giv = iv;
3906 info[i].class = bl;
3907 info[num_prefetches].base_address = address;
3908 add = 0;
3909 break;
3912 if (index < info[i].index && info[i].index - index < 4096)
3914 info[i].write |= d.mem_write;
3915 info[i].bytes_accesed += size;
3916 add = 0;
3917 break;
3921 /* Merging failed. */
3922 if (add)
3924 info[num_prefetches].giv = iv;
3925 info[num_prefetches].class = bl;
3926 info[num_prefetches].index = index;
3927 info[num_prefetches].stride = stride;
3928 info[num_prefetches].base_address = address;
3929 info[num_prefetches].write = d.mem_write;
3930 info[num_prefetches].bytes_accesed = size;
3931 num_prefetches++;
3932 if (num_prefetches >= MAX_PREFETCHES)
3934 if (loop_dump_stream)
3935 fprintf (loop_dump_stream,
3936 "Maximal number of prefetches exceeded.\n");
3937 return;
3943 for (i = 0; i < num_prefetches; i++)
3945 /* Attempt to calculate the number of bytes fetched by the loop.
3946 Avoid overflow. */
3947 if (LOOP_INFO (loop)->n_iterations
3948 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
3949 >= LOOP_INFO (loop)->n_iterations))
3950 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
3951 else
3952 info[i].total_bytes = 0xffffffff;
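       /* For example (hypothetical numbers): with a stride of 8 bytes and
	  1000 known iterations, total_bytes is 8000; if stride * n_iterations
	  would not fit in 32 bits, total_bytes saturates at 0xffffffff.  */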
3954 /* Prefetch is worthwhile only when the loads/stores are dense. */
3955 if (PREFETCH_ONLY_DENSE_MEM
3956 && info[i].bytes_accesed * 256 / info[i].stride > PREFETCH_DENSE_MEM
3957 && (info[i].total_bytes / PREFETCH_BLOCK
3958 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
3960 info[i].prefetch_before_loop = 1;
3961 info[i].prefetch_in_loop
3962 = (info[i].total_bytes / PREFETCH_BLOCK
3963 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
3965 else
3966 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
3968 if (info[i].prefetch_in_loop)
3970 num_real_prefetches += ((info[i].stride + PREFETCH_BLOCK - 1)
3971 / PREFETCH_BLOCK);
3972 if (info[i].write)
3973 num_real_write_prefetches
3974 += (info[i].stride + PREFETCH_BLOCK - 1) / PREFETCH_BLOCK;
3978 if (loop_dump_stream)
3980 for (i = 0; i < num_prefetches; i++)
3982 fprintf (loop_dump_stream, "Prefetch insn %i address: ",
3983 INSN_UID (info[i].giv->insn));
3984 print_rtl (loop_dump_stream, info[i].base_address);
3985 fprintf (loop_dump_stream, " Index: ");
3986 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
3987 fprintf (loop_dump_stream, " stride: ");
3988 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
3989 fprintf (loop_dump_stream,
3990 " density: %i%% total_bytes: %u%sin loop: %s before: %s\n",
3991 (int) (info[i].bytes_accesed * 100 / info[i].stride),
3992 info[i].total_bytes,
3993 info[i].write ? " read/write " : " read only ",
3994 info[i].prefetch_in_loop ? "yes" : "no",
3995 info[i].prefetch_before_loop ? "yes" : "no");
3998 fprintf (loop_dump_stream, "Real prefetches needed: %i (write: %i)\n",
3999 num_real_prefetches, num_real_write_prefetches);
4002 if (!num_real_prefetches)
4003 return;
4005 ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches;
4007 if (!ahead)
4008 return;
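  /* For example, if the target allows, say, 3 simultaneous prefetches and
     there are two prefetch streams, AHEAD is 1, so each stream is fetched
     one prefetch block in advance; with four or more streams AHEAD would
     be 0 and we give up just above.  */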
4010 for (i = 0; i < num_prefetches; i++)
4012 if (info[i].prefetch_in_loop)
4014 int y;
4016 for (y = 0; y < ((info[i].stride + PREFETCH_BLOCK - 1)
4017 / PREFETCH_BLOCK); y++)
4019 rtx loc = copy_rtx (*info[i].giv->location);
4020 rtx insn;
4021 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4022 rtx before_insn = info[i].giv->insn;
4023 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4025 /* We can save some effort by offsetting the address on
4026 architectures with offsettable memory references. */
4027 if (offsettable_address_p (0, VOIDmode, loc))
4028 loc = plus_constant (loc, bytes_ahead);
4029 else
4031 rtx reg = gen_reg_rtx (Pmode);
4032 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4033 GEN_INT (bytes_ahead), reg,
4034 0, before_insn);
4035 loc = reg;
4038 /* Make sure the address operand is valid for prefetch. */
4039 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4040 (loc,
4041 insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4042 loc = force_reg (Pmode, loc);
4043 emit_insn_before (gen_prefetch (loc, GEN_INT (info[i].write),
4044 GEN_INT (3)),
4045 before_insn);
4047 /* Check all insns emitted and record the new GIV
4048 information. */
4049 insn = NEXT_INSN (prev_insn);
4050 while (insn != before_insn)
4052 insn = check_insn_for_givs (loop, insn,
4053 info[i].giv->always_executed,
4054 info[i].giv->maybe_multiple);
4055 insn = NEXT_INSN (insn);
4060 if (info[i].prefetch_before_loop)
4062 int y;
4064 /* Emit INSNs before the loop to fetch the first cache lines. */
4065 for (y = 0;
4066 (!info[i].prefetch_in_loop || y < ahead)
4067 && y * PREFETCH_BLOCK < (int) info[i].total_bytes; y ++)
4069 rtx reg = gen_reg_rtx (Pmode);
4070 rtx loop_start = loop->start;
4071 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4072 info[i].giv->add_val,
4073 GEN_INT (y * PREFETCH_BLOCK));
4075 loop_iv_add_mult_emit_before (loop, info[i].class->initial_value,
4076 info[i].giv->mult_val,
4077 add_val, reg, 0, loop_start);
4078 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4079 GEN_INT (3)),
4080 loop_start);
4085 return;
4088 /* A "basic induction variable" or biv is a pseudo reg that is set
4089 (within this loop) only by incrementing or decrementing it. */
4090 /* A "general induction variable" or giv is a pseudo reg whose
4091 value is a linear function of a biv. */
4093 /* Bivs are recognized by `basic_induction_var';
4094    Givs by `general_induction_var'.  */
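/* A source-level illustration (hypothetical code, not taken from any
   particular test case):

     for (i = 0; i < n; i++)
       p[4 * i + 10] = 0;

   Here `i' is a biv, since within the loop it is only ever incremented,
   while the address `p + 4 * i + 10' (suitably scaled) is a giv, since it
   is a linear function of the biv.  Strength reduction would typically
   keep that address in its own register and simply bump it by
   4 * sizeof (*p) on every iteration.  */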
4096 /* Communication with routines called via `note_stores'. */
4098 static rtx note_insn;
4100 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4102 static rtx addr_placeholder;
4104 /* ??? Unfinished optimizations, and possible future optimizations,
4105 for the strength reduction code. */
4107 /* ??? The interaction of biv elimination, and recognition of 'constant'
4108 bivs, may cause problems. */
4110 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4111 performance problems.
4113 Perhaps don't eliminate things that can be combined with an addressing
4114 mode. Find all givs that have the same biv, mult_val, and add_val;
4115 then for each giv, check to see if its only use dies in a following
4116 memory address. If so, generate a new memory address and check to see
4117 if it is valid. If it is valid, then store the modified memory address,
4118 otherwise, mark the giv as not done so that it will get its own iv. */
4120 /* ??? Could try to optimize branches when it is known that a biv is always
4121 positive. */
4123 /* ??? When replacing a biv in a compare insn, we should replace it with the closest
4124 giv so that an optimized branch can still be recognized by the combiner,
4125 e.g. the VAX acb insn. */
4127 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4128 was rerun in loop_optimize whenever a register was added or moved.
4129 Also, some of the optimizations could be a little less conservative. */
4131 /* Scan the loop body and call FNCALL for each insn.  In addition to the
4132    LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4133    the callback.
4135    NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
4136    for every loop iteration except for the last one.
4138    MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4139    every loop iteration.  */
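/* A minimal sketch of a conforming callback, for illustration only; the
   function name is hypothetical.  The real callbacks used below are
   check_insn_for_bivs and check_insn_for_givs.  */
#if 0
static rtx
example_insn_callback (loop, insn, not_every_iteration, maybe_multiple)
     struct loop *loop ATTRIBUTE_UNUSED;
     rtx insn;
     int not_every_iteration ATTRIBUTE_UNUSED;
     int maybe_multiple ATTRIBUTE_UNUSED;
{
  /* Examine INSN here.  Return the last insn processed, so that the walk
     in for_each_insn_in_loop resumes after any insns the callback skipped.  */
  return insn;
}

/* It would then be invoked as:
   for_each_insn_in_loop (loop, example_insn_callback);  */
#endif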
4141 void
4142 for_each_insn_in_loop (loop, fncall)
4143 struct loop *loop;
4144 loop_insn_callback fncall;
4146 /* This is 1 if current insn is not executed at least once for every loop
4147 iteration. */
4148 int not_every_iteration = 0;
4149 int maybe_multiple = 0;
4150 int past_loop_latch = 0;
4151 int loop_depth = 0;
4152 rtx p;
4154 /* If loop_scan_start points to the loop exit test, we have to be wary of
4155 subversive use of gotos inside expression statements. */
4156 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4157 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4159 /* Scan through loop to find all possible bivs. */
4161 for (p = next_insn_in_loop (loop, loop->scan_start);
4162 p != NULL_RTX;
4163 p = next_insn_in_loop (loop, p))
4165 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4167 /* Past CODE_LABEL, we get to insns that may be executed multiple
4168 times. The only way we can be sure that they can't is if every
4169 jump insn between here and the end of the loop either
4170 returns, exits the loop, is a jump to a location that is still
4171 behind the label, or is a jump to the loop start. */
4173 if (GET_CODE (p) == CODE_LABEL)
4175 rtx insn = p;
4177 maybe_multiple = 0;
4179 while (1)
4181 insn = NEXT_INSN (insn);
4182 if (insn == loop->scan_start)
4183 break;
4184 if (insn == loop->end)
4186 if (loop->top != 0)
4187 insn = loop->top;
4188 else
4189 break;
4190 if (insn == loop->scan_start)
4191 break;
4194 if (GET_CODE (insn) == JUMP_INSN
4195 && GET_CODE (PATTERN (insn)) != RETURN
4196 && (!any_condjump_p (insn)
4197 || (JUMP_LABEL (insn) != 0
4198 && JUMP_LABEL (insn) != loop->scan_start
4199 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4201 maybe_multiple = 1;
4202 break;
4207 /* Past a jump, we get to insns for which we can't count
4208 on whether they will be executed during each iteration. */
4209 /* This code appears twice in strength_reduce. There is also similar
4210 code in scan_loop. */
4211 if (GET_CODE (p) == JUMP_INSN
4212 /* If we enter the loop in the middle, and scan around to the
4213 beginning, don't set not_every_iteration for that.
4214 This can be any kind of jump, since we want to know if insns
4215 will be executed if the loop is executed. */
4216 && !(JUMP_LABEL (p) == loop->top
4217 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4218 && any_uncondjump_p (p))
4219 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4221 rtx label = 0;
4223 /* If this is a jump outside the loop, then it also doesn't
4224 matter. Check to see if the target of this branch is on the
4225 	     loop->exit_labels list.  */
4227 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4228 if (XEXP (label, 0) == JUMP_LABEL (p))
4229 break;
4231 if (!label)
4232 not_every_iteration = 1;
4235 else if (GET_CODE (p) == NOTE)
4237 /* At the virtual top of a converted loop, insns are again known to
4238 be executed each iteration: logically, the loop begins here
4239 even though the exit code has been duplicated.
4241 Insns are also again known to be executed each iteration at
4242 the LOOP_CONT note. */
4243 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4244 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4245 && loop_depth == 0)
4246 not_every_iteration = 0;
4247 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4248 loop_depth++;
4249 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4250 loop_depth--;
4253 /* Note if we pass a loop latch. If we do, then we can not clear
4254 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4255 a loop since a jump before the last CODE_LABEL may have started
4256 a new loop iteration.
4258 Note that LOOP_TOP is only set for rotated loops and we need
4259 this check for all loops, so compare against the CODE_LABEL
4260 which immediately follows LOOP_START. */
4261 if (GET_CODE (p) == JUMP_INSN
4262 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4263 past_loop_latch = 1;
4265 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4266 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4267 or not an insn is known to be executed each iteration of the
4268 loop, whether or not any iterations are known to occur.
4270 Therefore, if we have just passed a label and have no more labels
4271 between here and the test insn of the loop, and we have not passed
4272 a jump to the top of the loop, then we know these insns will be
4273 executed each iteration. */
4275 if (not_every_iteration
4276 && !past_loop_latch
4277 && GET_CODE (p) == CODE_LABEL
4278 && no_labels_between_p (p, loop->end)
4279 && loop_insn_first_p (p, loop->cont))
4280 not_every_iteration = 0;
4284 static void
4285 loop_bivs_find (loop)
4286 struct loop *loop;
4288 struct loop_regs *regs = LOOP_REGS (loop);
4289 struct loop_ivs *ivs = LOOP_IVS (loop);
4290 /* Temporary list pointers for traversing ivs->list. */
4291 struct iv_class *bl, **backbl;
4293 ivs->list = 0;
4295 for_each_insn_in_loop (loop, check_insn_for_bivs);
4297 /* Scan ivs->list to remove all regs that proved not to be bivs.
4298 Make a sanity check against regs->n_times_set. */
4299 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4301 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4302 /* Above happens if register modified by subreg, etc. */
4303 /* Make sure it is not recognized as a basic induction var: */
4304 || regs->array[bl->regno].n_times_set != bl->biv_count
4305 /* If never incremented, it is invariant that we decided not to
4306 move. So leave it alone. */
4307 || ! bl->incremented)
4309 if (loop_dump_stream)
4310 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4311 bl->regno,
4312 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4313 ? "not induction variable"
4314 : (! bl->incremented ? "never incremented"
4315 : "count error")));
4317 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4318 *backbl = bl->next;
4320 else
4322 backbl = &bl->next;
4324 if (loop_dump_stream)
4325 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4331 /* Determine how BIVS are initialised by looking through the pre-header
4332 extended basic block. */
4333 static void
4334 loop_bivs_init_find (loop)
4335 struct loop *loop;
4337 struct loop_ivs *ivs = LOOP_IVS (loop);
4338 /* Temporary list pointers for traversing ivs->list. */
4339 struct iv_class *bl;
4340 int call_seen;
4341 rtx p;
4343 /* Find initial value for each biv by searching backwards from loop_start,
4344 halting at first label. Also record any test condition. */
4346 call_seen = 0;
4347 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4349 rtx test;
4351 note_insn = p;
4353 if (GET_CODE (p) == CALL_INSN)
4354 call_seen = 1;
4356 if (INSN_P (p))
4357 note_stores (PATTERN (p), record_initial, ivs);
4359 /* Record any test of a biv that branches around the loop if no store
4360 between it and the start of loop. We only care about tests with
4361 constants and registers and only certain of those. */
4362 if (GET_CODE (p) == JUMP_INSN
4363 && JUMP_LABEL (p) != 0
4364 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4365 && (test = get_condition_for_loop (loop, p)) != 0
4366 && GET_CODE (XEXP (test, 0)) == REG
4367 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4368 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4369 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4370 && bl->init_insn == 0)
4372 /* If an NE test, we have an initial value! */
4373 if (GET_CODE (test) == NE)
4375 bl->init_insn = p;
4376 bl->init_set = gen_rtx_SET (VOIDmode,
4377 XEXP (test, 0), XEXP (test, 1));
4379 else
4380 bl->initial_test = test;
4386 /* Look at each biv and see if we can say anything better about its
4387 initial value from any initializing insns set up above. (This is done
4388 in two passes to avoid missing SETs in a PARALLEL.) */
4389 static void
4390 loop_bivs_check (loop)
4391 struct loop *loop;
4393 struct loop_ivs *ivs = LOOP_IVS (loop);
4394 /* Temporary list pointers for traversing ivs->list. */
4395 struct iv_class *bl;
4396 struct iv_class **backbl;
4398 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4400 rtx src;
4401 rtx note;
4403 if (! bl->init_insn)
4404 continue;
4406       /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4407 is a constant, use the value of that. */
4408 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4409 && CONSTANT_P (XEXP (note, 0)))
4410 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4411 && CONSTANT_P (XEXP (note, 0))))
4412 src = XEXP (note, 0);
4413 else
4414 src = SET_SRC (bl->init_set);
4416 if (loop_dump_stream)
4417 fprintf (loop_dump_stream,
4418 "Biv %d: initialized at insn %d: initial value ",
4419 bl->regno, INSN_UID (bl->init_insn));
4421 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4422 || GET_MODE (src) == VOIDmode)
4423 && valid_initial_value_p (src, bl->init_insn,
4424 LOOP_INFO (loop)->pre_header_has_call,
4425 loop->start))
4427 bl->initial_value = src;
4429 if (loop_dump_stream)
4431 print_simple_rtl (loop_dump_stream, src);
4432 fputc ('\n', loop_dump_stream);
4435 /* If we can't make it a giv,
4436 	 let the biv keep its initial value of "itself".  */
4437 else if (loop_dump_stream)
4438 fprintf (loop_dump_stream, "is complex\n");
4443 /* Search the loop for general induction variables. */
4445 static void
4446 loop_givs_find (loop)
4447 struct loop* loop;
4449 for_each_insn_in_loop (loop, check_insn_for_givs);
4453 /* For each giv for which we still don't know whether or not it is
4454 replaceable, check to see if it is replaceable because its final value
4455 can be calculated. */
4457 static void
4458 loop_givs_check (loop)
4459 struct loop *loop;
4461 struct loop_ivs *ivs = LOOP_IVS (loop);
4462 struct iv_class *bl;
4464 for (bl = ivs->list; bl; bl = bl->next)
4466 struct induction *v;
4468 for (v = bl->giv; v; v = v->next_iv)
4469 if (! v->replaceable && ! v->not_replaceable)
4470 check_final_value (loop, v);
4475 /* Return non-zero if it is possible to eliminate the biv BL provided
4476 all givs are reduced. This is possible if either the reg is not
4477 used outside the loop, or we can compute what its final value will
4478 be. */
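/* For instance (an informal example, not tied to any particular test case):
   a counter that is referenced only by the loop's exit test and by address
   givs inside the loop can usually be eliminated once those givs have been
   reduced, because it is either dead outside the loop or final_biv_value
   can supply the value it would have had after the loop.  */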
4480 static int
4481 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4482 struct loop *loop;
4483 struct iv_class *bl;
4484 int threshold;
4485 int insn_count;
4487 /* For architectures with a decrement_and_branch_until_zero insn,
4488 don't do this if we put a REG_NONNEG note on the endtest for this
4489 biv. */
4491 #ifdef HAVE_decrement_and_branch_until_zero
4492 if (bl->nonneg)
4494 if (loop_dump_stream)
4495 fprintf (loop_dump_stream,
4496 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4497 return 0;
4499 #endif
4501 /* Check that biv is used outside loop or if it has a final value.
4502 Compare against bl->init_insn rather than loop->start. We aren't
4503 concerned with any uses of the biv between init_insn and
4504 loop->start since these won't be affected by the value of the biv
4505 elsewhere in the function, so long as init_insn doesn't use the
4506 biv itself. */
4508 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4509 && bl->init_insn
4510 && INSN_UID (bl->init_insn) < max_uid_for_loop
4511 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4512 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4513 || (bl->final_value = final_biv_value (loop, bl)))
4514 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4516 if (loop_dump_stream)
4518 fprintf (loop_dump_stream,
4519 "Cannot eliminate biv %d.\n",
4520 bl->regno);
4521 fprintf (loop_dump_stream,
4522 "First use: insn %d, last use: insn %d.\n",
4523 REGNO_FIRST_UID (bl->regno),
4524 REGNO_LAST_UID (bl->regno));
4526 return 0;
4530 /* Reduce each giv of BL that we have decided to reduce. */
4532 static void
4533 loop_givs_reduce (loop, bl)
4534 struct loop *loop;
4535 struct iv_class *bl;
4537 struct induction *v;
4539 for (v = bl->giv; v; v = v->next_iv)
4541 struct induction *tv;
4542 if (! v->ignore && v->same == 0)
4544 int auto_inc_opt = 0;
4546 /* If the code for derived givs immediately below has already
4547 allocated a new_reg, we must keep it. */
4548 if (! v->new_reg)
4549 v->new_reg = gen_reg_rtx (v->mode);
4551 #ifdef AUTO_INC_DEC
4552 /* If the target has auto-increment addressing modes, and
4553 this is an address giv, then try to put the increment
4554 immediately after its use, so that flow can create an
4555 auto-increment addressing mode. */
4556 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4557 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4558 /* We don't handle reversed biv's because bl->biv->insn
4559 does not have a valid INSN_LUID. */
4560 && ! bl->reversed
4561 && v->always_executed && ! v->maybe_multiple
4562 && INSN_UID (v->insn) < max_uid_for_loop)
4564 /* If other giv's have been combined with this one, then
4565 this will work only if all uses of the other giv's occur
4566 before this giv's insn. This is difficult to check.
4568 We simplify this by looking for the common case where
4569 there is one DEST_REG giv, and this giv's insn is the
4570 last use of the dest_reg of that DEST_REG giv. If the
4571 increment occurs after the address giv, then we can
4572 perform the optimization. (Otherwise, the increment
4573 would have to go before other_giv, and we would not be
4574 able to combine it with the address giv to get an
4575 auto-inc address.) */
4576 if (v->combined_with)
4578 struct induction *other_giv = 0;
4580 for (tv = bl->giv; tv; tv = tv->next_iv)
4581 if (tv->same == v)
4583 if (other_giv)
4584 break;
4585 else
4586 other_giv = tv;
4588 if (! tv && other_giv
4589 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4590 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4591 == INSN_UID (v->insn))
4592 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4593 auto_inc_opt = 1;
4595 /* Check for case where increment is before the address
4596 giv. Do this test in "loop order". */
4597 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4598 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4599 || (INSN_LUID (bl->biv->insn)
4600 > INSN_LUID (loop->scan_start))))
4601 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4602 && (INSN_LUID (loop->scan_start)
4603 < INSN_LUID (bl->biv->insn))))
4604 auto_inc_opt = -1;
4605 else
4606 auto_inc_opt = 1;
4608 #ifdef HAVE_cc0
4610 rtx prev;
4612 /* We can't put an insn immediately after one setting
4613 cc0, or immediately before one using cc0. */
4614 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4615 || (auto_inc_opt == -1
4616 && (prev = prev_nonnote_insn (v->insn)) != 0
4617 && INSN_P (prev)
4618 && sets_cc0_p (PATTERN (prev))))
4619 auto_inc_opt = 0;
4621 #endif
4623 if (auto_inc_opt)
4624 v->auto_inc_opt = 1;
4626 #endif
4628 /* For each place where the biv is incremented, add an insn
4629 to increment the new, reduced reg for the giv. */
4630 for (tv = bl->biv; tv; tv = tv->next_iv)
4632 rtx insert_before;
4634 if (! auto_inc_opt)
4635 insert_before = tv->insn;
4636 else if (auto_inc_opt == 1)
4637 insert_before = NEXT_INSN (v->insn);
4638 else
4639 insert_before = v->insn;
4641 if (tv->mult_val == const1_rtx)
4642 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4643 v->new_reg, v->new_reg,
4644 0, insert_before);
4645 else /* tv->mult_val == const0_rtx */
4646 /* A multiply is acceptable here
4647 since this is presumed to be seldom executed. */
4648 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4649 v->add_val, v->new_reg,
4650 0, insert_before);
4653 /* Add code at loop start to initialize giv's reduced reg. */
4655 loop_iv_add_mult_hoist (loop,
4656 extend_value_for_giv (v, bl->initial_value),
4657 v->mult_val, v->add_val, v->new_reg);
4663 /* Check for givs whose first use is their definition and whose
4664 last use is the definition of another giv. If so, it is likely
4665 dead and should not be used to derive another giv nor to
4666 eliminate a biv. */
4668 static void
4669 loop_givs_dead_check (loop, bl)
4670 struct loop *loop ATTRIBUTE_UNUSED;
4671 struct iv_class *bl;
4673 struct induction *v;
4675 for (v = bl->giv; v; v = v->next_iv)
4677 if (v->ignore
4678 || (v->same && v->same->ignore))
4679 continue;
4681 if (v->giv_type == DEST_REG
4682 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4684 struct induction *v1;
4686 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4687 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4688 v->maybe_dead = 1;
4694 static void
4695 loop_givs_rescan (loop, bl, reg_map)
4696 struct loop *loop;
4697 struct iv_class *bl;
4698 rtx *reg_map;
4700 struct induction *v;
4702 for (v = bl->giv; v; v = v->next_iv)
4704 if (v->same && v->same->ignore)
4705 v->ignore = 1;
4707 if (v->ignore)
4708 continue;
4710 /* Update expression if this was combined, in case other giv was
4711 replaced. */
4712 if (v->same)
4713 v->new_reg = replace_rtx (v->new_reg,
4714 v->same->dest_reg, v->same->new_reg);
4716 /* See if this register is known to be a pointer to something. If
4717 so, see if we can find the alignment. First see if there is a
4718 destination register that is a pointer. If so, this shares the
4719 alignment too. Next see if we can deduce anything from the
4720 computational information. If not, and this is a DEST_ADDR
4721 giv, at least we know that it's a pointer, though we don't know
4722 the alignment. */
4723 if (GET_CODE (v->new_reg) == REG
4724 && v->giv_type == DEST_REG
4725 && REG_POINTER (v->dest_reg))
4726 mark_reg_pointer (v->new_reg,
4727 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4728 else if (GET_CODE (v->new_reg) == REG
4729 && REG_POINTER (v->src_reg))
4731 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4733 if (align == 0
4734 || GET_CODE (v->add_val) != CONST_INT
4735 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4736 align = 0;
4738 mark_reg_pointer (v->new_reg, align);
4740 else if (GET_CODE (v->new_reg) == REG
4741 && GET_CODE (v->add_val) == REG
4742 && REG_POINTER (v->add_val))
4744 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4746 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4747 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4748 align = 0;
4750 mark_reg_pointer (v->new_reg, align);
4752 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4753 mark_reg_pointer (v->new_reg, 0);
4755 if (v->giv_type == DEST_ADDR)
4756 /* Store reduced reg as the address in the memref where we found
4757 this giv. */
4758 validate_change (v->insn, v->location, v->new_reg, 0);
4759 else if (v->replaceable)
4761 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4763 else
4765 /* Not replaceable; emit an insn to set the original giv reg from
4766 the reduced giv, same as above. */
4767 loop_insn_emit_after (loop, 0, v->insn,
4768 gen_move_insn (v->dest_reg, v->new_reg));
4771 /* When a loop is reversed, givs which depend on the reversed
4772 biv, and which are live outside the loop, must be set to their
4773 correct final value. This insn is only needed if the giv is
4774 not replaceable. The correct final value is the same as the
4775 value that the giv starts the reversed loop with. */
4776 if (bl->reversed && ! v->replaceable)
4777 loop_iv_add_mult_sink (loop,
4778 extend_value_for_giv (v, bl->initial_value),
4779 v->mult_val, v->add_val, v->dest_reg);
4780 else if (v->final_value)
4781 loop_insn_sink_or_swim (loop,
4782 gen_move_insn (v->dest_reg, v->final_value));
4784 if (loop_dump_stream)
4786 fprintf (loop_dump_stream, "giv at %d reduced to ",
4787 INSN_UID (v->insn));
4788 print_simple_rtl (loop_dump_stream, v->new_reg);
4789 fprintf (loop_dump_stream, "\n");
4795 static int
4796 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4797 struct loop *loop ATTRIBUTE_UNUSED;
4798 struct iv_class *bl;
4799 struct induction *v;
4800 rtx test_reg;
4802 int add_cost;
4803 int benefit;
4805 benefit = v->benefit;
4806 PUT_MODE (test_reg, v->mode);
4807 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4808 test_reg, test_reg);
4810 /* Reduce benefit if not replaceable, since we will insert a
4811 move-insn to replace the insn that calculates this giv. Don't do
4812 this unless the giv is a user variable, since it will often be
4813 marked non-replaceable because of the duplication of the exit
4814 code outside the loop. In such a case, the copies we insert are
4815 dead and will be deleted. So they don't have a cost. Similar
4816 situations exist. */
4817 /* ??? The new final_[bg]iv_value code does a much better job of
4818 finding replaceable giv's, and hence this code may no longer be
4819 necessary. */
4820 if (! v->replaceable && ! bl->eliminable
4821 && REG_USERVAR_P (v->dest_reg))
4822 benefit -= copy_cost;
4824 /* Decrease the benefit to count the add-insns that we will insert
4825 to increment the reduced reg for the giv. ??? This can
4826 overestimate the run-time cost of the additional insns, e.g. if
4827 there are multiple basic blocks that increment the biv, but only
4828 one of these blocks is executed during each iteration. There is
4829 no good way to detect cases like this with the current structure
4830 of the loop optimizer. This code is more accurate for
4831 determining code size than run-time benefits. */
4832 benefit -= add_cost * bl->biv_count;
4834 /* Decide whether to strength-reduce this giv or to leave the code
4835 unchanged (recompute it from the biv each time it is used). This
4836 decision can be made independently for each giv. */
4838 #ifdef AUTO_INC_DEC
4839 /* Attempt to guess whether autoincrement will handle some of the
4840 new add insns; if so, increase BENEFIT (undo the subtraction of
4841 add_cost that was done above). */
4842 if (v->giv_type == DEST_ADDR
4843 /* Increasing the benefit is risky, since this is only a guess.
4844 Avoid increasing register pressure in cases where there would
4845 be no other benefit from reducing this giv. */
4846 && benefit > 0
4847 && GET_CODE (v->mult_val) == CONST_INT)
4849 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4851 if (HAVE_POST_INCREMENT
4852 && INTVAL (v->mult_val) == size)
4853 benefit += add_cost * bl->biv_count;
4854 else if (HAVE_PRE_INCREMENT
4855 && INTVAL (v->mult_val) == size)
4856 benefit += add_cost * bl->biv_count;
4857 else if (HAVE_POST_DECREMENT
4858 && -INTVAL (v->mult_val) == size)
4859 benefit += add_cost * bl->biv_count;
4860 else if (HAVE_PRE_DECREMENT
4861 && -INTVAL (v->mult_val) == size)
4862 benefit += add_cost * bl->biv_count;
4864 #endif
4866 return benefit;
4870 /* Free IV structures for LOOP. */
4872 static void
4873 loop_ivs_free (loop)
4874 struct loop *loop;
4876 struct loop_ivs *ivs = LOOP_IVS (loop);
4877 struct iv_class *iv = ivs->list;
4879 free (ivs->regs);
4881 while (iv)
4883 struct iv_class *next = iv->next;
4884 struct induction *induction;
4885 struct induction *next_induction;
4887 for (induction = iv->biv; induction; induction = next_induction)
4889 next_induction = induction->next_iv;
4890 free (induction);
4892 for (induction = iv->giv; induction; induction = next_induction)
4894 next_induction = induction->next_iv;
4895 free (induction);
4898 free (iv);
4899 iv = next;
4904 /* Perform strength reduction and induction variable elimination.
4906 Pseudo registers created during this function will be beyond the
4907 last valid index in several tables including
4908 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4909 problem here, because the added registers cannot be givs outside of
4910 their loop, and hence will never be reconsidered. But scan_loop
4911 must check regnos to make sure they are in bounds. */
4913 static void
4914 strength_reduce (loop, flags)
4915 struct loop *loop;
4916 int flags;
4918 struct loop_info *loop_info = LOOP_INFO (loop);
4919 struct loop_regs *regs = LOOP_REGS (loop);
4920 struct loop_ivs *ivs = LOOP_IVS (loop);
4921 rtx p;
4922 /* Temporary list pointer for traversing ivs->list. */
4923 struct iv_class *bl;
4924 /* Ratio of extra register life span we can justify
4925 for saving an instruction. More if loop doesn't call subroutines
4926 since in that case saving an insn makes more difference
4927 and more registers are available. */
4928 /* ??? could set this to last value of threshold in move_movables */
4929 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
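  /* For example (hypothetical register count): with 29 non-fixed registers,
     THRESHOLD is 2 * (3 + 29) = 64 for a loop without calls and half that,
     32, when the loop contains a call.  */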
4930 /* Map of pseudo-register replacements. */
4931 rtx *reg_map = NULL;
4932 int reg_map_size;
4933 int unrolled_insn_copies = 0;
4934 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
4935 int insn_count = count_insns_in_loop (loop);
4937 addr_placeholder = gen_reg_rtx (Pmode);
4939 ivs->n_regs = max_reg_before_loop;
4940 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
4942 /* Find all BIVs in loop. */
4943 loop_bivs_find (loop);
4945 /* Exit if there are no bivs. */
4946 if (! ivs->list)
4948       /* Can still unroll the loop anyway, but indicate that there is no
4949 strength reduction info available. */
4950 if (flags & LOOP_UNROLL)
4951 unroll_loop (loop, insn_count, 0);
4953 loop_ivs_free (loop);
4954 return;
4957   /* Determine how BIVS are initialised by looking through the pre-header
4958 extended basic block. */
4959 loop_bivs_init_find (loop);
4961   /* Look at each biv and see if we can say anything better about its
4962 initial value from any initializing insns set up above. */
4963 loop_bivs_check (loop);
4965 /* Search the loop for general induction variables. */
4966 loop_givs_find (loop);
4968 /* Try to calculate and save the number of loop iterations. This is
4969 set to zero if the actual number can not be calculated. This must
4970 be called after all giv's have been identified, since otherwise it may
4971 fail if the iteration variable is a giv. */
4972 loop_iterations (loop);
4974 #ifdef HAVE_prefetch
4975 if (flags & LOOP_PREFETCH)
4976 emit_prefetch_instructions (loop);
4977 #endif
4979 /* Now for each giv for which we still don't know whether or not it is
4980 replaceable, check to see if it is replaceable because its final value
4981 can be calculated. This must be done after loop_iterations is called,
4982 so that final_giv_value will work correctly. */
4983 loop_givs_check (loop);
4985 /* Try to prove that the loop counter variable (if any) is always
4986 nonnegative; if so, record that fact with a REG_NONNEG note
4987 so that "decrement and branch until zero" insn can be used. */
4988 check_dbra_loop (loop, insn_count);
4990 /* Create reg_map to hold substitutions for replaceable giv regs.
4991 Some givs might have been made from biv increments, so look at
4992 ivs->reg_iv_type for a suitable size. */
4993 reg_map_size = ivs->n_regs;
4994 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
4996 /* Examine each iv class for feasibility of strength reduction/induction
4997 variable elimination. */
4999 for (bl = ivs->list; bl; bl = bl->next)
5001 struct induction *v;
5002 int benefit;
5004 /* Test whether it will be possible to eliminate this biv
5005 provided all givs are reduced. */
5006 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5008 /* This will be true at the end, if all givs which depend on this
5009 biv have been strength reduced.
5010 We can't (currently) eliminate the biv unless this is so. */
5011 bl->all_reduced = 1;
5013 /* Check each extension dependent giv in this class to see if its
5014 root biv is safe from wrapping in the interior mode. */
5015 check_ext_dependent_givs (bl, loop_info);
5017 /* Combine all giv's for this iv_class. */
5018 combine_givs (regs, bl);
5020 for (v = bl->giv; v; v = v->next_iv)
5022 struct induction *tv;
5024 if (v->ignore || v->same)
5025 continue;
5027 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5029 /* If an insn is not to be strength reduced, then set its ignore
5030 flag, and clear bl->all_reduced. */
5032 /* A giv that depends on a reversed biv must be reduced if it is
5033 	 used after the loop exit; otherwise, it would have the wrong
5034 	 value after the loop exit.  To keep things simple, just reduce all
5035 	 such givs whether or not we know they are used after the loop
5036 exit. */
5038 if (! flag_reduce_all_givs
5039 && v->lifetime * threshold * benefit < insn_count
5040 && ! bl->reversed)
5042 if (loop_dump_stream)
5043 fprintf (loop_dump_stream,
5044 "giv of insn %d not worth while, %d vs %d.\n",
5045 INSN_UID (v->insn),
5046 v->lifetime * threshold * benefit, insn_count);
5047 v->ignore = 1;
5048 bl->all_reduced = 0;
5050 else
5052 /* Check that we can increment the reduced giv without a
5053 multiply insn. If not, reject it. */
5055 for (tv = bl->biv; tv; tv = tv->next_iv)
5056 if (tv->mult_val == const1_rtx
5057 && ! product_cheap_p (tv->add_val, v->mult_val))
5059 if (loop_dump_stream)
5060 fprintf (loop_dump_stream,
5061 "giv of insn %d: would need a multiply.\n",
5062 INSN_UID (v->insn));
5063 v->ignore = 1;
5064 bl->all_reduced = 0;
5065 break;
5070 /* Check for givs whose first use is their definition and whose
5071 last use is the definition of another giv. If so, it is likely
5072 dead and should not be used to derive another giv nor to
5073 eliminate a biv. */
5074 loop_givs_dead_check (loop, bl);
5076 /* Reduce each giv that we decided to reduce. */
5077 loop_givs_reduce (loop, bl);
5079 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5080 as not reduced.
5082 For each giv register that can be reduced now: if replaceable,
5083 substitute reduced reg wherever the old giv occurs;
5084 else add new move insn "giv_reg = reduced_reg". */
5085 loop_givs_rescan (loop, bl, reg_map);
5087 /* All the givs based on the biv bl have been reduced if they
5088 merit it. */
5090 /* For each giv not marked as maybe dead that has been combined with a
5091 second giv, clear any "maybe dead" mark on that second giv.
5092 v->new_reg will either be or refer to the register of the giv it
5093 combined with.
5095 Doing this clearing avoids problems in biv elimination where
5096 a giv's new_reg is a complex value that can't be put in the
5097 insn but the giv combined with (with a reg as new_reg) is
5098 marked maybe_dead. Since the register will be used in either
5099 case, we'd prefer it be used from the simpler giv. */
5101 for (v = bl->giv; v; v = v->next_iv)
5102 if (! v->maybe_dead && v->same)
5103 v->same->maybe_dead = 0;
5105 /* Try to eliminate the biv, if it is a candidate.
5106 This won't work if ! bl->all_reduced,
5107 since the givs we planned to use might not have been reduced.
5109 We have to be careful that we didn't initially think we could
5110 eliminate this biv because of a giv that we now think may be
5111 dead and shouldn't be used as a biv replacement.
5113 Also, there is the possibility that we may have a giv that looks
5114 like it can be used to eliminate a biv, but the resulting insn
5115 isn't valid. This can happen, for example, on the 88k, where a
5116 JUMP_INSN can compare a register only with zero. Attempts to
5117 replace it with a compare with a constant will fail.
5119 Note that in cases where this call fails, we may have replaced some
5120 of the occurrences of the biv with a giv, but no harm was done in
5121 doing so in the rare cases where it can occur. */
5123 if (bl->all_reduced == 1 && bl->eliminable
5124 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5126 /* ?? If we created a new test to bypass the loop entirely,
5127 or otherwise drop straight in, based on this test, then
5128 we might want to rewrite it also. This way some later
5129 pass has more hope of removing the initialization of this
5130 biv entirely. */
5132 /* If final_value != 0, then the biv may be used after loop end
5133 and we must emit an insn to set it just in case.
5135 Reversed bivs already have an insn after the loop setting their
5136 value, so we don't need another one. We can't calculate the
5137 proper final value for such a biv here anyways. */
5138 if (bl->final_value && ! bl->reversed)
5139 loop_insn_sink_or_swim (loop, gen_move_insn
5140 (bl->biv->dest_reg, bl->final_value));
5142 if (loop_dump_stream)
5143 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5144 bl->regno);
5146 /* See above note wrt final_value. But since we couldn't eliminate
5147 the biv, we must set the value after the loop instead of before. */
5148 else if (bl->final_value && ! bl->reversed)
5149 loop_insn_sink (loop, gen_move_insn (bl->biv->dest_reg,
5150 bl->final_value));
5153 /* Go through all the instructions in the loop, making all the
5154 register substitutions scheduled in REG_MAP. */
5156 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5157 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5158 || GET_CODE (p) == CALL_INSN)
5160 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5161 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5162 INSN_CODE (p) = -1;
5165 if (loop_info->n_iterations > 0)
5167 /* When we completely unroll a loop we will likely not need the increment
5168 of the loop BIV and we will not need the conditional branch at the
5169 end of the loop. */
5170 unrolled_insn_copies = insn_count - 2;
5172 #ifdef HAVE_cc0
5173 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5174 need the comparison before the conditional branch at the end of the
5175 loop. */
5176 unrolled_insn_copies -= 1;
5177 #endif
5179 /* We'll need one copy for each loop iteration. */
5180 unrolled_insn_copies *= loop_info->n_iterations;
5182 /* A little slop to account for the ability to remove initialization
5183 code, better CSE, and other secondary benefits of completely
5184 unrolling some loops. */
5185 unrolled_insn_copies -= 1;
5187 /* Clamp the value. */
5188 if (unrolled_insn_copies < 0)
5189 unrolled_insn_copies = 0;
5192 /* Unroll loops from within strength reduction so that we can use the
5193 induction variable information that strength_reduce has already
5194 collected. Always unroll loops that would be as small or smaller
5195 unrolled than when rolled. */
5196 if ((flags & LOOP_UNROLL)
5197 || (loop_info->n_iterations > 0
5198 && unrolled_insn_copies <= insn_count))
5199 unroll_loop (loop, insn_count, 1);
5201 #ifdef HAVE_doloop_end
5202 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5203 doloop_optimize (loop);
5204 #endif /* HAVE_doloop_end */
5206   /* If the number of iterations is known, add a branch prediction note
5207      to the branch.  Do that only in the second loop pass, as loop unrolling
5208 may change the number of iterations performed. */
5209 if ((flags & LOOP_BCT)
5210 && loop_info->n_iterations / loop_info->unroll_number > 1)
5212 int n = loop_info->n_iterations / loop_info->unroll_number;
5213 predict_insn (PREV_INSN (loop->end),
5214 PRED_LOOP_ITERATIONS,
5215 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5218 if (loop_dump_stream)
5219 fprintf (loop_dump_stream, "\n");
5221 loop_ivs_free (loop);
5222 if (reg_map)
5223 free (reg_map);
5226 /* Record all basic induction variables calculated in the insn.  */
5227 static rtx
5228 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5229 struct loop *loop;
5230 rtx p;
5231 int not_every_iteration;
5232 int maybe_multiple;
5234 struct loop_ivs *ivs = LOOP_IVS (loop);
5235 rtx set;
5236 rtx dest_reg;
5237 rtx inc_val;
5238 rtx mult_val;
5239 rtx *location;
5241 if (GET_CODE (p) == INSN
5242 && (set = single_set (p))
5243 && GET_CODE (SET_DEST (set)) == REG)
5245 dest_reg = SET_DEST (set);
5246 if (REGNO (dest_reg) < max_reg_before_loop
5247 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5248 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5250 if (basic_induction_var (loop, SET_SRC (set),
5251 GET_MODE (SET_SRC (set)),
5252 dest_reg, p, &inc_val, &mult_val,
5253 &location))
5255 /* It is a possible basic induction variable.
5256 Create and initialize an induction structure for it. */
5258 struct induction *v
5259 = (struct induction *) xmalloc (sizeof (struct induction));
5261 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5262 not_every_iteration, maybe_multiple);
5263 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5265 else if (REGNO (dest_reg) < ivs->n_regs)
5266 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5269 return p;
5272 /* Record all givs calculated in the insn.
5273 A register is a giv if: it is only set once, it is a function of a
5274 biv and a constant (or invariant), and it is not a biv. */
5275 static rtx
5276 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5277 struct loop *loop;
5278 rtx p;
5279 int not_every_iteration;
5280 int maybe_multiple;
5282 struct loop_regs *regs = LOOP_REGS (loop);
5284 rtx set;
5285 /* Look for a general induction variable in a register. */
5286 if (GET_CODE (p) == INSN
5287 && (set = single_set (p))
5288 && GET_CODE (SET_DEST (set)) == REG
5289 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5291 rtx src_reg;
5292 rtx dest_reg;
5293 rtx add_val;
5294 rtx mult_val;
5295 rtx ext_val;
5296 int benefit;
5297 rtx regnote = 0;
5298 rtx last_consec_insn;
5300 dest_reg = SET_DEST (set);
5301 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5302 return p;
5304 if (/* SET_SRC is a giv. */
5305 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5306 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5307 /* Equivalent expression is a giv. */
5308 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5309 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5310 &add_val, &mult_val, &ext_val, 0,
5311 &benefit, VOIDmode)))
5312 /* Don't try to handle any regs made by loop optimization.
5313 We have nothing on them in regno_first_uid, etc. */
5314 && REGNO (dest_reg) < max_reg_before_loop
5315 /* Don't recognize a BASIC_INDUCT_VAR here. */
5316 && dest_reg != src_reg
5317 /* This must be the only place where the register is set. */
5318 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5319 /* or all sets must be consecutive and make a giv. */
5320 || (benefit = consec_sets_giv (loop, benefit, p,
5321 src_reg, dest_reg,
5322 &add_val, &mult_val, &ext_val,
5323 &last_consec_insn))))
5325 struct induction *v
5326 = (struct induction *) xmalloc (sizeof (struct induction));
5328 /* If this is a library call, increase benefit. */
5329 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5330 benefit += libcall_benefit (p);
5332 /* Skip the consecutive insns, if there are any. */
5333 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5334 p = last_consec_insn;
5336 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5337 ext_val, benefit, DEST_REG, not_every_iteration,
5338 maybe_multiple, (rtx*) 0);
5343 #ifndef DONT_REDUCE_ADDR
5344 /* Look for givs which are memory addresses. */
5345 /* This resulted in worse code on a VAX 8600. I wonder if it
5346 still does. */
5347 if (GET_CODE (p) == INSN)
5348 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5349 maybe_multiple);
5350 #endif
5352 /* Update the status of whether giv can derive other givs. This can
5353 change when we pass a label or an insn that updates a biv. */
5354 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5355 || GET_CODE (p) == CODE_LABEL)
5356 update_giv_derive (loop, p);
5357 return p;
5360 /* Return 1 if X is a valid source for an initial value (or as value being
5361 compared against in an initial test).
5363 X must be either a register or constant and must not be clobbered between
5364 the current insn and the start of the loop.
5366 INSN is the insn containing X. */
5368 static int
5369 valid_initial_value_p (x, insn, call_seen, loop_start)
5370 rtx x;
5371 rtx insn;
5372 int call_seen;
5373 rtx loop_start;
5375 if (CONSTANT_P (x))
5376 return 1;
5378 /* Only consider pseudos we know about initialized in insns whose luids
5379 we know. */
5380 if (GET_CODE (x) != REG
5381 || REGNO (x) >= max_reg_before_loop)
5382 return 0;
5384 /* Don't use call-clobbered registers across a call which clobbers it. On
5385 some machines, don't use any hard registers at all. */
5386 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5387 && (SMALL_REGISTER_CLASSES
5388 || (call_used_regs[REGNO (x)] && call_seen)))
5389 return 0;
5391 /* Don't use registers that have been clobbered before the start of the
5392 loop. */
5393 if (reg_set_between_p (x, insn, loop_start))
5394 return 0;
5396 return 1;
5399 /* Scan X for memory refs and check each memory address
5400 as a possible giv. INSN is the insn whose pattern X comes from.
5401 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5402 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5403    more than once in each loop iteration.  */
5405 static void
5406 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5407 const struct loop *loop;
5408 rtx x;
5409 rtx insn;
5410 int not_every_iteration, maybe_multiple;
5412 int i, j;
5413 enum rtx_code code;
5414 const char *fmt;
5416 if (x == 0)
5417 return;
5419 code = GET_CODE (x);
5420 switch (code)
5422 case REG:
5423 case CONST_INT:
5424 case CONST:
5425 case CONST_DOUBLE:
5426 case SYMBOL_REF:
5427 case LABEL_REF:
5428 case PC:
5429 case CC0:
5430 case ADDR_VEC:
5431 case ADDR_DIFF_VEC:
5432 case USE:
5433 case CLOBBER:
5434 return;
5436 case MEM:
5438 rtx src_reg;
5439 rtx add_val;
5440 rtx mult_val;
5441 rtx ext_val;
5442 int benefit;
5444 /* This code used to disable creating GIVs with mult_val == 1 and
5445 add_val == 0. However, this leads to lost optimizations when
5446 it comes time to combine a set of related DEST_ADDR GIVs, since
5447 this one would not be seen. */
5449 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5450 &mult_val, &ext_val, 1, &benefit,
5451 GET_MODE (x)))
5453 /* Found one; record it. */
5454 struct induction *v
5455 = (struct induction *) xmalloc (sizeof (struct induction));
5457 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5458 add_val, ext_val, benefit, DEST_ADDR,
5459 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5461 v->mem = x;
5464 return;
5466 default:
5467 break;
5470 /* Recursively scan the subexpressions for other mem refs. */
5472 fmt = GET_RTX_FORMAT (code);
5473 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5474 if (fmt[i] == 'e')
5475 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5476 maybe_multiple);
5477 else if (fmt[i] == 'E')
5478 for (j = 0; j < XVECLEN (x, i); j++)
5479 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5480 maybe_multiple);
5483 /* Fill in the data about one biv update.
5484 V is the `struct induction' in which we record the biv. (It is
5485 allocated by the caller, with alloca.)
5486 INSN is the insn that sets it.
5487 DEST_REG is the biv's reg.
5489 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5490 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5491 being set to INC_VAL.
5493 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5494 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5495 can be executed more than once per iteration. If MAYBE_MULTIPLE
5496 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5497 executed exactly once per iteration. */
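/* For example, a loop insn "i = i + 4" is recorded here with
   MULT_VAL == const1_rtx and INC_VAL == (const_int 4), whereas an insn
   "i = 10" inside the loop is recorded with MULT_VAL == const0_rtx and
   INC_VAL == (const_int 10).  */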
5499 static void
5500 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5501 not_every_iteration, maybe_multiple)
5502 struct loop *loop;
5503 struct induction *v;
5504 rtx insn;
5505 rtx dest_reg;
5506 rtx inc_val;
5507 rtx mult_val;
5508 rtx *location;
5509 int not_every_iteration;
5510 int maybe_multiple;
5512 struct loop_ivs *ivs = LOOP_IVS (loop);
5513 struct iv_class *bl;
5515 v->insn = insn;
5516 v->src_reg = dest_reg;
5517 v->dest_reg = dest_reg;
5518 v->mult_val = mult_val;
5519 v->add_val = inc_val;
5520 v->ext_dependent = NULL_RTX;
5521 v->location = location;
5522 v->mode = GET_MODE (dest_reg);
5523 v->always_computable = ! not_every_iteration;
5524 v->always_executed = ! not_every_iteration;
5525 v->maybe_multiple = maybe_multiple;
5527 /* Add this to the reg's iv_class, creating a class
5528 if this is the first incrementation of the reg. */
5530 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5531 if (bl == 0)
5533 /* Create and initialize new iv_class. */
5535 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5537 bl->regno = REGNO (dest_reg);
5538 bl->biv = 0;
5539 bl->giv = 0;
5540 bl->biv_count = 0;
5541 bl->giv_count = 0;
5543 /* Set initial value to the reg itself. */
5544 bl->initial_value = dest_reg;
5545 bl->final_value = 0;
5546 /* We haven't seen the initializing insn yet */
5547 bl->init_insn = 0;
5548 bl->init_set = 0;
5549 bl->initial_test = 0;
5550 bl->incremented = 0;
5551 bl->eliminable = 0;
5552 bl->nonneg = 0;
5553 bl->reversed = 0;
5554 bl->total_benefit = 0;
5556 /* Add this class to ivs->list. */
5557 bl->next = ivs->list;
5558 ivs->list = bl;
5560 /* Put it in the array of biv register classes. */
5561 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5564 /* Update IV_CLASS entry for this biv. */
5565 v->next_iv = bl->biv;
5566 bl->biv = v;
5567 bl->biv_count++;
5568 if (mult_val == const1_rtx)
5569 bl->incremented = 1;
5571 if (loop_dump_stream)
5572 loop_biv_dump (v, loop_dump_stream, 0);
5575 /* Fill in the data about one giv.
5576 V is the `struct induction' in which we record the giv. (It is
5577 allocated by the caller, with alloca.)
5578 INSN is the insn that sets it.
5579 BENEFIT estimates the savings from deleting this insn.
5580 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5581 into a register or is used as a memory address.
5583 SRC_REG is the biv reg which the giv is computed from.
5584 DEST_REG is the giv's reg (if the giv is stored in a reg).
5585 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5586 LOCATION points to the place where this giv's value appears in INSN. */
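/* For example, with biv `i', an insn computing "p = i * 4 + base" is
   recorded as a DEST_REG giv with SRC_REG = i, DEST_REG = p,
   MULT_VAL = (const_int 4) and ADD_VAL = base; the address of a memory
   reference such as a[i] gives a DEST_ADDR giv whose LOCATION points into
   the MEM's address.  */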
5588 static void
5589 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5590 benefit, type, not_every_iteration, maybe_multiple, location)
5591 const struct loop *loop;
5592 struct induction *v;
5593 rtx insn;
5594 rtx src_reg;
5595 rtx dest_reg;
5596 rtx mult_val, add_val, ext_val;
5597 int benefit;
5598 enum g_types type;
5599 int not_every_iteration, maybe_multiple;
5600 rtx *location;
5602 struct loop_ivs *ivs = LOOP_IVS (loop);
5603 struct induction *b;
5604 struct iv_class *bl;
5605 rtx set = single_set (insn);
5606 rtx temp;
5608 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5609 undo the MULT canonicalization that we performed earlier. */
5610 temp = simplify_rtx (add_val);
5611 if (temp
5612 && ! (GET_CODE (add_val) == MULT
5613 && GET_CODE (temp) == ASHIFT))
5614 add_val = temp;
5616 v->insn = insn;
5617 v->src_reg = src_reg;
5618 v->giv_type = type;
5619 v->dest_reg = dest_reg;
5620 v->mult_val = mult_val;
5621 v->add_val = add_val;
5622 v->ext_dependent = ext_val;
5623 v->benefit = benefit;
5624 v->location = location;
5625 v->cant_derive = 0;
5626 v->combined_with = 0;
5627 v->maybe_multiple = maybe_multiple;
5628 v->maybe_dead = 0;
5629 v->derive_adjustment = 0;
5630 v->same = 0;
5631 v->ignore = 0;
5632 v->new_reg = 0;
5633 v->final_value = 0;
5634 v->same_insn = 0;
5635 v->auto_inc_opt = 0;
5636 v->unrolled = 0;
5637 v->shared = 0;
5639 /* The v->always_computable field is used in update_giv_derive, to
5640 determine whether a giv can be used to derive another giv. For a
5641 DEST_REG giv, INSN computes a new value for the giv, so its value
5642 isn't computable if INSN isn't executed every iteration.
5643 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5644 it does not compute a new value. Hence the value is always computable
5645 regardless of whether INSN is executed each iteration. */
5647 if (type == DEST_ADDR)
5648 v->always_computable = 1;
5649 else
5650 v->always_computable = ! not_every_iteration;
5652 v->always_executed = ! not_every_iteration;
5654 if (type == DEST_ADDR)
5656 v->mode = GET_MODE (*location);
5657 v->lifetime = 1;
5659 else /* type == DEST_REG */
5661 v->mode = GET_MODE (SET_DEST (set));
5663 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5665 /* If the lifetime is zero, it means that this register is
5666 really a dead store. So mark this as a giv that can be
5667 ignored. This will not prevent the biv from being eliminated. */
5668 if (v->lifetime == 0)
5669 v->ignore = 1;
5671 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5672 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5675 /* Add the giv to the class of givs computed from one biv. */
5677 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5678 if (bl)
5680 v->next_iv = bl->giv;
5681 bl->giv = v;
5682 /* Don't count DEST_ADDR. This is supposed to count the number of
5683 insns that calculate givs. */
5684 if (type == DEST_REG)
5685 bl->giv_count++;
5686 bl->total_benefit += benefit;
5688 else
5689 /* Fatal error, biv missing for this giv? */
5690 abort ();
5692 if (type == DEST_ADDR)
5693 v->replaceable = 1;
5694 else
5696 /* The giv can be replaced outright by the reduced register only if all
5697 of the following conditions are true:
5698 - the insn that sets the giv is always executed on any iteration
5699 on which the giv is used at all
5700 (there are two ways to deduce this:
5701 either the insn is executed on every iteration,
5702 or all uses follow that insn in the same basic block),
5703 - the giv is not used outside the loop
5704 - no assignments to the biv occur during the giv's lifetime. */
5706 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5707 /* Previous line always fails if INSN was moved by loop opt. */
5708 && REGNO_LAST_LUID (REGNO (dest_reg))
5709 < INSN_LUID (loop->end)
5710 && (! not_every_iteration
5711 || last_use_this_basic_block (dest_reg, insn)))
5713 /* Now check that there are no assignments to the biv within the
5714 giv's lifetime. This requires two separate checks. */
5716 /* Check each biv update, and fail if any are between the first
5717 and last use of the giv.
5719 If this loop contains an inner loop that was unrolled, then
5720 the insn modifying the biv may have been emitted by the loop
5721 unrolling code, and hence does not have a valid luid. Just
5722 mark the biv as not replaceable in this case. It is not very
5723 useful as a biv, because it is used in two different loops.
5724 It is very unlikely that we would be able to optimize the giv
5725 using this biv anyways. */
5727 v->replaceable = 1;
5728 for (b = bl->biv; b; b = b->next_iv)
5730 if (INSN_UID (b->insn) >= max_uid_for_loop
5731 || ((INSN_LUID (b->insn)
5732 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5733 && (INSN_LUID (b->insn)
5734 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5736 v->replaceable = 0;
5737 v->not_replaceable = 1;
5738 break;
5742 /* If there are any backwards branches that go from after the
5743 biv update to before it, then this giv is not replaceable. */
5744 if (v->replaceable)
5745 for (b = bl->biv; b; b = b->next_iv)
5746 if (back_branch_in_range_p (loop, b->insn))
5748 v->replaceable = 0;
5749 v->not_replaceable = 1;
5750 break;
5753 else
5755 /* May still be replaceable, we don't have enough info here to
5756 decide. */
5757 v->replaceable = 0;
5758 v->not_replaceable = 0;
5762 /* Record whether the add_val contains a const_int, for later use by
5763 combine_givs. */
5765 rtx tem = add_val;
5767 v->no_const_addval = 1;
5768 if (tem == const0_rtx)
5770 else if (CONSTANT_P (add_val))
5771 v->no_const_addval = 0;
5772 if (GET_CODE (tem) == PLUS)
5774 while (1)
5776 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5777 tem = XEXP (tem, 0);
5778 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5779 tem = XEXP (tem, 1);
5780 else
5781 break;
5783 if (CONSTANT_P (XEXP (tem, 1)))
5784 v->no_const_addval = 0;
5788 if (loop_dump_stream)
5789 loop_giv_dump (v, loop_dump_stream, 0);
5792 /* All this does is determine whether a giv can be made replaceable because
5793 its final value can be calculated. This code can not be part of record_giv
5794 above, because final_giv_value requires that the number of loop iterations
5795 be known, and that can not be accurately calculated until after all givs
5796 have been identified. */
5798 static void
5799 check_final_value (loop, v)
5800 const struct loop *loop;
5801 struct induction *v;
5803 struct loop_ivs *ivs = LOOP_IVS (loop);
5804 struct iv_class *bl;
5805 rtx final_value = 0;
5807 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5809 /* DEST_ADDR givs will never reach here, because they are always marked
5810 replaceable above in record_giv. */
5812 /* The giv can be replaced outright by the reduced register only if all
5813 of the following conditions are true:
5814 - the insn that sets the giv is always executed on any iteration
5815 on which the giv is used at all
5816 (there are two ways to deduce this:
5817 either the insn is executed on every iteration,
5818 or all uses follow that insn in the same basic block),
5819 - its final value can be calculated (this condition is different
5820 than the one above in record_giv)
5821 - it's not used before it's set
5822 - no assignments to the biv occur during the giv's lifetime. */
5824 #if 0
5825 /* This is only called now when replaceable is known to be false. */
5826 /* Clear replaceable, so that it won't confuse final_giv_value. */
5827 v->replaceable = 0;
5828 #endif
5830 if ((final_value = final_giv_value (loop, v))
5831 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5833 int biv_increment_seen = 0, before_giv_insn = 0;
5834 rtx p = v->insn;
5835 rtx last_giv_use;
5837 v->replaceable = 1;
5839 /* When trying to determine whether or not a biv increment occurs
5840 during the lifetime of the giv, we can ignore uses of the variable
5841 outside the loop because final_value is true. Hence we can not
5842 use regno_last_uid and regno_first_uid as above in record_giv. */
5844 /* Search the loop to determine whether any assignments to the
5845 biv occur during the giv's lifetime. Start with the insn
5846 that sets the giv, and search around the loop until we come
5847 back to that insn again.
5849 Also fail if there is a jump within the giv's lifetime that jumps
5850 to somewhere outside the lifetime but still within the loop. This
5851 catches spaghetti code where the execution order is not linear, and
5852 hence the above test fails. Here we assume that the giv lifetime
5853 does not extend from one iteration of the loop to the next, so as
5854 to make the test easier. Since the lifetime isn't known yet,
5855 this requires two loops. See also record_giv above. */
5857 last_giv_use = v->insn;
5859 while (1)
5861 p = NEXT_INSN (p);
5862 if (p == loop->end)
5864 before_giv_insn = 1;
5865 p = NEXT_INSN (loop->start);
5867 if (p == v->insn)
5868 break;
5870 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5871 || GET_CODE (p) == CALL_INSN)
5873 /* It is possible for the BIV increment to use the GIV if we
5874 have a cycle. Thus we must be sure to check each insn for
5875 both BIV and GIV uses, and we must check for BIV uses
5876 first. */
5878 if (! biv_increment_seen
5879 && reg_set_p (v->src_reg, PATTERN (p)))
5880 biv_increment_seen = 1;
5882 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5884 if (biv_increment_seen || before_giv_insn)
5886 v->replaceable = 0;
5887 v->not_replaceable = 1;
5888 break;
5890 last_giv_use = p;
5895 /* Now that the lifetime of the giv is known, check for branches
5896 from within the lifetime to outside the lifetime if it is still
5897 replaceable. */
5899 if (v->replaceable)
5901 p = v->insn;
5902 while (1)
5904 p = NEXT_INSN (p);
5905 if (p == loop->end)
5906 p = NEXT_INSN (loop->start);
5907 if (p == last_giv_use)
5908 break;
5910 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5911 && LABEL_NAME (JUMP_LABEL (p))
5912 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5913 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5914 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5915 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5917 v->replaceable = 0;
5918 v->not_replaceable = 1;
5920 if (loop_dump_stream)
5921 fprintf (loop_dump_stream,
5922 "Found branch outside giv lifetime.\n");
5924 break;
5929 /* If it is replaceable, then save the final value. */
5930 if (v->replaceable)
5931 v->final_value = final_value;
5934 if (loop_dump_stream && v->replaceable)
5935 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5936 INSN_UID (v->insn), REGNO (v->dest_reg));
5939 /* Update the status of whether a giv can derive other givs.
5941 We need to do something special if there is or may be an update to the biv
5942 between the time the giv is defined and the time it is used to derive
5943 another giv.
5945 In addition, a giv that is only conditionally set is not allowed to
5946 derive another giv once a label has been passed.
5948 The cases we look at are when a label or an update to a biv is passed. */
5950 static void
5951 update_giv_derive (loop, p)
5952 const struct loop *loop;
5953 rtx p;
5955 struct loop_ivs *ivs = LOOP_IVS (loop);
5956 struct iv_class *bl;
5957 struct induction *biv, *giv;
5958 rtx tem;
5959 int dummy;
5961 /* Search all IV classes, then all bivs, and finally all givs.
5963 There are three cases we are concerned with. First we have the situation
5964 of a giv that is only updated conditionally. In that case, it may not
5965 derive any givs after a label is passed.
5967 The second case is when a biv update occurs, or may occur, after the
5968 definition of a giv. For certain biv updates (see below) that are
5969 known to occur between the giv definition and use, we can adjust the
5970 giv definition. For others, or when the biv update is conditional,
5971 we must prevent the giv from deriving any other givs. There are two
5972 sub-cases within this case.
5974 If this is a label, we are concerned with any biv update that is done
5975 conditionally, since it may be done after the giv is defined followed by
5976 a branch here (actually, we need to pass both a jump and a label, but
5977 this extra tracking doesn't seem worth it).
5979 If this is a jump, we are concerned about any biv update that may be
5980 executed multiple times. We are actually only concerned about
5981 backward jumps, but it is probably not worth performing the test
5982 on the jump again here.
5984 If this is a biv update, we must adjust the giv status to show that a
5985 subsequent biv update was performed. If this adjustment cannot be done,
5986 the giv cannot derive further givs. */
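/* For example, if giv G was recorded as G = B * 3 + 1 and we now pass the
   biv update "B = B + 2", the product biv->add_val * giv->mult_val, here
   (const_int 6), is accumulated in G's derive_adjustment so that givs
   later derived from G can be compensated; if the product cannot be
   formed, cant_derive is set instead.  */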
5988 for (bl = ivs->list; bl; bl = bl->next)
5989 for (biv = bl->biv; biv; biv = biv->next_iv)
5990 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5991 || biv->insn == p)
5993 for (giv = bl->giv; giv; giv = giv->next_iv)
5995 /* If cant_derive is already true, there is no point in
5996 checking all of these conditions again. */
5997 if (giv->cant_derive)
5998 continue;
6000 /* If this giv is conditionally set and we have passed a label,
6001 it cannot derive anything. */
6002 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6003 giv->cant_derive = 1;
6005 /* Skip givs that have mult_val == 0, since
6006 they are really invariants. Also skip those that are
6007 replaceable, since we know their lifetime doesn't contain
6008 any biv update. */
6009 else if (giv->mult_val == const0_rtx || giv->replaceable)
6010 continue;
6012 /* The only way we can allow this giv to derive another
6013 is if this is a biv increment and we can form the product
6014 of biv->add_val and giv->mult_val. In this case, we will
6015 be able to compute a compensation. */
6016 else if (biv->insn == p)
6018 rtx ext_val_dummy;
6020 tem = 0;
6021 if (biv->mult_val == const1_rtx)
6022 tem = simplify_giv_expr (loop,
6023 gen_rtx_MULT (giv->mode,
6024 biv->add_val,
6025 giv->mult_val),
6026 &ext_val_dummy, &dummy);
6028 if (tem && giv->derive_adjustment)
6029 tem = simplify_giv_expr
6030 (loop,
6031 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6032 &ext_val_dummy, &dummy);
6034 if (tem)
6035 giv->derive_adjustment = tem;
6036 else
6037 giv->cant_derive = 1;
6039 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6040 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6041 giv->cant_derive = 1;
6046 /* Check whether an insn is an increment legitimate for a basic induction var.
6047 X is the source of insn P, or a part of it.
6048 MODE is the mode in which X should be interpreted.
6050 DEST_REG is the putative biv, also the destination of the insn.
6051 We accept patterns of these forms:
6052 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6053 REG = INVARIANT + REG
6055 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6056 store the additive term into *INC_VAL, and store the place where
6057 we found the additive term into *LOCATION.
6059 If X is an assignment of an invariant into DEST_REG, we set
6060 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6062 We also want to detect a BIV when it corresponds to a variable
6063 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6064 of the variable may be a PLUS that adds a SUBREG of that variable to
6065 an invariant and then sign- or zero-extends the result of the PLUS
6066 into the variable.
6068 Most GIVs in such cases will be in the promoted mode, since that is
6069 probably the natural computation mode (and almost certainly the mode
6070 used for addresses) on the machine. So we view the pseudo-reg containing
6071 the variable as the BIV, as if it were simply incremented.
6073 Note that treating the entire pseudo as a BIV will result in making
6074 simple increments to any GIVs based on it. However, if the variable
6075 overflows in its declared mode but not its promoted mode, the result will
6076 be incorrect. This is acceptable if the variable is signed, since
6077 overflows in such cases are undefined, but not if it is unsigned, since
6078 those overflows are defined. So we only check for SIGN_EXTEND and
6079 not ZERO_EXTEND.
6081 If we cannot find a biv, we return 0. */
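/* For example, for the insn "i = i + 1", X is (plus (reg i) (const_int 1)),
   so we return 1 with *MULT_VAL = const1_rtx and *INC_VAL = (const_int 1).
   For "i = 0" in the innermost loop, X is (const_int 0), so we return 1
   with *MULT_VAL = const0_rtx and *INC_VAL = (const_int 0).  */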
6083 static int
6084 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6085 const struct loop *loop;
6086 rtx x;
6087 enum machine_mode mode;
6088 rtx dest_reg;
6089 rtx p;
6090 rtx *inc_val;
6091 rtx *mult_val;
6092 rtx **location;
6094 enum rtx_code code;
6095 rtx *argp, arg;
6096 rtx insn, set = 0;
6098 code = GET_CODE (x);
6099 *location = NULL;
6100 switch (code)
6102 case PLUS:
6103 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6104 || (GET_CODE (XEXP (x, 0)) == SUBREG
6105 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6106 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6108 argp = &XEXP (x, 1);
6110 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6111 || (GET_CODE (XEXP (x, 1)) == SUBREG
6112 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6113 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6115 argp = &XEXP (x, 0);
6117 else
6118 return 0;
6120 arg = *argp;
6121 if (loop_invariant_p (loop, arg) != 1)
6122 return 0;
6124 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6125 *mult_val = const1_rtx;
6126 *location = argp;
6127 return 1;
6129 case SUBREG:
6130 /* If this is a SUBREG for a promoted variable, check the inner
6131 value. */
6132 if (SUBREG_PROMOTED_VAR_P (x))
6133 return basic_induction_var (loop, SUBREG_REG (x),
6134 GET_MODE (SUBREG_REG (x)),
6135 dest_reg, p, inc_val, mult_val, location);
6136 return 0;
6138 case REG:
6139 /* If this register is assigned in a previous insn, look at its
6140 source, but don't go outside the loop or past a label. */
6142 /* If this sets a register to itself, we would repeat any previous
6143 biv increment if we applied this strategy blindly. */
6144 if (rtx_equal_p (dest_reg, x))
6145 return 0;
6147 insn = p;
6148 while (1)
6150 rtx dest;
6153 insn = PREV_INSN (insn);
6155 while (insn && GET_CODE (insn) == NOTE
6156 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6158 if (!insn)
6159 break;
6160 set = single_set (insn);
6161 if (set == 0)
6162 break;
6163 dest = SET_DEST (set);
6164 if (dest == x
6165 || (GET_CODE (dest) == SUBREG
6166 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6167 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6168 && SUBREG_REG (dest) == x))
6169 return basic_induction_var (loop, SET_SRC (set),
6170 (GET_MODE (SET_SRC (set)) == VOIDmode
6171 ? GET_MODE (x)
6172 : GET_MODE (SET_SRC (set))),
6173 dest_reg, insn,
6174 inc_val, mult_val, location);
6176 while (GET_CODE (dest) == SIGN_EXTRACT
6177 || GET_CODE (dest) == ZERO_EXTRACT
6178 || GET_CODE (dest) == SUBREG
6179 || GET_CODE (dest) == STRICT_LOW_PART)
6180 dest = XEXP (dest, 0);
6181 if (dest == x)
6182 break;
6184 /* Fall through. */
6186 /* Can accept constant setting of biv only when inside the innermost loop.
6187 Otherwise, a biv of an inner loop may be incorrectly recognized
6188 as a biv of the outer loop,
6189 causing code to be moved INTO the inner loop. */
6190 case MEM:
6191 if (loop_invariant_p (loop, x) != 1)
6192 return 0;
6193 case CONST_INT:
6194 case SYMBOL_REF:
6195 case CONST:
6196 /* convert_modes aborts if we try to convert to or from CCmode, so just
6197 exclude that case. It is very unlikely that a condition code value
6198 would be a useful iterator anyways. */
6199 if (loop->level == 1
6200 && GET_MODE_CLASS (mode) != MODE_CC
6201 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
6203 /* Possible bug here? Perhaps we don't know the mode of X. */
6204 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6205 *mult_val = const0_rtx;
6206 return 1;
6208 else
6209 return 0;
6211 case SIGN_EXTEND:
6212 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6213 dest_reg, p, inc_val, mult_val, location);
6215 case ASHIFTRT:
6216 /* Similar, since this can be a sign extension. */
6217 for (insn = PREV_INSN (p);
6218 (insn && GET_CODE (insn) == NOTE
6219 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6220 insn = PREV_INSN (insn))
6223 if (insn)
6224 set = single_set (insn);
6226 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6227 && set && SET_DEST (set) == XEXP (x, 0)
6228 && GET_CODE (XEXP (x, 1)) == CONST_INT
6229 && INTVAL (XEXP (x, 1)) >= 0
6230 && GET_CODE (SET_SRC (set)) == ASHIFT
6231 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6232 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6233 GET_MODE (XEXP (x, 0)),
6234 dest_reg, insn, inc_val, mult_val,
6235 location);
6236 return 0;
6238 default:
6239 return 0;
6243 /* A general induction variable (giv) is any quantity that is a linear
6244 function of a basic induction variable,
6245 i.e. giv = biv * mult_val + add_val.
6246 The coefficients can be any loop invariant quantity.
6247 A giv need not be computed directly from the biv;
6248 it can be computed by way of other givs. */
6250 /* Determine whether X computes a giv.
6251 If it does, return a nonzero value
6252 which is the benefit from eliminating the computation of X;
6253 set *SRC_REG to the register of the biv that it is computed from;
6254 set *ADD_VAL and *MULT_VAL to the coefficients,
6255 such that the value of X is biv * mult + add; */
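/* For example, if X simplifies to
   (plus (mult (reg biv) (const_int 4)) (reg invar)), we set *SRC_REG to
   the biv, *MULT_VAL to (const_int 4) and *ADD_VAL to the invariant
   register, and return nonzero.  */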
6257 static int
6258 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6259 is_addr, pbenefit, addr_mode)
6260 const struct loop *loop;
6261 rtx x;
6262 rtx *src_reg;
6263 rtx *add_val;
6264 rtx *mult_val;
6265 rtx *ext_val;
6266 int is_addr;
6267 int *pbenefit;
6268 enum machine_mode addr_mode;
6270 struct loop_ivs *ivs = LOOP_IVS (loop);
6271 rtx orig_x = x;
6273 /* If this is an invariant, forget it, it isn't a giv. */
6274 if (loop_invariant_p (loop, x) == 1)
6275 return 0;
6277 *pbenefit = 0;
6278 *ext_val = NULL_RTX;
6279 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6280 if (x == 0)
6281 return 0;
6283 switch (GET_CODE (x))
6285 case USE:
6286 case CONST_INT:
6287 /* Since this is now an invariant and wasn't before, it must be a giv
6288 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6289 with. */
6290 *src_reg = ivs->list->biv->dest_reg;
6291 *mult_val = const0_rtx;
6292 *add_val = x;
6293 break;
6295 case REG:
6296 /* This is equivalent to a BIV. */
6297 *src_reg = x;
6298 *mult_val = const1_rtx;
6299 *add_val = const0_rtx;
6300 break;
6302 case PLUS:
6303 /* Either (plus (biv) (invar)) or
6304 (plus (mult (biv) (invar_1)) (invar_2)). */
6305 if (GET_CODE (XEXP (x, 0)) == MULT)
6307 *src_reg = XEXP (XEXP (x, 0), 0);
6308 *mult_val = XEXP (XEXP (x, 0), 1);
6310 else
6312 *src_reg = XEXP (x, 0);
6313 *mult_val = const1_rtx;
6315 *add_val = XEXP (x, 1);
6316 break;
6318 case MULT:
6319 /* ADD_VAL is zero. */
6320 *src_reg = XEXP (x, 0);
6321 *mult_val = XEXP (x, 1);
6322 *add_val = const0_rtx;
6323 break;
6325 default:
6326 abort ();
6329 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6330 unless they are CONST_INT). */
6331 if (GET_CODE (*add_val) == USE)
6332 *add_val = XEXP (*add_val, 0);
6333 if (GET_CODE (*mult_val) == USE)
6334 *mult_val = XEXP (*mult_val, 0);
6336 if (is_addr)
6337 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6338 else
6339 *pbenefit += rtx_cost (orig_x, SET);
6341 /* Always return true if this is a giv so it will be detected as such,
6342 even if the benefit is zero or negative. This allows elimination
6343 of bivs that might otherwise not be eliminated. */
6344 return 1;
6347 /* Given an expression, X, try to form it as a linear function of a biv.
6348 We will canonicalize it to be of the form
6349 (plus (mult (BIV) (invar_1))
6350 (invar_2))
6351 with possible degeneracies.
6353 The invariant expressions must each be of a form that can be used as a
6354 machine operand. We surround them with a USE rtx (a hack, but localized
6355 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6356 routine; it is the caller's responsibility to strip them.
6358 If no such canonicalization is possible (i.e., two biv's are used or an
6359 expression that is neither invariant nor a biv or giv), this routine
6360 returns 0.
6362 For a non-zero return, the result will have a code of CONST_INT, USE,
6363 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6365 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
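/* A worked example: X = (minus (reg biv) (const_int 2)) is rewritten as
   "biv + 2 * (-1)" and simplified to the degenerate form
   (plus (reg biv) (const_int -2)), while a sum of two invariants such as
   (plus (reg invar) (symbol_ref)) is returned wrapped as
   (use (plus (reg invar) (symbol_ref))).  */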
6367 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6368 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6370 static rtx
6371 simplify_giv_expr (loop, x, ext_val, benefit)
6372 const struct loop *loop;
6373 rtx x;
6374 rtx *ext_val;
6375 int *benefit;
6377 struct loop_ivs *ivs = LOOP_IVS (loop);
6378 struct loop_regs *regs = LOOP_REGS (loop);
6379 enum machine_mode mode = GET_MODE (x);
6380 rtx arg0, arg1;
6381 rtx tem;
6383 /* If this is not an integer mode, or if we cannot do arithmetic in this
6384 mode, this can't be a giv. */
6385 if (mode != VOIDmode
6386 && (GET_MODE_CLASS (mode) != MODE_INT
6387 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6388 return NULL_RTX;
6390 switch (GET_CODE (x))
6392 case PLUS:
6393 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6394 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6395 if (arg0 == 0 || arg1 == 0)
6396 return NULL_RTX;
6398 /* Put constant last, CONST_INT last if both constant. */
6399 if ((GET_CODE (arg0) == USE
6400 || GET_CODE (arg0) == CONST_INT)
6401 && ! ((GET_CODE (arg0) == USE
6402 && GET_CODE (arg1) == USE)
6403 || GET_CODE (arg1) == CONST_INT))
6404 tem = arg0, arg0 = arg1, arg1 = tem;
6406 /* Handle addition of zero, then addition of an invariant. */
6407 if (arg1 == const0_rtx)
6408 return arg0;
6409 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6410 switch (GET_CODE (arg0))
6412 case CONST_INT:
6413 case USE:
6414 /* Adding two invariants must result in an invariant, so enclose
6415 addition operation inside a USE and return it. */
6416 if (GET_CODE (arg0) == USE)
6417 arg0 = XEXP (arg0, 0);
6418 if (GET_CODE (arg1) == USE)
6419 arg1 = XEXP (arg1, 0);
6421 if (GET_CODE (arg0) == CONST_INT)
6422 tem = arg0, arg0 = arg1, arg1 = tem;
6423 if (GET_CODE (arg1) == CONST_INT)
6424 tem = sge_plus_constant (arg0, arg1);
6425 else
6426 tem = sge_plus (mode, arg0, arg1);
6428 if (GET_CODE (tem) != CONST_INT)
6429 tem = gen_rtx_USE (mode, tem);
6430 return tem;
6432 case REG:
6433 case MULT:
6434 /* biv + invar or mult + invar. Return sum. */
6435 return gen_rtx_PLUS (mode, arg0, arg1);
6437 case PLUS:
6438 /* (a + invar_1) + invar_2. Associate. */
6439 return
6440 simplify_giv_expr (loop,
6441 gen_rtx_PLUS (mode,
6442 XEXP (arg0, 0),
6443 gen_rtx_PLUS (mode,
6444 XEXP (arg0, 1),
6445 arg1)),
6446 ext_val, benefit);
6448 default:
6449 abort ();
6452 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6453 MULT to reduce cases. */
6454 if (GET_CODE (arg0) == REG)
6455 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6456 if (GET_CODE (arg1) == REG)
6457 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6459 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6460 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6461 Recurse to associate the second PLUS. */
6462 if (GET_CODE (arg1) == MULT)
6463 tem = arg0, arg0 = arg1, arg1 = tem;
6465 if (GET_CODE (arg1) == PLUS)
6466 return
6467 simplify_giv_expr (loop,
6468 gen_rtx_PLUS (mode,
6469 gen_rtx_PLUS (mode, arg0,
6470 XEXP (arg1, 0)),
6471 XEXP (arg1, 1)),
6472 ext_val, benefit);
6474 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6475 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6476 return NULL_RTX;
6478 if (!rtx_equal_p (arg0, arg1))
6479 return NULL_RTX;
6481 return simplify_giv_expr (loop,
6482 gen_rtx_MULT (mode,
6483 XEXP (arg0, 0),
6484 gen_rtx_PLUS (mode,
6485 XEXP (arg0, 1),
6486 XEXP (arg1, 1))),
6487 ext_val, benefit);
6489 case MINUS:
6490 /* Handle "a - b" as "a + b * (-1)". */
6491 return simplify_giv_expr (loop,
6492 gen_rtx_PLUS (mode,
6493 XEXP (x, 0),
6494 gen_rtx_MULT (mode,
6495 XEXP (x, 1),
6496 constm1_rtx)),
6497 ext_val, benefit);
6499 case MULT:
6500 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6501 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6502 if (arg0 == 0 || arg1 == 0)
6503 return NULL_RTX;
6505 /* Put constant last, CONST_INT last if both constant. */
6506 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6507 && GET_CODE (arg1) != CONST_INT)
6508 tem = arg0, arg0 = arg1, arg1 = tem;
6510 /* If second argument is not now constant, not giv. */
6511 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6512 return NULL_RTX;
6514 /* Handle multiply by 0 or 1. */
6515 if (arg1 == const0_rtx)
6516 return const0_rtx;
6518 else if (arg1 == const1_rtx)
6519 return arg0;
6521 switch (GET_CODE (arg0))
6523 case REG:
6524 /* biv * invar. Done. */
6525 return gen_rtx_MULT (mode, arg0, arg1);
6527 case CONST_INT:
6528 /* Product of two constants. */
6529 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6531 case USE:
6532 /* invar * invar is a giv, but attempt to simplify it somehow. */
6533 if (GET_CODE (arg1) != CONST_INT)
6534 return NULL_RTX;
6536 arg0 = XEXP (arg0, 0);
6537 if (GET_CODE (arg0) == MULT)
6539 /* (invar_0 * invar_1) * invar_2. Associate. */
6540 return simplify_giv_expr (loop,
6541 gen_rtx_MULT (mode,
6542 XEXP (arg0, 0),
6543 gen_rtx_MULT (mode,
6544 XEXP (arg0,
6545 1),
6546 arg1)),
6547 ext_val, benefit);
6549 /* Propagate the MULT expressions to the innermost nodes. */
6550 else if (GET_CODE (arg0) == PLUS)
6552 /* (invar_0 + invar_1) * invar_2. Distribute. */
6553 return simplify_giv_expr (loop,
6554 gen_rtx_PLUS (mode,
6555 gen_rtx_MULT (mode,
6556 XEXP (arg0,
6557 0),
6558 arg1),
6559 gen_rtx_MULT (mode,
6560 XEXP (arg0,
6561 1),
6562 arg1)),
6563 ext_val, benefit);
6565 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6567 case MULT:
6568 /* (a * invar_1) * invar_2. Associate. */
6569 return simplify_giv_expr (loop,
6570 gen_rtx_MULT (mode,
6571 XEXP (arg0, 0),
6572 gen_rtx_MULT (mode,
6573 XEXP (arg0, 1),
6574 arg1)),
6575 ext_val, benefit);
6577 case PLUS:
6578 /* (a + invar_1) * invar_2. Distribute. */
6579 return simplify_giv_expr (loop,
6580 gen_rtx_PLUS (mode,
6581 gen_rtx_MULT (mode,
6582 XEXP (arg0, 0),
6583 arg1),
6584 gen_rtx_MULT (mode,
6585 XEXP (arg0, 1),
6586 arg1)),
6587 ext_val, benefit);
6589 default:
6590 abort ();
6593 case ASHIFT:
6594 /* Shift by constant is multiply by power of two. */
6595 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6596 return 0;
6598 return
6599 simplify_giv_expr (loop,
6600 gen_rtx_MULT (mode,
6601 XEXP (x, 0),
6602 GEN_INT ((HOST_WIDE_INT) 1
6603 << INTVAL (XEXP (x, 1)))),
6604 ext_val, benefit);
6606 case NEG:
6607 /* "-a" is "a * (-1)" */
6608 return simplify_giv_expr (loop,
6609 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6610 ext_val, benefit);
6612 case NOT:
6613 /* "~a" is "-a - 1". Silly, but easy. */
6614 return simplify_giv_expr (loop,
6615 gen_rtx_MINUS (mode,
6616 gen_rtx_NEG (mode, XEXP (x, 0)),
6617 const1_rtx),
6618 ext_val, benefit);
6620 case USE:
6621 /* Already in proper form for invariant. */
6622 return x;
6624 case SIGN_EXTEND:
6625 case ZERO_EXTEND:
6626 case TRUNCATE:
6627 /* Conditionally recognize extensions of simple IVs. After we've
6628 computed loop traversal counts and verified the range of the
6629 source IV, we'll reevaluate this as a GIV. */
6630 if (*ext_val == NULL_RTX)
6632 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6633 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6635 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6636 return arg0;
6639 goto do_default;
6641 case REG:
6642 /* If this is a new register, we can't deal with it. */
6643 if (REGNO (x) >= max_reg_before_loop)
6644 return 0;
6646 /* Check for biv or giv. */
6647 switch (REG_IV_TYPE (ivs, REGNO (x)))
6649 case BASIC_INDUCT:
6650 return x;
6651 case GENERAL_INDUCT:
6653 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6655 /* Form expression from giv and add benefit. Ensure this giv
6656 can derive another and subtract any needed adjustment if so. */
6658 /* Increasing the benefit here is risky. The only case in which it
6659 is arguably correct is if this is the only use of V. In other
6660 cases, this will artificially inflate the benefit of the current
6661 giv, and lead to suboptimal code. Thus, it is disabled, since
6662 potentially not reducing an only marginally beneficial giv is
6663 less harmful than reducing many givs that are not really
6664 beneficial. */
6666 rtx single_use = regs->array[REGNO (x)].single_usage;
6667 if (single_use && single_use != const0_rtx)
6668 *benefit += v->benefit;
6671 if (v->cant_derive)
6672 return 0;
6674 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6675 v->src_reg, v->mult_val),
6676 v->add_val);
6678 if (v->derive_adjustment)
6679 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6680 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6681 if (*ext_val)
6683 if (!v->ext_dependent)
6684 return arg0;
6686 else
6688 *ext_val = v->ext_dependent;
6689 return arg0;
6691 return 0;
6694 default:
6695 do_default:
6696 /* If it isn't an induction variable, and it is invariant, we
6697 may be able to simplify things further by looking through
6698 the bits we just moved outside the loop. */
6699 if (loop_invariant_p (loop, x) == 1)
6701 struct movable *m;
6702 struct loop_movables *movables = LOOP_MOVABLES (loop);
6704 for (m = movables->head; m; m = m->next)
6705 if (rtx_equal_p (x, m->set_dest))
6707 /* Ok, we found a match. Substitute and simplify. */
6709 /* If we match another movable, we must use that, as
6710 this one is going away. */
6711 if (m->match)
6712 return simplify_giv_expr (loop, m->match->set_dest,
6713 ext_val, benefit);
6715 /* If consec is non-zero, this is a member of a group of
6716 instructions that were moved together. We handle this
6717 case only to the point of seeking to the last insn and
6718 looking for a REG_EQUAL. Fail if we don't find one. */
6719 if (m->consec != 0)
6721 int i = m->consec;
6722 tem = m->insn;
6725 tem = NEXT_INSN (tem);
6727 while (--i > 0);
6729 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6730 if (tem)
6731 tem = XEXP (tem, 0);
6733 else
6735 tem = single_set (m->insn);
6736 if (tem)
6737 tem = SET_SRC (tem);
6740 if (tem)
6742 /* What we are most interested in is pointer
6743 arithmetic on invariants -- only take
6744 patterns we may be able to do something with. */
6745 if (GET_CODE (tem) == PLUS
6746 || GET_CODE (tem) == MULT
6747 || GET_CODE (tem) == ASHIFT
6748 || GET_CODE (tem) == CONST_INT
6749 || GET_CODE (tem) == SYMBOL_REF)
6751 tem = simplify_giv_expr (loop, tem, ext_val,
6752 benefit);
6753 if (tem)
6754 return tem;
6756 else if (GET_CODE (tem) == CONST
6757 && GET_CODE (XEXP (tem, 0)) == PLUS
6758 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6759 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6761 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6762 ext_val, benefit);
6763 if (tem)
6764 return tem;
6767 break;
6770 break;
6773 /* Fall through to general case. */
6774 default:
6775 /* If invariant, return as USE (unless CONST_INT).
6776 Otherwise, not giv. */
6777 if (GET_CODE (x) == USE)
6778 x = XEXP (x, 0);
6780 if (loop_invariant_p (loop, x) == 1)
6782 if (GET_CODE (x) == CONST_INT)
6783 return x;
6784 if (GET_CODE (x) == CONST
6785 && GET_CODE (XEXP (x, 0)) == PLUS
6786 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6787 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6788 x = XEXP (x, 0);
6789 return gen_rtx_USE (mode, x);
6791 else
6792 return 0;
6796 /* This routine folds invariants such that there is only ever one
6797 CONST_INT in the summation. It is only used by simplify_giv_expr. */
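/* For example, folding C = (const_int 2) into
   X = (plus (reg invar) (const_int 3)) yields
   (plus (reg invar) (const_int 5)), so at most one CONST_INT ever remains
   in the sum.  */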
6799 static rtx
6800 sge_plus_constant (x, c)
6801 rtx x, c;
6803 if (GET_CODE (x) == CONST_INT)
6804 return GEN_INT (INTVAL (x) + INTVAL (c));
6805 else if (GET_CODE (x) != PLUS)
6806 return gen_rtx_PLUS (GET_MODE (x), x, c);
6807 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6809 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6810 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6812 else if (GET_CODE (XEXP (x, 0)) == PLUS
6813 || GET_CODE (XEXP (x, 1)) != PLUS)
6815 return gen_rtx_PLUS (GET_MODE (x),
6816 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6818 else
6820 return gen_rtx_PLUS (GET_MODE (x),
6821 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6825 static rtx
6826 sge_plus (mode, x, y)
6827 enum machine_mode mode;
6828 rtx x, y;
6830 while (GET_CODE (y) == PLUS)
6832 rtx a = XEXP (y, 0);
6833 if (GET_CODE (a) == CONST_INT)
6834 x = sge_plus_constant (x, a);
6835 else
6836 x = gen_rtx_PLUS (mode, x, a);
6837 y = XEXP (y, 1);
6839 if (GET_CODE (y) == CONST_INT)
6840 x = sge_plus_constant (x, y);
6841 else
6842 x = gen_rtx_PLUS (mode, x, y);
6843 return x;
6846 /* Help detect a giv that is calculated by several consecutive insns;
6847 for example,
6848 giv = biv * M
6849 giv = giv + A
6850 The caller has already identified the first insn P as having a giv as dest;
6851 we check that all other insns that set the same register follow
6852 immediately after P, that they alter nothing else,
6853 and that the result of the last is still a giv.
6855 The value is 0 if the reg set in P is not really a giv.
6856 Otherwise, the value is the amount gained by eliminating
6857 all the consecutive insns that compute the value.
6859 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6860 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6862 The coefficients of the ultimate giv value are stored in
6863 *MULT_VAL and *ADD_VAL. */
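/* For example, with biv `i', the consecutive insns
       g = i * 4
       g = g + 8
   yield a single giv with *MULT_VAL = (const_int 4) and
   *ADD_VAL = (const_int 8); the returned benefit covers both insns, and
   *LAST_CONSEC_INSN is set to the second insn.  */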
6865 static int
6866 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6867 add_val, mult_val, ext_val, last_consec_insn)
6868 const struct loop *loop;
6869 int first_benefit;
6870 rtx p;
6871 rtx src_reg;
6872 rtx dest_reg;
6873 rtx *add_val;
6874 rtx *mult_val;
6875 rtx *ext_val;
6876 rtx *last_consec_insn;
6878 struct loop_ivs *ivs = LOOP_IVS (loop);
6879 struct loop_regs *regs = LOOP_REGS (loop);
6880 int count;
6881 enum rtx_code code;
6882 int benefit;
6883 rtx temp;
6884 rtx set;
6886 /* Indicate that this is a giv so that we can update the value produced in
6887 each insn of the multi-insn sequence.
6889 This induction structure will be used only by the call to
6890 general_induction_var below, so we can allocate it on our stack.
6891 If this is a giv, our caller will replace the induct var entry with
6892 a new induction structure. */
6893 struct induction *v;
6895 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6896 return 0;
6898 v = (struct induction *) alloca (sizeof (struct induction));
6899 v->src_reg = src_reg;
6900 v->mult_val = *mult_val;
6901 v->add_val = *add_val;
6902 v->benefit = first_benefit;
6903 v->cant_derive = 0;
6904 v->derive_adjustment = 0;
6905 v->ext_dependent = NULL_RTX;
6907 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6908 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6910 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6912 while (count > 0)
6914 p = NEXT_INSN (p);
6915 code = GET_CODE (p);
6917 /* If libcall, skip to end of call sequence. */
6918 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6919 p = XEXP (temp, 0);
6921 if (code == INSN
6922 && (set = single_set (p))
6923 && GET_CODE (SET_DEST (set)) == REG
6924 && SET_DEST (set) == dest_reg
6925 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6926 add_val, mult_val, ext_val, 0,
6927 &benefit, VOIDmode)
6928 /* Giv created by equivalent expression. */
6929 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6930 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6931 add_val, mult_val, ext_val, 0,
6932 &benefit, VOIDmode)))
6933 && src_reg == v->src_reg)
6935 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6936 benefit += libcall_benefit (p);
6938 count--;
6939 v->mult_val = *mult_val;
6940 v->add_val = *add_val;
6941 v->benefit += benefit;
6943 else if (code != NOTE)
6945 /* Allow insns that set something other than this giv to a
6946 constant. Such insns are needed on machines which cannot
6947 include long constants and should not disqualify a giv. */
6948 if (code == INSN
6949 && (set = single_set (p))
6950 && SET_DEST (set) != dest_reg
6951 && CONSTANT_P (SET_SRC (set)))
6952 continue;
6954 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6955 return 0;
6959 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6960 *last_consec_insn = p;
6961 return v->benefit;
6964 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6965 represented by G1. If no such expression can be found, or it is clear that
6966 it cannot possibly be a valid address, 0 is returned.
6968 To perform the computation, we note that
6969 G1 = x * v + a and
6970 G2 = y * v + b
6971 where `v' is the biv.
6973 So G2 = (y/x) * G1 + (b - a*y/x).
6975 Note that MULT = y/x.
6977 Update: A and B are now allowed to be additive expressions such that
6978 B contains all variables in A. That is, computing B-A will not require
6979 subtracting variables. */
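/* A worked example: with G1 = 2*v + 1 and G2 = 6*v + 7, MULT is
   (const_int 3), and this routine returns B - A*MULT = 7 - 1*3 = 4,
   so that G2 = 3*G1 + 4.  */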
6981 static rtx
6982 express_from_1 (a, b, mult)
6983 rtx a, b, mult;
6985 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6987 if (mult == const0_rtx)
6988 return b;
6990 /* If MULT is not 1, we cannot handle A with non-constants, since we
6991 would then be required to subtract multiples of the registers in A.
6992 This is theoretically possible, and may even apply to some Fortran
6993 constructs, but it is a lot of work and we do not attempt it here. */
6995 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6996 return NULL_RTX;
6998 /* In general these structures are sorted top to bottom (down the PLUS
6999 chain), but not left to right across the PLUS. If B is a higher
7000 order giv than A, we can strip one level and recurse. If A is higher
7001 order, we'll eventually bail out, but won't know that until the end.
7002 If they are the same, we'll strip one level around this loop. */
7004 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7006 rtx ra, rb, oa, ob, tmp;
7008 ra = XEXP (a, 0), oa = XEXP (a, 1);
7009 if (GET_CODE (ra) == PLUS)
7010 tmp = ra, ra = oa, oa = tmp;
7012 rb = XEXP (b, 0), ob = XEXP (b, 1);
7013 if (GET_CODE (rb) == PLUS)
7014 tmp = rb, rb = ob, ob = tmp;
7016 if (rtx_equal_p (ra, rb))
7017 /* We matched: remove one reg completely. */
7018 a = oa, b = ob;
7019 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7020 /* An alternate match. */
7021 a = oa, b = rb;
7022 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7023 /* An alternate match. */
7024 a = ra, b = ob;
7025 else
7027 /* Indicates an extra register in B. Strip one level from B and
7028 recurse, hoping B was the higher order expression. */
7029 ob = express_from_1 (a, ob, mult);
7030 if (ob == NULL_RTX)
7031 return NULL_RTX;
7032 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7036 /* Here we are at the last level of A, go through the cases hoping to
7037 get rid of everything but a constant. */
7039 if (GET_CODE (a) == PLUS)
7041 rtx ra, oa;
7043 ra = XEXP (a, 0), oa = XEXP (a, 1);
7044 if (rtx_equal_p (oa, b))
7045 oa = ra;
7046 else if (!rtx_equal_p (ra, b))
7047 return NULL_RTX;
7049 if (GET_CODE (oa) != CONST_INT)
7050 return NULL_RTX;
7052 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7054 else if (GET_CODE (a) == CONST_INT)
7056 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7058 else if (CONSTANT_P (a))
7060 enum machine_mode mode_a = GET_MODE (a);
7061 enum machine_mode mode_b = GET_MODE (b);
7062 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7063 return simplify_gen_binary (MINUS, mode, b, a);
7065 else if (GET_CODE (b) == PLUS)
7067 if (rtx_equal_p (a, XEXP (b, 0)))
7068 return XEXP (b, 1);
7069 else if (rtx_equal_p (a, XEXP (b, 1)))
7070 return XEXP (b, 0);
7071 else
7072 return NULL_RTX;
7074 else if (rtx_equal_p (a, b))
7075 return const0_rtx;
7077 return NULL_RTX;
7080 rtx
7081 express_from (g1, g2)
7082 struct induction *g1, *g2;
7084 rtx mult, add;
7086 /* The value that G1 will be multiplied by must be a constant integer. Also,
7087 the only chance we have of getting a valid address is if y/x (see above
7088 for notation) is also an integer. */
7089 if (GET_CODE (g1->mult_val) == CONST_INT
7090 && GET_CODE (g2->mult_val) == CONST_INT)
7092 if (g1->mult_val == const0_rtx
7093 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7094 return NULL_RTX;
7095 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7097 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7098 mult = const1_rtx;
7099 else
7101 /* ??? Find out if the one is a multiple of the other? */
7102 return NULL_RTX;
7105 add = express_from_1 (g1->add_val, g2->add_val, mult);
7106 if (add == NULL_RTX)
7108 /* Failed. If we've got a multiplication factor between G1 and G2,
7109 scale G1's addend and try again. */
7110 if (INTVAL (mult) > 1)
7112 rtx g1_add_val = g1->add_val;
7113 if (GET_CODE (g1_add_val) == MULT
7114 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7116 HOST_WIDE_INT m;
7117 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7118 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7119 XEXP (g1_add_val, 0), GEN_INT (m));
7121 else
7123 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7124 mult);
7127 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7130 if (add == NULL_RTX)
7131 return NULL_RTX;
7133 /* Form simplified final result. */
7134 if (mult == const0_rtx)
7135 return add;
7136 else if (mult == const1_rtx)
7137 mult = g1->dest_reg;
7138 else
7139 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7141 if (add == const0_rtx)
7142 return mult;
7143 else
7145 if (GET_CODE (add) == PLUS
7146 && CONSTANT_P (XEXP (add, 1)))
7148 rtx tem = XEXP (add, 1);
7149 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7150 add = tem;
7153 return gen_rtx_PLUS (g2->mode, mult, add);
7157 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7158 represented by G1. This indicates that G2 should be combined with G1 and
7159 that G2 can use (either directly or via an address expression) a register
7160 used to represent G1. */
7162 static rtx
7163 combine_givs_p (g1, g2)
7164 struct induction *g1, *g2;
7166 rtx comb, ret;
7168 /* With the introduction of ext dependent givs, we must care for modes.
7169 G2 must not use a wider mode than G1. */
7170 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7171 return NULL_RTX;
7173 ret = comb = express_from (g1, g2);
7174 if (comb == NULL_RTX)
7175 return NULL_RTX;
7176 if (g1->mode != g2->mode)
7177 ret = gen_lowpart (g2->mode, comb);
7179 /* If these givs are identical, they can be combined. We use the results
7180 of express_from because the addends are not in a canonical form, so
7181 rtx_equal_p is a weaker test. */
7182 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7183 combination to be the other way round. */
7184 if (comb == g1->dest_reg
7185 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7187 return ret;
7190 /* If G2 can be expressed as a function of G1 and that function is valid
7191 as an address and no more expensive than using a register for G2,
7192 the expression of G2 in terms of G1 can be used. */
7193 if (ret != NULL_RTX
7194 && g2->giv_type == DEST_ADDR
7195 && memory_address_p (GET_MODE (g2->mem), ret)
7196 /* ??? Loses, especially with -fforce-addr, where *g2->location
7197 will always be a register, and so anything more complicated
7198 gets discarded. */
7199 #if 0
7200 #ifdef ADDRESS_COST
7201 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7202 #else
7203 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7204 #endif
7205 #endif
7208 return ret;
7211 return NULL_RTX;
7214 /* Check each extension dependent giv in this class to see if its
7215 root biv is safe from wrapping in the interior mode, which would
7216 make the giv illegal. */
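/* For example, a QImode biv that starts at 0 and is incremented by 1 over
   200 iterations ends at 200: that fits the unsigned QImode range (<= 255),
   so zero-extension dependent givs are safe, but it exceeds the signed
   maximum of 127, so sign-extension dependent givs are invalidated.  */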
7218 static void
7219 check_ext_dependent_givs (bl, loop_info)
7220 struct iv_class *bl;
7221 struct loop_info *loop_info;
7223 int ze_ok = 0, se_ok = 0, info_ok = 0;
7224 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7225 HOST_WIDE_INT start_val;
7226 unsigned HOST_WIDE_INT u_end_val = 0;
7227 unsigned HOST_WIDE_INT u_start_val = 0;
7228 rtx incr = pc_rtx;
7229 struct induction *v;
7231 /* Make sure the iteration data is available. We must have
7232 constants in order to be certain of no overflow. */
7233 /* ??? An unknown iteration count with an increment of +-1
7234 combined with friendly exit tests against an invariant
7235 value is also amenable to optimization. Not implemented. */
7236 if (loop_info->n_iterations > 0
7237 && bl->initial_value
7238 && GET_CODE (bl->initial_value) == CONST_INT
7239 && (incr = biv_total_increment (bl))
7240 && GET_CODE (incr) == CONST_INT
7241 /* Make sure the host can represent the arithmetic. */
7242 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7244 unsigned HOST_WIDE_INT abs_incr, total_incr;
7245 HOST_WIDE_INT s_end_val;
7246 int neg_incr;
7248 info_ok = 1;
7249 start_val = INTVAL (bl->initial_value);
7250 u_start_val = start_val;
7252 neg_incr = 0, abs_incr = INTVAL (incr);
7253 if (INTVAL (incr) < 0)
7254 neg_incr = 1, abs_incr = -abs_incr;
7255 total_incr = abs_incr * loop_info->n_iterations;
7257 /* Check for host arithmetic overflow. */
7258 if (total_incr / loop_info->n_iterations == abs_incr)
7260 unsigned HOST_WIDE_INT u_max;
7261 HOST_WIDE_INT s_max;
7263 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7264 s_end_val = u_end_val;
7265 u_max = GET_MODE_MASK (biv_mode);
7266 s_max = u_max >> 1;
7268 /* Check zero extension of biv ok. */
7269 if (start_val >= 0
7270 /* Check for host arithmetic overflow. */
7271 && (neg_incr
7272 ? u_end_val < u_start_val
7273 : u_end_val > u_start_val)
7274 /* Check for target arithmetic overflow. */
7275 && (neg_incr
7276 ? 1 /* taken care of with host overflow */
7277 : u_end_val <= u_max))
7279 ze_ok = 1;
7282 /* Check sign extension of biv ok. */
7283 /* ??? While it is true that overflow with signed and pointer
7284 arithmetic is undefined, I fear too many programmers don't
7285 keep this fact in mind -- myself included on occasion.
7286 So leave alone with the signed overflow optimizations. */
7287 if (start_val >= -s_max - 1
7288 /* Check for host arithmetic overflow. */
7289 && (neg_incr
7290 ? s_end_val < start_val
7291 : s_end_val > start_val)
7292 /* Check for target arithmetic overflow. */
7293 && (neg_incr
7294 ? s_end_val >= -s_max - 1
7295 : s_end_val <= s_max))
7297 se_ok = 1;
7302 /* Invalidate givs that fail the tests. */
7303 for (v = bl->giv; v; v = v->next_iv)
7304 if (v->ext_dependent)
7306 enum rtx_code code = GET_CODE (v->ext_dependent);
7307 int ok = 0;
7309 switch (code)
7311 case SIGN_EXTEND:
7312 ok = se_ok;
7313 break;
7314 case ZERO_EXTEND:
7315 ok = ze_ok;
7316 break;
7318 case TRUNCATE:
7319 /* We don't know whether this value is being used as either
7320 signed or unsigned, so to safely truncate we must satisfy
7321 both. The initial check here verifies the BIV itself;
7322 once that is successful we may check its range wrt the
7323 derived GIV. */
7324 if (se_ok && ze_ok)
7326 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7327 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7329 /* We know from the above that both endpoints are nonnegative,
7330 and that there is no wrapping. Verify that both endpoints
7331 are within the (signed) range of the outer mode. */
7332 if (u_start_val <= max && u_end_val <= max)
7333 ok = 1;
7335 break;
7337 default:
7338 abort ();
7341 if (ok)
7343 if (loop_dump_stream)
7345 fprintf (loop_dump_stream,
7346 "Verified ext dependent giv at %d of reg %d\n",
7347 INSN_UID (v->insn), bl->regno);
7350 else
7352 if (loop_dump_stream)
7354 const char *why;
7356 if (info_ok)
7357 why = "biv iteration values overflowed";
7358 else
7360 if (incr == pc_rtx)
7361 incr = biv_total_increment (bl);
7362 if (incr == const1_rtx)
7363 why = "biv iteration info incomplete; incr by 1";
7364 else
7365 why = "biv iteration info incomplete";
7368 fprintf (loop_dump_stream,
7369 "Failed ext dependent giv at %d, %s\n",
7370 INSN_UID (v->insn), why);
7372 v->ignore = 1;
7373 bl->all_reduced = 0;
7378 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7380 rtx
7381 extend_value_for_giv (v, value)
7382 struct induction *v;
7383 rtx value;
7385 rtx ext_dep = v->ext_dependent;
7387 if (! ext_dep)
7388 return value;
7390 /* Recall that check_ext_dependent_givs verified that the known bounds
7391 of a biv did not overflow or wrap with respect to the extension for
7392 the giv. Therefore, constants need no additional adjustment. */
7393 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7394 return value;
7396 /* Otherwise, we must adjust the value to compensate for the
7397 differing modes of the biv and the giv. */
7398 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7401 struct combine_givs_stats
7403 int giv_number;
7404 int total_benefit;
7407 static int
7408 cmp_combine_givs_stats (xp, yp)
7409 const PTR xp;
7410 const PTR yp;
7412 const struct combine_givs_stats * const x =
7413 (const struct combine_givs_stats *) xp;
7414 const struct combine_givs_stats * const y =
7415 (const struct combine_givs_stats *) yp;
7416 int d;
7417 d = y->total_benefit - x->total_benefit;
7418 /* Stabilize the sort. */
7419 if (!d)
7420 d = x->giv_number - y->giv_number;
7421 return d;
7424 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7425 any other. If so, point SAME to the giv combined with and set NEW_REG to
7426 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7427 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
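/* For example, if one giv computes 4*i + 8 and another computes 4*i + 24,
   the second can usually be rewritten as the first plus 16, so that only
   one register needs to be stepped on each iteration. */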
7429 static void
7430 combine_givs (regs, bl)
7431 struct loop_regs *regs;
7432 struct iv_class *bl;
7434 /* Additional benefit to add for being combined multiple times. */
7435 const int extra_benefit = 3;
7437 struct induction *g1, *g2, **giv_array;
7438 int i, j, k, giv_count;
7439 struct combine_givs_stats *stats;
7440 rtx *can_combine;
7442 /* Count givs, because bl->giv_count is incorrect here. */
7443 giv_count = 0;
7444 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7445 if (!g1->ignore)
7446 giv_count++;
7448 giv_array
7449 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7450 i = 0;
7451 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7452 if (!g1->ignore)
7453 giv_array[i++] = g1;
7455 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7456 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7458 for (i = 0; i < giv_count; i++)
7460 int this_benefit;
7461 rtx single_use;
7463 g1 = giv_array[i];
7464 stats[i].giv_number = i;
7466 /* If a DEST_REG GIV is used only once, do not allow it to combine
7467 with anything, for in doing so we will gain nothing that cannot
7468 be had by simply letting the GIV with which we would have combined
7469 to be reduced on its own. The loss shows up in particular with
7470 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7471 be seen elsewhere as well. */
7472 if (g1->giv_type == DEST_REG
7473 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7474 && single_use != const0_rtx)
7475 continue;
7477 this_benefit = g1->benefit;
7478 /* Add an additional weight for zero addends. */
7479 if (g1->no_const_addval)
7480 this_benefit += 1;
7482 for (j = 0; j < giv_count; j++)
7484 rtx this_combine;
7486 g2 = giv_array[j];
7487 if (g1 != g2
7488 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7490 can_combine[i * giv_count + j] = this_combine;
7491 this_benefit += g2->benefit + extra_benefit;
7494 stats[i].total_benefit = this_benefit;
7497 /* Iterate, combining until we can't. */
7498 restart:
7499 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7501 if (loop_dump_stream)
7503 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7504 for (k = 0; k < giv_count; k++)
7506 g1 = giv_array[stats[k].giv_number];
7507 if (!g1->combined_with && !g1->same)
7508 fprintf (loop_dump_stream, " {%d, %d}",
7509 INSN_UID (giv_array[stats[k].giv_number]->insn),
7510 stats[k].total_benefit);
7512 putc ('\n', loop_dump_stream);
7515 for (k = 0; k < giv_count; k++)
7517 int g1_add_benefit = 0;
7519 i = stats[k].giv_number;
7520 g1 = giv_array[i];
7522 /* If it has already been combined, skip. */
7523 if (g1->combined_with || g1->same)
7524 continue;
7526 for (j = 0; j < giv_count; j++)
7528 g2 = giv_array[j];
7529 if (g1 != g2 && can_combine[i * giv_count + j]
7530 /* If it has already been combined, skip. */
7531 && ! g2->same && ! g2->combined_with)
7533 int l;
7535 g2->new_reg = can_combine[i * giv_count + j];
7536 g2->same = g1;
7537 /* For a DEST_ADDR giv, the destination may now be replaced by a mem
7538 expression instead of a register. This changes the costs considerably,
7539 so add the compensation. */
7540 if (g2->giv_type == DEST_ADDR)
7541 g2->benefit = (g2->benefit + reg_address_cost
7542 - address_cost (g2->new_reg,
7543 GET_MODE (g2->mem)));
7544 g1->combined_with++;
7545 g1->lifetime += g2->lifetime;
7547 g1_add_benefit += g2->benefit;
7549 /* ??? The new final_[bg]iv_value code does a much better job
7550 of finding replaceable giv's, and hence this code may no
7551 longer be necessary. */
7552 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7553 g1_add_benefit -= copy_cost;
7555 /* To help optimize the next set of combinations, remove
7556 this giv from the benefits of other potential mates. */
7557 for (l = 0; l < giv_count; ++l)
7559 int m = stats[l].giv_number;
7560 if (can_combine[m * giv_count + j])
7561 stats[l].total_benefit -= g2->benefit + extra_benefit;
7564 if (loop_dump_stream)
7565 fprintf (loop_dump_stream,
7566 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7567 INSN_UID (g2->insn), INSN_UID (g1->insn),
7568 g1->benefit, g1_add_benefit, g1->lifetime);
7572 /* To help optimize the next set of combinations, remove
7573 this giv from the benefits of other potential mates. */
7574 if (g1->combined_with)
7576 for (j = 0; j < giv_count; ++j)
7578 int m = stats[j].giv_number;
7579 if (can_combine[m * giv_count + i])
7580 stats[j].total_benefit -= g1->benefit + extra_benefit;
7583 g1->benefit += g1_add_benefit;
7585 /* We've finished with this giv, and everything it touched.
7586 Restart the combination so that proper weights for the
7587 rest of the givs are properly taken into account. */
7588 /* ??? Ideally we would compact the arrays at this point, so
7589 as to not cover old ground. But sanely compacting
7590 can_combine is tricky. */
7591 goto restart;
7595 /* Clean up. */
7596 free (stats);
7597 free (can_combine);
7600 /* Generate sequence for REG = B * M + A. */
7602 static rtx
7603 gen_add_mult (b, m, a, reg)
7604 rtx b; /* initial value of basic induction variable */
7605 rtx m; /* multiplicative constant */
7606 rtx a; /* additive constant */
7607 rtx reg; /* destination register */
7609 rtx seq;
7610 rtx result;
7612 start_sequence ();
7613 /* Use unsigned arithmetic. */
7614 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7615 if (reg != result)
7616 emit_move_insn (reg, result);
7617 seq = gen_sequence ();
7618 end_sequence ();
7620 return seq;
7624 /* Update registers created in insn sequence SEQ. */
7626 static void
7627 loop_regs_update (loop, seq)
7628 const struct loop *loop ATTRIBUTE_UNUSED;
7629 rtx seq;
7631 /* Update register info for alias analysis. */
7633 if (GET_CODE (seq) == SEQUENCE)
7635 int i;
7636 for (i = 0; i < XVECLEN (seq, 0); ++i)
7638 rtx set = single_set (XVECEXP (seq, 0, i));
7639 if (set && GET_CODE (SET_DEST (set)) == REG)
7640 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7643 else
7645 if (GET_CODE (seq) == SET
7646 && GET_CODE (SET_DEST (seq)) == REG)
7647 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7652 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7654 void
7655 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7656 const struct loop *loop;
7657 rtx b; /* initial value of basic induction variable */
7658 rtx m; /* multiplicative constant */
7659 rtx a; /* additive constant */
7660 rtx reg; /* destination register */
7661 basic_block before_bb;
7662 rtx before_insn;
7664 rtx seq;
7666 if (! before_insn)
7668 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7669 return;
7672 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7673 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7675 /* Increase the lifetime of any invariants moved further in code. */
7676 update_reg_last_use (a, before_insn);
7677 update_reg_last_use (b, before_insn);
7678 update_reg_last_use (m, before_insn);
7680 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7682 /* It is possible that the expansion created lots of new registers.
7683 Iterate over the sequence we just created and record them all. */
7684 loop_regs_update (loop, seq);
7688 /* Emit insns at the loop exit (after the loop) to set REG = B * M + A. */
7690 void
7691 loop_iv_add_mult_sink (loop, b, m, a, reg)
7692 const struct loop *loop;
7693 rtx b; /* initial value of basic induction variable */
7694 rtx m; /* multiplicative constant */
7695 rtx a; /* additive constant */
7696 rtx reg; /* destination register */
7698 rtx seq;
7700 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7701 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7703 /* Increase the lifetime of any invariants moved further in code.
7704 ???? Is this really necessary? */
7705 update_reg_last_use (a, loop->sink);
7706 update_reg_last_use (b, loop->sink);
7707 update_reg_last_use (m, loop->sink);
7709 loop_insn_sink (loop, seq);
7711 /* It is possible that the expansion created lots of new registers.
7712 Iterate over the sequence we just created and record them all. */
7713 loop_regs_update (loop, seq);
7717 /* Emit insns in the loop pre-header to set REG = B * M + A. */
7719 void
7720 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7721 const struct loop *loop;
7722 rtx b; /* initial value of basic induction variable */
7723 rtx m; /* multiplicative constant */
7724 rtx a; /* additive constant */
7725 rtx reg; /* destination register */
7727 rtx seq;
7729 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7730 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7732 loop_insn_hoist (loop, seq);
7734 /* It is possible that the expansion created lots of new registers.
7735 Iterate over the sequence we just created and record them all. */
7736 loop_regs_update (loop, seq);
7741 /* Similar to gen_add_mult, but compute cost rather than generating
7742 sequence. */
7744 static int
7745 iv_add_mult_cost (b, m, a, reg)
7746 rtx b; /* initial value of basic induction variable */
7747 rtx m; /* multiplicative constant */
7748 rtx a; /* additive constant */
7749 rtx reg; /* destination register */
7751 int cost = 0;
7752 rtx last, result;
7754 start_sequence ();
7755 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7756 if (reg != result)
7757 emit_move_insn (reg, result);
7758 last = get_last_insn ();
7759 while (last)
7761 rtx t = single_set (last);
7762 if (t)
7763 cost += rtx_cost (SET_SRC (t), SET);
7764 last = PREV_INSN (last);
7766 end_sequence ();
7767 return cost;
7770 /* Test whether A * B can be computed without
7771 an actual multiply insn. Value is 1 if so. */
7773 static int
7774 product_cheap_p (a, b)
7775 rtx a;
7776 rtx b;
7778 int i;
7779 rtx tmp;
7780 int win = 1;
7782 /* If only one is constant, make it B. */
7783 if (GET_CODE (a) == CONST_INT)
7784 tmp = a, a = b, b = tmp;
7786 /* If the first is constant, both are constant, so we don't need a multiply. */
7787 if (GET_CODE (a) == CONST_INT)
7788 return 1;
7790 /* If the second is not constant, neither is constant, so we would need a multiply. */
7791 if (GET_CODE (b) != CONST_INT)
7792 return 0;
7794 /* One operand is constant, so might not need multiply insn. Generate the
7795 code for the multiply and see if a call or multiply, or long sequence
7796 of insns is generated. */
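/* For instance, multiplying by 5 typically expands to a shift and an add
   (roughly (x << 2) + x), which is a short sequence and therefore cheap,
   whereas multiplying by a large constant with many set bits may produce a
   real mult insn or a long sequence, and is rejected below. */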
7798 start_sequence ();
7799 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7800 tmp = gen_sequence ();
7801 end_sequence ();
7803 if (GET_CODE (tmp) == SEQUENCE)
7805 if (XVEC (tmp, 0) == 0)
7806 win = 1;
7807 else if (XVECLEN (tmp, 0) > 3)
7808 win = 0;
7809 else
7810 for (i = 0; i < XVECLEN (tmp, 0); i++)
7812 rtx insn = XVECEXP (tmp, 0, i);
7814 if (GET_CODE (insn) != INSN
7815 || (GET_CODE (PATTERN (insn)) == SET
7816 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7817 || (GET_CODE (PATTERN (insn)) == PARALLEL
7818 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7819 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7821 win = 0;
7822 break;
7826 else if (GET_CODE (tmp) == SET
7827 && GET_CODE (SET_SRC (tmp)) == MULT)
7828 win = 0;
7829 else if (GET_CODE (tmp) == PARALLEL
7830 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7831 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7832 win = 0;
7834 return win;
7837 /* Check to see if loop can be terminated by a "decrement and branch until
7838 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7839 Also try reversing an increment loop to a decrement loop
7840 to see if the optimization can be performed.
7841 Value is nonzero if optimization was performed. */
7843 /* This is useful even if the architecture doesn't have such an insn,
7844 because it might change a loop which increments from 0 to n into a loop
7845 which decrements from n to 0. A loop that decrements to zero is usually
7846 faster than one that increments from zero. */
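/* For example, a counting loop such as

	for (i = 0; i < n; i++)
	  body;

   in which I is used only to count iterations can typically be rewritten as

	for (i = n; i > 0; i--)
	  body;

   so that the exit test becomes a comparison against zero. */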
7848 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7849 such as approx_final_value, biv_total_increment, loop_iterations, and
7850 final_[bg]iv_value. */
7852 static int
7853 check_dbra_loop (loop, insn_count)
7854 struct loop *loop;
7855 int insn_count;
7857 struct loop_info *loop_info = LOOP_INFO (loop);
7858 struct loop_regs *regs = LOOP_REGS (loop);
7859 struct loop_ivs *ivs = LOOP_IVS (loop);
7860 struct iv_class *bl;
7861 rtx reg;
7862 rtx jump_label;
7863 rtx final_value;
7864 rtx start_value;
7865 rtx new_add_val;
7866 rtx comparison;
7867 rtx before_comparison;
7868 rtx p;
7869 rtx jump;
7870 rtx first_compare;
7871 int compare_and_branch;
7872 rtx loop_start = loop->start;
7873 rtx loop_end = loop->end;
7875 /* If last insn is a conditional branch, and the insn before tests a
7876 register value, try to optimize it. Otherwise, we can't do anything. */
7878 jump = PREV_INSN (loop_end);
7879 comparison = get_condition_for_loop (loop, jump);
7880 if (comparison == 0)
7881 return 0;
7882 if (!onlyjump_p (jump))
7883 return 0;
7885 /* Try to compute whether the compare/branch at the loop end is one or
7886 two instructions. */
7887 get_condition (jump, &first_compare);
7888 if (first_compare == jump)
7889 compare_and_branch = 1;
7890 else if (first_compare == prev_nonnote_insn (jump))
7891 compare_and_branch = 2;
7892 else
7893 return 0;
7896 /* If more than one condition is present to control the loop, then
7897 do not proceed, as this function does not know how to rewrite
7898 loop tests with more than one condition.
7900 Look backwards from the first insn in the last comparison
7901 sequence and see if we've got another comparison sequence. */
7903 rtx jump1;
7904 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7905 if (GET_CODE (jump1) == JUMP_INSN)
7906 return 0;
7909 /* Check all of the bivs to see if the compare uses one of them.
7910 Skip biv's set more than once because we can't guarantee that
7911 it will be zero on the last iteration. Also skip if the biv is
7912 used between its update and the test insn. */
7914 for (bl = ivs->list; bl; bl = bl->next)
7916 if (bl->biv_count == 1
7917 && ! bl->biv->maybe_multiple
7918 && bl->biv->dest_reg == XEXP (comparison, 0)
7919 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7920 first_compare))
7921 break;
7924 if (! bl)
7925 return 0;
7927 /* Look for the case where the basic induction variable is always
7928 nonnegative, and equals zero on the last iteration.
7929 In this case, add a reg_note REG_NONNEG, which allows the
7930 m68k DBRA instruction to be used. */
7932 if (((GET_CODE (comparison) == GT
7933 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7934 && INTVAL (XEXP (comparison, 1)) == -1)
7935 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7936 && GET_CODE (bl->biv->add_val) == CONST_INT
7937 && INTVAL (bl->biv->add_val) < 0)
7939 /* The initial value must be greater than 0, and
7940 init_val % -dec_value must be 0 to ensure that the biv equals zero on
7941 the last iteration. */
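/* For instance, a biv that starts at 20 and is decremented by 4 each
   iteration takes the values 20, 16, 12, 8, 4, 0: it stays nonnegative
   throughout and reaches exactly zero because 20 % 4 == 0, so the
   REG_NONNEG note is valid. */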
7943 if (GET_CODE (bl->initial_value) == CONST_INT
7944 && INTVAL (bl->initial_value) > 0
7945 && (INTVAL (bl->initial_value)
7946 % (-INTVAL (bl->biv->add_val))) == 0)
7948 /* register always nonnegative, add REG_NOTE to branch */
7949 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7950 REG_NOTES (jump)
7951 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7952 REG_NOTES (jump));
7953 bl->nonneg = 1;
7955 return 1;
7958 /* If the decrement is 1 and the value was tested as >= 0 before
7959 the loop, then we can safely optimize. */
7960 for (p = loop_start; p; p = PREV_INSN (p))
7962 if (GET_CODE (p) == CODE_LABEL)
7963 break;
7964 if (GET_CODE (p) != JUMP_INSN)
7965 continue;
7967 before_comparison = get_condition_for_loop (loop, p);
7968 if (before_comparison
7969 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7970 && GET_CODE (before_comparison) == LT
7971 && XEXP (before_comparison, 1) == const0_rtx
7972 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7973 && INTVAL (bl->biv->add_val) == -1)
7975 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7976 REG_NOTES (jump)
7977 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7978 REG_NOTES (jump));
7979 bl->nonneg = 1;
7981 return 1;
7985 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7986 && INTVAL (bl->biv->add_val) > 0)
7988 /* Try to change inc to dec, so we can apply the above optimization. */
7989 /* Can do this if:
7990 all registers modified are induction variables or invariant,
7991 all memory references have non-overlapping addresses
7992 (obviously true if there is only one write),
7993 and we allow 2 insns for the compare/jump at the end of the loop. */
7994 /* Also, we must avoid any instructions which use both the reversed
7995 biv and another biv. Such instructions will fail if the loop is
7996 reversed. We meet this condition by requiring that either
7997 no_use_except_counting is true, or else that there is only
7998 one biv. */
7999 int num_nonfixed_reads = 0;
8000 /* 1 if the iteration var is used only to count iterations. */
8001 int no_use_except_counting = 0;
8002 /* 1 if the loop has no memory store, or it has a single memory store
8003 which is reversible. */
8004 int reversible_mem_store = 1;
8006 if (bl->giv_count == 0
8007 && !loop->exit_count
8008 && !loop_info->has_multiple_exit_targets)
8010 rtx bivreg = regno_reg_rtx[bl->regno];
8011 struct iv_class *blt;
8013 /* If there are no givs for this biv, and the only exit is the
8014 fall through at the end of the loop, then
8015 see if perhaps there are no uses except to count. */
8016 no_use_except_counting = 1;
8017 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8018 if (INSN_P (p))
8020 rtx set = single_set (p);
8022 if (set && GET_CODE (SET_DEST (set)) == REG
8023 && REGNO (SET_DEST (set)) == bl->regno)
8024 /* An insn that sets the biv is okay. */
8026 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8027 || p == prev_nonnote_insn (loop_end))
8028 && reg_mentioned_p (bivreg, PATTERN (p)))
8030 /* If either of these insns uses the biv and sets a pseudo
8031 that has more than one usage, then the biv has uses
8032 other than counting since it's used to derive a value
8033 that is used more than one time. */
8034 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8035 regs);
8036 if (regs->multiple_uses)
8038 no_use_except_counting = 0;
8039 break;
8042 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8044 no_use_except_counting = 0;
8045 break;
8049 /* A biv has uses besides counting if it is used to set
8050 another biv. */
8051 for (blt = ivs->list; blt; blt = blt->next)
8052 if (blt->init_set
8053 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8055 no_use_except_counting = 0;
8056 break;
8060 if (no_use_except_counting)
8061 /* No need to worry about MEMs. */
8063 else if (loop_info->num_mem_sets <= 1)
8065 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8066 if (INSN_P (p))
8067 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8069 /* If the loop has a single store, and the destination address is
8070 invariant, then we can't reverse the loop, because this address
8071 might then have the wrong value at loop exit.
8072 This would work if the source were invariant also; however, in that
8073 case, the insn should have been moved out of the loop. */
8075 if (loop_info->num_mem_sets == 1)
8077 struct induction *v;
8079 /* If we could prove that each of the memory locations
8080 written to was different, then we could reverse the
8081 store -- but we don't presently have any way of
8082 knowing that. */
8083 reversible_mem_store = 0;
8085 /* If the store depends on a register that is set after the
8086 store, it depends on the initial value, and is thus not
8087 reversible. */
8088 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8090 if (v->giv_type == DEST_REG
8091 && reg_mentioned_p (v->dest_reg,
8092 PATTERN (loop_info->first_loop_store_insn))
8093 && loop_insn_first_p (loop_info->first_loop_store_insn,
8094 v->insn))
8095 reversible_mem_store = 0;
8099 else
8100 return 0;
8102 /* This code only acts for innermost loops. Also it simplifies
8103 the memory address check by only reversing loops with
8104 zero or one memory access.
8105 Two memory accesses could involve parts of the same array,
8106 and that can't be reversed.
8107 If the biv is used only for counting, then we don't need to worry
8108 about all these things. */
8110 if ((num_nonfixed_reads <= 1
8111 && ! loop_info->has_nonconst_call
8112 && ! loop_info->has_volatile
8113 && reversible_mem_store
8114 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8115 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8116 && (bl == ivs->list && bl->next == 0))
8117 || no_use_except_counting)
8119 rtx tem;
8121 /* Loop can be reversed. */
8122 if (loop_dump_stream)
8123 fprintf (loop_dump_stream, "Can reverse loop\n");
8125 /* Now check other conditions:
8127 The increment must be a constant, as must the initial value,
8128 and the comparison code must be LT.
8130 This test can probably be improved since +/- 1 in the constant
8131 can be obtained by changing LT to LE and vice versa; this is
8132 confusing. */
8134 if (comparison
8135 /* for constants, LE gets turned into LT */
8136 && (GET_CODE (comparison) == LT
8137 || (GET_CODE (comparison) == LE
8138 && no_use_except_counting)))
8140 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8141 rtx initial_value, comparison_value;
8142 int nonneg = 0;
8143 enum rtx_code cmp_code;
8144 int comparison_const_width;
8145 unsigned HOST_WIDE_INT comparison_sign_mask;
8147 add_val = INTVAL (bl->biv->add_val);
8148 comparison_value = XEXP (comparison, 1);
8149 if (GET_MODE (comparison_value) == VOIDmode)
8150 comparison_const_width
8151 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8152 else
8153 comparison_const_width
8154 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8155 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8156 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8157 comparison_sign_mask
8158 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8160 /* If the comparison value is not a loop invariant, then we
8161 can not reverse this loop.
8163 ??? If the insns which initialize the comparison value as
8164 a whole compute an invariant result, then we could move
8165 them out of the loop and proceed with loop reversal. */
8166 if (! loop_invariant_p (loop, comparison_value))
8167 return 0;
8169 if (GET_CODE (comparison_value) == CONST_INT)
8170 comparison_val = INTVAL (comparison_value);
8171 initial_value = bl->initial_value;
8173 /* Normalize the initial value if it is an integer and
8174 has no other use except as a counter. This will allow
8175 a few more loops to be reversed. */
8176 if (no_use_except_counting
8177 && GET_CODE (comparison_value) == CONST_INT
8178 && GET_CODE (initial_value) == CONST_INT)
8180 comparison_val = comparison_val - INTVAL (bl->initial_value);
8181 /* The code below requires comparison_val to be a multiple
8182 of add_val in order to do the loop reversal, so
8183 round up comparison_val to a multiple of add_val.
8184 Since comparison_value is constant, we know that the
8185 current comparison code is LT. */
8186 comparison_val = comparison_val + add_val - 1;
8187 comparison_val
8188 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8189 /* We postpone overflow checks for COMPARISON_VAL here;
8190 even if there is an overflow, we might still be able to
8191 reverse the loop, if converting the loop exit test to
8192 NE is possible. */
8193 initial_value = const0_rtx;
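/* For instance, with initial value 3, comparison value 17 and increment 4,
   the original loop runs for i = 3, 7, 11, 15; after normalization the
   initial value is 0 and the rounded comparison value is 16, giving
   i = 0, 4, 8, 12 -- the same four iterations. */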
8196 /* First check if we can do a vanilla loop reversal. */
8197 if (initial_value == const0_rtx
8198 /* If we have a decrement_and_branch_on_count,
8199 prefer the NE test, since this will allow that
8200 instruction to be generated. Note that we must
8201 use a vanilla loop reversal if the biv is used to
8202 calculate a giv or has a non-counting use. */
8203 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8204 && defined (HAVE_decrement_and_branch_on_count)
8205 && (! (add_val == 1 && loop->vtop
8206 && (bl->biv_count == 0
8207 || no_use_except_counting)))
8208 #endif
8209 && GET_CODE (comparison_value) == CONST_INT
8210 /* Now do postponed overflow checks on COMPARISON_VAL. */
8211 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8212 & comparison_sign_mask))
8214 /* Register will always be nonnegative, with value
8215 0 on last iteration */
8216 add_adjust = add_val;
8217 nonneg = 1;
8218 cmp_code = GE;
8220 else if (add_val == 1 && loop->vtop
8221 && (bl->biv_count == 0
8222 || no_use_except_counting))
8224 add_adjust = 0;
8225 cmp_code = NE;
8227 else
8228 return 0;
8230 if (GET_CODE (comparison) == LE)
8231 add_adjust -= add_val;
8233 /* If the initial value is not zero, or if the comparison
8234 value is not an exact multiple of the increment, then we
8235 can not reverse this loop. */
8236 if (initial_value == const0_rtx
8237 && GET_CODE (comparison_value) == CONST_INT)
8239 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8240 return 0;
8242 else
8244 if (! no_use_except_counting || add_val != 1)
8245 return 0;
8248 final_value = comparison_value;
8250 /* Reset these in case we normalized the initial value
8251 and comparison value above. */
8252 if (GET_CODE (comparison_value) == CONST_INT
8253 && GET_CODE (initial_value) == CONST_INT)
8255 comparison_value = GEN_INT (comparison_val);
8256 final_value
8257 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8259 bl->initial_value = initial_value;
8261 /* Save some info needed to produce the new insns. */
8262 reg = bl->biv->dest_reg;
8263 jump_label = condjump_label (PREV_INSN (loop_end));
8264 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8266 /* Set start_value; if this is not a CONST_INT, we need
8267 to generate a SUB.
8268 Initialize biv to start_value before loop start.
8269 The old initializing insn will be deleted as a
8270 dead store by flow.c. */
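/* Continuing the example above: with a normalized limit of 16 and an
   add_adjust of 4, the biv is initialized to 12 before the loop and
   decremented by 4 each iteration, with a GE 0 exit test, so the body
   still runs four times and the register is 0 on the last iteration. */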
8271 if (initial_value == const0_rtx
8272 && GET_CODE (comparison_value) == CONST_INT)
8274 start_value = GEN_INT (comparison_val - add_adjust);
8275 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8277 else if (GET_CODE (initial_value) == CONST_INT)
8279 enum machine_mode mode = GET_MODE (reg);
8280 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8281 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8283 if (add_insn == 0)
8284 return 0;
8286 start_value
8287 = gen_rtx_PLUS (mode, comparison_value, offset);
8288 loop_insn_hoist (loop, add_insn);
8289 if (GET_CODE (comparison) == LE)
8290 final_value = gen_rtx_PLUS (mode, comparison_value,
8291 GEN_INT (add_val));
8293 else if (! add_adjust)
8295 enum machine_mode mode = GET_MODE (reg);
8296 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8297 initial_value);
8299 if (sub_insn == 0)
8300 return 0;
8301 start_value
8302 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8303 loop_insn_hoist (loop, sub_insn);
8305 else
8306 /* We could handle the other cases too, but it'll be
8307 better to have a testcase first. */
8308 return 0;
8310 /* We may not have a single insn which can increment a reg, so
8311 create a sequence to hold all the insns from expand_inc. */
8312 start_sequence ();
8313 expand_inc (reg, new_add_val);
8314 tem = gen_sequence ();
8315 end_sequence ();
8317 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8318 delete_insn (bl->biv->insn);
8320 /* Update biv info to reflect its new status. */
8321 bl->biv->insn = p;
8322 bl->initial_value = start_value;
8323 bl->biv->add_val = new_add_val;
8325 /* Update loop info. */
8326 loop_info->initial_value = reg;
8327 loop_info->initial_equiv_value = reg;
8328 loop_info->final_value = const0_rtx;
8329 loop_info->final_equiv_value = const0_rtx;
8330 loop_info->comparison_value = const0_rtx;
8331 loop_info->comparison_code = cmp_code;
8332 loop_info->increment = new_add_val;
8334 /* Inc LABEL_NUSES so that delete_insn will
8335 not delete the label. */
8336 LABEL_NUSES (XEXP (jump_label, 0))++;
8338 /* Emit an insn after the end of the loop to set the biv's
8339 proper exit value if it is used anywhere outside the loop. */
8340 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8341 || ! bl->init_insn
8342 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8343 loop_insn_sink (loop, gen_move_insn (reg, final_value));
8345 /* Delete compare/branch at end of loop. */
8346 delete_related_insns (PREV_INSN (loop_end));
8347 if (compare_and_branch == 2)
8348 delete_related_insns (first_compare);
8350 /* Add new compare/branch insn at end of loop. */
8351 start_sequence ();
8352 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8353 GET_MODE (reg), 0,
8354 XEXP (jump_label, 0));
8355 tem = gen_sequence ();
8356 end_sequence ();
8357 emit_jump_insn_before (tem, loop_end);
8359 for (tem = PREV_INSN (loop_end);
8360 tem && GET_CODE (tem) != JUMP_INSN;
8361 tem = PREV_INSN (tem))
8364 if (tem)
8365 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8367 if (nonneg)
8369 if (tem)
8371 /* Increment of LABEL_NUSES done above. */
8372 /* Register is now always nonnegative,
8373 so add REG_NONNEG note to the branch. */
8374 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8375 REG_NOTES (tem));
8377 bl->nonneg = 1;
8380 /* No insn may reference both the reversed and another biv or it
8381 will fail (see comment near the top of the loop reversal
8382 code).
8383 Earlier on, we have verified that the biv has no use except
8384 counting, or it is the only biv in this loop.
8385 However, the code that computes no_use_except_counting does
8386 not verify reg notes. It's possible to have an insn that
8387 references another biv, and has a REG_EQUAL note with an
8388 expression based on the reversed biv. To avoid this case,
8389 remove all REG_EQUAL notes based on the reversed biv
8390 here. */
8391 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8392 if (INSN_P (p))
8394 rtx *pnote;
8395 rtx set = single_set (p);
8396 /* If this is a set of a GIV based on the reversed biv, any
8397 REG_EQUAL notes should still be correct. */
8398 if (! set
8399 || GET_CODE (SET_DEST (set)) != REG
8400 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8401 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8402 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8403 for (pnote = &REG_NOTES (p); *pnote;)
8405 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8406 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8407 XEXP (*pnote, 0)))
8408 *pnote = XEXP (*pnote, 1);
8409 else
8410 pnote = &XEXP (*pnote, 1);
8414 /* Mark that this biv has been reversed. Each giv which depends
8415 on this biv, and which is also live past the end of the loop
8416 will have to be fixed up. */
8418 bl->reversed = 1;
8420 if (loop_dump_stream)
8422 fprintf (loop_dump_stream, "Reversed loop");
8423 if (bl->nonneg)
8424 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8425 else
8426 fprintf (loop_dump_stream, "\n");
8429 return 1;
8434 return 0;
8437 /* Verify whether the biv BL appears to be eliminable,
8438 based on the insns in the loop that refer to it.
8440 If ELIMINATE_P is non-zero, actually do the elimination.
8442 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8443 determine whether invariant insns should be placed inside or at the
8444 start of the loop. */
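/* For example, if the only remaining use of a biv I is in a test such as
   I < N, and a giv of the form 4*I + BASE has been reduced to a register,
   the test can often be replaced by comparing that register against
   4*N + BASE, allowing I itself to be eliminated. */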
8446 static int
8447 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8448 const struct loop *loop;
8449 struct iv_class *bl;
8450 int eliminate_p;
8451 int threshold, insn_count;
8453 struct loop_ivs *ivs = LOOP_IVS (loop);
8454 rtx reg = bl->biv->dest_reg;
8455 rtx p;
8457 /* Scan all insns in the loop, stopping if we find one that uses the
8458 biv in a way that we cannot eliminate. */
8460 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8462 enum rtx_code code = GET_CODE (p);
8463 basic_block where_bb = 0;
8464 rtx where_insn = threshold >= insn_count ? 0 : p;
8466 /* If this is a libcall that sets a giv, skip ahead to its end. */
8467 if (GET_RTX_CLASS (code) == 'i')
8469 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8471 if (note)
8473 rtx last = XEXP (note, 0);
8474 rtx set = single_set (last);
8476 if (set && GET_CODE (SET_DEST (set)) == REG)
8478 unsigned int regno = REGNO (SET_DEST (set));
8480 if (regno < ivs->n_regs
8481 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8482 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8483 p = last;
8487 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8488 && reg_mentioned_p (reg, PATTERN (p))
8489 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8490 eliminate_p, where_bb, where_insn))
8492 if (loop_dump_stream)
8493 fprintf (loop_dump_stream,
8494 "Cannot eliminate biv %d: biv used in insn %d.\n",
8495 bl->regno, INSN_UID (p));
8496 break;
8500 if (p == loop->end)
8502 if (loop_dump_stream)
8503 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8504 bl->regno, eliminate_p ? "was" : "can be");
8505 return 1;
8508 return 0;
8511 /* INSN and REFERENCE are instructions in the same insn chain.
8512 Return non-zero if INSN is first. */
8515 loop_insn_first_p (insn, reference)
8516 rtx insn, reference;
8518 rtx p, q;
8520 for (p = insn, q = reference;;)
8522 /* Start with test for not first so that INSN == REFERENCE yields not
8523 first. */
8524 if (q == insn || ! p)
8525 return 0;
8526 if (p == reference || ! q)
8527 return 1;
8529 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8530 previous insn, hence the <= comparison below does not work if
8531 P is a note. */
8532 if (INSN_UID (p) < max_uid_for_loop
8533 && INSN_UID (q) < max_uid_for_loop
8534 && GET_CODE (p) != NOTE)
8535 return INSN_LUID (p) <= INSN_LUID (q);
8537 if (INSN_UID (p) >= max_uid_for_loop
8538 || GET_CODE (p) == NOTE)
8539 p = NEXT_INSN (p);
8540 if (INSN_UID (q) >= max_uid_for_loop)
8541 q = NEXT_INSN (q);
8545 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8546 the offset that we have to take into account due to auto-increment /
8547 giv derivation is zero. */
8548 static int
8549 biv_elimination_giv_has_0_offset (biv, giv, insn)
8550 struct induction *biv, *giv;
8551 rtx insn;
8553 /* If the giv V had the auto-inc address optimization applied
8554 to it, and INSN occurs between the giv insn and the biv
8555 insn, then we'd have to adjust the value used here.
8556 This is rare, so we don't bother to make this possible. */
8557 if (giv->auto_inc_opt
8558 && ((loop_insn_first_p (giv->insn, insn)
8559 && loop_insn_first_p (insn, biv->insn))
8560 || (loop_insn_first_p (biv->insn, insn)
8561 && loop_insn_first_p (insn, giv->insn))))
8562 return 0;
8564 return 1;
8567 /* If BL appears in X (part of the pattern of INSN), see if we can
8568 eliminate its use. If so, return 1. If not, return 0.
8570 If BIV does not appear in X, return 1.
8572 If ELIMINATE_P is non-zero, actually do the elimination.
8573 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8574 Depending on how many items have been moved out of the loop, it
8575 will either be before INSN (when WHERE_INSN is non-zero) or at the
8576 start of the loop (when WHERE_INSN is zero). */
8578 static int
8579 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8580 const struct loop *loop;
8581 rtx x, insn;
8582 struct iv_class *bl;
8583 int eliminate_p;
8584 basic_block where_bb;
8585 rtx where_insn;
8587 enum rtx_code code = GET_CODE (x);
8588 rtx reg = bl->biv->dest_reg;
8589 enum machine_mode mode = GET_MODE (reg);
8590 struct induction *v;
8591 rtx arg, tem;
8592 #ifdef HAVE_cc0
8593 rtx new;
8594 #endif
8595 int arg_operand;
8596 const char *fmt;
8597 int i, j;
8599 switch (code)
8601 case REG:
8602 /* If we haven't already been able to do something with this BIV,
8603 we can't eliminate it. */
8604 if (x == reg)
8605 return 0;
8606 return 1;
8608 case SET:
8609 /* If this sets the BIV, it is not a problem. */
8610 if (SET_DEST (x) == reg)
8611 return 1;
8613 /* If this is an insn that defines a giv, it is also ok because
8614 it will go away when the giv is reduced. */
8615 for (v = bl->giv; v; v = v->next_iv)
8616 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8617 return 1;
8619 #ifdef HAVE_cc0
8620 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8622 /* Can replace with any giv that was reduced and
8623 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8624 Require a constant for MULT_VAL, so we know it's nonzero.
8625 ??? We disable this optimization to avoid potential
8626 overflows. */
8628 for (v = bl->giv; v; v = v->next_iv)
8629 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8630 && v->add_val == const0_rtx
8631 && ! v->ignore && ! v->maybe_dead && v->always_computable
8632 && v->mode == mode
8633 && 0)
8635 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8636 continue;
8638 if (! eliminate_p)
8639 return 1;
8641 /* If the giv has the opposite direction of change,
8642 then reverse the comparison. */
8643 if (INTVAL (v->mult_val) < 0)
8644 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8645 const0_rtx, v->new_reg);
8646 else
8647 new = v->new_reg;
8649 /* We can probably test that giv's reduced reg. */
8650 if (validate_change (insn, &SET_SRC (x), new, 0))
8651 return 1;
8654 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8655 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8656 Require a constant for MULT_VAL, so we know it's nonzero.
8657 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8658 overflow problem. */
8660 for (v = bl->giv; v; v = v->next_iv)
8661 if (GET_CODE (v->mult_val) == CONST_INT
8662 && v->mult_val != const0_rtx
8663 && ! v->ignore && ! v->maybe_dead && v->always_computable
8664 && v->mode == mode
8665 && (GET_CODE (v->add_val) == SYMBOL_REF
8666 || GET_CODE (v->add_val) == LABEL_REF
8667 || GET_CODE (v->add_val) == CONST
8668 || (GET_CODE (v->add_val) == REG
8669 && REG_POINTER (v->add_val))))
8671 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8672 continue;
8674 if (! eliminate_p)
8675 return 1;
8677 /* If the giv has the opposite direction of change,
8678 then reverse the comparison. */
8679 if (INTVAL (v->mult_val) < 0)
8680 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8681 v->new_reg);
8682 else
8683 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8684 copy_rtx (v->add_val));
8686 /* Replace biv with the giv's reduced register. */
8687 update_reg_last_use (v->add_val, insn);
8688 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8689 return 1;
8691 /* Insn doesn't support that constant or invariant. Copy it
8692 into a register (it will be a loop invariant.) */
8693 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8695 loop_insn_emit_before (loop, 0, where_insn,
8696 gen_move_insn (tem,
8697 copy_rtx (v->add_val)));
8699 /* Substitute the new register for its invariant value in
8700 the compare expression. */
8701 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8702 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8703 return 1;
8706 #endif
8707 break;
8709 case COMPARE:
8710 case EQ: case NE:
8711 case GT: case GE: case GTU: case GEU:
8712 case LT: case LE: case LTU: case LEU:
8713 /* See if either argument is the biv. */
8714 if (XEXP (x, 0) == reg)
8715 arg = XEXP (x, 1), arg_operand = 1;
8716 else if (XEXP (x, 1) == reg)
8717 arg = XEXP (x, 0), arg_operand = 0;
8718 else
8719 break;
8721 if (CONSTANT_P (arg))
8723 /* First try to replace with any giv that has constant positive
8724 mult_val and constant add_val. We might be able to support
8725 negative mult_val, but it seems complex to do it in general. */
8727 for (v = bl->giv; v; v = v->next_iv)
8728 if (GET_CODE (v->mult_val) == CONST_INT
8729 && INTVAL (v->mult_val) > 0
8730 && (GET_CODE (v->add_val) == SYMBOL_REF
8731 || GET_CODE (v->add_val) == LABEL_REF
8732 || GET_CODE (v->add_val) == CONST
8733 || (GET_CODE (v->add_val) == REG
8734 && REG_POINTER (v->add_val)))
8735 && ! v->ignore && ! v->maybe_dead && v->always_computable
8736 && v->mode == mode)
8738 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8739 continue;
8741 if (! eliminate_p)
8742 return 1;
8744 /* Replace biv with the giv's reduced reg. */
8745 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8747 /* If all constants are actually constant integers and
8748 the derived constant can be directly placed in the COMPARE,
8749 do so. */
8750 if (GET_CODE (arg) == CONST_INT
8751 && GET_CODE (v->mult_val) == CONST_INT
8752 && GET_CODE (v->add_val) == CONST_INT)
8754 validate_change (insn, &XEXP (x, arg_operand),
8755 GEN_INT (INTVAL (arg)
8756 * INTVAL (v->mult_val)
8757 + INTVAL (v->add_val)), 1);
8759 else
8761 /* Otherwise, load it into a register. */
8762 tem = gen_reg_rtx (mode);
8763 loop_iv_add_mult_emit_before (loop, arg,
8764 v->mult_val, v->add_val,
8765 tem, where_bb, where_insn);
8766 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8768 if (apply_change_group ())
8769 return 1;
8772 /* Look for giv with positive constant mult_val and nonconst add_val.
8773 Insert insns to calculate new compare value.
8774 ??? Turn this off due to possible overflow. */
8776 for (v = bl->giv; v; v = v->next_iv)
8777 if (GET_CODE (v->mult_val) == CONST_INT
8778 && INTVAL (v->mult_val) > 0
8779 && ! v->ignore && ! v->maybe_dead && v->always_computable
8780 && v->mode == mode
8781 && 0)
8783 rtx tem;
8785 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8786 continue;
8788 if (! eliminate_p)
8789 return 1;
8791 tem = gen_reg_rtx (mode);
8793 /* Replace biv with giv's reduced register. */
8794 validate_change (insn, &XEXP (x, 1 - arg_operand),
8795 v->new_reg, 1);
8797 /* Compute value to compare against. */
8798 loop_iv_add_mult_emit_before (loop, arg,
8799 v->mult_val, v->add_val,
8800 tem, where_bb, where_insn);
8801 /* Use it in this insn. */
8802 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8803 if (apply_change_group ())
8804 return 1;
8807 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8809 if (loop_invariant_p (loop, arg) == 1)
8811 /* Look for giv with constant positive mult_val and nonconst
8812 add_val. Insert insns to compute new compare value.
8813 ??? Turn this off due to possible overflow. */
8815 for (v = bl->giv; v; v = v->next_iv)
8816 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8817 && ! v->ignore && ! v->maybe_dead && v->always_computable
8818 && v->mode == mode
8819 && 0)
8821 rtx tem;
8823 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8824 continue;
8826 if (! eliminate_p)
8827 return 1;
8829 tem = gen_reg_rtx (mode);
8831 /* Replace biv with giv's reduced register. */
8832 validate_change (insn, &XEXP (x, 1 - arg_operand),
8833 v->new_reg, 1);
8835 /* Compute value to compare against. */
8836 loop_iv_add_mult_emit_before (loop, arg,
8837 v->mult_val, v->add_val,
8838 tem, where_bb, where_insn);
8839 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8840 if (apply_change_group ())
8841 return 1;
8845 /* This code has problems. Basically, when checking whether we will
8846 eliminate BL, you can't know whether a particular giv
8847 of ARG will be reduced. If it isn't going to be reduced,
8848 we can't eliminate BL. We can try forcing it to be reduced,
8849 but that can generate poor code.
8851 The problem is that the benefit of reducing TV, below should
8852 be increased if BL can actually be eliminated, but this means
8853 we might have to do a topological sort of the order in which
8854 we try to process biv. It doesn't seem worthwhile to do
8855 this sort of thing now. */
8857 #if 0
8858 /* Otherwise the reg compared with had better be a biv. */
8859 if (GET_CODE (arg) != REG
8860 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8861 return 0;
8863 /* Look for a pair of givs, one for each biv,
8864 with identical coefficients. */
8865 for (v = bl->giv; v; v = v->next_iv)
8867 struct induction *tv;
8869 if (v->ignore || v->maybe_dead || v->mode != mode)
8870 continue;
8872 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8873 tv = tv->next_iv)
8874 if (! tv->ignore && ! tv->maybe_dead
8875 && rtx_equal_p (tv->mult_val, v->mult_val)
8876 && rtx_equal_p (tv->add_val, v->add_val)
8877 && tv->mode == mode)
8879 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8880 continue;
8882 if (! eliminate_p)
8883 return 1;
8885 /* Replace biv with its giv's reduced reg. */
8886 XEXP (x, 1 - arg_operand) = v->new_reg;
8887 /* Replace other operand with the other giv's
8888 reduced reg. */
8889 XEXP (x, arg_operand) = tv->new_reg;
8890 return 1;
8893 #endif
8896 /* If we get here, the biv can't be eliminated. */
8897 return 0;
8899 case MEM:
8900 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8901 biv is used in it, since it will be replaced. */
8902 for (v = bl->giv; v; v = v->next_iv)
8903 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8904 return 1;
8905 break;
8907 default:
8908 break;
8911 /* See if any subexpression fails elimination. */
8912 fmt = GET_RTX_FORMAT (code);
8913 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8915 switch (fmt[i])
8917 case 'e':
8918 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8919 eliminate_p, where_bb, where_insn))
8920 return 0;
8921 break;
8923 case 'E':
8924 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8925 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8926 eliminate_p, where_bb, where_insn))
8927 return 0;
8928 break;
8932 return 1;
8935 /* Return nonzero if the last use of REG
8936 is in an insn following INSN in the same basic block. */
8938 static int
8939 last_use_this_basic_block (reg, insn)
8940 rtx reg;
8941 rtx insn;
8943 rtx n;
8944 for (n = insn;
8945 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8946 n = NEXT_INSN (n))
8948 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8949 return 1;
8951 return 0;
8954 /* Called via `note_stores' to record the initial value of a biv. Here we
8955 just record the location of the set and process it later. */
8957 static void
8958 record_initial (dest, set, data)
8959 rtx dest;
8960 rtx set;
8961 void *data ATTRIBUTE_UNUSED;
8963 struct loop_ivs *ivs = (struct loop_ivs *) data;
8964 struct iv_class *bl;
8966 if (GET_CODE (dest) != REG
8967 || REGNO (dest) >= ivs->n_regs
8968 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
8969 return;
8971 bl = REG_IV_CLASS (ivs, REGNO (dest));
8973 /* If this is the first set found, record it. */
8974 if (bl->init_insn == 0)
8976 bl->init_insn = note_insn;
8977 bl->init_set = set;
8981 /* If any of the registers in X are "old" and currently have a last use earlier
8982 than INSN, update them to have a last use of INSN. Their actual last use
8983 will be the previous insn but it will not have a valid uid_luid so we can't
8984 use it. X must be a source expression only. */
8986 static void
8987 update_reg_last_use (x, insn)
8988 rtx x;
8989 rtx insn;
8991 /* Check for the case where INSN does not have a valid luid. In this case,
8992 there is no need to modify the regno_last_uid, as this can only happen
8993 when code is inserted after the loop_end to set a pseudo's final value,
8994 and hence this insn will never be the last use of x.
8995 ???? This comment is not correct. See for example loop_givs_reduce.
8996 This may insert an insn before another new insn. */
8997 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8998 && INSN_UID (insn) < max_uid_for_loop
8999 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9001 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9003 else
9005 int i, j;
9006 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9007 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9009 if (fmt[i] == 'e')
9010 update_reg_last_use (XEXP (x, i), insn);
9011 else if (fmt[i] == 'E')
9012 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9013 update_reg_last_use (XVECEXP (x, i, j), insn);
9018 /* Given an insn INSN and condition COND, return the condition in a
9019 canonical form to simplify testing by callers. Specifically:
9021 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9022 (2) Both operands will be machine operands; (cc0) will have been replaced.
9023 (3) If an operand is a constant, it will be the second operand.
9024 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9025 for GE, GEU, and LEU.
9027 If the condition cannot be understood, or is an inequality floating-point
9028 comparison which needs to be reversed, 0 will be returned.
9030 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
9032 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9033 insn used in locating the condition was found. If a replacement test
9034 of the condition is desired, it should be placed in front of that
9035 insn and we will be sure that the inputs are still valid.
9037 If WANT_REG is non-zero, we wish the condition to be relative to that
9038 register, if possible. Therefore, do not canonicalize the condition
9039 further. */
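/* For example, (le (reg) (const_int 4)) is canonicalized to
   (lt (reg) (const_int 5)), and (gt (const_int 0) (reg)) becomes
   (lt (reg) (const_int 0)) because the constant is moved to the
   second operand. */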
9042 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9043 rtx insn;
9044 rtx cond;
9045 int reverse;
9046 rtx *earliest;
9047 rtx want_reg;
9049 enum rtx_code code;
9050 rtx prev = insn;
9051 rtx set;
9052 rtx tem;
9053 rtx op0, op1;
9054 int reverse_code = 0;
9055 enum machine_mode mode;
9057 code = GET_CODE (cond);
9058 mode = GET_MODE (cond);
9059 op0 = XEXP (cond, 0);
9060 op1 = XEXP (cond, 1);
9062 if (reverse)
9063 code = reversed_comparison_code (cond, insn);
9064 if (code == UNKNOWN)
9065 return 0;
9067 if (earliest)
9068 *earliest = insn;
9070 /* If we are comparing a register with zero, see if the register is set
9071 in the previous insn to a COMPARE or a comparison operation. Perform
9072 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9073 in cse.c */
9075 while (GET_RTX_CLASS (code) == '<'
9076 && op1 == CONST0_RTX (GET_MODE (op0))
9077 && op0 != want_reg)
9079 /* Set non-zero when we find something of interest. */
9080 rtx x = 0;
9082 #ifdef HAVE_cc0
9083 /* If comparison with cc0, import actual comparison from compare
9084 insn. */
9085 if (op0 == cc0_rtx)
9087 if ((prev = prev_nonnote_insn (prev)) == 0
9088 || GET_CODE (prev) != INSN
9089 || (set = single_set (prev)) == 0
9090 || SET_DEST (set) != cc0_rtx)
9091 return 0;
9093 op0 = SET_SRC (set);
9094 op1 = CONST0_RTX (GET_MODE (op0));
9095 if (earliest)
9096 *earliest = prev;
9098 #endif
9100 /* If this is a COMPARE, pick up the two things being compared. */
9101 if (GET_CODE (op0) == COMPARE)
9103 op1 = XEXP (op0, 1);
9104 op0 = XEXP (op0, 0);
9105 continue;
9107 else if (GET_CODE (op0) != REG)
9108 break;
9110 /* Go back to the previous insn. Stop if it is not an INSN. We also
9111 stop if it isn't a single set or if it has a REG_INC note because
9112 we don't want to bother dealing with it. */
9114 if ((prev = prev_nonnote_insn (prev)) == 0
9115 || GET_CODE (prev) != INSN
9116 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9117 break;
9119 set = set_of (op0, prev);
9121 if (set
9122 && (GET_CODE (set) != SET
9123 || !rtx_equal_p (SET_DEST (set), op0)))
9124 break;
9126 /* If this is setting OP0, get what it sets it to if it looks
9127 relevant. */
9128 if (set)
9130 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9132 /* ??? We may not combine comparisons done in a CCmode with
9133 comparisons not done in a CCmode. This is to aid targets
9134 like Alpha that have an IEEE compliant EQ instruction, and
9135 a non-IEEE compliant BEQ instruction. The use of CCmode is
9136 actually artificial, simply to prevent the combination, but
9137 should not affect other platforms.
9139 However, we must allow VOIDmode comparisons to match either
9140 CCmode or non-CCmode comparison, because some ports have
9141 modeless comparisons inside branch patterns.
9143 ??? This mode check should perhaps look more like the mode check
9144 in simplify_comparison in combine. */
9146 if ((GET_CODE (SET_SRC (set)) == COMPARE
9147 || (((code == NE
9148 || (code == LT
9149 && GET_MODE_CLASS (inner_mode) == MODE_INT
9150 && (GET_MODE_BITSIZE (inner_mode)
9151 <= HOST_BITS_PER_WIDE_INT)
9152 && (STORE_FLAG_VALUE
9153 & ((HOST_WIDE_INT) 1
9154 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9155 #ifdef FLOAT_STORE_FLAG_VALUE
9156 || (code == LT
9157 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9158 && (REAL_VALUE_NEGATIVE
9159 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9160 #endif
9162 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9163 && (((GET_MODE_CLASS (mode) == MODE_CC)
9164 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9165 || mode == VOIDmode || inner_mode == VOIDmode))
9166 x = SET_SRC (set);
9167 else if (((code == EQ
9168 || (code == GE
9169 && (GET_MODE_BITSIZE (inner_mode)
9170 <= HOST_BITS_PER_WIDE_INT)
9171 && GET_MODE_CLASS (inner_mode) == MODE_INT
9172 && (STORE_FLAG_VALUE
9173 & ((HOST_WIDE_INT) 1
9174 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9175 #ifdef FLOAT_STORE_FLAG_VALUE
9176 || (code == GE
9177 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9178 && (REAL_VALUE_NEGATIVE
9179 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9180 #endif
9182 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9183 && (((GET_MODE_CLASS (mode) == MODE_CC)
9184 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9185 || mode == VOIDmode || inner_mode == VOIDmode))
9188 reverse_code = 1;
9189 x = SET_SRC (set);
9191 else
9192 break;
9195 else if (reg_set_p (op0, prev))
9196 /* If this sets OP0, but not directly, we have to give up. */
9197 break;
9199 if (x)
9201 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9202 code = GET_CODE (x);
9203 if (reverse_code)
9205 code = reversed_comparison_code (x, prev);
9206 if (code == UNKNOWN)
9207 return 0;
9208 reverse_code = 0;
9211 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9212 if (earliest)
9213 *earliest = prev;
9217 /* If constant is first, put it last. */
9218 if (CONSTANT_P (op0))
9219 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9221 /* If OP0 is the result of a comparison, we weren't able to find what
9222 was really being compared, so fail. */
9223 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9224 return 0;
9226 /* Canonicalize any ordered comparison with integers involving equality
9227 if we can do computations in the relevant mode and we do not
9228 overflow. */
9230 if (GET_CODE (op1) == CONST_INT
9231 && GET_MODE (op0) != VOIDmode
9232 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9234 HOST_WIDE_INT const_val = INTVAL (op1);
9235 unsigned HOST_WIDE_INT uconst_val = const_val;
9236 unsigned HOST_WIDE_INT max_val
9237 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9239 switch (code)
9241 case LE:
9242 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9243 code = LT, op1 = GEN_INT (const_val + 1);
9244 break;
9246 /* When cross-compiling, const_val might be sign-extended from
9247 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
9248 case GE:
9249 if ((HOST_WIDE_INT) (const_val & max_val)
9250 != (((HOST_WIDE_INT) 1
9251 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9252 code = GT, op1 = GEN_INT (const_val - 1);
9253 break;
9255 case LEU:
9256 if (uconst_val < max_val)
9257 code = LTU, op1 = GEN_INT (uconst_val + 1);
9258 break;
9260 case GEU:
9261 if (uconst_val != 0)
9262 code = GTU, op1 = GEN_INT (uconst_val - 1);
9263 break;
9265 default:
9266 break;
9270 #ifdef HAVE_cc0
9271 /* Never return CC0; return zero instead. */
9272 if (op0 == cc0_rtx)
9273 return 0;
9274 #endif
9276 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9279 /* Given a jump insn JUMP, return the condition that will cause it to branch
9280 to its JUMP_LABEL. If the condition cannot be understood, or is an
9281 inequality floating-point comparison which needs to be reversed, 0 will
9282 be returned.
9284 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9285 insn used in locating the condition was found. If a replacement test
9286 of the condition is desired, it should be placed in front of that
9287 insn and we will be sure that the inputs are still valid. */
9290 get_condition (jump, earliest)
9291 rtx jump;
9292 rtx *earliest;
9294 rtx cond;
9295 int reverse;
9296 rtx set;
9298 /* If this is not a standard conditional jump, we can't parse it. */
9299 if (GET_CODE (jump) != JUMP_INSN
9300 || ! any_condjump_p (jump))
9301 return 0;
9302 set = pc_set (jump);
9304 cond = XEXP (SET_SRC (set), 0);
9306 /* If this branches to JUMP_LABEL when the condition is false, reverse
9307 the condition. */
9308 reverse
9309 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9310 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9312 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9315 /* Similar to above routine, except that we also put an invariant last
9316 unless both operands are invariants. */
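/* A hypothetical illustration of the swap performed below: if the loop's
   exit test is (gt limit i), where LIMIT is loop-invariant and I is not,
   the comparison is returned as (lt i limit) so that the invariant
   operand comes last.  */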
9319 get_condition_for_loop (loop, x)
9320 const struct loop *loop;
9321 rtx x;
9323 rtx comparison = get_condition (x, (rtx*) 0);
9325 if (comparison == 0
9326 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9327 || loop_invariant_p (loop, XEXP (comparison, 1)))
9328 return comparison;
9330 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9331 XEXP (comparison, 1), XEXP (comparison, 0));
9334 /* Scan the function and determine whether it has indirect (computed) jumps.
9336 This is taken mostly from flow.c; similar code exists elsewhere
9337 in the compiler. It may be useful to put this into rtlanal.c. */
9338 static int
9339 indirect_jump_in_function_p (start)
9340 rtx start;
9342 rtx insn;
9344 for (insn = start; insn; insn = NEXT_INSN (insn))
9345 if (computed_jump_p (insn))
9346 return 1;
9348 return 0;
9351 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9352 documentation for LOOP_MEMS for the definition of `appropriate'.
9353 This function is called from prescan_loop via for_each_rtx. */
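/* The return values below follow the for_each_rtx convention: 0 means
   keep walking into sub-rtxes, while -1 means do not traverse into the
   sub-rtxes of the current expression (used here for CLOBBERs,
   CONST_DOUBLEs and note lists we are not interested in).  */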
9355 static int
9356 insert_loop_mem (mem, data)
9357 rtx *mem;
9358 void *data ATTRIBUTE_UNUSED;
9360 struct loop_info *loop_info = data;
9361 int i;
9362 rtx m = *mem;
9364 if (m == NULL_RTX)
9365 return 0;
9367 switch (GET_CODE (m))
9369 case MEM:
9370 break;
9372 case CLOBBER:
9373 /* We're not interested in MEMs that are only clobbered. */
9374 return -1;
9376 case CONST_DOUBLE:
9377 /* We're not interested in the MEM associated with a
9378 CONST_DOUBLE, so there's no need to traverse into this. */
9379 return -1;
9381 case EXPR_LIST:
9382 /* We're not interested in any MEMs that only appear in notes. */
9383 return -1;
9385 default:
9386 /* This is not a MEM. */
9387 return 0;
9390 /* See if we've already seen this MEM. */
9391 for (i = 0; i < loop_info->mems_idx; ++i)
9392 if (rtx_equal_p (m, loop_info->mems[i].mem))
9394 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9395 /* The modes of the two memory accesses are different. If
9396 this happens, something tricky is going on, and we just
9397 don't optimize accesses to this MEM. */
9398 loop_info->mems[i].optimize = 0;
9400 return 0;
9403 /* Resize the array, if necessary. */
9404 if (loop_info->mems_idx == loop_info->mems_allocated)
9406 if (loop_info->mems_allocated != 0)
9407 loop_info->mems_allocated *= 2;
9408 else
9409 loop_info->mems_allocated = 32;
9411 loop_info->mems = (loop_mem_info *)
9412 xrealloc (loop_info->mems,
9413 loop_info->mems_allocated * sizeof (loop_mem_info));
9416 /* Actually insert the MEM. */
9417 loop_info->mems[loop_info->mems_idx].mem = m;
9418 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9419 because we can't put it in a register. We still store it in the
9420 table, though, so that if we see the same address later, but in a
9421 non-BLK mode, we'll not think we can optimize it at that point. */
9422 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9423 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9424 ++loop_info->mems_idx;
9426 return 0;
9430 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9432 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9433 register that is modified by an insn between FROM and TO. If the
9434 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9435 more, stop incrementing it, to avoid overflow.
9437 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9438 register I is used, if it is only used once. Otherwise, it is set
9439 to 0 (for no uses) or const0_rtx for more than one use. This
9440 parameter may be zero, in which case this processing is not done.
9442 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9443 optimize register I. */
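/* In other words, after the scan:
      regs->array[I].set_in_loop       number of sets of reg I (counting
				       stops at 127),
      regs->array[I].single_usage      0 for no uses, the single using insn,
				       or const0_rtx for several uses,
      regs->array[I].may_not_optimize  nonzero for registers we must leave
				       alone.  */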
9445 static void
9446 loop_regs_scan (loop, extra_size)
9447 const struct loop *loop;
9448 int extra_size;
9450 struct loop_regs *regs = LOOP_REGS (loop);
9451 int old_nregs;
9452 /* last_set[n] is nonzero iff reg n has been set in the current
9453 basic block. In that case, it is the insn that last set reg n. */
9454 rtx *last_set;
9455 rtx insn;
9456 int i;
9458 old_nregs = regs->num;
9459 regs->num = max_reg_num ();
9461 /* Grow the regs array if not allocated or too small. */
9462 if (regs->num >= regs->size)
9464 regs->size = regs->num + extra_size;
9466 regs->array = (struct loop_reg *)
9467 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9469 /* Zero the new elements. */
9470 memset (regs->array + old_nregs, 0,
9471 (regs->size - old_nregs) * sizeof (*regs->array));
9474 /* Clear previously scanned fields but do not clear n_times_set. */
9475 for (i = 0; i < old_nregs; i++)
9477 regs->array[i].set_in_loop = 0;
9478 regs->array[i].may_not_optimize = 0;
9479 regs->array[i].single_usage = NULL_RTX;
9482 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9484 /* Scan the loop, recording register usage. */
9485 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9486 insn = NEXT_INSN (insn))
9488 if (INSN_P (insn))
9490 /* Record registers that have exactly one use. */
9491 find_single_use_in_loop (regs, insn, PATTERN (insn));
9493 /* Include uses in REG_EQUAL notes. */
9494 if (REG_NOTES (insn))
9495 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9497 if (GET_CODE (PATTERN (insn)) == SET
9498 || GET_CODE (PATTERN (insn)) == CLOBBER)
9499 count_one_set (regs, insn, PATTERN (insn), last_set);
9500 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9502 int i;
9503 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9504 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9505 last_set);
9509 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9510 memset (last_set, 0, regs->num * sizeof (rtx));
9513 /* Invalidate all hard registers clobbered by calls, with one exception:
9514 a call-clobbered PIC register is still function-invariant for our
9515 purposes, since we can hoist any PIC calculations out of the loop.
9516 Thus the call to rtx_varies_p. */
9517 if (LOOP_INFO (loop)->has_call)
9518 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9519 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9520 && rtx_varies_p (gen_rtx_REG (Pmode, i), /*for_alias=*/1))
9522 regs->array[i].may_not_optimize = 1;
9523 regs->array[i].set_in_loop = 1;
9526 #ifdef AVOID_CCMODE_COPIES
9527 /* Don't try to move insns which set CC registers if we should not
9528 create CCmode register copies. */
9529 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9530 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9531 regs->array[i].may_not_optimize = 1;
9532 #endif
9534 /* Set regs->array[I].n_times_set for the new registers. */
9535 for (i = old_nregs; i < regs->num; i++)
9536 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9538 free (last_set);
9541 /* Returns the number of real INSNs in the LOOP. */
9543 static int
9544 count_insns_in_loop (loop)
9545 const struct loop *loop;
9547 int count = 0;
9548 rtx insn;
9550 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9551 insn = NEXT_INSN (insn))
9552 if (INSN_P (insn))
9553 ++count;
9555 return count;
9558 /* Move MEMs into registers for the duration of the loop. */
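/* A rough sketch of the transformation performed here, assuming MEM's
   address is loop-invariant, MEM is not volatile, is not aliased by any
   other store in the loop, and cannot trap on a path that might be skipped:

	loop:  ... = MEM;  MEM = ...;  goto loop;

   becomes

	reg = MEM;  loop:  ... = reg;  reg = ...;  goto loop;  MEM = reg;

   where the store back to MEM after the loop is emitted only if the loop
   actually writes to it.  */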
9560 static void
9561 load_mems (loop)
9562 const struct loop *loop;
9564 struct loop_info *loop_info = LOOP_INFO (loop);
9565 struct loop_regs *regs = LOOP_REGS (loop);
9566 int maybe_never = 0;
9567 int i;
9568 rtx p, prev_ebb_head;
9569 rtx label = NULL_RTX;
9570 rtx end_label;
9571 /* Nonzero if the next instruction may never be executed. */
9572 int next_maybe_never = 0;
9573 unsigned int last_max_reg = max_reg_num ();
9575 if (loop_info->mems_idx == 0)
9576 return;
9578 /* We cannot use next_label here because it skips over normal insns. */
9579 end_label = next_nonnote_insn (loop->end);
9580 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9581 end_label = NULL_RTX;
9583 /* Check to see if it's possible that some instructions in the loop are
9584 never executed. Also check if there is a goto out of the loop other
9585 than right after the end of the loop. */
9586 for (p = next_insn_in_loop (loop, loop->scan_start);
9587 p != NULL_RTX;
9588 p = next_insn_in_loop (loop, p))
9590 if (GET_CODE (p) == CODE_LABEL)
9591 maybe_never = 1;
9592 else if (GET_CODE (p) == JUMP_INSN
9593 /* If we enter the loop in the middle, and scan
9594 around to the beginning, don't set maybe_never
9595 for that. This must be an unconditional jump,
9596 otherwise the code at the top of the loop might
9597 never be executed. Unconditional jumps are
9598 followed by a barrier and then the loop end. */
9599 && ! (GET_CODE (p) == JUMP_INSN
9600 && JUMP_LABEL (p) == loop->top
9601 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9602 && any_uncondjump_p (p)))
9604 /* If this is a jump outside of the loop but not right
9605 after the end of the loop, we would have to emit new fixup
9606 sequences for each such label. */
9607 if (/* If we can't tell where control might go when this
9608 JUMP_INSN is executed, we must be conservative. */
9609 !JUMP_LABEL (p)
9610 || (JUMP_LABEL (p) != end_label
9611 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9612 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9613 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9614 return;
9616 if (!any_condjump_p (p))
9617 /* Something complicated. */
9618 maybe_never = 1;
9619 else
9620 /* If there are any more instructions in the loop, they
9621 might not be reached. */
9622 next_maybe_never = 1;
9624 else if (next_maybe_never)
9625 maybe_never = 1;
9628 /* Find start of the extended basic block that enters the loop. */
9629 for (p = loop->start;
9630 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9631 p = PREV_INSN (p))
9633 prev_ebb_head = p;
9635 cselib_init ();
9637 /* Build table of mems that get set to constant values before the
9638 loop. */
9639 for (; p != loop->start; p = NEXT_INSN (p))
9640 cselib_process_insn (p);
9642 /* Actually move the MEMs. */
9643 for (i = 0; i < loop_info->mems_idx; ++i)
9645 regset_head load_copies;
9646 regset_head store_copies;
9647 int written = 0;
9648 rtx reg;
9649 rtx mem = loop_info->mems[i].mem;
9650 rtx mem_list_entry;
9652 if (MEM_VOLATILE_P (mem)
9653 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9654 /* There's no telling whether or not MEM is modified. */
9655 loop_info->mems[i].optimize = 0;
9657 /* Go through the MEMs written to in the loop to see if this
9658 one is aliased by one of them. */
9659 mem_list_entry = loop_info->store_mems;
9660 while (mem_list_entry)
9662 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9663 written = 1;
9664 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9665 mem, rtx_varies_p))
9667 /* MEM is indeed aliased by this store. */
9668 loop_info->mems[i].optimize = 0;
9669 break;
9671 mem_list_entry = XEXP (mem_list_entry, 1);
9674 if (flag_float_store && written
9675 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9676 loop_info->mems[i].optimize = 0;
9678 /* If this MEM is written to, we must be sure that there
9679 are no reads from another MEM that aliases this one. */
9680 if (loop_info->mems[i].optimize && written)
9682 int j;
9684 for (j = 0; j < loop_info->mems_idx; ++j)
9686 if (j == i)
9687 continue;
9688 else if (true_dependence (mem,
9689 VOIDmode,
9690 loop_info->mems[j].mem,
9691 rtx_varies_p))
9693 /* It's not safe to hoist loop_info->mems[i] out of
9694 the loop because writes to it might not be
9695 seen by reads from loop_info->mems[j]. */
9696 loop_info->mems[i].optimize = 0;
9697 break;
9702 if (maybe_never && may_trap_p (mem))
9703 /* We can't access the MEM outside the loop; it might
9704 cause a trap that wouldn't have happened otherwise. */
9705 loop_info->mems[i].optimize = 0;
9707 if (!loop_info->mems[i].optimize)
9708 /* We thought we were going to lift this MEM out of the
9709 loop, but later discovered that we could not. */
9710 continue;
9712 INIT_REG_SET (&load_copies);
9713 INIT_REG_SET (&store_copies);
9715 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9716 order to keep scan_loop from moving stores to this MEM
9717 out of the loop just because this REG is neither a
9718 user-variable nor used in the loop test. */
9719 reg = gen_reg_rtx (GET_MODE (mem));
9720 REG_USERVAR_P (reg) = 1;
9721 loop_info->mems[i].reg = reg;
9723 /* Now, replace all references to the MEM with the
9724 corresponding pseudos. */
9725 maybe_never = 0;
9726 for (p = next_insn_in_loop (loop, loop->scan_start);
9727 p != NULL_RTX;
9728 p = next_insn_in_loop (loop, p))
9730 if (INSN_P (p))
9732 rtx set;
9734 set = single_set (p);
9736 /* See if this copies the mem into a register that isn't
9737 modified afterwards. We'll try to do copy propagation
9738 a little further on. */
9739 if (set
9740 /* @@@ This test is _way_ too conservative. */
9741 && ! maybe_never
9742 && GET_CODE (SET_DEST (set)) == REG
9743 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9744 && REGNO (SET_DEST (set)) < last_max_reg
9745 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9746 && rtx_equal_p (SET_SRC (set), mem))
9747 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9749 /* See if this copies the mem from a register that isn't
9750 modified afterwards. We'll try to remove the
9751 redundant copy later on by doing a little register
9752 renaming and copy propagation. This will help
9753 to untangle things for the BIV detection code. */
9754 if (set
9755 && ! maybe_never
9756 && GET_CODE (SET_SRC (set)) == REG
9757 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9758 && REGNO (SET_SRC (set)) < last_max_reg
9759 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9760 && rtx_equal_p (SET_DEST (set), mem))
9761 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9763 /* Replace the memory reference with the shadow register. */
9764 replace_loop_mems (p, loop_info->mems[i].mem,
9765 loop_info->mems[i].reg);
9768 if (GET_CODE (p) == CODE_LABEL
9769 || GET_CODE (p) == JUMP_INSN)
9770 maybe_never = 1;
9773 if (! apply_change_group ())
9774 /* We couldn't replace all occurrences of the MEM. */
9775 loop_info->mems[i].optimize = 0;
9776 else
9778 /* Load the memory immediately before LOOP->START, which is
9779 the NOTE_LOOP_BEG. */
9780 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9781 rtx set;
9782 rtx best = mem;
9783 int j;
9784 struct elt_loc_list *const_equiv = 0;
9786 if (e)
9788 struct elt_loc_list *equiv;
9789 struct elt_loc_list *best_equiv = 0;
9790 for (equiv = e->locs; equiv; equiv = equiv->next)
9792 if (CONSTANT_P (equiv->loc))
9793 const_equiv = equiv;
9794 else if (GET_CODE (equiv->loc) == REG
9795 /* Extending hard register lifetimes causes a crash
9796 on SRC targets. Doing so on non-SRC targets is
9797 probably not a good idea either, since we most
9798 likely have a pseudo-register equivalence as
9799 well. */
9800 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9801 best_equiv = equiv;
9803 /* Use the constant equivalence if that is cheap enough. */
9804 if (! best_equiv)
9805 best_equiv = const_equiv;
9806 else if (const_equiv
9807 && (rtx_cost (const_equiv->loc, SET)
9808 <= rtx_cost (best_equiv->loc, SET)))
9810 best_equiv = const_equiv;
9811 const_equiv = 0;
9814 /* If best_equiv is nonzero, we know that MEM is set to a
9815 constant or register before the loop. We will use this
9816 knowledge to initialize the shadow register with that
9817 constant or reg rather than by loading from MEM. */
9818 if (best_equiv)
9819 best = copy_rtx (best_equiv->loc);
9822 set = gen_move_insn (reg, best);
9823 set = loop_insn_hoist (loop, set);
9824 if (REG_P (best))
9826 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9827 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9829 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9830 break;
9834 if (const_equiv)
9835 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9837 if (written)
9839 if (label == NULL_RTX)
9841 label = gen_label_rtx ();
9842 emit_label_after (label, loop->end);
9845 /* Store the memory immediately after END, which is
9846 the NOTE_LOOP_END. */
9847 set = gen_move_insn (copy_rtx (mem), reg);
9848 loop_insn_emit_after (loop, 0, label, set);
9851 if (loop_dump_stream)
9853 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9854 REGNO (reg), (written ? "r/w" : "r/o"));
9855 print_rtl (loop_dump_stream, mem);
9856 fputc ('\n', loop_dump_stream);
9859 /* Attempt a bit of copy propagation. This helps untangle the
9860 data flow, and enables {basic,general}_induction_var to find
9861 more bivs/givs. */
9862 EXECUTE_IF_SET_IN_REG_SET
9863 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9865 try_copy_prop (loop, reg, j);
9867 CLEAR_REG_SET (&load_copies);
9869 EXECUTE_IF_SET_IN_REG_SET
9870 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9872 try_swap_copy_prop (loop, reg, j);
9874 CLEAR_REG_SET (&store_copies);
9878 if (label != NULL_RTX && end_label != NULL_RTX)
9880 /* Now, we need to replace all references to the previous exit
9881 label with the new one. */
9882 rtx_pair rr;
9883 rr.r1 = end_label;
9884 rr.r2 = label;
9886 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9888 for_each_rtx (&p, replace_label, &rr);
9890 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9891 field. This is not handled by for_each_rtx because it doesn't
9892 handle unprinted ('0') fields. We need to update JUMP_LABEL
9893 because the immediately following unroll pass will use it.
9894 replace_label would not work anyway, because that only handles
9895 LABEL_REFs. */
9896 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9897 JUMP_LABEL (p) = label;
9901 cselib_finish ();
9904 /* For communication between note_reg_stored and its caller. */
9905 struct note_reg_stored_arg
9907 int set_seen;
9908 rtx reg;
9911 /* Called via note_stores, record in SET_SEEN whether X, which is written,
9912 is equal to ARG. */
9913 static void
9914 note_reg_stored (x, setter, arg)
9915 rtx x, setter ATTRIBUTE_UNUSED;
9916 void *arg;
9918 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9919 if (t->reg == x)
9920 t->set_seen = 1;
9923 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9924 There must be exactly one insn that sets this pseudo; it will be
9925 deleted if all replacements succeed and we can prove that the register
9926 is not used after the loop. */
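/* A hypothetical example of its use from load_mems: if the loop contains

	pseudo_R = shadow_reg;   ... uses of pseudo_R ...

   and pseudo_R is set only there, the uses of pseudo_R are rewritten to
   use shadow_reg directly, and the now-redundant copy can be deleted when
   pseudo_R is not live after the loop.  */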
9928 static void
9929 try_copy_prop (loop, replacement, regno)
9930 const struct loop *loop;
9931 rtx replacement;
9932 unsigned int regno;
9934 /* This is the reg that we are copying from. */
9935 rtx reg_rtx = regno_reg_rtx[regno];
9936 rtx init_insn = 0;
9937 rtx insn;
9938 /* These help keep track of whether we replaced all uses of the reg. */
9939 int replaced_last = 0;
9940 int store_is_first = 0;
9942 for (insn = next_insn_in_loop (loop, loop->scan_start);
9943 insn != NULL_RTX;
9944 insn = next_insn_in_loop (loop, insn))
9946 rtx set;
9948 /* Only substitute within one extended basic block from the initializing
9949 insn. */
9950 if (GET_CODE (insn) == CODE_LABEL && init_insn)
9951 break;
9953 if (! INSN_P (insn))
9954 continue;
9956 /* Is this the initializing insn? */
9957 set = single_set (insn);
9958 if (set
9959 && GET_CODE (SET_DEST (set)) == REG
9960 && REGNO (SET_DEST (set)) == regno)
9962 if (init_insn)
9963 abort ();
9965 init_insn = insn;
9966 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
9967 store_is_first = 1;
9970 /* Only substitute after seeing the initializing insn. */
9971 if (init_insn && insn != init_insn)
9973 struct note_reg_stored_arg arg;
9975 replace_loop_regs (insn, reg_rtx, replacement);
9976 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
9977 replaced_last = 1;
9979 /* Stop replacing when REPLACEMENT is modified. */
9980 arg.reg = replacement;
9981 arg.set_seen = 0;
9982 note_stores (PATTERN (insn), note_reg_stored, &arg);
9983 if (arg.set_seen)
9985 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
9987 /* It is possible that we've turned a previously valid REG_EQUAL
9988 note into an invalid one: we changed REGNO to REPLACEMENT and,
9989 unlike REGNO, REPLACEMENT is modified here, so the note's meaning changed. */
9990 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
9991 remove_note (insn, note);
9992 break;
9996 if (! init_insn)
9997 abort ();
9998 if (apply_change_group ())
10000 if (loop_dump_stream)
10001 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10002 if (store_is_first && replaced_last)
10004 rtx first;
10005 rtx retval_note;
10007 /* Assume we're just deleting INIT_INSN. */
10008 first = init_insn;
10009 /* Look for REG_RETVAL note. If we're deleting the end of
10010 the libcall sequence, the whole sequence can go. */
10011 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10012 /* If we found a REG_RETVAL note, find the first instruction
10013 in the sequence. */
10014 if (retval_note)
10015 first = XEXP (retval_note, 0);
10017 /* Delete the instructions. */
10018 loop_delete_insns (first, init_insn);
10020 if (loop_dump_stream)
10021 fprintf (loop_dump_stream, ".\n");
10025 /* Replace all the instructions from FIRST up to and including LAST
10026 with NOTE_INSN_DELETED notes. */
10028 static void
10029 loop_delete_insns (first, last)
10030 rtx first;
10031 rtx last;
10033 while (1)
10035 if (loop_dump_stream)
10036 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10037 INSN_UID (first));
10038 delete_insn (first);
10040 /* If this was the LAST instruction we're supposed to delete,
10041 we're done. */
10042 if (first == last)
10043 break;
10045 first = NEXT_INSN (first);
10049 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10050 loop LOOP if the order of the sets of these registers can be
10051 swapped. There must be exactly one insn within the loop that sets
10052 this pseudo, followed immediately by a move insn that copies
10053 REGNO into REPLACEMENT. */
10054 static void
10055 try_swap_copy_prop (loop, replacement, regno)
10056 const struct loop *loop;
10057 rtx replacement;
10058 unsigned int regno;
10060 rtx insn;
10061 rtx set = NULL_RTX;
10062 unsigned int new_regno;
10064 new_regno = REGNO (replacement);
10066 for (insn = next_insn_in_loop (loop, loop->scan_start);
10067 insn != NULL_RTX;
10068 insn = next_insn_in_loop (loop, insn))
10070 /* Search for the insn that copies REGNO to NEW_REGNO. */
10071 if (INSN_P (insn)
10072 && (set = single_set (insn))
10073 && GET_CODE (SET_DEST (set)) == REG
10074 && REGNO (SET_DEST (set)) == new_regno
10075 && GET_CODE (SET_SRC (set)) == REG
10076 && REGNO (SET_SRC (set)) == regno)
10077 break;
10080 if (insn != NULL_RTX)
10082 rtx prev_insn;
10083 rtx prev_set;
10085 /* Some DEF-USE info would come in handy here to make this
10086 function more general. For now, just check the previous insn,
10087 which is the most likely candidate for setting REGNO. */
10089 prev_insn = PREV_INSN (insn);
10091 if (INSN_P (insn)
10092 && (prev_set = single_set (prev_insn))
10093 && GET_CODE (SET_DEST (prev_set)) == REG
10094 && REGNO (SET_DEST (prev_set)) == regno)
10096 /* We have:
10097 (set (reg regno) (expr))
10098 (set (reg new_regno) (reg regno))
10100 so try converting this to:
10101 (set (reg new_regno) (expr))
10102 (set (reg regno) (reg new_regno))
10104 The former construct is often generated when a global
10105 variable used for an induction variable is shadowed by a
10106 register (NEW_REGNO). The latter construct improves the
10107 chances of GIV replacement and BIV elimination. */
10109 validate_change (prev_insn, &SET_DEST (prev_set),
10110 replacement, 1);
10111 validate_change (insn, &SET_DEST (set),
10112 SET_SRC (set), 1);
10113 validate_change (insn, &SET_SRC (set),
10114 replacement, 1);
10116 if (apply_change_group ())
10118 if (loop_dump_stream)
10119 fprintf (loop_dump_stream,
10120 " Swapped set of reg %d at %d with reg %d at %d.\n",
10121 regno, INSN_UID (insn),
10122 new_regno, INSN_UID (prev_insn));
10124 /* Update first use of REGNO. */
10125 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10126 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10128 /* Now perform copy propagation to hopefully
10129 remove all uses of REGNO within the loop. */
10130 try_copy_prop (loop, replacement, regno);
10136 /* Replace MEM with its associated pseudo register. This function is
10137 called from load_mems via for_each_rtx. DATA is actually a pointer
10138 to a structure describing the instruction currently being scanned
10139 and the MEM we are currently replacing. */
10141 static int
10142 replace_loop_mem (mem, data)
10143 rtx *mem;
10144 void *data;
10146 loop_replace_args *args = (loop_replace_args *) data;
10147 rtx m = *mem;
10149 if (m == NULL_RTX)
10150 return 0;
10152 switch (GET_CODE (m))
10154 case MEM:
10155 break;
10157 case CONST_DOUBLE:
10158 /* We're not interested in the MEM associated with a
10159 CONST_DOUBLE, so there's no need to traverse into one. */
10160 return -1;
10162 default:
10163 /* This is not a MEM. */
10164 return 0;
10167 if (!rtx_equal_p (args->match, m))
10168 /* This is not the MEM we are currently replacing. */
10169 return 0;
10171 /* Actually replace the MEM. */
10172 validate_change (args->insn, mem, args->replacement, 1);
10174 return 0;
10177 static void
10178 replace_loop_mems (insn, mem, reg)
10179 rtx insn;
10180 rtx mem;
10181 rtx reg;
10183 loop_replace_args args;
10185 args.insn = insn;
10186 args.match = mem;
10187 args.replacement = reg;
10189 for_each_rtx (&insn, replace_loop_mem, &args);
10192 /* Replace one register with another. Called through for_each_rtx; PX points
10193 to the rtx being scanned. DATA is actually a pointer to
10194 a structure of arguments. */
10196 static int
10197 replace_loop_reg (px, data)
10198 rtx *px;
10199 void *data;
10201 rtx x = *px;
10202 loop_replace_args *args = (loop_replace_args *) data;
10204 if (x == NULL_RTX)
10205 return 0;
10207 if (x == args->match)
10208 validate_change (args->insn, px, args->replacement, 1);
10210 return 0;
10213 static void
10214 replace_loop_regs (insn, reg, replacement)
10215 rtx insn;
10216 rtx reg;
10217 rtx replacement;
10219 loop_replace_args args;
10221 args.insn = insn;
10222 args.match = reg;
10223 args.replacement = replacement;
10225 for_each_rtx (&insn, replace_loop_reg, &args);
10228 /* Replace occurrences of the old exit label for the loop with the new
10229 one. DATA is an rtx_pair containing the old and new labels,
10230 respectively. */
10232 static int
10233 replace_label (x, data)
10234 rtx *x;
10235 void *data;
10237 rtx l = *x;
10238 rtx old_label = ((rtx_pair *) data)->r1;
10239 rtx new_label = ((rtx_pair *) data)->r2;
10241 if (l == NULL_RTX)
10242 return 0;
10244 if (GET_CODE (l) != LABEL_REF)
10245 return 0;
10247 if (XEXP (l, 0) != old_label)
10248 return 0;
10250 XEXP (l, 0) = new_label;
10251 ++LABEL_NUSES (new_label);
10252 --LABEL_NUSES (old_label);
10254 return 0;
10257 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10258 (ignored in the interim). */
10260 static rtx
10261 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10262 const struct loop *loop ATTRIBUTE_UNUSED;
10263 basic_block where_bb ATTRIBUTE_UNUSED;
10264 rtx where_insn;
10265 rtx pattern;
10267 return emit_insn_after (pattern, where_insn);
10271 /* If WHERE_INSN is non-zero, emit an insn for PATTERN before WHERE_INSN
10272 in basic block WHERE_BB (ignored in the interim) within the loop;
10273 otherwise, hoist PATTERN into the loop pre-header. */
10276 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10277 const struct loop *loop;
10278 basic_block where_bb ATTRIBUTE_UNUSED;
10279 rtx where_insn;
10280 rtx pattern;
10282 if (! where_insn)
10283 return loop_insn_hoist (loop, pattern);
10284 return emit_insn_before (pattern, where_insn);
10288 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10289 WHERE_BB (ignored in the interim) within the loop. */
10291 static rtx
10292 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10293 const struct loop *loop ATTRIBUTE_UNUSED;
10294 basic_block where_bb ATTRIBUTE_UNUSED;
10295 rtx where_insn;
10296 rtx pattern;
10298 return emit_call_insn_before (pattern, where_insn);
10302 /* Hoist insn for PATTERN into the loop pre-header. */
10305 loop_insn_hoist (loop, pattern)
10306 const struct loop *loop;
10307 rtx pattern;
10309 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10313 /* Hoist call insn for PATTERN into the loop pre-header. */
10315 static rtx
10316 loop_call_insn_hoist (loop, pattern)
10317 const struct loop *loop;
10318 rtx pattern;
10320 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10324 /* Sink insn for PATTERN after the loop end. */
10327 loop_insn_sink (loop, pattern)
10328 const struct loop *loop;
10329 rtx pattern;
10331 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10335 /* If the loop has multiple exits, emit insn for PATTERN before the
10336 loop to ensure that it will always be executed no matter how the
10337 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10338 since this is slightly more efficient. */
10340 static rtx
10341 loop_insn_sink_or_swim (loop, pattern)
10342 const struct loop *loop;
10343 rtx pattern;
10345 if (loop->exit_count)
10346 return loop_insn_hoist (loop, pattern);
10347 else
10348 return loop_insn_sink (loop, pattern);
10351 static void
10352 loop_ivs_dump (loop, file, verbose)
10353 const struct loop *loop;
10354 FILE *file;
10355 int verbose;
10357 struct iv_class *bl;
10358 int iv_num = 0;
10360 if (! loop || ! file)
10361 return;
10363 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10364 iv_num++;
10366 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10368 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10370 loop_iv_class_dump (bl, file, verbose);
10371 fputc ('\n', file);
10376 static void
10377 loop_iv_class_dump (bl, file, verbose)
10378 const struct iv_class *bl;
10379 FILE *file;
10380 int verbose ATTRIBUTE_UNUSED;
10382 struct induction *v;
10383 rtx incr;
10384 int i;
10386 if (! bl || ! file)
10387 return;
10389 fprintf (file, "IV class for reg %d, benefit %d\n",
10390 bl->regno, bl->total_benefit);
10392 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10393 if (bl->initial_value)
10395 fprintf (file, ", init val: ");
10396 print_simple_rtl (file, bl->initial_value);
10398 if (bl->initial_test)
10400 fprintf (file, ", init test: ");
10401 print_simple_rtl (file, bl->initial_test);
10403 fputc ('\n', file);
10405 if (bl->final_value)
10407 fprintf (file, " Final val: ");
10408 print_simple_rtl (file, bl->final_value);
10409 fputc ('\n', file);
10412 if ((incr = biv_total_increment (bl)))
10414 fprintf (file, " Total increment: ");
10415 print_simple_rtl (file, incr);
10416 fputc ('\n', file);
10419 /* List the increments. */
10420 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10422 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10423 print_simple_rtl (file, v->add_val);
10424 fputc ('\n', file);
10427 /* List the givs. */
10428 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10430 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10431 i, INSN_UID (v->insn), v->benefit);
10432 if (v->giv_type == DEST_ADDR)
10433 print_simple_rtl (file, v->mem);
10434 else
10435 print_simple_rtl (file, single_set (v->insn));
10436 fputc ('\n', file);
10441 static void
10442 loop_biv_dump (v, file, verbose)
10443 const struct induction *v;
10444 FILE *file;
10445 int verbose;
10447 if (! v || ! file)
10448 return;
10450 fprintf (file,
10451 "Biv %d: insn %d",
10452 REGNO (v->dest_reg), INSN_UID (v->insn));
10453 fprintf (file, " const ");
10454 print_simple_rtl (file, v->add_val);
10456 if (verbose && v->final_value)
10458 fputc ('\n', file);
10459 fprintf (file, " final ");
10460 print_simple_rtl (file, v->final_value);
10463 fputc ('\n', file);
10467 static void
10468 loop_giv_dump (v, file, verbose)
10469 const struct induction *v;
10470 FILE *file;
10471 int verbose;
10473 if (! v || ! file)
10474 return;
10476 if (v->giv_type == DEST_REG)
10477 fprintf (file, "Giv %d: insn %d",
10478 REGNO (v->dest_reg), INSN_UID (v->insn));
10479 else
10480 fprintf (file, "Dest address: insn %d",
10481 INSN_UID (v->insn));
10483 fprintf (file, " src reg %d benefit %d",
10484 REGNO (v->src_reg), v->benefit);
10485 fprintf (file, " lifetime %d",
10486 v->lifetime);
10488 if (v->replaceable)
10489 fprintf (file, " replaceable");
10491 if (v->no_const_addval)
10492 fprintf (file, " ncav");
10494 if (v->ext_dependent)
10496 switch (GET_CODE (v->ext_dependent))
10498 case SIGN_EXTEND:
10499 fprintf (file, " ext se");
10500 break;
10501 case ZERO_EXTEND:
10502 fprintf (file, " ext ze");
10503 break;
10504 case TRUNCATE:
10505 fprintf (file, " ext tr");
10506 break;
10507 default:
10508 abort ();
10512 fputc ('\n', file);
10513 fprintf (file, " mult ");
10514 print_simple_rtl (file, v->mult_val);
10516 fputc ('\n', file);
10517 fprintf (file, " add ");
10518 print_simple_rtl (file, v->add_val);
10520 if (verbose && v->final_value)
10522 fputc ('\n', file);
10523 fprintf (file, " final ");
10524 print_simple_rtl (file, v->final_value);
10527 fputc ('\n', file);
10531 void
10532 debug_ivs (loop)
10533 const struct loop *loop;
10535 loop_ivs_dump (loop, stderr, 1);
10539 void
10540 debug_iv_class (bl)
10541 const struct iv_class *bl;
10543 loop_iv_class_dump (bl, stderr, 1);
10547 void
10548 debug_biv (v)
10549 const struct induction *v;
10551 loop_biv_dump (v, stderr, 1);
10555 void
10556 debug_giv (v)
10557 const struct induction *v;
10559 loop_giv_dump (v, stderr, 1);
10563 #define LOOP_BLOCK_NUM_1(INSN) \
10564 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10566 /* The notes do not have an assigned block, so look at the next insn. */
10567 #define LOOP_BLOCK_NUM(INSN) \
10568 ((INSN) ? (GET_CODE (INSN) == NOTE \
10569 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10570 : LOOP_BLOCK_NUM_1 (INSN)) \
10571 : -1)
10573 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10575 static void
10576 loop_dump_aux (loop, file, verbose)
10577 const struct loop *loop;
10578 FILE *file;
10579 int verbose ATTRIBUTE_UNUSED;
10581 rtx label;
10583 if (! loop || ! file)
10584 return;
10586 /* Print diagnostics to compare our concept of a loop with
10587 what the loop notes say. */
10588 if (! PREV_INSN (loop->first->head)
10589 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10590 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10591 != NOTE_INSN_LOOP_BEG)
10592 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10593 INSN_UID (PREV_INSN (loop->first->head)));
10594 if (! NEXT_INSN (loop->last->end)
10595 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10596 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10597 != NOTE_INSN_LOOP_END)
10598 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10599 INSN_UID (NEXT_INSN (loop->last->end)));
10601 if (loop->start)
10603 fprintf (file,
10604 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10605 LOOP_BLOCK_NUM (loop->start),
10606 LOOP_INSN_UID (loop->start),
10607 LOOP_BLOCK_NUM (loop->cont),
10608 LOOP_INSN_UID (loop->cont),
10609 LOOP_BLOCK_NUM (loop->cont),
10610 LOOP_INSN_UID (loop->cont),
10611 LOOP_BLOCK_NUM (loop->vtop),
10612 LOOP_INSN_UID (loop->vtop),
10613 LOOP_BLOCK_NUM (loop->end),
10614 LOOP_INSN_UID (loop->end));
10615 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10616 LOOP_BLOCK_NUM (loop->top),
10617 LOOP_INSN_UID (loop->top),
10618 LOOP_BLOCK_NUM (loop->scan_start),
10619 LOOP_INSN_UID (loop->scan_start));
10620 fprintf (file, ";; exit_count %d", loop->exit_count);
10621 if (loop->exit_count)
10623 fputs (", labels:", file);
10624 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10626 fprintf (file, " %d ",
10627 LOOP_INSN_UID (XEXP (label, 0)));
10630 fputs ("\n", file);
10632 /* This can happen when a marked loop appears as two nested loops,
10633 say from while (a || b) {}. The inner loop won't match
10634 the loop markers but the outer one will. */
10635 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10636 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10640 /* Call this function from the debugger to dump LOOP. */
10642 void
10643 debug_loop (loop)
10644 const struct loop *loop;
10646 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10649 /* Call this function from the debugger to dump LOOPS. */
10651 void
10652 debug_loops (loops)
10653 const struct loops *loops;
10655 flow_loops_dump (loops, stderr, loop_dump_aux, 1);