1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
34 Most of the complexity is in heuristics to decide when it is worth
35 while to do these things. */
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "obstack.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
46 #include "insn-config.h"
47 #include "regs.h"
48 #include "recog.h"
49 #include "flags.h"
50 #include "real.h"
51 #include "loop.h"
52 #include "cselib.h"
53 #include "except.h"
54 #include "toplev.h"
55 #include "predict.h"
56 #include "insn-flags.h"
57 #include "optabs.h"
59 /* Not really meaningful values, but at least something. */
60 #ifndef SIMULTANEOUS_PREFETCHES
61 #define SIMULTANEOUS_PREFETCHES 3
62 #endif
63 #ifndef PREFETCH_BLOCK
64 #define PREFETCH_BLOCK 32
65 #endif
66 #ifndef HAVE_prefetch
67 #define HAVE_prefetch 0
68 #define CODE_FOR_prefetch 0
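/* Fallback stub: since HAVE_prefetch is 0 here, gen_prefetch must never
   actually be called; the definition only keeps references compiling.  */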
69 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
70 #endif
 72 /* Give up the prefetch optimizations once we exceed a given threshold.
73 It is unlikely that we would be able to optimize something in a loop
74 with so many detected prefetches. */
75 #define MAX_PREFETCHES 100
76 /* The number of prefetch blocks that are beneficial to fetch at once before
77 a loop with a known (and low) iteration count. */
78 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
79 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
80 since it is likely that the data are already in the cache. */
81 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
82 /* The minimal number of prefetch blocks that a loop must consume to make
 83 emitting prefetch instructions in the body of the loop worthwhile. */
84 #define PREFETCH_BLOCKS_IN_LOOP_MIN 6
86 /* Parameterize some prefetch heuristics so they can be turned on and off
 87 easily for performance testing on new architectures. These can be
88 defined in target-dependent files. */
90 /* Prefetch is worthwhile only when loads/stores are dense. */
91 #ifndef PREFETCH_ONLY_DENSE_MEM
92 #define PREFETCH_ONLY_DENSE_MEM 1
93 #endif
 95 /* Define what we mean by "dense" loads and stores; this value divided by 256
 96 is the minimum fraction of memory references that are worth prefetching. */
97 #ifndef PREFETCH_DENSE_MEM
98 #define PREFETCH_DENSE_MEM 220
99 #endif
101 /* Do not prefetch for a loop whose iteration count is known to be low. */
102 #ifndef PREFETCH_NO_LOW_LOOPCNT
103 #define PREFETCH_NO_LOW_LOOPCNT 1
104 #endif
106 /* Define what we mean by a "low" iteration count. */
107 #ifndef PREFETCH_LOW_LOOPCNT
108 #define PREFETCH_LOW_LOOPCNT 32
109 #endif
111 /* Do not prefetch for a loop that contains a function call; such a loop is
112 probably not an internal loop. */
113 #ifndef PREFETCH_NO_CALL
114 #define PREFETCH_NO_CALL 1
115 #endif
117 /* Do not prefetch accesses with an extreme stride. */
118 #ifndef PREFETCH_NO_EXTREME_STRIDE
119 #define PREFETCH_NO_EXTREME_STRIDE 1
120 #endif
122 /* Define what we mean by an "extreme" stride. */
123 #ifndef PREFETCH_EXTREME_STRIDE
124 #define PREFETCH_EXTREME_STRIDE 4096
125 #endif
127 /* Do not handle reversed order prefetches (negative stride). */
128 #ifndef PREFETCH_NO_REVERSE_ORDER
129 #define PREFETCH_NO_REVERSE_ORDER 1
130 #endif
132 /* Prefetch even if the GIV is not always executed. */
133 #ifndef PREFETCH_NOT_ALWAYS
134 #define PREFETCH_NOT_ALWAYS 0
135 #endif
137 /* If the loop requires more prefetches than the target can process in
138 parallel then don't prefetch anything in that loop. */
139 #ifndef PREFETCH_LIMIT_TO_SIMULTANEOUS
140 #define PREFETCH_LIMIT_TO_SIMULTANEOUS 1
141 #endif
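/* Rough per-register measures based on luids: LOOP_REG_LIFETIME is the
   distance in luids between the first and last use of register REGNO,
   LOOP_REG_GLOBAL_P tests whether that range extends outside the LOOP body,
   and LOOP_REGNO_NREGS is the number of hard registers a set of REGNO
   occupies (1 for a pseudo).  */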
143 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
144 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
146 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
147 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
148 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
150 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
151 ((REGNO) < FIRST_PSEUDO_REGISTER \
152 ? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
155 /* Vector mapping INSN_UIDs to luids.
 156 The luids are like uids but always increase monotonically.
157 We use them to see whether a jump comes from outside a given loop. */
159 int *uid_luid;
161 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
162 number the insn is contained in. */
164 struct loop **uid_loop;
166 /* 1 + largest uid of any insn. */
168 int max_uid_for_loop;
170 /* 1 + luid of last insn. */
172 static int max_luid;
174 /* Number of loops detected in current function. Used as index to the
175 next few tables. */
177 static int max_loop_num;
179 /* Bound on pseudo register number before loop optimization.
180 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
181 unsigned int max_reg_before_loop;
183 /* The value to pass to the next call of reg_scan_update. */
184 static int loop_max_reg;
186 #define obstack_chunk_alloc xmalloc
187 #define obstack_chunk_free free
189 /* During the analysis of a loop, a chain of `struct movable's
190 is made to record all the movable insns found.
191 Then the entire chain can be scanned to decide which to move. */
193 struct movable
195 rtx insn; /* A movable insn */
196 rtx set_src; /* The expression this reg is set from. */
197 rtx set_dest; /* The destination of this SET. */
198 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
199 of any registers used within the LIBCALL. */
200 int consec; /* Number of consecutive following insns
201 that must be moved with this one. */
202 unsigned int regno; /* The register it sets */
203 short lifetime; /* lifetime of that register;
204 may be adjusted when matching movables
205 that load the same value are found. */
206 short savings; /* Number of insns we can move for this reg,
207 including other movables that force this
208 or match this one. */
209 unsigned int cond : 1; /* 1 if only conditionally movable */
210 unsigned int force : 1; /* 1 means MUST move this insn */
211 unsigned int global : 1; /* 1 means reg is live outside this loop */
212 /* If PARTIAL is 1, GLOBAL means something different:
213 that the reg is live outside the range from where it is set
214 to the following label. */
215 unsigned int done : 1; /* 1 inhibits further processing of this */
217 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
218 In particular, moving it does not make it
219 invariant. */
220 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
221 load SRC, rather than copying INSN. */
222 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
223 first insn of a consecutive sets group. */
224 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
225 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
226 that we should avoid changing when clearing
227 the rest of the reg. */
228 struct movable *match; /* First entry for same value */
 229 struct movable *forces; /* An insn that must be moved if this one is. */
230 struct movable *next;
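/* Stream for dumping a trace of the loop pass (the DUMPFILE argument of
   loop_optimize), or 0 if no dump was requested.  */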
234 FILE *loop_dump_stream;
236 /* Forward declarations. */
238 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
239 static void mark_loop_jump PARAMS ((rtx, struct loop *));
240 static void prescan_loop PARAMS ((struct loop *));
241 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
242 static int consec_sets_invariant_p PARAMS ((const struct loop *,
243 rtx, int, rtx));
244 static int labels_in_range_p PARAMS ((rtx, int));
245 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
246 static void note_addr_stored PARAMS ((rtx, rtx, void *));
247 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
248 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
249 static void scan_loop PARAMS ((struct loop*, int));
250 #if 0
251 static void replace_call_address PARAMS ((rtx, rtx, rtx));
252 #endif
253 static rtx skip_consec_insns PARAMS ((rtx, int));
254 static int libcall_benefit PARAMS ((rtx));
255 static void ignore_some_movables PARAMS ((struct loop_movables *));
256 static void force_movables PARAMS ((struct loop_movables *));
257 static void combine_movables PARAMS ((struct loop_movables *,
258 struct loop_regs *));
259 static int num_unmoved_movables PARAMS ((const struct loop *));
260 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
261 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
262 struct loop_regs *));
263 static void add_label_notes PARAMS ((rtx, rtx));
264 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
265 int, int));
266 static void loop_movables_add PARAMS((struct loop_movables *,
267 struct movable *));
268 static void loop_movables_free PARAMS((struct loop_movables *));
269 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
270 static void loop_bivs_find PARAMS((struct loop *));
271 static void loop_bivs_init_find PARAMS((struct loop *));
272 static void loop_bivs_check PARAMS((struct loop *));
273 static void loop_givs_find PARAMS((struct loop *));
274 static void loop_givs_check PARAMS((struct loop *));
275 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
276 int, int));
277 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
278 struct induction *, rtx));
279 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
280 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
281 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
282 rtx *));
283 static void loop_ivs_free PARAMS((struct loop *));
284 static void strength_reduce PARAMS ((struct loop *, int));
285 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
286 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
287 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
288 static void record_biv PARAMS ((struct loop *, struct induction *,
289 rtx, rtx, rtx, rtx, rtx *,
290 int, int));
291 static void check_final_value PARAMS ((const struct loop *,
292 struct induction *));
293 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
294 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
295 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
296 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
297 static void record_giv PARAMS ((const struct loop *, struct induction *,
298 rtx, rtx, rtx, rtx, rtx, rtx, int,
299 enum g_types, int, int, rtx *));
300 static void update_giv_derive PARAMS ((const struct loop *, rtx));
301 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
302 struct loop_info *));
303 static int basic_induction_var PARAMS ((const struct loop *, rtx,
304 enum machine_mode, rtx, rtx,
305 rtx *, rtx *, rtx **));
306 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
307 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
308 rtx *, rtx *, rtx *, int, int *,
309 enum machine_mode));
310 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
311 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
312 static int check_dbra_loop PARAMS ((struct loop *, int));
313 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
314 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
315 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
316 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
317 static int product_cheap_p PARAMS ((rtx, rtx));
318 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
319 int, int, int));
320 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
321 struct iv_class *, int,
322 basic_block, rtx));
323 static int last_use_this_basic_block PARAMS ((rtx, rtx));
324 static void record_initial PARAMS ((rtx, rtx, void *));
325 static void update_reg_last_use PARAMS ((rtx, rtx));
326 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
327 static void loop_regs_scan PARAMS ((const struct loop *, int));
328 static int count_insns_in_loop PARAMS ((const struct loop *));
329 static void load_mems PARAMS ((const struct loop *));
330 static int insert_loop_mem PARAMS ((rtx *, void *));
331 static int replace_loop_mem PARAMS ((rtx *, void *));
332 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
333 static int replace_loop_reg PARAMS ((rtx *, void *));
334 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
335 static void note_reg_stored PARAMS ((rtx, rtx, void *));
336 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
337 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
338 unsigned int));
339 static int replace_label PARAMS ((rtx *, void *));
340 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
341 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
342 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
343 static void loop_regs_update PARAMS ((const struct loop *, rtx));
344 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
346 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
347 rtx, rtx));
348 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
349 basic_block, rtx, rtx));
350 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
351 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
353 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
354 static void loop_delete_insns PARAMS ((rtx, rtx));
355 static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
356 void debug_ivs PARAMS ((const struct loop *));
357 void debug_iv_class PARAMS ((const struct iv_class *));
358 void debug_biv PARAMS ((const struct induction *));
359 void debug_giv PARAMS ((const struct induction *));
360 void debug_loop PARAMS ((const struct loop *));
361 void debug_loops PARAMS ((const struct loops *));
363 typedef struct rtx_pair
365 rtx r1;
366 rtx r2;
367 } rtx_pair;
369 typedef struct loop_replace_args
371 rtx match;
372 rtx replacement;
373 rtx insn;
374 } loop_replace_args;
376 /* Nonzero iff INSN is between START and END, inclusive. */
377 #define INSN_IN_RANGE_P(INSN, START, END) \
378 (INSN_UID (INSN) < max_uid_for_loop \
379 && INSN_LUID (INSN) >= INSN_LUID (START) \
380 && INSN_LUID (INSN) <= INSN_LUID (END))
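/* The UID bound above guards against insns created after the luids were
   computed; such insns have no valid uid_luid entry.  */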
382 /* Indirect_jump_in_function is computed once per function. */
383 static int indirect_jump_in_function;
384 static int indirect_jump_in_function_p PARAMS ((rtx));
386 static int compute_luids PARAMS ((rtx, rtx, int));
388 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
389 struct induction *,
390 rtx));
392 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
393 copy the value of the strength reduced giv to its original register. */
394 static int copy_cost;
396 /* Cost of using a register, to normalize the benefits of a giv. */
397 static int reg_address_cost;
399 void
400 init_loop ()
402 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
404 reg_address_cost = address_cost (reg, SImode);
406 copy_cost = COSTS_N_INSNS (1);
409 /* Compute the mapping from uids to luids.
410 LUIDs are numbers assigned to insns, like uids,
411 except that luids increase monotonically through the code.
412 Start at insn START and stop just before END. Assign LUIDs
413 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
414 static int
415 compute_luids (start, end, prev_luid)
416 rtx start, end;
417 int prev_luid;
419 int i;
420 rtx insn;
422 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
424 if (INSN_UID (insn) >= max_uid_for_loop)
425 continue;
426 /* Don't assign luids to line-number NOTEs, so that the distance in
427 luids between two insns is not affected by -g. */
428 if (GET_CODE (insn) != NOTE
429 || NOTE_LINE_NUMBER (insn) <= 0)
430 uid_luid[INSN_UID (insn)] = ++i;
431 else
432 /* Give a line number note the same luid as preceding insn. */
433 uid_luid[INSN_UID (insn)] = i;
435 return i + 1;
438 /* Entry point of this file. Perform loop optimization
439 on the current function. F is the first insn of the function
440 and DUMPFILE is a stream for output of a trace of actions taken
441 (or 0 if none should be output). */
443 void
444 loop_optimize (f, dumpfile, flags)
445 /* f is the first instruction of a chain of insns for one function */
446 rtx f;
447 FILE *dumpfile;
448 int flags;
450 rtx insn;
451 int i;
452 struct loops loops_data;
453 struct loops *loops = &loops_data;
454 struct loop_info *loops_info;
456 loop_dump_stream = dumpfile;
458 init_recog_no_volatile ();
460 max_reg_before_loop = max_reg_num ();
461 loop_max_reg = max_reg_before_loop;
463 regs_may_share = 0;
465 /* Count the number of loops. */
467 max_loop_num = 0;
468 for (insn = f; insn; insn = NEXT_INSN (insn))
470 if (GET_CODE (insn) == NOTE
471 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
472 max_loop_num++;
475 /* Don't waste time if no loops. */
476 if (max_loop_num == 0)
477 return;
479 loops->num = max_loop_num;
481 /* Get size to use for tables indexed by uids.
482 Leave some space for labels allocated by find_and_verify_loops. */
483 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
485 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
486 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
487 sizeof (struct loop *));
489 /* Allocate storage for array of loops. */
490 loops->array = (struct loop *)
491 xcalloc (loops->num, sizeof (struct loop));
493 /* Find and process each loop.
494 First, find them, and record them in order of their beginnings. */
495 find_and_verify_loops (f, loops);
497 /* Allocate and initialize auxiliary loop information. */
498 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
499 for (i = 0; i < loops->num; i++)
500 loops->array[i].aux = loops_info + i;
502 /* Now find all register lifetimes. This must be done after
503 find_and_verify_loops, because it might reorder the insns in the
504 function. */
505 reg_scan (f, max_reg_before_loop, 1);
507 /* This must occur after reg_scan so that registers created by gcse
508 will have entries in the register tables.
510 We could have added a call to reg_scan after gcse_main in toplev.c,
511 but moving this call to init_alias_analysis is more efficient. */
512 init_alias_analysis ();
514 /* See if we went too far. Note that get_max_uid already returns
 515 one more than the maximum uid of all insns. */
516 if (get_max_uid () > max_uid_for_loop)
517 abort ();
518 /* Now reset it to the actual size we need. See above. */
519 max_uid_for_loop = get_max_uid ();
521 /* find_and_verify_loops has already called compute_luids, but it
522 might have rearranged code afterwards, so we need to recompute
523 the luids now. */
524 max_luid = compute_luids (f, NULL_RTX, 0);
526 /* Don't leave gaps in uid_luid for insns that have been
527 deleted. It is possible that the first or last insn
528 using some register has been deleted by cross-jumping.
529 Make sure that uid_luid for that former insn's uid
530 points to the general area where that insn used to be. */
531 for (i = 0; i < max_uid_for_loop; i++)
533 uid_luid[0] = uid_luid[i];
534 if (uid_luid[0] != 0)
535 break;
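/* The loop above copies the first nonzero luid into slot 0 so that the
   fill loop below starts from a sensible value.  */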
537 for (i = 0; i < max_uid_for_loop; i++)
538 if (uid_luid[i] == 0)
539 uid_luid[i] = uid_luid[i - 1];
 541 /* Determine if the function has an indirect jump. On some systems
542 this prevents low overhead loop instructions from being used. */
543 indirect_jump_in_function = indirect_jump_in_function_p (f);
545 /* Now scan the loops, last ones first, since this means inner ones are done
546 before outer ones. */
547 for (i = max_loop_num - 1; i >= 0; i--)
549 struct loop *loop = &loops->array[i];
551 if (! loop->invalid && loop->end)
552 scan_loop (loop, flags);
555 /* If there were lexical blocks inside the loop, they have been
556 replicated. We will now have more than one NOTE_INSN_BLOCK_BEG
557 and NOTE_INSN_BLOCK_END for each such block. We must duplicate
558 the BLOCKs as well. */
559 if (write_symbols != NO_DEBUG)
560 reorder_blocks ();
562 end_alias_analysis ();
564 /* Clean up. */
565 free (uid_luid);
566 free (uid_loop);
567 free (loops_info);
568 free (loops->array);
571 /* Returns the next insn, in execution order, after INSN. START and
572 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
573 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
574 insn-stream; it is used with loops that are entered near the
575 bottom. */
577 static rtx
578 next_insn_in_loop (loop, insn)
579 const struct loop *loop;
580 rtx insn;
582 insn = NEXT_INSN (insn);
584 if (insn == loop->end)
586 if (loop->top)
587 /* Go to the top of the loop, and continue there. */
588 insn = loop->top;
589 else
590 /* We're done. */
591 insn = NULL_RTX;
594 if (insn == loop->scan_start)
595 /* We're done. */
596 insn = NULL_RTX;
598 return insn;
601 /* Optimize one loop described by LOOP. */
603 /* ??? Could also move memory writes out of loops if the destination address
604 is invariant, the source is invariant, the memory write is not volatile,
605 and if we can prove that no read inside the loop can read this address
606 before the write occurs. If there is a read of this address after the
607 write, then we can also mark the memory read as invariant. */
609 static void
610 scan_loop (loop, flags)
611 struct loop *loop;
612 int flags;
614 struct loop_info *loop_info = LOOP_INFO (loop);
615 struct loop_regs *regs = LOOP_REGS (loop);
616 int i;
617 rtx loop_start = loop->start;
618 rtx loop_end = loop->end;
619 rtx p;
620 /* 1 if we are scanning insns that could be executed zero times. */
621 int maybe_never = 0;
622 /* 1 if we are scanning insns that might never be executed
623 due to a subroutine call which might exit before they are reached. */
624 int call_passed = 0;
625 /* Jump insn that enters the loop, or 0 if control drops in. */
626 rtx loop_entry_jump = 0;
627 /* Number of insns in the loop. */
628 int insn_count;
629 int tem;
630 rtx temp, update_start, update_end;
631 /* The SET from an insn, if it is the only SET in the insn. */
632 rtx set, set1;
633 /* Chain describing insns movable in current loop. */
634 struct loop_movables *movables = LOOP_MOVABLES (loop);
635 /* Ratio of extra register life span we can justify
636 for saving an instruction. More if loop doesn't call subroutines
637 since in that case saving an insn makes more difference
638 and more registers are available. */
639 int threshold;
640 /* Nonzero if we are scanning instructions in a sub-loop. */
641 int loop_depth = 0;
643 loop->top = 0;
645 movables->head = 0;
646 movables->last = 0;
648 /* Determine whether this loop starts with a jump down to a test at
649 the end. This will occur for a small number of loops with a test
650 that is too complex to duplicate in front of the loop.
652 We search for the first insn or label in the loop, skipping NOTEs.
653 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
654 (because we might have a loop executed only once that contains a
655 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
656 (in case we have a degenerate loop).
658 Note that if we mistakenly think that a loop is entered at the top
659 when, in fact, it is entered at the exit test, the only effect will be
660 slightly poorer optimization. Making the opposite error can generate
661 incorrect code. Since very few loops now start with a jump to the
662 exit test, the code here to detect that case is very conservative. */
664 for (p = NEXT_INSN (loop_start);
665 p != loop_end
666 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
667 && (GET_CODE (p) != NOTE
668 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
669 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
670 p = NEXT_INSN (p))
673 loop->scan_start = p;
675 /* If loop end is the end of the current function, then emit a
676 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
677 note insn. This is the position we use when sinking insns out of
678 the loop. */
679 if (NEXT_INSN (loop->end) != 0)
680 loop->sink = NEXT_INSN (loop->end);
681 else
682 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
684 /* Set up variables describing this loop. */
685 prescan_loop (loop);
686 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
688 /* If loop has a jump before the first label,
689 the true entry is the target of that jump.
690 Start scan from there.
691 But record in LOOP->TOP the place where the end-test jumps
692 back to so we can scan that after the end of the loop. */
693 if (GET_CODE (p) == JUMP_INSN)
695 loop_entry_jump = p;
 697 /* Loop entry must be an unconditional jump (and not a RETURN). */
698 if (any_uncondjump_p (p)
699 && JUMP_LABEL (p) != 0
700 /* Check to see whether the jump actually
701 jumps out of the loop (meaning it's no loop).
702 This case can happen for things like
703 do {..} while (0). If this label was generated previously
704 by loop, we can't tell anything about it and have to reject
705 the loop. */
706 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
708 loop->top = next_label (loop->scan_start);
709 loop->scan_start = JUMP_LABEL (p);
713 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
714 as required by loop_reg_used_before_p. So skip such loops. (This
715 test may never be true, but it's best to play it safe.)
717 Also, skip loops where we do not start scanning at a label. This
718 test also rejects loops starting with a JUMP_INSN that failed the
719 test above. */
721 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
722 || GET_CODE (loop->scan_start) != CODE_LABEL)
724 if (loop_dump_stream)
725 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
726 INSN_UID (loop_start), INSN_UID (loop_end));
727 return;
730 /* Allocate extra space for REGs that might be created by load_mems.
731 We allocate a little extra slop as well, in the hopes that we
732 won't have to reallocate the regs array. */
733 loop_regs_scan (loop, loop_info->mems_idx + 16);
734 insn_count = count_insns_in_loop (loop);
736 if (loop_dump_stream)
738 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
739 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
740 if (loop->cont)
741 fprintf (loop_dump_stream, "Continue at insn %d.\n",
742 INSN_UID (loop->cont));
745 /* Scan through the loop finding insns that are safe to move.
746 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
747 this reg will be considered invariant for subsequent insns.
748 We consider whether subsequent insns use the reg
749 in deciding whether it is worth actually moving.
751 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
752 and therefore it is possible that the insns we are scanning
753 would never be executed. At such times, we must make sure
754 that it is safe to execute the insn once instead of zero times.
755 When MAYBE_NEVER is 0, all insns will be executed at least once
756 so that is not a problem. */
758 for (p = next_insn_in_loop (loop, loop->scan_start);
759 p != NULL_RTX;
760 p = next_insn_in_loop (loop, p))
762 if (GET_CODE (p) == INSN
763 && (set = single_set (p))
764 && GET_CODE (SET_DEST (set)) == REG
765 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
766 && SET_DEST (set) != pic_offset_table_rtx
767 #endif
768 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
770 int tem1 = 0;
771 int tem2 = 0;
772 int move_insn = 0;
773 rtx src = SET_SRC (set);
774 rtx dependencies = 0;
776 /* Figure out what to use as a source of this insn. If a REG_EQUIV
777 note is given or if a REG_EQUAL note with a constant operand is
778 specified, use it as the source and mark that we should move
 779 this insn by calling emit_move_insn rather than duplicating the
780 insn.
782 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
783 is present. */
784 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
785 if (temp)
786 src = XEXP (temp, 0), move_insn = 1;
787 else
789 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
790 if (temp && CONSTANT_P (XEXP (temp, 0)))
791 src = XEXP (temp, 0), move_insn = 1;
792 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
794 src = XEXP (temp, 0);
795 /* A libcall block can use regs that don't appear in
796 the equivalent expression. To move the libcall,
797 we must move those regs too. */
798 dependencies = libcall_other_reg (p, src);
 802 /* For parallels, add any possible uses to the dependencies, as we can't move
803 the insn without resolving them first. */
804 if (GET_CODE (PATTERN (p)) == PARALLEL)
806 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
808 rtx x = XVECEXP (PATTERN (p), 0, i);
809 if (GET_CODE (x) == USE)
810 dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies);
814 /* Don't try to optimize a register that was made
815 by loop-optimization for an inner loop.
816 We don't know its life-span, so we can't compute the benefit. */
817 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
819 else if (/* The register is used in basic blocks other
820 than the one where it is set (meaning that
821 something after this point in the loop might
822 depend on its value before the set). */
823 ! reg_in_basic_block_p (p, SET_DEST (set))
824 /* And the set is not guaranteed to be executed once
825 the loop starts, or the value before the set is
826 needed before the set occurs...
828 ??? Note we have quadratic behaviour here, mitigated
829 by the fact that the previous test will often fail for
830 large loops. Rather than re-scanning the entire loop
831 each time for register usage, we should build tables
832 of the register usage and use them here instead. */
833 && (maybe_never
834 || loop_reg_used_before_p (loop, set, p)))
835 /* It is unsafe to move the set.
837 This code used to consider it OK to move a set of a variable
838 which was not created by the user and not used in an exit test.
839 That behavior is incorrect and was removed. */
841 else if ((tem = loop_invariant_p (loop, src))
842 && (dependencies == 0
843 || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
844 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
845 || (tem1
846 = consec_sets_invariant_p
847 (loop, SET_DEST (set),
848 regs->array[REGNO (SET_DEST (set))].set_in_loop,
849 p)))
850 /* If the insn can cause a trap (such as divide by zero),
851 can't move it unless it's guaranteed to be executed
 852 once the loop is entered. Even a function call might
853 prevent the trap insn from being reached
854 (since it might exit!) */
855 && ! ((maybe_never || call_passed)
856 && may_trap_p (src)))
858 struct movable *m;
859 int regno = REGNO (SET_DEST (set));
 861 /* A potential lossage is a case where two insns
862 can be combined as long as they are both in the loop, but
863 we move one of them outside the loop. For large loops,
864 this can lose. The most common case of this is the address
865 of a function being called.
867 Therefore, if this register is marked as being used exactly
868 once if we are in a loop with calls (a "large loop"), see if
869 we can replace the usage of this register with the source
870 of this SET. If we can, delete this insn.
872 Don't do this if P has a REG_RETVAL note or if we have
873 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
875 if (loop_info->has_call
876 && regs->array[regno].single_usage != 0
877 && regs->array[regno].single_usage != const0_rtx
878 && REGNO_FIRST_UID (regno) == INSN_UID (p)
879 && (REGNO_LAST_UID (regno)
880 == INSN_UID (regs->array[regno].single_usage))
881 && regs->array[regno].set_in_loop == 1
882 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
883 && ! side_effects_p (SET_SRC (set))
884 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
885 && (! SMALL_REGISTER_CLASSES
886 || (! (GET_CODE (SET_SRC (set)) == REG
887 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
888 /* This test is not redundant; SET_SRC (set) might be
889 a call-clobbered register and the life of REGNO
890 might span a call. */
891 && ! modified_between_p (SET_SRC (set), p,
892 regs->array[regno].single_usage)
893 && no_labels_between_p (p, regs->array[regno].single_usage)
894 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
895 regs->array[regno].single_usage))
897 /* Replace any usage in a REG_EQUAL note. Must copy the
898 new source, so that we don't get rtx sharing between the
899 SET_SOURCE and REG_NOTES of insn p. */
900 REG_NOTES (regs->array[regno].single_usage)
901 = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
902 SET_DEST (set), copy_rtx (SET_SRC (set)));
904 delete_insn (p);
905 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
906 regs->array[regno+i].set_in_loop = 0;
907 continue;
910 m = (struct movable *) xmalloc (sizeof (struct movable));
911 m->next = 0;
912 m->insn = p;
913 m->set_src = src;
914 m->dependencies = dependencies;
915 m->set_dest = SET_DEST (set);
916 m->force = 0;
917 m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
918 m->done = 0;
919 m->forces = 0;
920 m->partial = 0;
921 m->move_insn = move_insn;
922 m->move_insn_first = 0;
923 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
924 m->savemode = VOIDmode;
925 m->regno = regno;
926 /* Set M->cond if either loop_invariant_p
927 or consec_sets_invariant_p returned 2
928 (only conditionally invariant). */
929 m->cond = ((tem | tem1 | tem2) > 1);
930 m->global = LOOP_REG_GLOBAL_P (loop, regno);
931 m->match = 0;
932 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
933 m->savings = regs->array[regno].n_times_set;
934 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
935 m->savings += libcall_benefit (p);
936 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
937 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
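/* A value of -2 marks a reg whose value will be reloaded with emit_move_insn
   from its (constant) SET_SRC, which rtx_equal_for_loop_p relies on; -1 marks
   a reg that will be moved by copying the insn itself.  */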
938 /* Add M to the end of the chain MOVABLES. */
939 loop_movables_add (movables, m);
941 if (m->consec > 0)
943 /* It is possible for the first instruction to have a
944 REG_EQUAL note but a non-invariant SET_SRC, so we must
945 remember the status of the first instruction in case
946 the last instruction doesn't have a REG_EQUAL note. */
947 m->move_insn_first = m->move_insn;
949 /* Skip this insn, not checking REG_LIBCALL notes. */
950 p = next_nonnote_insn (p);
951 /* Skip the consecutive insns, if there are any. */
952 p = skip_consec_insns (p, m->consec);
953 /* Back up to the last insn of the consecutive group. */
954 p = prev_nonnote_insn (p);
956 /* We must now reset m->move_insn, m->is_equiv, and possibly
957 m->set_src to correspond to the effects of all the
958 insns. */
959 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
960 if (temp)
961 m->set_src = XEXP (temp, 0), m->move_insn = 1;
962 else
964 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
965 if (temp && CONSTANT_P (XEXP (temp, 0)))
966 m->set_src = XEXP (temp, 0), m->move_insn = 1;
967 else
968 m->move_insn = 0;
971 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
974 /* If this register is always set within a STRICT_LOW_PART
975 or set to zero, then its high bytes are constant.
976 So clear them outside the loop and within the loop
977 just load the low bytes.
978 We must check that the machine has an instruction to do so.
979 Also, if the value loaded into the register
980 depends on the same register, this cannot be done. */
981 else if (SET_SRC (set) == const0_rtx
982 && GET_CODE (NEXT_INSN (p)) == INSN
983 && (set1 = single_set (NEXT_INSN (p)))
984 && GET_CODE (set1) == SET
985 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
986 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
987 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
988 == SET_DEST (set))
989 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
991 int regno = REGNO (SET_DEST (set));
992 if (regs->array[regno].set_in_loop == 2)
994 struct movable *m;
995 m = (struct movable *) xmalloc (sizeof (struct movable));
996 m->next = 0;
997 m->insn = p;
998 m->set_dest = SET_DEST (set);
999 m->dependencies = 0;
1000 m->force = 0;
1001 m->consec = 0;
1002 m->done = 0;
1003 m->forces = 0;
1004 m->move_insn = 0;
1005 m->move_insn_first = 0;
1006 m->partial = 1;
1007 /* If the insn may not be executed on some cycles,
1008 we can't clear the whole reg; clear just high part.
1009 Not even if the reg is used only within this loop.
1010 Consider this:
1011 while (1)
1012 while (s != t) {
1013 if (foo ()) x = *s;
1014 use (x);
1016 Clearing x before the inner loop could clobber a value
1017 being saved from the last time around the outer loop.
1018 However, if the reg is not used outside this loop
1019 and all uses of the register are in the same
1020 basic block as the store, there is no problem.
1022 If this insn was made by loop, we don't know its
1023 INSN_LUID and hence must make a conservative
1024 assumption. */
1025 m->global = (INSN_UID (p) >= max_uid_for_loop
1026 || LOOP_REG_GLOBAL_P (loop, regno)
1027 || (labels_in_range_p
1028 (p, REGNO_FIRST_LUID (regno))));
1029 if (maybe_never && m->global)
1030 m->savemode = GET_MODE (SET_SRC (set1));
1031 else
1032 m->savemode = VOIDmode;
1033 m->regno = regno;
1034 m->cond = 0;
1035 m->match = 0;
1036 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1037 m->savings = 1;
1038 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1039 regs->array[regno+i].set_in_loop = -1;
1040 /* Add M to the end of the chain MOVABLES. */
1041 loop_movables_add (movables, m);
1045 /* Past a call insn, we get to insns which might not be executed
1046 because the call might exit. This matters for insns that trap.
1047 Constant and pure call insns always return, so they don't count. */
1048 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1049 call_passed = 1;
1050 /* Past a label or a jump, we get to insns for which we
1051 can't count on whether or how many times they will be
1052 executed during each iteration. Therefore, we can
1053 only move out sets of trivial variables
1054 (those not used after the loop). */
1055 /* Similar code appears twice in strength_reduce. */
1056 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1057 /* If we enter the loop in the middle, and scan around to the
1058 beginning, don't set maybe_never for that. This must be an
1059 unconditional jump, otherwise the code at the top of the
1060 loop might never be executed. Unconditional jumps are
1061 followed by a barrier then the loop_end. */
1062 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1063 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1064 && any_uncondjump_p (p)))
1065 maybe_never = 1;
1066 else if (GET_CODE (p) == NOTE)
1068 /* At the virtual top of a converted loop, insns are again known to
1069 be executed: logically, the loop begins here even though the exit
1070 code has been duplicated. */
1071 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1072 maybe_never = call_passed = 0;
1073 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1074 loop_depth++;
1075 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1076 loop_depth--;
1080 /* If one movable subsumes another, ignore that other. */
1082 ignore_some_movables (movables);
1084 /* For each movable insn, see if the reg that it loads
1085 leads when it dies right into another conditionally movable insn.
1086 If so, record that the second insn "forces" the first one,
1087 since the second can be moved only if the first is. */
1089 force_movables (movables);
1091 /* See if there are multiple movable insns that load the same value.
1092 If there are, make all but the first point at the first one
1093 through the `match' field, and add the priorities of them
1094 all together as the priority of the first. */
1096 combine_movables (movables, regs);
1098 /* Now consider each movable insn to decide whether it is worth moving.
1099 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
 1101 Generally this increases code size, so do not move movables when
1102 optimizing for code size. */
1104 if (! optimize_size)
1105 move_movables (loop, movables, threshold, insn_count);
1107 /* Now candidates that still are negative are those not moved.
1108 Change regs->array[I].set_in_loop to indicate that those are not actually
1109 invariant. */
1110 for (i = 0; i < regs->num; i++)
1111 if (regs->array[i].set_in_loop < 0)
1112 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1114 /* Now that we've moved some things out of the loop, we might be able to
1115 hoist even more memory references. */
1116 load_mems (loop);
1118 /* Recalculate regs->array if load_mems has created new registers. */
1119 if (max_reg_num () > regs->num)
1120 loop_regs_scan (loop, 0);
1122 for (update_start = loop_start;
1123 PREV_INSN (update_start)
1124 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1125 update_start = PREV_INSN (update_start))
1127 update_end = NEXT_INSN (loop_end);
1129 reg_scan_update (update_start, update_end, loop_max_reg);
1130 loop_max_reg = max_reg_num ();
1132 if (flag_strength_reduce)
1134 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1135 /* Ensure our label doesn't go away. */
1136 LABEL_NUSES (update_end)++;
1138 strength_reduce (loop, flags);
1140 reg_scan_update (update_start, update_end, loop_max_reg);
1141 loop_max_reg = max_reg_num ();
1143 if (update_end && GET_CODE (update_end) == CODE_LABEL
1144 && --LABEL_NUSES (update_end) == 0)
1145 delete_related_insns (update_end);
 1149 /* The movable information was needed above for strength reduction;
 it can be freed now. */
1150 loop_movables_free (movables);
1152 free (regs->array);
1153 regs->array = 0;
1154 regs->num = 0;
1157 /* Add elements to *OUTPUT to record all the pseudo-regs
1158 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1160 void
1161 record_excess_regs (in_this, not_in_this, output)
1162 rtx in_this, not_in_this;
1163 rtx *output;
1165 enum rtx_code code;
1166 const char *fmt;
1167 int i;
1169 code = GET_CODE (in_this);
1171 switch (code)
1173 case PC:
1174 case CC0:
1175 case CONST_INT:
1176 case CONST_DOUBLE:
1177 case CONST:
1178 case SYMBOL_REF:
1179 case LABEL_REF:
1180 return;
1182 case REG:
1183 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1184 && ! reg_mentioned_p (in_this, not_in_this))
1185 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1186 return;
1188 default:
1189 break;
1192 fmt = GET_RTX_FORMAT (code);
1193 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1195 int j;
1197 switch (fmt[i])
1199 case 'E':
1200 for (j = 0; j < XVECLEN (in_this, i); j++)
1201 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1202 break;
1204 case 'e':
1205 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1206 break;
1211 /* Check what regs are referred to in the libcall block ending with INSN,
1212 aside from those mentioned in the equivalent value.
1213 If there are none, return 0.
1214 If there are one or more, return an EXPR_LIST containing all of them. */
 1216 rtx
 1217 libcall_other_reg (insn, equiv)
1218 rtx insn, equiv;
1220 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1221 rtx p = XEXP (note, 0);
1222 rtx output = 0;
1224 /* First, find all the regs used in the libcall block
1225 that are not mentioned as inputs to the result. */
1227 while (p != insn)
1229 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1230 || GET_CODE (p) == CALL_INSN)
1231 record_excess_regs (PATTERN (p), equiv, &output);
1232 p = NEXT_INSN (p);
1235 return output;
1238 /* Return 1 if all uses of REG
1239 are between INSN and the end of the basic block. */
1241 static int
1242 reg_in_basic_block_p (insn, reg)
1243 rtx insn, reg;
1245 int regno = REGNO (reg);
1246 rtx p;
1248 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1249 return 0;
1251 /* Search this basic block for the already recorded last use of the reg. */
1252 for (p = insn; p; p = NEXT_INSN (p))
1254 switch (GET_CODE (p))
1256 case NOTE:
1257 break;
1259 case INSN:
1260 case CALL_INSN:
1261 /* Ordinary insn: if this is the last use, we win. */
1262 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1263 return 1;
1264 break;
1266 case JUMP_INSN:
1267 /* Jump insn: if this is the last use, we win. */
1268 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1269 return 1;
1270 /* Otherwise, it's the end of the basic block, so we lose. */
1271 return 0;
1273 case CODE_LABEL:
1274 case BARRIER:
1275 /* It's the end of the basic block, so we lose. */
1276 return 0;
1278 default:
1279 break;
1283 /* The "last use" that was recorded can't be found after the first
1284 use. This can happen when the last use was deleted while
 1285 processing an inner loop; this inner loop was then completely
1286 unrolled, and the outer loop is always exited after the inner loop,
1287 so that everything after the first use becomes a single basic block. */
1288 return 1;
1291 /* Compute the benefit of eliminating the insns in the block whose
1292 last insn is LAST. This may be a group of insns used to compute a
1293 value directly or can contain a library call. */
1295 static int
1296 libcall_benefit (last)
1297 rtx last;
1299 rtx insn;
1300 int benefit = 0;
1302 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1303 insn != last; insn = NEXT_INSN (insn))
1305 if (GET_CODE (insn) == CALL_INSN)
1306 benefit += 10; /* Assume at least this many insns in a library
1307 routine. */
1308 else if (GET_CODE (insn) == INSN
1309 && GET_CODE (PATTERN (insn)) != USE
1310 && GET_CODE (PATTERN (insn)) != CLOBBER)
1311 benefit++;
1314 return benefit;
1317 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1319 static rtx
1320 skip_consec_insns (insn, count)
1321 rtx insn;
1322 int count;
1324 for (; count > 0; count--)
1326 rtx temp;
1328 /* If first insn of libcall sequence, skip to end. */
1329 /* Do this at start of loop, since INSN is guaranteed to
1330 be an insn here. */
1331 if (GET_CODE (insn) != NOTE
1332 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1333 insn = XEXP (temp, 0);
1336 insn = NEXT_INSN (insn);
1337 while (GET_CODE (insn) == NOTE);
1340 return insn;
1343 /* Ignore any movable whose insn falls within a libcall
1344 which is part of another movable.
1345 We make use of the fact that the movable for the libcall value
1346 was made later and so appears later on the chain. */
1348 static void
1349 ignore_some_movables (movables)
1350 struct loop_movables *movables;
1352 struct movable *m, *m1;
1354 for (m = movables->head; m; m = m->next)
1356 /* Is this a movable for the value of a libcall? */
1357 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1358 if (note)
1360 rtx insn;
1361 /* Check for earlier movables inside that range,
1362 and mark them invalid. We cannot use LUIDs here because
1363 insns created by loop.c for prior loops don't have LUIDs.
1364 Rather than reject all such insns from movables, we just
1365 explicitly check each insn in the libcall (since invariant
1366 libcalls aren't that common). */
1367 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1368 for (m1 = movables->head; m1 != m; m1 = m1->next)
1369 if (m1->insn == insn)
1370 m1->done = 1;
1375 /* For each movable insn, see if the reg that it loads
1376 leads when it dies right into another conditionally movable insn.
1377 If so, record that the second insn "forces" the first one,
1378 since the second can be moved only if the first is. */
1380 static void
1381 force_movables (movables)
1382 struct loop_movables *movables;
1384 struct movable *m, *m1;
1386 for (m1 = movables->head; m1; m1 = m1->next)
1387 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1388 if (!m1->partial && !m1->done)
1390 int regno = m1->regno;
1391 for (m = m1->next; m; m = m->next)
1392 /* ??? Could this be a bug? What if CSE caused the
1393 register of M1 to be used after this insn?
1394 Since CSE does not update regno_last_uid,
1395 this insn M->insn might not be where it dies.
1396 But very likely this doesn't matter; what matters is
1397 that M's reg is computed from M1's reg. */
1398 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1399 && !m->done)
1400 break;
1401 if (m != 0 && m->set_src == m1->set_dest
1402 /* If m->consec, m->set_src isn't valid. */
1403 && m->consec == 0)
1404 m = 0;
 1406 /* Increase the priority of moving the first insn
1407 since it permits the second to be moved as well. */
1408 if (m != 0)
1410 m->forces = m1;
1411 m1->lifetime += m->lifetime;
1412 m1->savings += m->savings;
1417 /* Find invariant expressions that are equal and can be combined into
1418 one register. */
1420 static void
1421 combine_movables (movables, regs)
1422 struct loop_movables *movables;
1423 struct loop_regs *regs;
1425 struct movable *m;
1426 char *matched_regs = (char *) xmalloc (regs->num);
1427 enum machine_mode mode;
1429 /* Regs that are set more than once are not allowed to match
1430 or be matched. I'm no longer sure why not. */
1431 /* Perhaps testing m->consec_sets would be more appropriate here? */
1433 for (m = movables->head; m; m = m->next)
1434 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1435 && !m->partial)
1437 struct movable *m1;
1438 int regno = m->regno;
1440 memset (matched_regs, 0, regs->num);
1441 matched_regs[regno] = 1;
1443 /* We want later insns to match the first one. Don't make the first
1444 one match any later ones. So start this loop at m->next. */
1445 for (m1 = m->next; m1; m1 = m1->next)
1446 /* ??? HACK! move_movables does not verify that the replacement
 1447 is valid, which can have disastrous effects with hard regs
1448 and match_dup. Turn combination off for now. */
1449 if (0 && m != m1 && m1->match == 0
1450 && regs->array[m1->regno].n_times_set == 1
1451 /* A reg used outside the loop mustn't be eliminated. */
1452 && !m1->global
1453 /* A reg used for zero-extending mustn't be eliminated. */
1454 && !m1->partial
1455 && (matched_regs[m1->regno]
1458 /* Can combine regs with different modes loaded from the
1459 same constant only if the modes are the same or
1460 if both are integer modes with M wider or the same
1461 width as M1. The check for integer is redundant, but
1462 safe, since the only case of differing destination
1463 modes with equal sources is when both sources are
1464 VOIDmode, i.e., CONST_INT. */
1465 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1466 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1467 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1468 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1469 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1470 /* See if the source of M1 says it matches M. */
1471 && ((GET_CODE (m1->set_src) == REG
1472 && matched_regs[REGNO (m1->set_src)])
1473 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1474 movables, regs))))
1475 && ((m->dependencies == m1->dependencies)
1476 || rtx_equal_p (m->dependencies, m1->dependencies)))
1478 m->lifetime += m1->lifetime;
1479 m->savings += m1->savings;
1480 m1->done = 1;
1481 m1->match = m;
1482 matched_regs[m1->regno] = 1;
1486 /* Now combine the regs used for zero-extension.
1487 This can be done for those not marked `global'
1488 provided their lives don't overlap. */
1490 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1491 mode = GET_MODE_WIDER_MODE (mode))
1493 struct movable *m0 = 0;
1495 /* Combine all the registers for extension from mode MODE.
1496 Don't combine any that are used outside this loop. */
1497 for (m = movables->head; m; m = m->next)
1498 if (m->partial && ! m->global
1499 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1501 struct movable *m1;
1503 int first = REGNO_FIRST_LUID (m->regno);
1504 int last = REGNO_LAST_LUID (m->regno);
1506 if (m0 == 0)
1508 /* First one: don't check for overlap, just record it. */
1509 m0 = m;
1510 continue;
1513 /* Make sure they extend to the same mode.
1514 (Almost always true.) */
1515 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1516 continue;
1518 /* We already have one: check for overlap with those
1519 already combined together. */
1520 for (m1 = movables->head; m1 != m; m1 = m1->next)
1521 if (m1 == m0 || (m1->partial && m1->match == m0))
1522 if (! (REGNO_FIRST_LUID (m1->regno) > last
1523 || REGNO_LAST_LUID (m1->regno) < first))
1524 goto overlap;
1526 /* No overlap: we can combine this with the others. */
1527 m0->lifetime += m->lifetime;
1528 m0->savings += m->savings;
1529 m->done = 1;
1530 m->match = m0;
1532 overlap:
1537 /* Clean up. */
1538 free (matched_regs);
1541 /* Returns the number of movable instructions in LOOP that were not
1542 moved outside the loop. */
1544 static int
1545 num_unmoved_movables (loop)
1546 const struct loop *loop;
1548 int num = 0;
1549 struct movable *m;
1551 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1552 if (!m->done)
1553 ++num;
1555 return num;
1559 /* Return 1 if regs X and Y will become the same if moved. */
1561 static int
1562 regs_match_p (x, y, movables)
1563 rtx x, y;
1564 struct loop_movables *movables;
1566 unsigned int xn = REGNO (x);
1567 unsigned int yn = REGNO (y);
1568 struct movable *mx, *my;
1570 for (mx = movables->head; mx; mx = mx->next)
1571 if (mx->regno == xn)
1572 break;
1574 for (my = movables->head; my; my = my->next)
1575 if (my->regno == yn)
1576 break;
1578 return (mx && my
1579 && ((mx->match == my->match && mx->match != 0)
1580 || mx->match == my
1581 || mx == my->match));
1584 /* Return 1 if X and Y are identical-looking rtx's.
1585 This is the Lisp function EQUAL for rtx arguments.
1587 If two registers are matching movables or a movable register and an
1588 equivalent constant, consider them equal. */
1590 static int
1591 rtx_equal_for_loop_p (x, y, movables, regs)
1592 rtx x, y;
1593 struct loop_movables *movables;
1594 struct loop_regs *regs;
1596 int i;
1597 int j;
1598 struct movable *m;
1599 enum rtx_code code;
1600 const char *fmt;
1602 if (x == y)
1603 return 1;
1604 if (x == 0 || y == 0)
1605 return 0;
1607 code = GET_CODE (x);
1609 /* If we have a register and a constant, they may sometimes be
1610 equal. */
1611 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1612 && CONSTANT_P (y))
1614 for (m = movables->head; m; m = m->next)
1615 if (m->move_insn && m->regno == REGNO (x)
1616 && rtx_equal_p (m->set_src, y))
1617 return 1;
1619 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1620 && CONSTANT_P (x))
1622 for (m = movables->head; m; m = m->next)
1623 if (m->move_insn && m->regno == REGNO (y)
1624 && rtx_equal_p (m->set_src, x))
1625 return 1;
1628 /* Otherwise, rtx's of different codes cannot be equal. */
1629 if (code != GET_CODE (y))
1630 return 0;
1632 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1633 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1635 if (GET_MODE (x) != GET_MODE (y))
1636 return 0;
1638 /* These three types of rtx's can be compared nonrecursively. */
1639 if (code == REG)
1640 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1642 if (code == LABEL_REF)
1643 return XEXP (x, 0) == XEXP (y, 0);
1644 if (code == SYMBOL_REF)
1645 return XSTR (x, 0) == XSTR (y, 0);
1647 /* Compare the elements. If any pair of corresponding elements
1648 fails to match, return 0 for the whole thing. */
1650 fmt = GET_RTX_FORMAT (code);
1651 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1653 switch (fmt[i])
1655 case 'w':
1656 if (XWINT (x, i) != XWINT (y, i))
1657 return 0;
1658 break;
1660 case 'i':
1661 if (XINT (x, i) != XINT (y, i))
1662 return 0;
1663 break;
1665 case 'E':
1666 /* Two vectors must have the same length. */
1667 if (XVECLEN (x, i) != XVECLEN (y, i))
1668 return 0;
1670 /* And the corresponding elements must match. */
1671 for (j = 0; j < XVECLEN (x, i); j++)
1672 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1673 movables, regs) == 0)
1674 return 0;
1675 break;
1677 case 'e':
1678 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1679 == 0)
1680 return 0;
1681 break;
1683 case 's':
1684 if (strcmp (XSTR (x, i), XSTR (y, i)))
1685 return 0;
1686 break;
1688 case 'u':
1689 /* These are just backpointers, so they don't matter. */
1690 break;
1692 case '0':
1693 break;
1695 /* It is believed that rtx's at this level will never
1696 contain anything but integers and other rtx's,
1697 except for within LABEL_REFs and SYMBOL_REFs. */
1698 default:
1699 abort ();
1702 return 1;
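/* A purely illustrative example (register numbers invented): if the
   movables loading (reg:SI 100) and (reg:SI 101) match each other, then
   (plus:SI (reg:SI 100) (const_int 4)) and
   (plus:SI (reg:SI 101) (const_int 4)) compare equal here even though
   rtx_equal_p would reject them, while (reg:SI 100) and (reg:HI 100)
   still compare unequal because their modes differ. */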
1705 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1706 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1707 references is incremented once for each added note. */
1709 static void
1710 add_label_notes (x, insns)
1711 rtx x;
1712 rtx insns;
1714 enum rtx_code code = GET_CODE (x);
1715 int i, j;
1716 const char *fmt;
1717 rtx insn;
1719 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1721 /* This code used to ignore labels that referred to dispatch tables to
1722 avoid flow generating (slightly) worse code.
1724 We no longer ignore such label references (see LABEL_REF handling in
1725 mark_jump_label for additional information). */
1726 for (insn = insns; insn; insn = NEXT_INSN (insn))
1727 if (reg_mentioned_p (XEXP (x, 0), insn))
1729 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1730 REG_NOTES (insn));
1731 if (LABEL_P (XEXP (x, 0)))
1732 LABEL_NUSES (XEXP (x, 0))++;
1736 fmt = GET_RTX_FORMAT (code);
1737 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1739 if (fmt[i] == 'e')
1740 add_label_notes (XEXP (x, i), insns);
1741 else if (fmt[i] == 'E')
1742 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1743 add_label_notes (XVECEXP (x, i, j), insns);
1747 /* Scan MOVABLES, and move the insns that deserve to be moved.
1748 If two matching movables are combined, replace one reg with the
1749 other throughout. */
1751 static void
1752 move_movables (loop, movables, threshold, insn_count)
1753 struct loop *loop;
1754 struct loop_movables *movables;
1755 int threshold;
1756 int insn_count;
1758 struct loop_regs *regs = LOOP_REGS (loop);
1759 int nregs = regs->num;
1760 rtx new_start = 0;
1761 struct movable *m;
1762 rtx p;
1763 rtx loop_start = loop->start;
1764 rtx loop_end = loop->end;
1765 /* Map of pseudo-register replacements to handle combining
1766 when we move several insns that load the same value
1767 into different pseudo-registers. */
1768 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1769 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1771 for (m = movables->head; m; m = m->next)
1773 /* Describe this movable insn. */
1775 if (loop_dump_stream)
1777 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1778 INSN_UID (m->insn), m->regno, m->lifetime);
1779 if (m->consec > 0)
1780 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1781 if (m->cond)
1782 fprintf (loop_dump_stream, "cond ");
1783 if (m->force)
1784 fprintf (loop_dump_stream, "force ");
1785 if (m->global)
1786 fprintf (loop_dump_stream, "global ");
1787 if (m->done)
1788 fprintf (loop_dump_stream, "done ");
1789 if (m->move_insn)
1790 fprintf (loop_dump_stream, "move-insn ");
1791 if (m->match)
1792 fprintf (loop_dump_stream, "matches %d ",
1793 INSN_UID (m->match->insn));
1794 if (m->forces)
1795 fprintf (loop_dump_stream, "forces %d ",
1796 INSN_UID (m->forces->insn));
1799 /* Ignore the insn if it's already done (it matched something else).
1800 Otherwise, see if it is now safe to move. */
1802 if (!m->done
1803 && (! m->cond
1804 || (1 == loop_invariant_p (loop, m->set_src)
1805 && (m->dependencies == 0
1806 || 1 == loop_invariant_p (loop, m->dependencies))
1807 && (m->consec == 0
1808 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1809 m->consec + 1,
1810 m->insn))))
1811 && (! m->forces || m->forces->done))
1813 int regno;
1814 rtx p;
1815 int savings = m->savings;
1817 /* We have an insn that is safe to move.
1818 Compute its desirability. */
1820 p = m->insn;
1821 regno = m->regno;
1823 if (loop_dump_stream)
1824 fprintf (loop_dump_stream, "savings %d ", savings);
1826 if (regs->array[regno].moved_once && loop_dump_stream)
1827 fprintf (loop_dump_stream, "halved since already moved ");
1829 /* An insn MUST be moved if we already moved something else
1830 which is safe only if this one is moved too: that is,
1831 if already_moved[REGNO] is nonzero. */
1833 /* An insn is desirable to move if the new lifetime of the
1834 register is no more than THRESHOLD times the old lifetime.
1835 If it's not desirable, it means the loop is so big
1836 that moving won't speed things up much,
1837 and it is liable to make register usage worse. */
1839 /* It is also desirable to move if it can be moved at no
1840 extra cost because something else was already moved. */
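/* Purely illustrative numbers (not taken from any target): with
   threshold 6, savings 2, lifetime 10 and insn_count 90, the product
   6 * 2 * 10 = 120 is at least 90, so the move is judged worthwhile;
   if this register had already been moved out of another loop the bar
   doubles to 180 and the move is rejected unless one of the other
   conditions below forces it. */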
1842 if (already_moved[regno]
1843 || flag_move_all_movables
1844 || (threshold * savings * m->lifetime) >=
1845 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1846 || (m->forces && m->forces->done
1847 && regs->array[m->forces->regno].n_times_set == 1))
1849 int count;
1850 struct movable *m1;
1851 rtx first = NULL_RTX;
1853 /* Now move the insns that set the reg. */
1855 if (m->partial && m->match)
1857 rtx newpat, i1;
1858 rtx r1, r2;
1859 /* Find the end of this chain of matching regs.
1860 Thus, we load each reg in the chain from that one reg.
1861 And that reg is loaded with 0 directly,
1862 since it has ->match == 0. */
1863 for (m1 = m; m1->match; m1 = m1->match);
1864 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1865 SET_DEST (PATTERN (m1->insn)));
1866 i1 = loop_insn_hoist (loop, newpat);
1868 /* Mark the moved, invariant reg as being allowed to
1869 share a hard reg with the other matching invariant. */
1870 REG_NOTES (i1) = REG_NOTES (m->insn);
1871 r1 = SET_DEST (PATTERN (m->insn));
1872 r2 = SET_DEST (PATTERN (m1->insn));
1873 regs_may_share
1874 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1875 gen_rtx_EXPR_LIST (VOIDmode, r2,
1876 regs_may_share));
1877 delete_insn (m->insn);
1879 if (new_start == 0)
1880 new_start = i1;
1882 if (loop_dump_stream)
1883 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1885 /* If we are to re-generate the item being moved with a
1886 new move insn, first delete what we have and then emit
1887 the move insn before the loop. */
1888 else if (m->move_insn)
1890 rtx i1, temp, seq;
1892 for (count = m->consec; count >= 0; count--)
1894 /* If this is the first insn of a library call sequence,
1895 skip to the end. */
1896 if (GET_CODE (p) != NOTE
1897 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1898 p = XEXP (temp, 0);
1900 /* If this is the last insn of a libcall sequence, then
1901 delete every insn in the sequence except the last.
1902 The last insn is handled in the normal manner. */
1903 if (GET_CODE (p) != NOTE
1904 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1906 temp = XEXP (temp, 0);
1907 while (temp != p)
1908 temp = delete_insn (temp);
1911 temp = p;
1912 p = delete_insn (p);
1914 /* simplify_giv_expr expects that it can walk the insns
1915 at m->insn forwards and see this old sequence we are
1916 tossing here. delete_insn does preserve the next
1917 pointers, but when we skip over a NOTE we must fix
1918 it up. Otherwise that code walks into the non-deleted
1919 insn stream. */
1920 while (p && GET_CODE (p) == NOTE)
1921 p = NEXT_INSN (temp) = NEXT_INSN (p);
1924 start_sequence ();
1925 emit_move_insn (m->set_dest, m->set_src);
1926 temp = get_insns ();
1927 seq = gen_sequence ();
1928 end_sequence ();
1930 add_label_notes (m->set_src, temp);
1932 i1 = loop_insn_hoist (loop, seq);
1933 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1934 set_unique_reg_note (i1,
1935 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1936 m->set_src);
1938 if (loop_dump_stream)
1939 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1941 /* The more regs we move, the less we like moving them. */
1942 threshold -= 3;
1944 else
1946 for (count = m->consec; count >= 0; count--)
1948 rtx i1, temp;
1950 /* If first insn of libcall sequence, skip to end. */
1951 /* Do this at start of loop, since p is guaranteed to
1952 be an insn here. */
1953 if (GET_CODE (p) != NOTE
1954 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1955 p = XEXP (temp, 0);
1957 /* If last insn of libcall sequence, move all
1958 insns except the last before the loop. The last
1959 insn is handled in the normal manner. */
1960 if (GET_CODE (p) != NOTE
1961 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1963 rtx fn_address = 0;
1964 rtx fn_reg = 0;
1965 rtx fn_address_insn = 0;
1967 first = 0;
1968 for (temp = XEXP (temp, 0); temp != p;
1969 temp = NEXT_INSN (temp))
1971 rtx body;
1972 rtx n;
1973 rtx next;
1975 if (GET_CODE (temp) == NOTE)
1976 continue;
1978 body = PATTERN (temp);
1980 /* Find the next insn after TEMP,
1981 not counting USE or NOTE insns. */
1982 for (next = NEXT_INSN (temp); next != p;
1983 next = NEXT_INSN (next))
1984 if (! (GET_CODE (next) == INSN
1985 && GET_CODE (PATTERN (next)) == USE)
1986 && GET_CODE (next) != NOTE)
1987 break;
1989 /* If that is the call, this may be the insn
1990 that loads the function address.
1992 Extract the function address from the insn
1993 that loads it into a register.
1994 If this insn was cse'd, we get incorrect code.
1996 So emit a new move insn that copies the
1997 function address into the register that the
1998 call insn will use. flow.c will delete any
1999 redundant stores that we have created. */
2000 if (GET_CODE (next) == CALL_INSN
2001 && GET_CODE (body) == SET
2002 && GET_CODE (SET_DEST (body)) == REG
2003 && (n = find_reg_note (temp, REG_EQUAL,
2004 NULL_RTX)))
2006 fn_reg = SET_SRC (body);
2007 if (GET_CODE (fn_reg) != REG)
2008 fn_reg = SET_DEST (body);
2009 fn_address = XEXP (n, 0);
2010 fn_address_insn = temp;
2012 /* We have the call insn.
2013 If it uses the register we suspect it might,
2014 load it with the correct address directly. */
2015 if (GET_CODE (temp) == CALL_INSN
2016 && fn_address != 0
2017 && reg_referenced_p (fn_reg, body))
2018 loop_insn_emit_after (loop, 0, fn_address_insn,
2019 gen_move_insn
2020 (fn_reg, fn_address));
2022 if (GET_CODE (temp) == CALL_INSN)
2024 i1 = loop_call_insn_hoist (loop, body);
2025 /* Because the USAGE information potentially
2026 contains objects other than hard registers
2027 we need to copy it. */
2028 if (CALL_INSN_FUNCTION_USAGE (temp))
2029 CALL_INSN_FUNCTION_USAGE (i1)
2030 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2032 else
2033 i1 = loop_insn_hoist (loop, body);
2034 if (first == 0)
2035 first = i1;
2036 if (temp == fn_address_insn)
2037 fn_address_insn = i1;
2038 REG_NOTES (i1) = REG_NOTES (temp);
2039 REG_NOTES (temp) = NULL;
2040 delete_insn (temp);
2042 if (new_start == 0)
2043 new_start = first;
2045 if (m->savemode != VOIDmode)
2047 /* P sets REG to zero; but we should clear only
2048 the bits that are not covered by the mode
2049 m->savemode. */
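/* For example (illustrative only): if m->savemode is QImode, the mask
   built below is ((HOST_WIDE_INT) 1 << 8) - 1 == 0xff, so the AND keeps
   the low byte of REG and zeros only the bits outside the saved mode,
   rather than zeroing the whole register as the original insn did. */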
2050 rtx reg = m->set_dest;
2051 rtx sequence;
2052 rtx tem;
2054 start_sequence ();
2055 tem = expand_simple_binop
2056 (GET_MODE (reg), AND, reg,
2057 GEN_INT ((((HOST_WIDE_INT) 1
2058 << GET_MODE_BITSIZE (m->savemode)))
2059 - 1),
2060 reg, 1, OPTAB_LIB_WIDEN);
2061 if (tem == 0)
2062 abort ();
2063 if (tem != reg)
2064 emit_move_insn (reg, tem);
2065 sequence = gen_sequence ();
2066 end_sequence ();
2067 i1 = loop_insn_hoist (loop, sequence);
2069 else if (GET_CODE (p) == CALL_INSN)
2071 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2072 /* Because the USAGE information potentially
2073 contains objects other than hard registers
2074 we need to copy it. */
2075 if (CALL_INSN_FUNCTION_USAGE (p))
2076 CALL_INSN_FUNCTION_USAGE (i1)
2077 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2079 else if (count == m->consec && m->move_insn_first)
2081 rtx seq;
2082 /* The SET_SRC might not be invariant, so we must
2083 use the REG_EQUAL note. */
2084 start_sequence ();
2085 emit_move_insn (m->set_dest, m->set_src);
2086 temp = get_insns ();
2087 seq = gen_sequence ();
2088 end_sequence ();
2090 add_label_notes (m->set_src, temp);
2092 i1 = loop_insn_hoist (loop, seq);
2093 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2094 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2095 : REG_EQUAL, m->set_src);
2097 else
2098 i1 = loop_insn_hoist (loop, PATTERN (p));
2100 if (REG_NOTES (i1) == 0)
2102 REG_NOTES (i1) = REG_NOTES (p);
2103 REG_NOTES (p) = NULL;
2105 /* If there is a REG_EQUAL note present whose value
2106 is not loop invariant, then delete it, since it
2107 may cause problems with later optimization passes.
2108 It is possible for cse to create notes like this as a
2109 result of record_jump_cond. */
2111 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2112 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2113 remove_note (i1, temp);
2116 if (new_start == 0)
2117 new_start = i1;
2119 if (loop_dump_stream)
2120 fprintf (loop_dump_stream, " moved to %d",
2121 INSN_UID (i1));
2123 /* If library call, now fix the REG_NOTES that contain
2124 insn pointers, namely REG_LIBCALL on FIRST
2125 and REG_RETVAL on I1. */
2126 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2128 XEXP (temp, 0) = first;
2129 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2130 XEXP (temp, 0) = i1;
2133 temp = p;
2134 delete_insn (p);
2135 p = NEXT_INSN (p);
2137 /* simplify_giv_expr expects that it can walk the insns
2138 at m->insn forwards and see this old sequence we are
2139 tossing here. delete_insn does preserve the next
2140 pointers, but when we skip over a NOTE we must fix
2141 it up. Otherwise that code walks into the non-deleted
2142 insn stream. */
2143 while (p && GET_CODE (p) == NOTE)
2144 p = NEXT_INSN (temp) = NEXT_INSN (p);
2147 /* The more regs we move, the less we like moving them. */
2148 threshold -= 3;
2151 /* Any other movable that loads the same register
2152 MUST be moved. */
2153 already_moved[regno] = 1;
2155 /* This reg has been moved out of one loop. */
2156 regs->array[regno].moved_once = 1;
2158 /* The reg set here is now invariant. */
2159 if (! m->partial)
2161 int i;
2162 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2163 regs->array[regno+i].set_in_loop = 0;
2166 m->done = 1;
2168 /* Change the length-of-life info for the register
2169 to say it lives at least the full length of this loop.
2170 This will help guide optimizations in outer loops. */
2172 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2173 /* This is the old insn before all the moved insns.
2174 We can't use the moved insn because it is out of range
2175 in uid_luid. Only the old insns have luids. */
2176 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2177 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2178 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2180 /* Combine with this moved insn any other matching movables. */
2182 if (! m->partial)
2183 for (m1 = movables->head; m1; m1 = m1->next)
2184 if (m1->match == m)
2186 rtx temp;
2188 /* Schedule the reg loaded by M1
2189 for replacement so that it shares the reg of M.
2190 If the modes differ (only possible in restricted
2191 circumstances), make a SUBREG.
2193 Note this assumes that the target dependent files
2194 treat REG and SUBREG equally, including within
2195 GO_IF_LEGITIMATE_ADDRESS and in all the
2196 predicates since we never verify that replacing the
2197 original register with a SUBREG results in a
2198 recognizable insn. */
2199 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2200 reg_map[m1->regno] = m->set_dest;
2201 else
2202 reg_map[m1->regno]
2203 = gen_lowpart_common (GET_MODE (m1->set_dest),
2204 m->set_dest);
2206 /* Get rid of the matching insn
2207 and prevent further processing of it. */
2208 m1->done = 1;
2210 /* if library call, delete all insns. */
2211 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2212 NULL_RTX)))
2213 delete_insn_chain (XEXP (temp, 0), m1->insn);
2214 else
2215 delete_insn (m1->insn);
2217 /* Any other movable that loads the same register
2218 MUST be moved. */
2219 already_moved[m1->regno] = 1;
2221 /* The reg merged here is now invariant,
2222 if the reg it matches is invariant. */
2223 if (! m->partial)
2225 int i;
2226 for (i = 0;
2227 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2228 i++)
2229 regs->array[m1->regno+i].set_in_loop = 0;
2233 else if (loop_dump_stream)
2234 fprintf (loop_dump_stream, "not desirable");
2236 else if (loop_dump_stream && !m->match)
2237 fprintf (loop_dump_stream, "not safe");
2239 if (loop_dump_stream)
2240 fprintf (loop_dump_stream, "\n");
2243 if (new_start == 0)
2244 new_start = loop_start;
2246 /* Go through all the instructions in the loop, making
2247 all the register substitutions scheduled in REG_MAP. */
2248 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2249 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2250 || GET_CODE (p) == CALL_INSN)
2252 replace_regs (PATTERN (p), reg_map, nregs, 0);
2253 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2254 INSN_CODE (p) = -1;
2257 /* Clean up. */
2258 free (reg_map);
2259 free (already_moved);
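/* For illustration (register numbers invented): if the movable loading
   (reg 105) was matched to the one loading (reg 100), the code above
   recorded reg_map[105] = (reg 100) and deleted the insn that set
   reg 105; the replace_regs scan just performed then rewrote every use
   of reg 105 inside the loop to use reg 100 instead. */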
2263 static void
2264 loop_movables_add (movables, m)
2265 struct loop_movables *movables;
2266 struct movable *m;
2268 if (movables->head == 0)
2269 movables->head = m;
2270 else
2271 movables->last->next = m;
2272 movables->last = m;
2276 static void
2277 loop_movables_free (movables)
2278 struct loop_movables *movables;
2280 struct movable *m;
2281 struct movable *m_next;
2283 for (m = movables->head; m; m = m_next)
2285 m_next = m->next;
2286 free (m);
2290 #if 0
2291 /* Scan X and replace the address of any MEM in it with ADDR.
2292 REG is the address that MEM should have before the replacement. */
2294 static void
2295 replace_call_address (x, reg, addr)
2296 rtx x, reg, addr;
2298 enum rtx_code code;
2299 int i;
2300 const char *fmt;
2302 if (x == 0)
2303 return;
2304 code = GET_CODE (x);
2305 switch (code)
2307 case PC:
2308 case CC0:
2309 case CONST_INT:
2310 case CONST_DOUBLE:
2311 case CONST:
2312 case SYMBOL_REF:
2313 case LABEL_REF:
2314 case REG:
2315 return;
2317 case SET:
2318 /* Short cut for very common case. */
2319 replace_call_address (XEXP (x, 1), reg, addr);
2320 return;
2322 case CALL:
2323 /* Short cut for very common case. */
2324 replace_call_address (XEXP (x, 0), reg, addr);
2325 return;
2327 case MEM:
2328 /* If this MEM uses a reg other than the one we expected,
2329 something is wrong. */
2330 if (XEXP (x, 0) != reg)
2331 abort ();
2332 XEXP (x, 0) = addr;
2333 return;
2335 default:
2336 break;
2339 fmt = GET_RTX_FORMAT (code);
2340 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2342 if (fmt[i] == 'e')
2343 replace_call_address (XEXP (x, i), reg, addr);
2344 else if (fmt[i] == 'E')
2346 int j;
2347 for (j = 0; j < XVECLEN (x, i); j++)
2348 replace_call_address (XVECEXP (x, i, j), reg, addr);
2352 #endif
2354 /* Return the number of memory refs to addresses that vary
2355 in the rtx X. */
2357 static int
2358 count_nonfixed_reads (loop, x)
2359 const struct loop *loop;
2360 rtx x;
2362 enum rtx_code code;
2363 int i;
2364 const char *fmt;
2365 int value;
2367 if (x == 0)
2368 return 0;
2370 code = GET_CODE (x);
2371 switch (code)
2373 case PC:
2374 case CC0:
2375 case CONST_INT:
2376 case CONST_DOUBLE:
2377 case CONST:
2378 case SYMBOL_REF:
2379 case LABEL_REF:
2380 case REG:
2381 return 0;
2383 case MEM:
2384 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2385 + count_nonfixed_reads (loop, XEXP (x, 0)));
2387 default:
2388 break;
2391 value = 0;
2392 fmt = GET_RTX_FORMAT (code);
2393 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2395 if (fmt[i] == 'e')
2396 value += count_nonfixed_reads (loop, XEXP (x, i));
2397 if (fmt[i] == 'E')
2399 int j;
2400 for (j = 0; j < XVECLEN (x, i); j++)
2401 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2404 return value;
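/* Illustration (register number invented): for
   (mem:SI (plus:SI (reg:SI 100) (const_int 4))) the result is 1 when
   reg 100 is modified inside the loop, since the address is not
   invariant, while (mem:SI (symbol_ref:SI ("table"))) contributes 0
   because its address is invariant. */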
2407 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2408 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2409 `unknown_address_altered', `unknown_constant_address_altered', and
2410 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2411 list `store_mems' in LOOP. */
2413 static void
2414 prescan_loop (loop)
2415 struct loop *loop;
2417 int level = 1;
2418 rtx insn;
2419 struct loop_info *loop_info = LOOP_INFO (loop);
2420 rtx start = loop->start;
2421 rtx end = loop->end;
2422 /* The label after END. Jumping here is just like falling off the
2423 end of the loop. We use next_nonnote_insn instead of next_label
2424 as a hedge against the (pathological) case where some actual insn
2425 might end up between the two. */
2426 rtx exit_target = next_nonnote_insn (end);
2428 loop_info->has_indirect_jump = indirect_jump_in_function;
2429 loop_info->pre_header_has_call = 0;
2430 loop_info->has_call = 0;
2431 loop_info->has_nonconst_call = 0;
2432 loop_info->has_volatile = 0;
2433 loop_info->has_tablejump = 0;
2434 loop_info->has_multiple_exit_targets = 0;
2435 loop->level = 1;
2437 loop_info->unknown_address_altered = 0;
2438 loop_info->unknown_constant_address_altered = 0;
2439 loop_info->store_mems = NULL_RTX;
2440 loop_info->first_loop_store_insn = NULL_RTX;
2441 loop_info->mems_idx = 0;
2442 loop_info->num_mem_sets = 0;
2445 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2446 insn = PREV_INSN (insn))
2448 if (GET_CODE (insn) == CALL_INSN)
2450 loop_info->pre_header_has_call = 1;
2451 break;
2455 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2456 insn = NEXT_INSN (insn))
2458 switch (GET_CODE (insn))
2460 case NOTE:
2461 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2463 ++level;
2464 /* Count number of loops contained in this one. */
2465 loop->level++;
2467 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2468 --level;
2469 break;
2471 case CALL_INSN:
2472 if (! CONST_OR_PURE_CALL_P (insn))
2474 loop_info->unknown_address_altered = 1;
2475 loop_info->has_nonconst_call = 1;
2477 loop_info->has_call = 1;
2478 if (can_throw_internal (insn))
2479 loop_info->has_multiple_exit_targets = 1;
2480 break;
2482 case JUMP_INSN:
2483 if (! loop_info->has_multiple_exit_targets)
2485 rtx set = pc_set (insn);
2487 if (set)
2489 rtx label1, label2;
2491 if (GET_CODE (SET_SRC (set)) == IF_THEN_ELSE)
2493 label1 = XEXP (SET_SRC (set), 1);
2494 label2 = XEXP (SET_SRC (set), 2);
2496 else
2498 label1 = SET_SRC (PATTERN (insn));
2499 label2 = NULL_RTX;
2504 if (label1 && label1 != pc_rtx)
2506 if (GET_CODE (label1) != LABEL_REF)
2508 /* Something tricky. */
2509 loop_info->has_multiple_exit_targets = 1;
2510 break;
2512 else if (XEXP (label1, 0) != exit_target
2513 && LABEL_OUTSIDE_LOOP_P (label1))
2515 /* A jump outside the current loop. */
2516 loop_info->has_multiple_exit_targets = 1;
2517 break;
2521 label1 = label2;
2522 label2 = NULL_RTX;
2524 while (label1);
2526 else
2528 /* A return, or something tricky. */
2529 loop_info->has_multiple_exit_targets = 1;
2532 /* FALLTHRU */
2534 case INSN:
2535 if (volatile_refs_p (PATTERN (insn)))
2536 loop_info->has_volatile = 1;
2538 if (GET_CODE (insn) == JUMP_INSN
2539 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2540 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2541 loop_info->has_tablejump = 1;
2543 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2544 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2545 loop_info->first_loop_store_insn = insn;
2547 if (flag_non_call_exceptions && can_throw_internal (insn))
2548 loop_info->has_multiple_exit_targets = 1;
2549 break;
2551 default:
2552 break;
2556 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2557 if (/* An exception thrown by a called function might land us
2558 anywhere. */
2559 ! loop_info->has_nonconst_call
2560 /* We don't want loads for MEMs moved to a location before the
2561 one at which their stack memory becomes allocated. (Note
2562 that this is not a problem for malloc, etc., since those
2563 require actual function calls.) */
2564 && ! current_function_calls_alloca
2565 /* There are ways to leave the loop other than falling off the
2566 end. */
2567 && ! loop_info->has_multiple_exit_targets)
2568 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2569 insn = NEXT_INSN (insn))
2570 for_each_rtx (&insn, insert_loop_mem, loop_info);
2572 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2573 that loop_invariant_p and load_mems can use true_dependence
2574 to determine what is really clobbered. */
2575 if (loop_info->unknown_address_altered)
2577 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2579 loop_info->store_mems
2580 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2582 if (loop_info->unknown_constant_address_altered)
2584 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2586 RTX_UNCHANGING_P (mem) = 1;
2587 loop_info->store_mems
2588 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2592 /* Scan the function looking for loops. Record the start and end of each loop.
2593 Also mark as invalid any loops that contain a setjmp or are branched
2594 to from outside the loop. */
2596 static void
2597 find_and_verify_loops (f, loops)
2598 rtx f;
2599 struct loops *loops;
2601 rtx insn;
2602 rtx label;
2603 int num_loops;
2604 struct loop *current_loop;
2605 struct loop *next_loop;
2606 struct loop *loop;
2608 num_loops = loops->num;
2610 compute_luids (f, NULL_RTX, 0);
2612 /* If there are jumps to undefined labels,
2613 treat them as jumps out of any/all loops.
2614 This also avoids writing past end of tables when there are no loops. */
2615 uid_loop[0] = NULL;
2617 /* Find boundaries of loops, mark which loops are contained within
2618 loops, and invalidate loops that have setjmp. */
2620 num_loops = 0;
2621 current_loop = NULL;
2622 for (insn = f; insn; insn = NEXT_INSN (insn))
2624 if (GET_CODE (insn) == NOTE)
2625 switch (NOTE_LINE_NUMBER (insn))
2627 case NOTE_INSN_LOOP_BEG:
2628 next_loop = loops->array + num_loops;
2629 next_loop->num = num_loops;
2630 num_loops++;
2631 next_loop->start = insn;
2632 next_loop->outer = current_loop;
2633 current_loop = next_loop;
2634 break;
2636 case NOTE_INSN_LOOP_CONT:
2637 current_loop->cont = insn;
2638 break;
2640 case NOTE_INSN_LOOP_VTOP:
2641 current_loop->vtop = insn;
2642 break;
2644 case NOTE_INSN_LOOP_END:
2645 if (! current_loop)
2646 abort ();
2648 current_loop->end = insn;
2649 current_loop = current_loop->outer;
2650 break;
2652 default:
2653 break;
2656 if (GET_CODE (insn) == CALL_INSN
2657 && find_reg_note (insn, REG_SETJMP, NULL))
2659 /* In this case, we must invalidate our current loop and any
2660 enclosing loop. */
2661 for (loop = current_loop; loop; loop = loop->outer)
2663 loop->invalid = 1;
2664 if (loop_dump_stream)
2665 fprintf (loop_dump_stream,
2666 "\nLoop at %d ignored due to setjmp.\n",
2667 INSN_UID (loop->start));
2671 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2672 enclosing loop, but this doesn't matter. */
2673 uid_loop[INSN_UID (insn)] = current_loop;
2676 /* Any loop containing a label used in an initializer must be invalidated,
2677 because it can be jumped into from anywhere. */
2679 for (label = forced_labels; label; label = XEXP (label, 1))
2681 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2682 loop; loop = loop->outer)
2683 loop->invalid = 1;
2686 /* Any loop containing a label used for an exception handler must be
2687 invalidated, because it can be jumped into from anywhere. */
2689 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2691 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2692 loop; loop = loop->outer)
2693 loop->invalid = 1;
2696 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2697 loop that it is not contained within, that loop is marked invalid.
2698 If any INSN or CALL_INSN uses a label's address, then the loop containing
2699 that label is marked invalid, because it could be jumped into from
2700 anywhere.
2702 Also look for blocks of code ending in an unconditional branch that
2703 exits the loop. If such a block is surrounded by a conditional
2704 branch around the block, move the block elsewhere (see below) and
2705 invert the jump to point to the code block. This may eliminate a
2706 label in our loop and will simplify processing by both us and a
2707 possible second cse pass. */
2709 for (insn = f; insn; insn = NEXT_INSN (insn))
2710 if (INSN_P (insn))
2712 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2714 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2716 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2717 if (note)
2719 for (loop = uid_loop[INSN_UID (XEXP (note, 0))];
2720 loop; loop = loop->outer)
2721 loop->invalid = 1;
2725 if (GET_CODE (insn) != JUMP_INSN)
2726 continue;
2728 mark_loop_jump (PATTERN (insn), this_loop);
2730 /* See if this is an unconditional branch outside the loop. */
2731 if (this_loop
2732 && (GET_CODE (PATTERN (insn)) == RETURN
2733 || (any_uncondjump_p (insn)
2734 && onlyjump_p (insn)
2735 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2736 != this_loop)))
2737 && get_max_uid () < max_uid_for_loop)
2739 rtx p;
2740 rtx our_next = next_real_insn (insn);
2741 rtx last_insn_to_move = NEXT_INSN (insn);
2742 struct loop *dest_loop;
2743 struct loop *outer_loop = NULL;
2745 /* Go backwards until we reach the start of the loop, a label,
2746 or a JUMP_INSN. */
2747 for (p = PREV_INSN (insn);
2748 GET_CODE (p) != CODE_LABEL
2749 && ! (GET_CODE (p) == NOTE
2750 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2751 && GET_CODE (p) != JUMP_INSN;
2752 p = PREV_INSN (p))
2755 /* Check for the case where we have a jump to an inner nested
2756 loop, and do not perform the optimization in that case. */
2758 if (JUMP_LABEL (insn))
2760 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2761 if (dest_loop)
2763 for (outer_loop = dest_loop; outer_loop;
2764 outer_loop = outer_loop->outer)
2765 if (outer_loop == this_loop)
2766 break;
2770 /* Make sure that the target of P is within the current loop. */
2772 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2773 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2774 outer_loop = this_loop;
2776 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2777 we have a block of code to try to move.
2779 We look backward and then forward from the target of INSN
2780 to find a BARRIER at the same loop depth as the target.
2781 If we find such a BARRIER, we make a new label for the start
2782 of the block, invert the jump in P and point it to that label,
2783 and move the block of code to the spot we found. */
2785 if (! outer_loop
2786 && GET_CODE (p) == JUMP_INSN
2787 && JUMP_LABEL (p) != 0
2788 /* Just ignore jumps to labels that were never emitted.
2789 These always indicate compilation errors. */
2790 && INSN_UID (JUMP_LABEL (p)) != 0
2791 && any_condjump_p (p) && onlyjump_p (p)
2792 && next_real_insn (JUMP_LABEL (p)) == our_next
2793 /* If it's not safe to move the sequence, then we
2794 mustn't try. */
2795 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2796 &last_insn_to_move))
2798 rtx target
2799 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2800 struct loop *target_loop = uid_loop[INSN_UID (target)];
2801 rtx loc, loc2;
2802 rtx tmp;
2804 /* Search for possible garbage past the conditional jumps
2805 and look for the last barrier. */
2806 for (tmp = last_insn_to_move;
2807 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2808 if (GET_CODE (tmp) == BARRIER)
2809 last_insn_to_move = tmp;
2811 for (loc = target; loc; loc = PREV_INSN (loc))
2812 if (GET_CODE (loc) == BARRIER
2813 /* Don't move things inside a tablejump. */
2814 && ((loc2 = next_nonnote_insn (loc)) == 0
2815 || GET_CODE (loc2) != CODE_LABEL
2816 || (loc2 = next_nonnote_insn (loc2)) == 0
2817 || GET_CODE (loc2) != JUMP_INSN
2818 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2819 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2820 && uid_loop[INSN_UID (loc)] == target_loop)
2821 break;
2823 if (loc == 0)
2824 for (loc = target; loc; loc = NEXT_INSN (loc))
2825 if (GET_CODE (loc) == BARRIER
2826 /* Don't move things inside a tablejump. */
2827 && ((loc2 = next_nonnote_insn (loc)) == 0
2828 || GET_CODE (loc2) != CODE_LABEL
2829 || (loc2 = next_nonnote_insn (loc2)) == 0
2830 || GET_CODE (loc2) != JUMP_INSN
2831 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2832 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2833 && uid_loop[INSN_UID (loc)] == target_loop)
2834 break;
2836 if (loc)
2838 rtx cond_label = JUMP_LABEL (p);
2839 rtx new_label = get_label_after (p);
2841 /* Ensure our label doesn't go away. */
2842 LABEL_NUSES (cond_label)++;
2844 /* Verify that uid_loop is large enough and that
2845 we can invert P. */
2846 if (invert_jump (p, new_label, 1))
2848 rtx q, r;
2850 /* If no suitable BARRIER was found, create a suitable
2851 one before TARGET. Since TARGET is a fall through
2852 path, we'll need to insert a jump around our block
2853 and add a BARRIER before TARGET.
2855 This creates an extra unconditional jump outside
2856 the loop. However, the benefits of removing rarely
2857 executed instructions from inside the loop usually
2858 outweigh the cost of the extra unconditional jump
2859 outside the loop. */
2860 if (loc == 0)
2862 rtx temp;
2864 temp = gen_jump (JUMP_LABEL (insn));
2865 temp = emit_jump_insn_before (temp, target);
2866 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2867 LABEL_NUSES (JUMP_LABEL (insn))++;
2868 loc = emit_barrier_before (target);
2871 /* Include the BARRIER after INSN and copy the
2872 block after LOC. */
2873 if (squeeze_notes (&new_label, &last_insn_to_move))
2874 abort ();
2875 reorder_insns (new_label, last_insn_to_move, loc);
2877 /* All those insns are now in TARGET_LOOP. */
2878 for (q = new_label;
2879 q != NEXT_INSN (last_insn_to_move);
2880 q = NEXT_INSN (q))
2881 uid_loop[INSN_UID (q)] = target_loop;
2883 /* The label jumped to by INSN is no longer a loop
2884 exit. Unless INSN does not have a label (e.g.,
2885 it is a RETURN insn), search loop->exit_labels
2886 to find its label_ref, and remove it. Also turn
2887 off LABEL_OUTSIDE_LOOP_P bit. */
2888 if (JUMP_LABEL (insn))
2890 for (q = 0, r = this_loop->exit_labels;
2891 r;
2892 q = r, r = LABEL_NEXTREF (r))
2893 if (XEXP (r, 0) == JUMP_LABEL (insn))
2895 LABEL_OUTSIDE_LOOP_P (r) = 0;
2896 if (q)
2897 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2898 else
2899 this_loop->exit_labels = LABEL_NEXTREF (r);
2900 break;
2903 for (loop = this_loop; loop && loop != target_loop;
2904 loop = loop->outer)
2905 loop->exit_count--;
2907 /* If we didn't find it, then something is
2908 wrong. */
2909 if (! r)
2910 abort ();
2913 /* P is now a jump outside the loop, so it must be put
2914 in loop->exit_labels, and marked as such.
2915 The easiest way to do this is to just call
2916 mark_loop_jump again for P. */
2917 mark_loop_jump (PATTERN (p), this_loop);
2919 /* If INSN now jumps to the insn after it,
2920 delete INSN. */
2921 if (JUMP_LABEL (insn) != 0
2922 && (next_real_insn (JUMP_LABEL (insn))
2923 == next_real_insn (insn)))
2924 delete_related_insns (insn);
2927 /* Continue the loop after where the conditional
2928 branch used to jump, since the only branch insn
2929 in the block (if it still remains) is an inter-loop
2930 branch and hence needs no processing. */
2931 insn = NEXT_INSN (cond_label);
2933 if (--LABEL_NUSES (cond_label) == 0)
2934 delete_related_insns (cond_label);
2936 /* This loop will be continued with NEXT_INSN (insn). */
2937 insn = PREV_INSN (insn);
2944 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2945 loops it is contained in, mark the target loop invalid.
2947 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2949 static void
2950 mark_loop_jump (x, loop)
2951 rtx x;
2952 struct loop *loop;
2954 struct loop *dest_loop;
2955 struct loop *outer_loop;
2956 int i;
2958 switch (GET_CODE (x))
2960 case PC:
2961 case USE:
2962 case CLOBBER:
2963 case REG:
2964 case MEM:
2965 case CONST_INT:
2966 case CONST_DOUBLE:
2967 case RETURN:
2968 return;
2970 case CONST:
2971 /* There could be a label reference in here. */
2972 mark_loop_jump (XEXP (x, 0), loop);
2973 return;
2975 case PLUS:
2976 case MINUS:
2977 case MULT:
2978 mark_loop_jump (XEXP (x, 0), loop);
2979 mark_loop_jump (XEXP (x, 1), loop);
2980 return;
2982 case LO_SUM:
2983 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2984 mark_loop_jump (XEXP (x, 1), loop);
2985 return;
2987 case SIGN_EXTEND:
2988 case ZERO_EXTEND:
2989 mark_loop_jump (XEXP (x, 0), loop);
2990 return;
2992 case LABEL_REF:
2993 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
2995 /* Link together all labels that branch outside the loop. This
2996 is used by final_[bg]iv_value and the loop unrolling code. Also
2997 mark this LABEL_REF so we know that this branch should predict
2998 false. */
3000 /* A check to make sure the label is not in an inner nested loop,
3001 since this does not count as a loop exit. */
3002 if (dest_loop)
3004 for (outer_loop = dest_loop; outer_loop;
3005 outer_loop = outer_loop->outer)
3006 if (outer_loop == loop)
3007 break;
3009 else
3010 outer_loop = NULL;
3012 if (loop && ! outer_loop)
3014 LABEL_OUTSIDE_LOOP_P (x) = 1;
3015 LABEL_NEXTREF (x) = loop->exit_labels;
3016 loop->exit_labels = x;
3018 for (outer_loop = loop;
3019 outer_loop && outer_loop != dest_loop;
3020 outer_loop = outer_loop->outer)
3021 outer_loop->exit_count++;
3024 /* If this is inside a loop, but not in the current loop or one enclosed
3025 by it, it invalidates at least one loop. */
3027 if (! dest_loop)
3028 return;
3030 /* We must invalidate every nested loop containing the target of this
3031 label, except those that also contain the jump insn. */
3033 for (; dest_loop; dest_loop = dest_loop->outer)
3035 /* Stop when we reach a loop that also contains the jump insn. */
3036 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3037 if (dest_loop == outer_loop)
3038 return;
3040 /* If we get here, we know we need to invalidate a loop. */
3041 if (loop_dump_stream && ! dest_loop->invalid)
3042 fprintf (loop_dump_stream,
3043 "\nLoop at %d ignored due to multiple entry points.\n",
3044 INSN_UID (dest_loop->start));
3046 dest_loop->invalid = 1;
3048 return;
3050 case SET:
3051 /* If this is not setting pc, ignore. */
3052 if (SET_DEST (x) == pc_rtx)
3053 mark_loop_jump (SET_SRC (x), loop);
3054 return;
3056 case IF_THEN_ELSE:
3057 mark_loop_jump (XEXP (x, 1), loop);
3058 mark_loop_jump (XEXP (x, 2), loop);
3059 return;
3061 case PARALLEL:
3062 case ADDR_VEC:
3063 for (i = 0; i < XVECLEN (x, 0); i++)
3064 mark_loop_jump (XVECEXP (x, 0, i), loop);
3065 return;
3067 case ADDR_DIFF_VEC:
3068 for (i = 0; i < XVECLEN (x, 1); i++)
3069 mark_loop_jump (XVECEXP (x, 1, i), loop);
3070 return;
3072 default:
3073 /* Strictly speaking this is not a jump into the loop, only a possible
3074 jump out of the loop. However, we have no way to link the destination
3075 of this jump onto the list of exit labels. To be safe we mark this
3076 loop and any containing loops as invalid. */
3077 if (loop)
3079 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3081 if (loop_dump_stream && ! outer_loop->invalid)
3082 fprintf (loop_dump_stream,
3083 "\nLoop at %d ignored due to unknown exit jump.\n",
3084 INSN_UID (outer_loop->start));
3085 outer_loop->invalid = 1;
3088 return;
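/* For illustration: a LABEL_REF whose target sits in a loop that does
   not also enclose the jump marks each loop around the target, up to
   but not including the first loop that encloses both, as invalid,
   since the jump would create a second entry point; an address we
   cannot analyze (the default case) instead invalidates the jumping
   loop and all of its enclosing loops. */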
3092 /* Return nonzero if there is a label in the range from
3093 insn INSN to and including the insn whose luid is END.
3094 INSN must have an assigned luid (i.e., it must not have
3095 been previously created by loop.c). */
3097 static int
3098 labels_in_range_p (insn, end)
3099 rtx insn;
3100 int end;
3102 while (insn && INSN_LUID (insn) <= end)
3104 if (GET_CODE (insn) == CODE_LABEL)
3105 return 1;
3106 insn = NEXT_INSN (insn);
3109 return 0;
3112 /* Record that a memory reference X is being set. */
3114 static void
3115 note_addr_stored (x, y, data)
3116 rtx x;
3117 rtx y ATTRIBUTE_UNUSED;
3118 void *data ATTRIBUTE_UNUSED;
3120 struct loop_info *loop_info = data;
3122 if (x == 0 || GET_CODE (x) != MEM)
3123 return;
3125 /* Count number of memory writes.
3126 This affects heuristics in strength_reduce. */
3127 loop_info->num_mem_sets++;
3129 /* BLKmode MEM means all memory is clobbered. */
3130 if (GET_MODE (x) == BLKmode)
3132 if (RTX_UNCHANGING_P (x))
3133 loop_info->unknown_constant_address_altered = 1;
3134 else
3135 loop_info->unknown_address_altered = 1;
3137 return;
3140 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3141 loop_info->store_mems);
3144 /* X is a value modified by an INSN that references a biv inside a loop
3145 exit test (i.e., X is somehow related to the value of the biv). If X
3146 is a pseudo that is used more than once, then the biv is (effectively)
3147 used more than once. DATA is a pointer to a loop_regs structure. */
3149 static void
3150 note_set_pseudo_multiple_uses (x, y, data)
3151 rtx x;
3152 rtx y ATTRIBUTE_UNUSED;
3153 void *data;
3155 struct loop_regs *regs = (struct loop_regs *) data;
3157 if (x == 0)
3158 return;
3160 while (GET_CODE (x) == STRICT_LOW_PART
3161 || GET_CODE (x) == SIGN_EXTRACT
3162 || GET_CODE (x) == ZERO_EXTRACT
3163 || GET_CODE (x) == SUBREG)
3164 x = XEXP (x, 0);
3166 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3167 return;
3169 /* If we do not have usage information, or if we know the register
3170 is used more than once, note that fact for check_dbra_loop. */
3171 if (REGNO (x) >= max_reg_before_loop
3172 || ! regs->array[REGNO (x)].single_usage
3173 || regs->array[REGNO (x)].single_usage == const0_rtx)
3174 regs->multiple_uses = 1;
3177 /* Return nonzero if the rtx X is invariant over the current loop.
3179 The value is 2 if we refer to something only conditionally invariant.
3181 A memory ref is invariant if it is not volatile and does not conflict
3182 with anything stored in `loop_info->store_mems'. */
3185 loop_invariant_p (loop, x)
3186 const struct loop *loop;
3187 rtx x;
3189 struct loop_info *loop_info = LOOP_INFO (loop);
3190 struct loop_regs *regs = LOOP_REGS (loop);
3191 int i;
3192 enum rtx_code code;
3193 const char *fmt;
3194 int conditional = 0;
3195 rtx mem_list_entry;
3197 if (x == 0)
3198 return 1;
3199 code = GET_CODE (x);
3200 switch (code)
3202 case CONST_INT:
3203 case CONST_DOUBLE:
3204 case SYMBOL_REF:
3205 case CONST:
3206 return 1;
3208 case LABEL_REF:
3209 /* A LABEL_REF is normally invariant, however, if we are unrolling
3210 loops, and this label is inside the loop, then it isn't invariant.
3211 This is because each unrolled copy of the loop body will have
3212 a copy of this label. If this was invariant, then an insn loading
3213 the address of this label into a register might get moved outside
3214 the loop, and then each loop body would end up using the same label.
3216 We don't know the loop bounds here though, so just fail for all
3217 labels. */
3218 if (flag_unroll_loops)
3219 return 0;
3220 else
3221 return 1;
3223 case PC:
3224 case CC0:
3225 case UNSPEC_VOLATILE:
3226 return 0;
3228 case REG:
3229 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3230 since the reg might be set by initialization within the loop. */
3232 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3233 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3234 && ! current_function_has_nonlocal_goto)
3235 return 1;
3237 if (LOOP_INFO (loop)->has_call
3238 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3239 return 0;
3241 if (regs->array[REGNO (x)].set_in_loop < 0)
3242 return 2;
3244 return regs->array[REGNO (x)].set_in_loop == 0;
3246 case MEM:
3247 /* Volatile memory references must be rejected. Do this before
3248 checking for read-only items, so that volatile read-only items
3249 will be rejected also. */
3250 if (MEM_VOLATILE_P (x))
3251 return 0;
3253 /* See if there is any dependence between a store and this load. */
3254 mem_list_entry = loop_info->store_mems;
3255 while (mem_list_entry)
3257 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3258 x, rtx_varies_p))
3259 return 0;
3261 mem_list_entry = XEXP (mem_list_entry, 1);
3264 /* It's not invalidated by a store in memory
3265 but we must still verify the address is invariant. */
3266 break;
3268 case ASM_OPERANDS:
3269 /* Don't mess with insns declared volatile. */
3270 if (MEM_VOLATILE_P (x))
3271 return 0;
3272 break;
3274 default:
3275 break;
3278 fmt = GET_RTX_FORMAT (code);
3279 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3281 if (fmt[i] == 'e')
3283 int tem = loop_invariant_p (loop, XEXP (x, i));
3284 if (tem == 0)
3285 return 0;
3286 if (tem == 2)
3287 conditional = 1;
3289 else if (fmt[i] == 'E')
3291 int j;
3292 for (j = 0; j < XVECLEN (x, i); j++)
3294 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3295 if (tem == 0)
3296 return 0;
3297 if (tem == 2)
3298 conditional = 1;
3304 return 1 + conditional;
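/* Illustrative examples (register numbers invented): with reg 100 never
   set inside the loop, (plus:SI (reg:SI 100) (const_int 8)) yields 1;
   if the only set of reg 100 is itself a movable candidate (set_in_loop
   is negative) the result is 2, i.e. invariant only conditionally; and
   a MEM whose address may conflict with an entry on
   loop_info->store_mems yields 0. */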
3307 /* Return nonzero if all the insns in the loop that set REG
3308 are INSN and the immediately following insns,
3309 and if each of those insns sets REG in an invariant way
3310 (not counting uses of REG in them).
3312 The value is 2 if some of these insns are only conditionally invariant.
3314 We assume that INSN itself is the first set of REG
3315 and that its source is invariant. */
3317 static int
3318 consec_sets_invariant_p (loop, reg, n_sets, insn)
3319 const struct loop *loop;
3320 int n_sets;
3321 rtx reg, insn;
3323 struct loop_regs *regs = LOOP_REGS (loop);
3324 rtx p = insn;
3325 unsigned int regno = REGNO (reg);
3326 rtx temp;
3327 /* Number of sets we have to insist on finding after INSN. */
3328 int count = n_sets - 1;
3329 int old = regs->array[regno].set_in_loop;
3330 int value = 0;
3331 int this;
3333 /* If N_SETS hit the limit, we can't rely on its value. */
3334 if (n_sets == 127)
3335 return 0;
3337 regs->array[regno].set_in_loop = 0;
3339 while (count > 0)
3341 enum rtx_code code;
3342 rtx set;
3344 p = NEXT_INSN (p);
3345 code = GET_CODE (p);
3347 /* If library call, skip to end of it. */
3348 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3349 p = XEXP (temp, 0);
3351 this = 0;
3352 if (code == INSN
3353 && (set = single_set (p))
3354 && GET_CODE (SET_DEST (set)) == REG
3355 && REGNO (SET_DEST (set)) == regno)
3357 this = loop_invariant_p (loop, SET_SRC (set));
3358 if (this != 0)
3359 value |= this;
3360 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3362 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3363 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3364 notes are OK. */
3365 this = (CONSTANT_P (XEXP (temp, 0))
3366 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3367 && loop_invariant_p (loop, XEXP (temp, 0))));
3368 if (this != 0)
3369 value |= this;
3372 if (this != 0)
3373 count--;
3374 else if (code != NOTE)
3376 regs->array[regno].set_in_loop = old;
3377 return 0;
3381 regs->array[regno].set_in_loop = old;
3382 /* If loop_invariant_p ever returned 2, we return 2. */
3383 return 1 + (value & 2);
3386 #if 0
3387 /* I don't think this condition is sufficient to allow INSN
3388 to be moved, so we no longer test it. */
3390 /* Return 1 if all insns in the basic block of INSN and following INSN
3391 that set REG are invariant according to TABLE. */
3393 static int
3394 all_sets_invariant_p (reg, insn, table)
3395 rtx reg, insn;
3396 short *table;
3398 rtx p = insn;
3399 int regno = REGNO (reg);
3401 while (1)
3403 enum rtx_code code;
3404 p = NEXT_INSN (p);
3405 code = GET_CODE (p);
3406 if (code == CODE_LABEL || code == JUMP_INSN)
3407 return 1;
3408 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3409 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3410 && REGNO (SET_DEST (PATTERN (p))) == regno)
3412 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3413 return 0;
3417 #endif /* 0 */
3419 /* Look at all uses (not sets) of registers in X. For each, if it is
3420 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3421 a different insn, set USAGE[REGNO] to const0_rtx. */
3423 static void
3424 find_single_use_in_loop (regs, insn, x)
3425 struct loop_regs *regs;
3426 rtx insn;
3427 rtx x;
3429 enum rtx_code code = GET_CODE (x);
3430 const char *fmt = GET_RTX_FORMAT (code);
3431 int i, j;
3433 if (code == REG)
3434 regs->array[REGNO (x)].single_usage
3435 = (regs->array[REGNO (x)].single_usage != 0
3436 && regs->array[REGNO (x)].single_usage != insn)
3437 ? const0_rtx : insn;
3439 else if (code == SET)
3441 /* Don't count SET_DEST if it is a REG; otherwise count things
3442 in SET_DEST because if a register is partially modified, it won't
3443 show up as a potential movable so we don't care how USAGE is set
3444 for it. */
3445 if (GET_CODE (SET_DEST (x)) != REG)
3446 find_single_use_in_loop (regs, insn, SET_DEST (x));
3447 find_single_use_in_loop (regs, insn, SET_SRC (x));
3449 else
3450 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3452 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3453 find_single_use_in_loop (regs, insn, XEXP (x, i));
3454 else if (fmt[i] == 'E')
3455 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3456 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3460 /* Count and record any set in X which is contained in INSN. Update
3461 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3462 in X. */
3464 static void
3465 count_one_set (regs, insn, x, last_set)
3466 struct loop_regs *regs;
3467 rtx insn, x;
3468 rtx *last_set;
3470 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3471 /* Don't move a reg that has an explicit clobber.
3472 It's not worth the pain to try to do it correctly. */
3473 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3475 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3477 rtx dest = SET_DEST (x);
3478 while (GET_CODE (dest) == SUBREG
3479 || GET_CODE (dest) == ZERO_EXTRACT
3480 || GET_CODE (dest) == SIGN_EXTRACT
3481 || GET_CODE (dest) == STRICT_LOW_PART)
3482 dest = XEXP (dest, 0);
3483 if (GET_CODE (dest) == REG)
3485 int i;
3486 int regno = REGNO (dest);
3487 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3489 /* If this is the first setting of this reg
3490 in current basic block, and it was set before,
3491 it must be set in two basic blocks, so it cannot
3492 be moved out of the loop. */
3493 if (regs->array[regno].set_in_loop > 0
3494 && last_set == 0)
3495 regs->array[regno+i].may_not_optimize = 1;
3496 /* If this is not the first setting in the current basic block,
3497 see if the reg was used between the previous setting and this one.
3498 If so, neither one can be moved. */
3499 if (last_set[regno] != 0
3500 && reg_used_between_p (dest, last_set[regno], insn))
3501 regs->array[regno+i].may_not_optimize = 1;
3502 if (regs->array[regno+i].set_in_loop < 127)
3503 ++regs->array[regno+i].set_in_loop;
3504 last_set[regno+i] = insn;
3510 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3511 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3512 contained in insn INSN is used by any insn that precedes INSN in
3513 cyclic order starting from the loop entry point.
3515 We don't want to use INSN_LUID here because if we restrict INSN to those
3516 that have a valid INSN_LUID, it means we cannot move an invariant out
3517 from an inner loop past two loops. */
3519 static int
3520 loop_reg_used_before_p (loop, set, insn)
3521 const struct loop *loop;
3522 rtx set, insn;
3524 rtx reg = SET_DEST (set);
3525 rtx p;
3527 /* Scan forward checking for register usage. If we hit INSN, we
3528 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3529 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3531 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3532 return 1;
3534 if (p == loop->end)
3535 p = loop->start;
3538 return 0;
3542 /* Information we collect about arrays that we might want to prefetch. */
3543 struct prefetch_info
3545 struct iv_class *class; /* Class this prefetch is based on. */
3546 struct induction *giv; /* GIV this prefetch is based on. */
3547 rtx base_address; /* Start prefetching from this address plus
3548 index. */
3549 HOST_WIDE_INT index;
3550 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3551 iteration. */
3552 unsigned int bytes_accesed; /* Sum of sizes of all accesses to this
3553 prefetch area in one iteration. */
3554 unsigned int total_bytes; /* Total bytes loop will access in this block.
3555 This is set only for loops with known
3556 iteration counts and is 0xffffffff
3557 otherwise. */
3558 unsigned int write : 1; /* 1 for read/write prefetches. */
3559 unsigned int prefetch_in_loop : 1;
3560 /* 1 for those chosen for prefetching inside the loop body. */
3561 unsigned int prefetch_before_loop : 1;
3562 /* 1 for those chosen for prefetching before the loop. */
3565 /* Data used by check_store function. */
3566 struct check_store_data
3568 rtx mem_address;
3569 int mem_write;
3572 static void check_store PARAMS ((rtx, rtx, void *));
3573 static void emit_prefetch_instructions PARAMS ((struct loop *));
3574 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3576 /* Set mem_write when mem_address is found. Used as callback to
3577 note_stores. */
3578 static void
3579 check_store (x, pat, data)
3580 rtx x, pat ATTRIBUTE_UNUSED;
3581 void *data;
3583 struct check_store_data *d = (struct check_store_data *) data;
3585 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3586 d->mem_write = 1;
3589 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3590 important to get some addresses combined. Later more sophisticated
3591 transformations can be added when necessary.
3593 ??? The same trick with swapping operands is done at several other places.
3594 It would be nice to develop some common way to handle this. */
3596 static int
3597 rtx_equal_for_prefetch_p (x, y)
3598 rtx x, y;
3600 int i;
3601 int j;
3602 enum rtx_code code = GET_CODE (x);
3603 const char *fmt;
3605 if (x == y)
3606 return 1;
3607 if (code != GET_CODE (y))
3608 return 0;
3610 code = GET_CODE (x);
3612 if (GET_RTX_CLASS (code) == 'c')
3614 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3615 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3616 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3617 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3619 /* Compare the elements. If any pair of corresponding elements fails to
3620 match, return 0 for the whole thing. */
3622 fmt = GET_RTX_FORMAT (code);
3623 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3625 switch (fmt[i])
3627 case 'w':
3628 if (XWINT (x, i) != XWINT (y, i))
3629 return 0;
3630 break;
3632 case 'i':
3633 if (XINT (x, i) != XINT (y, i))
3634 return 0;
3635 break;
3637 case 'E':
3638 /* Two vectors must have the same length. */
3639 if (XVECLEN (x, i) != XVECLEN (y, i))
3640 return 0;
3642 /* And the corresponding elements must match. */
3643 for (j = 0; j < XVECLEN (x, i); j++)
3644 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3645 XVECEXP (y, i, j)) == 0)
3646 return 0;
3647 break;
3649 case 'e':
3650 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3651 return 0;
3652 break;
3654 case 's':
3655 if (strcmp (XSTR (x, i), XSTR (y, i)))
3656 return 0;
3657 break;
3659 case 'u':
3660 /* These are just backpointers, so they don't matter. */
3661 break;
3663 case '0':
3664 break;
3666 /* It is believed that rtx's at this level will never
3667 contain anything but integers and other rtx's,
3668 except for within LABEL_REFs and SYMBOL_REFs. */
3669 default:
3670 abort ();
3673 return 1;
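/* For illustration (register numbers invented):
   (plus:SI (reg:SI 100) (reg:SI 101)) and
   (plus:SI (reg:SI 101) (reg:SI 100)) are treated as equal here because
   PLUS is commutative, whereas rtx_equal_p would treat them as
   different; MINUS is not commutative, so its operands are only
   compared in order. */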
3676 /* Remove constant addition value from the expression X (when present)
3677 and return it. */
3679 static HOST_WIDE_INT
3680 remove_constant_addition (x)
3681 rtx *x;
3683 HOST_WIDE_INT addval = 0;
3684 rtx exp = *x;
3686 if (GET_CODE (exp) == CONST)
3687 exp = XEXP (exp, 0);
3688 if (GET_CODE (exp) == CONST_INT)
3690 addval = INTVAL (exp);
3691 *x = const0_rtx;
3694 /* For a PLUS expression, recurse on both operands. */
3695 else if (GET_CODE (exp) == PLUS)
3697 addval += remove_constant_addition (&XEXP (exp, 0));
3698 addval += remove_constant_addition (&XEXP (exp, 1));
3700 /* If either operand turned out to be constant, remove the extra zero
3701 left in the expression. */
3702 if (XEXP (exp, 0) == const0_rtx)
3703 *x = XEXP (exp, 1);
3704 else if (XEXP (exp, 1) == const0_rtx)
3705 *x = XEXP (exp, 0);
3708 return addval;
3711 /* Attempt to identify accesses to arrays that are most likely to cause cache
3712 misses, and emit prefetch instructions a few prefetch blocks forward.
3714 To detect the arrays we use the GIV information that was collected by the
3715 strength reduction pass.
3717 The prefetch instructions are generated after the GIV information is done
3718 and before the strength reduction process. The new GIVs are injected into
3719 the strength reduction tables, so the prefetch addresses are optimized as
3720 well.
3722 GIVs are split into base address, stride, and constant addition values.
3723 GIVs with the same address, stride and close addition values are combined
3724 into a single prefetch. Also writes to GIVs are detected, so that prefetch
3725 for write instructions can be used for the block we write to, on machines
3726 that support write prefetches.
3728 Several heuristics are used to determine when to prefetch. They are
3729 controlled by defined symbols that can be overridden for each target. */
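/* As a hedged illustration of the splitting described above: an address giv
   of the form BASE + I * STRIDE + 4 and another of the form
   BASE + I * STRIDE + 36 share the same base address and stride, and their
   constant additions differ by far less than the merge threshold used below,
   so they end up combined into a single prefetch stream.  */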
3731 static void
3732 emit_prefetch_instructions (loop)
3733 struct loop *loop;
3735 int num_prefetches = 0;
3736 int num_real_prefetches = 0;
3737 int num_real_write_prefetches = 0;
3738 int ahead;
3739 int i;
3740 struct iv_class *bl;
3741 struct induction *iv;
3742 struct prefetch_info info[MAX_PREFETCHES];
3743 struct loop_ivs *ivs = LOOP_IVS (loop);
3745 if (!HAVE_prefetch)
3746 return;
3748 /* Consider only loops without calls. When a call is made, the loop is
3749 probably slow enough for the memory reads to complete anyway. */
3750 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3752 if (loop_dump_stream)
3753 fprintf (loop_dump_stream, "Prefetch: ignoring loop - has call.\n");
3755 return;
3758 if (PREFETCH_NO_LOW_LOOPCNT
3759 && LOOP_INFO (loop)->n_iterations
3760 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3762 if (loop_dump_stream)
3763 fprintf (loop_dump_stream,
3764 "Prefetch: ignoring loop - not enought iterations.\n");
3765 return;
3768 /* Search all induction variables and pick those interesting for the prefetch
3769 machinery. */
3770 for (bl = ivs->list; bl; bl = bl->next)
3772 struct induction *biv = bl->biv, *biv1;
3773 int basestride = 0;
3775 biv1 = biv;
3777 /* Expect all BIVs to be executed in each iteration. This makes our
3778 analysis more conservative. */
3779 while (biv1)
3781 /* Discard non-constant additions that we can't handle well yet, and
3782 BIVs that are executed multiple times; such BIVs ought to be
3783 handled in the nested loop. We accept not_every_iteration BIVs,
3784 since these only result in larger strides and make our
3785 heuristics more conservative.
3786 ??? What does the last sentence mean? */
3787 if (GET_CODE (biv->add_val) != CONST_INT)
3789 if (loop_dump_stream)
3791 fprintf (loop_dump_stream,
3792 "Prefetch: biv %i ignored: non-constant addition at insn %i:",
3793 REGNO (biv->src_reg), INSN_UID (biv->insn));
3794 print_rtl (loop_dump_stream, biv->add_val);
3795 fprintf (loop_dump_stream, "\n");
3797 break;
3800 if (biv->maybe_multiple)
3802 if (loop_dump_stream)
3804 fprintf (loop_dump_stream,
3805 "Prefetch: biv %i ignored: maybe_multiple at insn %i:",
3806 REGNO (biv->src_reg), INSN_UID (biv->insn));
3807 print_rtl (loop_dump_stream, biv->add_val);
3808 fprintf (loop_dump_stream, "\n");
3810 break;
3813 basestride += INTVAL (biv1->add_val);
3814 biv1 = biv1->next_iv;
3817 if (biv1 || !basestride)
3818 continue;
3820 for (iv = bl->giv; iv; iv = iv->next_iv)
3822 rtx address;
3823 rtx temp;
3824 HOST_WIDE_INT index = 0;
3825 int add = 1;
3826 HOST_WIDE_INT stride;
3827 struct check_store_data d;
3828 int size = GET_MODE_SIZE (GET_MODE (iv));
3830 /* There are several reasons why an induction variable is not
3831 interesting to us. */
3832 if (iv->giv_type != DEST_ADDR
3833 /* We are interested only in constant stride memory references
3834 in order to be able to compute density easily. */
3835 || GET_CODE (iv->mult_val) != CONST_INT
3836 /* Don't handle reversed order prefetches, since they are usually
3837 ineffective. Later we may be able to reverse such BIVs. */
3838 || (PREFETCH_NO_REVERSE_ORDER
3839 && (stride = INTVAL (iv->mult_val) * basestride) < 0)
3840 /* Prefetching of accesses with such an extreme stride is probably
3841 not worthwhile, either. */
3842 || (PREFETCH_NO_EXTREME_STRIDE
3843 && stride > PREFETCH_EXTREME_STRIDE)
3844 /* Ignore GIVs with varying add values; we can't predict the
3845 value for the next iteration. */
3846 || !loop_invariant_p (loop, iv->add_val)
3847 /* Ignore GIVs in the nested loops; they ought to have been
3848 handled already. */
3849 || iv->maybe_multiple)
3851 if (loop_dump_stream)
3852 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %i\n",
3853 INSN_UID (iv->insn));
3854 continue;
3857 /* Determine the pointer to the basic array we are examining. It is
3858 the sum of the BIV's initial value and the GIV's add_val. */
3859 index = 0;
3861 address = copy_rtx (iv->add_val);
3862 temp = copy_rtx (bl->initial_value);
3864 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3865 index = remove_constant_addition (&address);
3867 index += size;
3868 d.mem_write = 0;
3869 d.mem_address = *iv->location;
3871 /* When the GIV is not always executed, we might be better off by
3872 not dirtying the cache pages. */
3873 if (PREFETCH_NOT_ALWAYS || iv->always_executed)
3874 note_stores (PATTERN (iv->insn), check_store, &d);
3876 /* Attempt to find another prefetch to the same array and see if we
3877 can merge this one. */
3878 for (i = 0; i < num_prefetches; i++)
3879 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3880 && stride == info[i].stride)
3882 /* If both access the same array (the same location, just with a
3883 small difference in the constant indexes), merge the prefetches.
3884 Just do the later one; the earlier one will get prefetched by the
3885 previous iteration.
3886 4096 is an arbitrary threshold. It should not be too small, but
3887 also not bigger than the small portion of memory usually traversed
3888 by a single loop. */
3889 if (index >= info[i].index && index - info[i].index < 4096)
3891 info[i].write |= d.mem_write;
3892 info[i].bytes_accesed += size;
3893 info[i].index = index;
3894 info[i].giv = iv;
3895 info[i].class = bl;
3896 info[i].base_address = address;
3897 add = 0;
3898 break;
3901 if (index < info[i].index && info[i].index - index < 4096)
3903 info[i].write |= d.mem_write;
3904 info[i].bytes_accesed += size;
3905 add = 0;
3906 break;
3910 /* Merging failed. */
3911 if (add)
3913 info[num_prefetches].giv = iv;
3914 info[num_prefetches].class = bl;
3915 info[num_prefetches].index = index;
3916 info[num_prefetches].stride = stride;
3917 info[num_prefetches].base_address = address;
3918 info[num_prefetches].write = d.mem_write;
3919 info[num_prefetches].bytes_accesed = size;
3920 num_prefetches++;
3921 if (num_prefetches >= MAX_PREFETCHES)
3923 if (loop_dump_stream)
3924 fprintf (loop_dump_stream,
3925 "Maximal number of prefetches exceeded.\n");
3926 return;
3932 for (i = 0; i < num_prefetches; i++)
3934 /* Attempt to calculate the number of bytes fetched by the loop.
3935 Avoid overflow. */
3936 if (LOOP_INFO (loop)->n_iterations
3937 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
3938 >= LOOP_INFO (loop)->n_iterations))
3939 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
3940 else
3941 info[i].total_bytes = 0xffffffff;
3943 /* Prefetch is worthwhile only when the loads/stores are dense. */
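/* The test below scales bytes_accesed by 256 and divides by the stride, so
   it is roughly the fraction of each stride (in 1/256 units) actually
   touched by the recorded accesses, compared against PREFETCH_DENSE_MEM.  */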
3944 if (PREFETCH_ONLY_DENSE_MEM
3945 && info[i].bytes_accesed * 256 / info[i].stride > PREFETCH_DENSE_MEM
3946 && (info[i].total_bytes / PREFETCH_BLOCK
3947 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
3949 info[i].prefetch_before_loop = 1;
3950 info[i].prefetch_in_loop
3951 = (info[i].total_bytes / PREFETCH_BLOCK
3952 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
3954 else
3955 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
3957 if (info[i].prefetch_in_loop)
3959 num_real_prefetches += ((info[i].stride + PREFETCH_BLOCK - 1)
3960 / PREFETCH_BLOCK);
3961 if (info[i].write)
3962 num_real_write_prefetches
3963 += (info[i].stride + PREFETCH_BLOCK - 1) / PREFETCH_BLOCK;
3967 if (loop_dump_stream)
3969 for (i = 0; i < num_prefetches; i++)
3971 fprintf (loop_dump_stream, "Prefetch insn %i address: ",
3972 INSN_UID (info[i].giv->insn));
3973 print_rtl (loop_dump_stream, info[i].base_address);
3974 fprintf (loop_dump_stream, " Index: ");
3975 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
3976 fprintf (loop_dump_stream, " stride: ");
3977 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
3978 fprintf (loop_dump_stream,
3979 " density: %i%% total_bytes: %u%sin loop: %s before: %s\n",
3980 (int) (info[i].bytes_accesed * 100 / info[i].stride),
3981 info[i].total_bytes,
3982 info[i].write ? " read/write " : " read only ",
3983 info[i].prefetch_in_loop ? "yes" : "no",
3984 info[i].prefetch_before_loop ? "yes" : "no");
3987 fprintf (loop_dump_stream, "Real prefetches needed: %i (write: %i)\n",
3988 num_real_prefetches, num_real_write_prefetches);
3991 if (!num_real_prefetches)
3992 return;
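/* AHEAD is the number of prefetch blocks we try to stay in front of each
   access stream.  The target's simultaneous-prefetch capacity is divided
   among the streams this loop needs; e.g. a capacity of 4 with 2 streams
   gives AHEAD == 2, while more streams than capacity yields 0 and we emit
   no prefetches at all.  */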
3994 ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches;
3996 if (!ahead)
3997 return;
3999 for (i = 0; i < num_prefetches; i++)
4001 if (info[i].prefetch_in_loop)
4003 int y;
4005 for (y = 0; y < ((info[i].stride + PREFETCH_BLOCK - 1)
4006 / PREFETCH_BLOCK); y++)
4008 rtx loc = copy_rtx (*info[i].giv->location);
4009 rtx insn;
4010 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4011 rtx before_insn = info[i].giv->insn;
4012 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4014 /* We can save some effort by offsetting the address on
4015 architectures with offsettable memory references. */
4016 if (offsettable_address_p (0, VOIDmode, loc))
4017 loc = plus_constant (loc, bytes_ahead);
4018 else
4020 rtx reg = gen_reg_rtx (Pmode);
4021 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4022 GEN_INT (bytes_ahead), reg,
4023 0, before_insn);
4024 loc = reg;
4027 /* Make sure the address operand is valid for prefetch. */
4028 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4029 (loc,
4030 insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4031 loc = force_reg (Pmode, loc);
4032 emit_insn_before (gen_prefetch (loc, GEN_INT (info[i].write),
4033 GEN_INT (3)),
4034 before_insn);
4036 /* Check all insns emitted and record the new GIV
4037 information. */
4038 insn = NEXT_INSN (prev_insn);
4039 while (insn != before_insn)
4041 insn = check_insn_for_givs (loop, insn,
4042 info[i].giv->always_executed,
4043 info[i].giv->maybe_multiple);
4044 insn = NEXT_INSN (insn);
4049 if (info[i].prefetch_before_loop)
4051 int y;
4053 /* Emit INSNs before the loop to fetch the first cache lines. */
4054 for (y = 0;
4055 (!info[i].prefetch_in_loop || y < ahead)
4056 && y * PREFETCH_BLOCK < (int) info[i].total_bytes; y ++)
4058 rtx reg = gen_reg_rtx (Pmode);
4059 rtx loop_start = loop->start;
4060 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4061 info[i].giv->add_val,
4062 GEN_INT (y * PREFETCH_BLOCK));
4064 loop_iv_add_mult_emit_before (loop, info[i].class->initial_value,
4065 info[i].giv->mult_val,
4066 add_val, reg, 0, loop_start);
4067 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4068 GEN_INT (3)),
4069 loop_start);
4074 return;
4077 /* A "basic induction variable" or biv is a pseudo reg that is set
4078 (within this loop) only by incrementing or decrementing it. */
4079 /* A "general induction variable" or giv is a pseudo reg whose
4080 value is a linear function of a biv. */
4082 /* Bivs are recognized by `basic_induction_var';
4083 Givs by `general_induction_var'. */
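/* A purely illustrative example: in a loop such as
   "for (i = 0; i < n; i++) a[i] = 0;" the counter I is a biv (it is only
   ever incremented), while the address of a[i], i.e. A + I * 4 assuming
   4-byte elements, is a linear function of that biv and hence a giv.  */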
4085 /* Communication with routines called via `note_stores'. */
4087 static rtx note_insn;
4089 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4091 static rtx addr_placeholder;
4093 /* ??? Unfinished optimizations, and possible future optimizations,
4094 for the strength reduction code. */
4096 /* ??? The interaction of biv elimination, and recognition of 'constant'
4097 bivs, may cause problems. */
4099 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4100 performance problems.
4102 Perhaps don't eliminate things that can be combined with an addressing
4103 mode. Find all givs that have the same biv, mult_val, and add_val;
4104 then for each giv, check to see if its only use dies in a following
4105 memory address. If so, generate a new memory address and check to see
4106 if it is valid. If it is valid, then store the modified memory address,
4107 otherwise, mark the giv as not done so that it will get its own iv. */
4109 /* ??? Could try to optimize branches when it is known that a biv is always
4110 positive. */
4112 /* ??? When replacing a biv in a compare insn, we should replace it with the
4113 closest giv so that an optimized branch can still be recognized by the
4114 combiner, e.g. the VAX acb insn. */
4116 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4117 was rerun in loop_optimize whenever a register was added or moved.
4118 Also, some of the optimizations could be a little less conservative. */
4120 /* Scan the loop body and call FNCALL for each insn. In addition to the
4121 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4122 callback.
4124 NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
4125 for every loop iteration except for the last one.
4127 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
4128 every loop iteration.
4130 void
4131 for_each_insn_in_loop (loop, fncall)
4132 struct loop *loop;
4133 loop_insn_callback fncall;
4135 /* This is 1 if current insn is not executed at least once for every loop
4136 iteration. */
4137 int not_every_iteration = 0;
4138 int maybe_multiple = 0;
4139 int past_loop_latch = 0;
4140 int loop_depth = 0;
4141 rtx p;
4143 /* If loop_scan_start points to the loop exit test, we have to be wary of
4144 subversive use of gotos inside expression statements. */
4145 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4146 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4148 /* Scan through loop to find all possible bivs. */
4150 for (p = next_insn_in_loop (loop, loop->scan_start);
4151 p != NULL_RTX;
4152 p = next_insn_in_loop (loop, p))
4154 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4156 /* Past CODE_LABEL, we get to insns that may be executed multiple
4157 times. The only way we can be sure that they can't is if every
4158 jump insn between here and the end of the loop either
4159 returns, exits the loop, is a jump to a location that is still
4160 behind the label, or is a jump to the loop start. */
4162 if (GET_CODE (p) == CODE_LABEL)
4164 rtx insn = p;
4166 maybe_multiple = 0;
4168 while (1)
4170 insn = NEXT_INSN (insn);
4171 if (insn == loop->scan_start)
4172 break;
4173 if (insn == loop->end)
4175 if (loop->top != 0)
4176 insn = loop->top;
4177 else
4178 break;
4179 if (insn == loop->scan_start)
4180 break;
4183 if (GET_CODE (insn) == JUMP_INSN
4184 && GET_CODE (PATTERN (insn)) != RETURN
4185 && (!any_condjump_p (insn)
4186 || (JUMP_LABEL (insn) != 0
4187 && JUMP_LABEL (insn) != loop->scan_start
4188 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4190 maybe_multiple = 1;
4191 break;
4196 /* Past a jump, we get to insns for which we can't count
4197 on whether they will be executed during each iteration. */
4198 /* This code appears twice in strength_reduce. There is also similar
4199 code in scan_loop. */
4200 if (GET_CODE (p) == JUMP_INSN
4201 /* If we enter the loop in the middle, and scan around to the
4202 beginning, don't set not_every_iteration for that.
4203 This can be any kind of jump, since we want to know if insns
4204 will be executed if the loop is executed. */
4205 && !(JUMP_LABEL (p) == loop->top
4206 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4207 && any_uncondjump_p (p))
4208 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4210 rtx label = 0;
4212 /* If this is a jump outside the loop, then it also doesn't
4213 matter. Check to see if the target of this branch is on the
4214 loop->exit_labels list. */
4216 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4217 if (XEXP (label, 0) == JUMP_LABEL (p))
4218 break;
4220 if (!label)
4221 not_every_iteration = 1;
4224 else if (GET_CODE (p) == NOTE)
4226 /* At the virtual top of a converted loop, insns are again known to
4227 be executed each iteration: logically, the loop begins here
4228 even though the exit code has been duplicated.
4230 Insns are also again known to be executed each iteration at
4231 the LOOP_CONT note. */
4232 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4233 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4234 && loop_depth == 0)
4235 not_every_iteration = 0;
4236 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4237 loop_depth++;
4238 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4239 loop_depth--;
4242 /* Note if we pass a loop latch. If we do, then we cannot clear
4243 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4244 a loop since a jump before the last CODE_LABEL may have started
4245 a new loop iteration.
4247 Note that LOOP_TOP is only set for rotated loops and we need
4248 this check for all loops, so compare against the CODE_LABEL
4249 which immediately follows LOOP_START. */
4250 if (GET_CODE (p) == JUMP_INSN
4251 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4252 past_loop_latch = 1;
4254 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4255 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4256 or not an insn is known to be executed each iteration of the
4257 loop, whether or not any iterations are known to occur.
4259 Therefore, if we have just passed a label and have no more labels
4260 between here and the test insn of the loop, and we have not passed
4261 a jump to the top of the loop, then we know these insns will be
4262 executed each iteration. */
4264 if (not_every_iteration
4265 && !past_loop_latch
4266 && GET_CODE (p) == CODE_LABEL
4267 && no_labels_between_p (p, loop->end)
4268 && loop_insn_first_p (p, loop->cont))
4269 not_every_iteration = 0;
4273 static void
4274 loop_bivs_find (loop)
4275 struct loop *loop;
4277 struct loop_regs *regs = LOOP_REGS (loop);
4278 struct loop_ivs *ivs = LOOP_IVS (loop);
4279 /* Temporary list pointers for traversing ivs->list. */
4280 struct iv_class *bl, **backbl;
4282 ivs->list = 0;
4284 for_each_insn_in_loop (loop, check_insn_for_bivs);
4286 /* Scan ivs->list to remove all regs that proved not to be bivs.
4287 Make a sanity check against regs->n_times_set. */
4288 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4290 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4291 /* Above happens if register modified by subreg, etc. */
4292 /* Make sure it is not recognized as a basic induction var: */
4293 || regs->array[bl->regno].n_times_set != bl->biv_count
4294 /* If never incremented, it is invariant that we decided not to
4295 move. So leave it alone. */
4296 || ! bl->incremented)
4298 if (loop_dump_stream)
4299 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4300 bl->regno,
4301 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4302 ? "not induction variable"
4303 : (! bl->incremented ? "never incremented"
4304 : "count error")));
4306 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4307 *backbl = bl->next;
4309 else
4311 backbl = &bl->next;
4313 if (loop_dump_stream)
4314 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4320 /* Determine how BIVS are initialised by looking through the pre-header
4321 extended basic block. */
4322 static void
4323 loop_bivs_init_find (loop)
4324 struct loop *loop;
4326 struct loop_ivs *ivs = LOOP_IVS (loop);
4327 /* Temporary list pointers for traversing ivs->list. */
4328 struct iv_class *bl;
4329 int call_seen;
4330 rtx p;
4332 /* Find initial value for each biv by searching backwards from loop_start,
4333 halting at first label. Also record any test condition. */
4335 call_seen = 0;
4336 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4338 rtx test;
4340 note_insn = p;
4342 if (GET_CODE (p) == CALL_INSN)
4343 call_seen = 1;
4345 if (INSN_P (p))
4346 note_stores (PATTERN (p), record_initial, ivs);
4348 /* Record any test of a biv that branches around the loop if there is no
4349 store between it and the start of the loop. We only care about tests
4350 with constants and registers, and only certain of those. */
4351 if (GET_CODE (p) == JUMP_INSN
4352 && JUMP_LABEL (p) != 0
4353 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4354 && (test = get_condition_for_loop (loop, p)) != 0
4355 && GET_CODE (XEXP (test, 0)) == REG
4356 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4357 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4358 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4359 && bl->init_insn == 0)
4361 /* If an NE test, we have an initial value! */
4362 if (GET_CODE (test) == NE)
4364 bl->init_insn = p;
4365 bl->init_set = gen_rtx_SET (VOIDmode,
4366 XEXP (test, 0), XEXP (test, 1));
4368 else
4369 bl->initial_test = test;
4375 /* Look at each biv and see if we can say anything better about its
4376 initial value from any initializing insns set up above. (This is done
4377 in two passes to avoid missing SETs in a PARALLEL.) */
4378 static void
4379 loop_bivs_check (loop)
4380 struct loop *loop;
4382 struct loop_ivs *ivs = LOOP_IVS (loop);
4383 /* Temporary list pointers for traversing ivs->list. */
4384 struct iv_class *bl;
4385 struct iv_class **backbl;
4387 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4389 rtx src;
4390 rtx note;
4392 if (! bl->init_insn)
4393 continue;
4395 /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4396 is a constant, use the value of that. */
4397 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4398 && CONSTANT_P (XEXP (note, 0)))
4399 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4400 && CONSTANT_P (XEXP (note, 0))))
4401 src = XEXP (note, 0);
4402 else
4403 src = SET_SRC (bl->init_set);
4405 if (loop_dump_stream)
4406 fprintf (loop_dump_stream,
4407 "Biv %d: initialized at insn %d: initial value ",
4408 bl->regno, INSN_UID (bl->init_insn));
4410 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4411 || GET_MODE (src) == VOIDmode)
4412 && valid_initial_value_p (src, bl->init_insn,
4413 LOOP_INFO (loop)->pre_header_has_call,
4414 loop->start))
4416 bl->initial_value = src;
4418 if (loop_dump_stream)
4420 print_simple_rtl (loop_dump_stream, src);
4421 fputc ('\n', loop_dump_stream);
4424 /* If we can't make it a giv,
4425 let the biv keep its initial value of "itself". */
4426 else if (loop_dump_stream)
4427 fprintf (loop_dump_stream, "is complex\n");
4432 /* Search the loop for general induction variables. */
4434 static void
4435 loop_givs_find (loop)
4436 struct loop* loop;
4438 for_each_insn_in_loop (loop, check_insn_for_givs);
4442 /* For each giv for which we still don't know whether or not it is
4443 replaceable, check to see if it is replaceable because its final value
4444 can be calculated. */
4446 static void
4447 loop_givs_check (loop)
4448 struct loop *loop;
4450 struct loop_ivs *ivs = LOOP_IVS (loop);
4451 struct iv_class *bl;
4453 for (bl = ivs->list; bl; bl = bl->next)
4455 struct induction *v;
4457 for (v = bl->giv; v; v = v->next_iv)
4458 if (! v->replaceable && ! v->not_replaceable)
4459 check_final_value (loop, v);
4464 /* Return non-zero if it is possible to eliminate the biv BL provided
4465 all givs are reduced. This is possible if either the reg is not
4466 used outside the loop, or we can compute what its final value will
4467 be. */
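/* For instance, a counter register that is referenced only through array
   addresses inside the loop (all of which become reduced givs) and is never
   read after the loop can be eliminated, while a counter whose value is
   needed after the loop can be eliminated only if its final value is
   computable.  */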
4469 static int
4470 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4471 struct loop *loop;
4472 struct iv_class *bl;
4473 int threshold;
4474 int insn_count;
4476 /* For architectures with a decrement_and_branch_until_zero insn,
4477 don't do this if we put a REG_NONNEG note on the endtest for this
4478 biv. */
4480 #ifdef HAVE_decrement_and_branch_until_zero
4481 if (bl->nonneg)
4483 if (loop_dump_stream)
4484 fprintf (loop_dump_stream,
4485 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4486 return 0;
4488 #endif
4490 /* Check that biv is used outside loop or if it has a final value.
4491 Compare against bl->init_insn rather than loop->start. We aren't
4492 concerned with any uses of the biv between init_insn and
4493 loop->start since these won't be affected by the value of the biv
4494 elsewhere in the function, so long as init_insn doesn't use the
4495 biv itself. */
4497 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4498 && bl->init_insn
4499 && INSN_UID (bl->init_insn) < max_uid_for_loop
4500 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4501 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4502 || (bl->final_value = final_biv_value (loop, bl)))
4503 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4505 if (loop_dump_stream)
4507 fprintf (loop_dump_stream,
4508 "Cannot eliminate biv %d.\n",
4509 bl->regno);
4510 fprintf (loop_dump_stream,
4511 "First use: insn %d, last use: insn %d.\n",
4512 REGNO_FIRST_UID (bl->regno),
4513 REGNO_LAST_UID (bl->regno));
4515 return 0;
4519 /* Reduce each giv of BL that we have decided to reduce. */
4521 static void
4522 loop_givs_reduce (loop, bl)
4523 struct loop *loop;
4524 struct iv_class *bl;
4526 struct induction *v;
4528 for (v = bl->giv; v; v = v->next_iv)
4530 struct induction *tv;
4531 if (! v->ignore && v->same == 0)
4533 int auto_inc_opt = 0;
4535 /* If the code for derived givs immediately below has already
4536 allocated a new_reg, we must keep it. */
4537 if (! v->new_reg)
4538 v->new_reg = gen_reg_rtx (v->mode);
4540 #ifdef AUTO_INC_DEC
4541 /* If the target has auto-increment addressing modes, and
4542 this is an address giv, then try to put the increment
4543 immediately after its use, so that flow can create an
4544 auto-increment addressing mode. */
4545 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4546 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4547 /* We don't handle reversed biv's because bl->biv->insn
4548 does not have a valid INSN_LUID. */
4549 && ! bl->reversed
4550 && v->always_executed && ! v->maybe_multiple
4551 && INSN_UID (v->insn) < max_uid_for_loop)
4553 /* If other giv's have been combined with this one, then
4554 this will work only if all uses of the other giv's occur
4555 before this giv's insn. This is difficult to check.
4557 We simplify this by looking for the common case where
4558 there is one DEST_REG giv, and this giv's insn is the
4559 last use of the dest_reg of that DEST_REG giv. If the
4560 increment occurs after the address giv, then we can
4561 perform the optimization. (Otherwise, the increment
4562 would have to go before other_giv, and we would not be
4563 able to combine it with the address giv to get an
4564 auto-inc address.) */
4565 if (v->combined_with)
4567 struct induction *other_giv = 0;
4569 for (tv = bl->giv; tv; tv = tv->next_iv)
4570 if (tv->same == v)
4572 if (other_giv)
4573 break;
4574 else
4575 other_giv = tv;
4577 if (! tv && other_giv
4578 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4579 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4580 == INSN_UID (v->insn))
4581 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4582 auto_inc_opt = 1;
4584 /* Check for case where increment is before the address
4585 giv. Do this test in "loop order". */
4586 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4587 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4588 || (INSN_LUID (bl->biv->insn)
4589 > INSN_LUID (loop->scan_start))))
4590 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4591 && (INSN_LUID (loop->scan_start)
4592 < INSN_LUID (bl->biv->insn))))
4593 auto_inc_opt = -1;
4594 else
4595 auto_inc_opt = 1;
4597 #ifdef HAVE_cc0
4599 rtx prev;
4601 /* We can't put an insn immediately after one setting
4602 cc0, or immediately before one using cc0. */
4603 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4604 || (auto_inc_opt == -1
4605 && (prev = prev_nonnote_insn (v->insn)) != 0
4606 && INSN_P (prev)
4607 && sets_cc0_p (PATTERN (prev))))
4608 auto_inc_opt = 0;
4610 #endif
4612 if (auto_inc_opt)
4613 v->auto_inc_opt = 1;
4615 #endif
4617 /* For each place where the biv is incremented, add an insn
4618 to increment the new, reduced reg for the giv. */
4619 for (tv = bl->biv; tv; tv = tv->next_iv)
4621 rtx insert_before;
4623 if (! auto_inc_opt)
4624 insert_before = tv->insn;
4625 else if (auto_inc_opt == 1)
4626 insert_before = NEXT_INSN (v->insn);
4627 else
4628 insert_before = v->insn;
4630 if (tv->mult_val == const1_rtx)
4631 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4632 v->new_reg, v->new_reg,
4633 0, insert_before);
4634 else /* tv->mult_val == const0_rtx */
4635 /* A multiply is acceptable here
4636 since this is presumed to be seldom executed. */
4637 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4638 v->add_val, v->new_reg,
4639 0, insert_before);
4642 /* Add code at loop start to initialize giv's reduced reg. */
4644 loop_iv_add_mult_hoist (loop,
4645 extend_value_for_giv (v, bl->initial_value),
4646 v->mult_val, v->add_val, v->new_reg);
4652 /* Check for givs whose first use is their definition and whose
4653 last use is the definition of another giv. If so, it is likely
4654 dead and should not be used to derive another giv nor to
4655 eliminate a biv. */
4657 static void
4658 loop_givs_dead_check (loop, bl)
4659 struct loop *loop ATTRIBUTE_UNUSED;
4660 struct iv_class *bl;
4662 struct induction *v;
4664 for (v = bl->giv; v; v = v->next_iv)
4666 if (v->ignore
4667 || (v->same && v->same->ignore))
4668 continue;
4670 if (v->giv_type == DEST_REG
4671 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4673 struct induction *v1;
4675 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4676 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4677 v->maybe_dead = 1;
4683 static void
4684 loop_givs_rescan (loop, bl, reg_map)
4685 struct loop *loop;
4686 struct iv_class *bl;
4687 rtx *reg_map;
4689 struct induction *v;
4691 for (v = bl->giv; v; v = v->next_iv)
4693 if (v->same && v->same->ignore)
4694 v->ignore = 1;
4696 if (v->ignore)
4697 continue;
4699 /* Update expression if this was combined, in case other giv was
4700 replaced. */
4701 if (v->same)
4702 v->new_reg = replace_rtx (v->new_reg,
4703 v->same->dest_reg, v->same->new_reg);
4705 /* See if this register is known to be a pointer to something. If
4706 so, see if we can find the alignment. First see if there is a
4707 destination register that is a pointer. If so, this shares the
4708 alignment too. Next see if we can deduce anything from the
4709 computational information. If not, and this is a DEST_ADDR
4710 giv, at least we know that it's a pointer, though we don't know
4711 the alignment. */
4712 if (GET_CODE (v->new_reg) == REG
4713 && v->giv_type == DEST_REG
4714 && REG_POINTER (v->dest_reg))
4715 mark_reg_pointer (v->new_reg,
4716 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4717 else if (GET_CODE (v->new_reg) == REG
4718 && REG_POINTER (v->src_reg))
4720 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4722 if (align == 0
4723 || GET_CODE (v->add_val) != CONST_INT
4724 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4725 align = 0;
4727 mark_reg_pointer (v->new_reg, align);
4729 else if (GET_CODE (v->new_reg) == REG
4730 && GET_CODE (v->add_val) == REG
4731 && REG_POINTER (v->add_val))
4733 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4735 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4736 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4737 align = 0;
4739 mark_reg_pointer (v->new_reg, align);
4741 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4742 mark_reg_pointer (v->new_reg, 0);
4744 if (v->giv_type == DEST_ADDR)
4745 /* Store reduced reg as the address in the memref where we found
4746 this giv. */
4747 validate_change (v->insn, v->location, v->new_reg, 0);
4748 else if (v->replaceable)
4750 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4752 else
4754 /* Not replaceable; emit an insn to set the original giv reg from
4755 the reduced giv, same as above. */
4756 loop_insn_emit_after (loop, 0, v->insn,
4757 gen_move_insn (v->dest_reg, v->new_reg));
4760 /* When a loop is reversed, givs which depend on the reversed
4761 biv, and which are live outside the loop, must be set to their
4762 correct final value. This insn is only needed if the giv is
4763 not replaceable. The correct final value is the same as the
4764 value that the giv starts the reversed loop with. */
4765 if (bl->reversed && ! v->replaceable)
4766 loop_iv_add_mult_sink (loop,
4767 extend_value_for_giv (v, bl->initial_value),
4768 v->mult_val, v->add_val, v->dest_reg);
4769 else if (v->final_value)
4770 loop_insn_sink_or_swim (loop,
4771 gen_move_insn (v->dest_reg, v->final_value));
4773 if (loop_dump_stream)
4775 fprintf (loop_dump_stream, "giv at %d reduced to ",
4776 INSN_UID (v->insn));
4777 print_simple_rtl (loop_dump_stream, v->new_reg);
4778 fprintf (loop_dump_stream, "\n");
4784 static int
4785 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4786 struct loop *loop ATTRIBUTE_UNUSED;
4787 struct iv_class *bl;
4788 struct induction *v;
4789 rtx test_reg;
4791 int add_cost;
4792 int benefit;
4794 benefit = v->benefit;
4795 PUT_MODE (test_reg, v->mode);
4796 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4797 test_reg, test_reg);
4799 /* Reduce benefit if not replaceable, since we will insert a
4800 move-insn to replace the insn that calculates this giv. Don't do
4801 this unless the giv is a user variable, since it will often be
4802 marked non-replaceable because of the duplication of the exit
4803 code outside the loop. In such a case, the copies we insert are
4804 dead and will be deleted. So they don't have a cost. Similar
4805 situations exist. */
4806 /* ??? The new final_[bg]iv_value code does a much better job of
4807 finding replaceable giv's, and hence this code may no longer be
4808 necessary. */
4809 if (! v->replaceable && ! bl->eliminable
4810 && REG_USERVAR_P (v->dest_reg))
4811 benefit -= copy_cost;
4813 /* Decrease the benefit to count the add-insns that we will insert
4814 to increment the reduced reg for the giv. ??? This can
4815 overestimate the run-time cost of the additional insns, e.g. if
4816 there are multiple basic blocks that increment the biv, but only
4817 one of these blocks is executed during each iteration. There is
4818 no good way to detect cases like this with the current structure
4819 of the loop optimizer. This code is more accurate for
4820 determining code size than run-time benefits. */
4821 benefit -= add_cost * bl->biv_count;
4823 /* Decide whether to strength-reduce this giv or to leave the code
4824 unchanged (recompute it from the biv each time it is used). This
4825 decision can be made independently for each giv. */
4827 #ifdef AUTO_INC_DEC
4828 /* Attempt to guess whether autoincrement will handle some of the
4829 new add insns; if so, increase BENEFIT (undo the subtraction of
4830 add_cost that was done above). */
4831 if (v->giv_type == DEST_ADDR
4832 /* Increasing the benefit is risky, since this is only a guess.
4833 Avoid increasing register pressure in cases where there would
4834 be no other benefit from reducing this giv. */
4835 && benefit > 0
4836 && GET_CODE (v->mult_val) == CONST_INT)
4838 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4840 if (HAVE_POST_INCREMENT
4841 && INTVAL (v->mult_val) == size)
4842 benefit += add_cost * bl->biv_count;
4843 else if (HAVE_PRE_INCREMENT
4844 && INTVAL (v->mult_val) == size)
4845 benefit += add_cost * bl->biv_count;
4846 else if (HAVE_POST_DECREMENT
4847 && -INTVAL (v->mult_val) == size)
4848 benefit += add_cost * bl->biv_count;
4849 else if (HAVE_PRE_DECREMENT
4850 && -INTVAL (v->mult_val) == size)
4851 benefit += add_cost * bl->biv_count;
4853 #endif
4855 return benefit;
4859 /* Free IV structures for LOOP. */
4861 static void
4862 loop_ivs_free (loop)
4863 struct loop *loop;
4865 struct loop_ivs *ivs = LOOP_IVS (loop);
4866 struct iv_class *iv = ivs->list;
4868 free (ivs->regs);
4870 while (iv)
4872 struct iv_class *next = iv->next;
4873 struct induction *induction;
4874 struct induction *next_induction;
4876 for (induction = iv->biv; induction; induction = next_induction)
4878 next_induction = induction->next_iv;
4879 free (induction);
4881 for (induction = iv->giv; induction; induction = next_induction)
4883 next_induction = induction->next_iv;
4884 free (induction);
4887 free (iv);
4888 iv = next;
4893 /* Perform strength reduction and induction variable elimination.
4895 Pseudo registers created during this function will be beyond the
4896 last valid index in several tables including
4897 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4898 problem here, because the added registers cannot be givs outside of
4899 their loop, and hence will never be reconsidered. But scan_loop
4900 must check regnos to make sure they are in bounds. */
4902 static void
4903 strength_reduce (loop, flags)
4904 struct loop *loop;
4905 int flags;
4907 struct loop_info *loop_info = LOOP_INFO (loop);
4908 struct loop_regs *regs = LOOP_REGS (loop);
4909 struct loop_ivs *ivs = LOOP_IVS (loop);
4910 rtx p;
4911 /* Temporary list pointer for traversing ivs->list. */
4912 struct iv_class *bl;
4913 /* Ratio of extra register life span we can justify
4914 for saving an instruction. More if loop doesn't call subroutines
4915 since in that case saving an insn makes more difference
4916 and more registers are available. */
4917 /* ??? could set this to last value of threshold in move_movables */
4918 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
4919 /* Map of pseudo-register replacements. */
4920 rtx *reg_map = NULL;
4921 int reg_map_size;
4922 int unrolled_insn_copies = 0;
4923 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
4924 int insn_count = count_insns_in_loop (loop);
4926 addr_placeholder = gen_reg_rtx (Pmode);
4928 ivs->n_regs = max_reg_before_loop;
4929 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
4931 /* Find all BIVs in loop. */
4932 loop_bivs_find (loop);
4934 /* Exit if there are no bivs. */
4935 if (! ivs->list)
4937 /* We can still unroll the loop anyway, but indicate that there is no
4938 strength reduction info available. */
4939 if (flags & LOOP_UNROLL)
4940 unroll_loop (loop, insn_count, 0);
4942 loop_ivs_free (loop);
4943 return;
4946 /* Determine how BIVS are initialised by looking through the pre-header
4947 extended basic block. */
4948 loop_bivs_init_find (loop);
4950 /* Look at each biv and see if we can say anything better about its
4951 initial value from any initializing insns set up above. */
4952 loop_bivs_check (loop);
4954 /* Search the loop for general induction variables. */
4955 loop_givs_find (loop);
4957 /* Try to calculate and save the number of loop iterations. This is
4958 set to zero if the actual number cannot be calculated. This must
4959 be called after all giv's have been identified, since otherwise it may
4960 fail if the iteration variable is a giv. */
4961 loop_iterations (loop);
4963 #ifdef HAVE_prefetch
4964 if (flags & LOOP_PREFETCH)
4965 emit_prefetch_instructions (loop);
4966 #endif
4968 /* Now for each giv for which we still don't know whether or not it is
4969 replaceable, check to see if it is replaceable because its final value
4970 can be calculated. This must be done after loop_iterations is called,
4971 so that final_giv_value will work correctly. */
4972 loop_givs_check (loop);
4974 /* Try to prove that the loop counter variable (if any) is always
4975 nonnegative; if so, record that fact with a REG_NONNEG note
4976 so that "decrement and branch until zero" insn can be used. */
4977 check_dbra_loop (loop, insn_count);
4979 /* Create reg_map to hold substitutions for replaceable giv regs.
4980 Some givs might have been made from biv increments, so look at
4981 ivs->reg_iv_type for a suitable size. */
4982 reg_map_size = ivs->n_regs;
4983 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
4985 /* Examine each iv class for feasibility of strength reduction/induction
4986 variable elimination. */
4988 for (bl = ivs->list; bl; bl = bl->next)
4990 struct induction *v;
4991 int benefit;
4993 /* Test whether it will be possible to eliminate this biv
4994 provided all givs are reduced. */
4995 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
4997 /* This will be true at the end, if all givs which depend on this
4998 biv have been strength reduced.
4999 We can't (currently) eliminate the biv unless this is so. */
5000 bl->all_reduced = 1;
5002 /* Check each extension dependent giv in this class to see if its
5003 root biv is safe from wrapping in the interior mode. */
5004 check_ext_dependent_givs (bl, loop_info);
5006 /* Combine all giv's for this iv_class. */
5007 combine_givs (regs, bl);
5009 for (v = bl->giv; v; v = v->next_iv)
5011 struct induction *tv;
5013 if (v->ignore || v->same)
5014 continue;
5016 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5018 /* If an insn is not to be strength reduced, then set its ignore
5019 flag, and clear bl->all_reduced. */
5021 /* A giv that depends on a reversed biv must be reduced if it is
5022 used after the loop exit, otherwise, it would have the wrong
5023 value after the loop exit. To make it simple, just reduce all
5024 of such giv's whether or not we know they are used after the loop
5025 exit. */
5027 if (! flag_reduce_all_givs
5028 && v->lifetime * threshold * benefit < insn_count
5029 && ! bl->reversed)
5031 if (loop_dump_stream)
5032 fprintf (loop_dump_stream,
5033 "giv of insn %d not worth while, %d vs %d.\n",
5034 INSN_UID (v->insn),
5035 v->lifetime * threshold * benefit, insn_count);
5036 v->ignore = 1;
5037 bl->all_reduced = 0;
5039 else
5041 /* Check that we can increment the reduced giv without a
5042 multiply insn. If not, reject it. */
5044 for (tv = bl->biv; tv; tv = tv->next_iv)
5045 if (tv->mult_val == const1_rtx
5046 && ! product_cheap_p (tv->add_val, v->mult_val))
5048 if (loop_dump_stream)
5049 fprintf (loop_dump_stream,
5050 "giv of insn %d: would need a multiply.\n",
5051 INSN_UID (v->insn));
5052 v->ignore = 1;
5053 bl->all_reduced = 0;
5054 break;
5059 /* Check for givs whose first use is their definition and whose
5060 last use is the definition of another giv. If so, it is likely
5061 dead and should not be used to derive another giv nor to
5062 eliminate a biv. */
5063 loop_givs_dead_check (loop, bl);
5065 /* Reduce each giv that we decided to reduce. */
5066 loop_givs_reduce (loop, bl);
5068 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5069 as not reduced.
5071 For each giv register that can be reduced now: if replaceable,
5072 substitute reduced reg wherever the old giv occurs;
5073 else add new move insn "giv_reg = reduced_reg". */
5074 loop_givs_rescan (loop, bl, reg_map);
5076 /* All the givs based on the biv bl have been reduced if they
5077 merit it. */
5079 /* For each giv not marked as maybe dead that has been combined with a
5080 second giv, clear any "maybe dead" mark on that second giv.
5081 v->new_reg will either be or refer to the register of the giv it
5082 combined with.
5084 Doing this clearing avoids problems in biv elimination where
5085 a giv's new_reg is a complex value that can't be put in the
5086 insn but the giv combined with (with a reg as new_reg) is
5087 marked maybe_dead. Since the register will be used in either
5088 case, we'd prefer it be used from the simpler giv. */
5090 for (v = bl->giv; v; v = v->next_iv)
5091 if (! v->maybe_dead && v->same)
5092 v->same->maybe_dead = 0;
5094 /* Try to eliminate the biv, if it is a candidate.
5095 This won't work if ! bl->all_reduced,
5096 since the givs we planned to use might not have been reduced.
5098 We have to be careful that we didn't initially think we could
5099 eliminate this biv because of a giv that we now think may be
5100 dead and shouldn't be used as a biv replacement.
5102 Also, there is the possibility that we may have a giv that looks
5103 like it can be used to eliminate a biv, but the resulting insn
5104 isn't valid. This can happen, for example, on the 88k, where a
5105 JUMP_INSN can compare a register only with zero. Attempts to
5106 replace it with a compare with a constant will fail.
5108 Note that in cases where this call fails, we may have replaced some
5109 of the occurrences of the biv with a giv, but no harm was done in
5110 doing so in the rare cases where it can occur. */
5112 if (bl->all_reduced == 1 && bl->eliminable
5113 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5115 /* ?? If we created a new test to bypass the loop entirely,
5116 or otherwise drop straight in, based on this test, then
5117 we might want to rewrite it also. This way some later
5118 pass has more hope of removing the initialization of this
5119 biv entirely. */
5121 /* If final_value != 0, then the biv may be used after loop end
5122 and we must emit an insn to set it just in case.
5124 Reversed bivs already have an insn after the loop setting their
5125 value, so we don't need another one. We can't calculate the
5126 proper final value for such a biv here anyway. */
5127 if (bl->final_value && ! bl->reversed)
5128 loop_insn_sink_or_swim (loop, gen_move_insn
5129 (bl->biv->dest_reg, bl->final_value));
5131 if (loop_dump_stream)
5132 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5133 bl->regno);
5137 /* Go through all the instructions in the loop, making all the
5138 register substitutions scheduled in REG_MAP. */
5140 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5141 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5142 || GET_CODE (p) == CALL_INSN)
5144 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5145 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5146 INSN_CODE (p) = -1;
5149 if (loop_info->n_iterations > 0)
5151 /* When we completely unroll a loop we will likely not need the increment
5152 of the loop BIV and we will not need the conditional branch at the
5153 end of the loop. */
5154 unrolled_insn_copies = insn_count - 2;
5156 #ifdef HAVE_cc0
5157 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5158 need the comparison before the conditional branch at the end of the
5159 loop. */
5160 unrolled_insn_copies -= 1;
5161 #endif
5163 /* We'll need one copy for each loop iteration. */
5164 unrolled_insn_copies *= loop_info->n_iterations;
5166 /* A little slop to account for the ability to remove initialization
5167 code, better CSE, and other secondary benefits of completely
5168 unrolling some loops. */
5169 unrolled_insn_copies -= 1;
5171 /* Clamp the value. */
5172 if (unrolled_insn_copies < 0)
5173 unrolled_insn_copies = 0;
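/* A rough worked example of the size estimate above: with INSN_COUNT == 10
   and two iterations, the estimate is (10 - 2) * 2 - 1 == 15 copies
   (ignoring the HAVE_cc0 adjustment), which exceeds INSN_COUNT, so the size
   test below fails; with a single iteration the estimate is 7, and the loop
   is completely unrolled even when unrolling was not otherwise requested.  */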
5176 /* Unroll loops from within strength reduction so that we can use the
5177 induction variable information that strength_reduce has already
5178 collected. Always unroll loops that would be as small or smaller
5179 unrolled than when rolled. */
5180 if ((flags & LOOP_UNROLL)
5181 || (loop_info->n_iterations > 0
5182 && unrolled_insn_copies <= insn_count))
5183 unroll_loop (loop, insn_count, 1);
5185 #ifdef HAVE_doloop_end
5186 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5187 doloop_optimize (loop);
5188 #endif /* HAVE_doloop_end */
5190 /* In case the number of iterations is known, attach a branch prediction
5191 note to the branch. Do that only in the second loop pass, as loop
5192 unrolling may change the number of iterations performed. */
5193 if ((flags & LOOP_BCT)
5194 && loop_info->n_iterations / loop_info->unroll_number > 1)
5196 int n = loop_info->n_iterations / loop_info->unroll_number;
5197 predict_insn (PREV_INSN (loop->end),
5198 PRED_LOOP_ITERATIONS,
5199 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5202 if (loop_dump_stream)
5203 fprintf (loop_dump_stream, "\n");
5205 loop_ivs_free (loop);
5206 if (reg_map)
5207 free (reg_map);
5210 /* Record all basic induction variables calculated in the insn. */
5211 static rtx
5212 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5213 struct loop *loop;
5214 rtx p;
5215 int not_every_iteration;
5216 int maybe_multiple;
5218 struct loop_ivs *ivs = LOOP_IVS (loop);
5219 rtx set;
5220 rtx dest_reg;
5221 rtx inc_val;
5222 rtx mult_val;
5223 rtx *location;
5225 if (GET_CODE (p) == INSN
5226 && (set = single_set (p))
5227 && GET_CODE (SET_DEST (set)) == REG)
5229 dest_reg = SET_DEST (set);
5230 if (REGNO (dest_reg) < max_reg_before_loop
5231 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5232 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5234 if (basic_induction_var (loop, SET_SRC (set),
5235 GET_MODE (SET_SRC (set)),
5236 dest_reg, p, &inc_val, &mult_val,
5237 &location))
5239 /* It is a possible basic induction variable.
5240 Create and initialize an induction structure for it. */
5242 struct induction *v
5243 = (struct induction *) xmalloc (sizeof (struct induction));
5245 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5246 not_every_iteration, maybe_multiple);
5247 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5249 else if (REGNO (dest_reg) < ivs->n_regs)
5250 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5253 return p;
5256 /* Record all givs calculated in the insn.
5257 A register is a giv if: it is only set once, it is a function of a
5258 biv and a constant (or invariant), and it is not a biv. */
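/* For instance, if (reg 101) is a biv, an insn whose single set is
   (set (reg 102) (plus (mult (reg 101) (const_int 8)) (reg 103))) makes
   (reg 102) a giv with mult_val 8 and add_val (reg 103), provided (reg 103)
   is loop invariant.  (The register numbers are purely illustrative.)  */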
5259 static rtx
5260 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5261 struct loop *loop;
5262 rtx p;
5263 int not_every_iteration;
5264 int maybe_multiple;
5266 struct loop_regs *regs = LOOP_REGS (loop);
5268 rtx set;
5269 /* Look for a general induction variable in a register. */
5270 if (GET_CODE (p) == INSN
5271 && (set = single_set (p))
5272 && GET_CODE (SET_DEST (set)) == REG
5273 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5275 rtx src_reg;
5276 rtx dest_reg;
5277 rtx add_val;
5278 rtx mult_val;
5279 rtx ext_val;
5280 int benefit;
5281 rtx regnote = 0;
5282 rtx last_consec_insn;
5284 dest_reg = SET_DEST (set);
5285 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5286 return p;
5288 if (/* SET_SRC is a giv. */
5289 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5290 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5291 /* Equivalent expression is a giv. */
5292 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5293 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5294 &add_val, &mult_val, &ext_val, 0,
5295 &benefit, VOIDmode)))
5296 /* Don't try to handle any regs made by loop optimization.
5297 We have nothing on them in regno_first_uid, etc. */
5298 && REGNO (dest_reg) < max_reg_before_loop
5299 /* Don't recognize a BASIC_INDUCT_VAR here. */
5300 && dest_reg != src_reg
5301 /* This must be the only place where the register is set. */
5302 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5303 /* or all sets must be consecutive and make a giv. */
5304 || (benefit = consec_sets_giv (loop, benefit, p,
5305 src_reg, dest_reg,
5306 &add_val, &mult_val, &ext_val,
5307 &last_consec_insn))))
5309 struct induction *v
5310 = (struct induction *) xmalloc (sizeof (struct induction));
5312 /* If this is a library call, increase benefit. */
5313 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5314 benefit += libcall_benefit (p);
5316 /* Skip the consecutive insns, if there are any. */
5317 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5318 p = last_consec_insn;
5320 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5321 ext_val, benefit, DEST_REG, not_every_iteration,
5322 maybe_multiple, (rtx*) 0);
5327 #ifndef DONT_REDUCE_ADDR
5328 /* Look for givs which are memory addresses. */
5329 /* This resulted in worse code on a VAX 8600. I wonder if it
5330 still does. */
5331 if (GET_CODE (p) == INSN)
5332 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5333 maybe_multiple);
5334 #endif
5336 /* Update the status of whether giv can derive other givs. This can
5337 change when we pass a label or an insn that updates a biv. */
5338 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5339 || GET_CODE (p) == CODE_LABEL)
5340 update_giv_derive (loop, p);
5341 return p;
5344 /* Return 1 if X is a valid source for an initial value (or as value being
5345 compared against in an initial test).
5347 X must be either a register or constant and must not be clobbered between
5348 the current insn and the start of the loop.
5350 INSN is the insn containing X. */
5352 static int
5353 valid_initial_value_p (x, insn, call_seen, loop_start)
5354 rtx x;
5355 rtx insn;
5356 int call_seen;
5357 rtx loop_start;
5359 if (CONSTANT_P (x))
5360 return 1;
5362 /* Only consider pseudos we know about initialized in insns whose luids
5363 we know. */
5364 if (GET_CODE (x) != REG
5365 || REGNO (x) >= max_reg_before_loop)
5366 return 0;
5368 /* Don't use call-clobbered registers across a call which clobbers it. On
5369 some machines, don't use any hard registers at all. */
5370 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5371 && (SMALL_REGISTER_CLASSES
5372 || (call_used_regs[REGNO (x)] && call_seen)))
5373 return 0;
5375 /* Don't use registers that have been clobbered before the start of the
5376 loop. */
5377 if (reg_set_between_p (x, insn, loop_start))
5378 return 0;
5380 return 1;
5383 /* Scan X for memory refs and check each memory address
5384 as a possible giv. INSN is the insn whose pattern X comes from.
5385 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5386 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5387 more than once in each loop iteration. */
5389 static void
5390 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5391 const struct loop *loop;
5392 rtx x;
5393 rtx insn;
5394 int not_every_iteration, maybe_multiple;
5396 int i, j;
5397 enum rtx_code code;
5398 const char *fmt;
5400 if (x == 0)
5401 return;
5403 code = GET_CODE (x);
5404 switch (code)
5406 case REG:
5407 case CONST_INT:
5408 case CONST:
5409 case CONST_DOUBLE:
5410 case SYMBOL_REF:
5411 case LABEL_REF:
5412 case PC:
5413 case CC0:
5414 case ADDR_VEC:
5415 case ADDR_DIFF_VEC:
5416 case USE:
5417 case CLOBBER:
5418 return;
5420 case MEM:
5422 rtx src_reg;
5423 rtx add_val;
5424 rtx mult_val;
5425 rtx ext_val;
5426 int benefit;
5428 /* This code used to disable creating GIVs with mult_val == 1 and
5429 add_val == 0. However, this leads to lost optimizations when
5430 it comes time to combine a set of related DEST_ADDR GIVs, since
5431 this one would not be seen. */
5433 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5434 &mult_val, &ext_val, 1, &benefit,
5435 GET_MODE (x)))
5437 /* Found one; record it. */
5438 struct induction *v
5439 = (struct induction *) xmalloc (sizeof (struct induction));
5441 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5442 add_val, ext_val, benefit, DEST_ADDR,
5443 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5445 v->mem = x;
5448 return;
5450 default:
5451 break;
5454 /* Recursively scan the subexpressions for other mem refs. */
5456 fmt = GET_RTX_FORMAT (code);
5457 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5458 if (fmt[i] == 'e')
5459 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5460 maybe_multiple);
5461 else if (fmt[i] == 'E')
5462 for (j = 0; j < XVECLEN (x, i); j++)
5463 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5464 maybe_multiple);
5467 /* Fill in the data about one biv update.
5468 V is the `struct induction' in which we record the biv. (It is
5469 allocated by the caller, with alloca.)
5470 INSN is the insn that sets it.
5471 DEST_REG is the biv's reg.
5473 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5474 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5475 being set to INC_VAL.
5477 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5478 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5479 can be executed more than once per iteration. If MAYBE_MULTIPLE
5480 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5481 executed exactly once per iteration. */
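/* Illustrative sketch, not part of the pass: in source terms, the two
   encodings described above might arise from updates like the ones in
   this hypothetical loop.  */
#if 0
static void
biv_update_forms (n, flag)
     int n, flag;
{
  int i;

  i = 0;
  while (i < n)
    {
      if (flag)
        i = 16;         /* Recorded with MULT_VAL == const0_rtx,
                           INC_VAL == (const_int 16).  */
      i = i + 4;        /* Recorded with MULT_VAL == const1_rtx,
                           INC_VAL == (const_int 4).  */
    }
}
#endif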
5483 static void
5484 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5485 not_every_iteration, maybe_multiple)
5486 struct loop *loop;
5487 struct induction *v;
5488 rtx insn;
5489 rtx dest_reg;
5490 rtx inc_val;
5491 rtx mult_val;
5492 rtx *location;
5493 int not_every_iteration;
5494 int maybe_multiple;
5496 struct loop_ivs *ivs = LOOP_IVS (loop);
5497 struct iv_class *bl;
5499 v->insn = insn;
5500 v->src_reg = dest_reg;
5501 v->dest_reg = dest_reg;
5502 v->mult_val = mult_val;
5503 v->add_val = inc_val;
5504 v->ext_dependent = NULL_RTX;
5505 v->location = location;
5506 v->mode = GET_MODE (dest_reg);
5507 v->always_computable = ! not_every_iteration;
5508 v->always_executed = ! not_every_iteration;
5509 v->maybe_multiple = maybe_multiple;
5511 /* Add this to the reg's iv_class, creating a class
5512 if this is the first incrementation of the reg. */
5514 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5515 if (bl == 0)
5517 /* Create and initialize new iv_class. */
5519 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5521 bl->regno = REGNO (dest_reg);
5522 bl->biv = 0;
5523 bl->giv = 0;
5524 bl->biv_count = 0;
5525 bl->giv_count = 0;
5527 /* Set initial value to the reg itself. */
5528 bl->initial_value = dest_reg;
5529 bl->final_value = 0;
5530 /* We haven't seen the initializing insn yet */
5531 bl->init_insn = 0;
5532 bl->init_set = 0;
5533 bl->initial_test = 0;
5534 bl->incremented = 0;
5535 bl->eliminable = 0;
5536 bl->nonneg = 0;
5537 bl->reversed = 0;
5538 bl->total_benefit = 0;
5540 /* Add this class to ivs->list. */
5541 bl->next = ivs->list;
5542 ivs->list = bl;
5544 /* Put it in the array of biv register classes. */
5545 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5548 /* Update IV_CLASS entry for this biv. */
5549 v->next_iv = bl->biv;
5550 bl->biv = v;
5551 bl->biv_count++;
5552 if (mult_val == const1_rtx)
5553 bl->incremented = 1;
5555 if (loop_dump_stream)
5556 loop_biv_dump (v, loop_dump_stream, 0);
5559 /* Fill in the data about one giv.
5560 V is the `struct induction' in which we record the giv. (It is
5561 allocated by the caller, with alloca.)
5562 INSN is the insn that sets it.
5563 BENEFIT estimates the savings from deleting this insn.
5564 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5565 into a register or is used as a memory address.
5567 SRC_REG is the biv reg which the giv is computed from.
5568 DEST_REG is the giv's reg (if the giv is stored in a reg).
5569 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5570 LOCATION points to the place where this giv's value appears in INSN. */
5572 static void
5573 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5574 benefit, type, not_every_iteration, maybe_multiple, location)
5575 const struct loop *loop;
5576 struct induction *v;
5577 rtx insn;
5578 rtx src_reg;
5579 rtx dest_reg;
5580 rtx mult_val, add_val, ext_val;
5581 int benefit;
5582 enum g_types type;
5583 int not_every_iteration, maybe_multiple;
5584 rtx *location;
5586 struct loop_ivs *ivs = LOOP_IVS (loop);
5587 struct induction *b;
5588 struct iv_class *bl;
5589 rtx set = single_set (insn);
5590 rtx temp;
5592 /* Attempt to prove constancy of the values. Don't let simplify_rtx
5593 undo the MULT canonicalization that we performed earlier. */
5594 temp = simplify_rtx (add_val);
5595 if (temp
5596 && ! (GET_CODE (add_val) == MULT
5597 && GET_CODE (temp) == ASHIFT))
5598 add_val = temp;
5600 v->insn = insn;
5601 v->src_reg = src_reg;
5602 v->giv_type = type;
5603 v->dest_reg = dest_reg;
5604 v->mult_val = mult_val;
5605 v->add_val = add_val;
5606 v->ext_dependent = ext_val;
5607 v->benefit = benefit;
5608 v->location = location;
5609 v->cant_derive = 0;
5610 v->combined_with = 0;
5611 v->maybe_multiple = maybe_multiple;
5612 v->maybe_dead = 0;
5613 v->derive_adjustment = 0;
5614 v->same = 0;
5615 v->ignore = 0;
5616 v->new_reg = 0;
5617 v->final_value = 0;
5618 v->same_insn = 0;
5619 v->auto_inc_opt = 0;
5620 v->unrolled = 0;
5621 v->shared = 0;
5623 /* The v->always_computable field is used in update_giv_derive, to
5624 determine whether a giv can be used to derive another giv. For a
5625 DEST_REG giv, INSN computes a new value for the giv, so its value
5626 isn't computable if INSN isn't executed every iteration.
5627 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5628 it does not compute a new value. Hence the value is always computable
5629 regardless of whether INSN is executed each iteration. */
5631 if (type == DEST_ADDR)
5632 v->always_computable = 1;
5633 else
5634 v->always_computable = ! not_every_iteration;
5636 v->always_executed = ! not_every_iteration;
5638 if (type == DEST_ADDR)
5640 v->mode = GET_MODE (*location);
5641 v->lifetime = 1;
5643 else /* type == DEST_REG */
5645 v->mode = GET_MODE (SET_DEST (set));
5647 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5649 /* If the lifetime is zero, it means that this register is
5650 really a dead store. So mark this as a giv that can be
5651 ignored. This will not prevent the biv from being eliminated. */
5652 if (v->lifetime == 0)
5653 v->ignore = 1;
5655 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5656 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5659 /* Add the giv to the class of givs computed from one biv. */
5661 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5662 if (bl)
5664 v->next_iv = bl->giv;
5665 bl->giv = v;
5666 /* Don't count DEST_ADDR. This is supposed to count the number of
5667 insns that calculate givs. */
5668 if (type == DEST_REG)
5669 bl->giv_count++;
5670 bl->total_benefit += benefit;
5672 else
5673 /* Fatal error, biv missing for this giv? */
5674 abort ();
5676 if (type == DEST_ADDR)
5677 v->replaceable = 1;
5678 else
5680 /* The giv can be replaced outright by the reduced register only if all
5681 of the following conditions are true:
5682 - the insn that sets the giv is always executed on any iteration
5683 on which the giv is used at all
5684 (there are two ways to deduce this:
5685 either the insn is executed on every iteration,
5686 or all uses follow that insn in the same basic block),
5687 - the giv is not used outside the loop
5688 - no assignments to the biv occur during the giv's lifetime. */
5690 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5691 /* Previous line always fails if INSN was moved by loop opt. */
5692 && REGNO_LAST_LUID (REGNO (dest_reg))
5693 < INSN_LUID (loop->end)
5694 && (! not_every_iteration
5695 || last_use_this_basic_block (dest_reg, insn)))
5697 /* Now check that there are no assignments to the biv within the
5698 giv's lifetime. This requires two separate checks. */
5700 /* Check each biv update, and fail if any are between the first
5701 and last use of the giv.
5703 If this loop contains an inner loop that was unrolled, then
5704 the insn modifying the biv may have been emitted by the loop
5705 unrolling code, and hence does not have a valid luid. Just
5706 mark the biv as not replaceable in this case. It is not very
5707 useful as a biv, because it is used in two different loops.
5708 It is very unlikely that we would be able to optimize the giv
5709 using this biv anyway. */
5711 v->replaceable = 1;
5712 for (b = bl->biv; b; b = b->next_iv)
5714 if (INSN_UID (b->insn) >= max_uid_for_loop
5715 || ((INSN_LUID (b->insn)
5716 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5717 && (INSN_LUID (b->insn)
5718 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5720 v->replaceable = 0;
5721 v->not_replaceable = 1;
5722 break;
5726 /* If there are any backwards branches that go from after the
5727 biv update to before it, then this giv is not replaceable. */
5728 if (v->replaceable)
5729 for (b = bl->biv; b; b = b->next_iv)
5730 if (back_branch_in_range_p (loop, b->insn))
5732 v->replaceable = 0;
5733 v->not_replaceable = 1;
5734 break;
5737 else
5739 /* May still be replaceable, we don't have enough info here to
5740 decide. */
5741 v->replaceable = 0;
5742 v->not_replaceable = 0;
5746 /* Record whether the add_val contains a const_int, for later use by
5747 combine_givs. */
5749 rtx tem = add_val;
5751 v->no_const_addval = 1;
5752 if (tem == const0_rtx)
5754 else if (CONSTANT_P (add_val))
5755 v->no_const_addval = 0;
5756 if (GET_CODE (tem) == PLUS)
5758 while (1)
5760 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5761 tem = XEXP (tem, 0);
5762 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5763 tem = XEXP (tem, 1);
5764 else
5765 break;
5767 if (CONSTANT_P (XEXP (tem, 1)))
5768 v->no_const_addval = 0;
5772 if (loop_dump_stream)
5773 loop_giv_dump (v, loop_dump_stream, 0);
5776 /* All this does is determine whether a giv can be made replaceable because
5777 its final value can be calculated. This code can not be part of record_giv
5778 above, because final_giv_value requires that the number of loop iterations
5779 be known, and that can not be accurately calculated until after all givs
5780 have been identified. */
5782 static void
5783 check_final_value (loop, v)
5784 const struct loop *loop;
5785 struct induction *v;
5787 struct loop_ivs *ivs = LOOP_IVS (loop);
5788 struct iv_class *bl;
5789 rtx final_value = 0;
5791 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5793 /* DEST_ADDR givs will never reach here, because they are always marked
5794 replaceable above in record_giv. */
5796 /* The giv can be replaced outright by the reduced register only if all
5797 of the following conditions are true:
5798 - the insn that sets the giv is always executed on any iteration
5799 on which the giv is used at all
5800 (there are two ways to deduce this:
5801 either the insn is executed on every iteration,
5802 or all uses follow that insn in the same basic block),
5803 - its final value can be calculated (this condition is different
5804 from the one above in record_giv)
5805 - it's not used before it's set
5806 - no assignments to the biv occur during the giv's lifetime. */
5808 #if 0
5809 /* This is only called now when replaceable is known to be false. */
5810 /* Clear replaceable, so that it won't confuse final_giv_value. */
5811 v->replaceable = 0;
5812 #endif
5814 if ((final_value = final_giv_value (loop, v))
5815 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5817 int biv_increment_seen = 0, before_giv_insn = 0;
5818 rtx p = v->insn;
5819 rtx last_giv_use;
5821 v->replaceable = 1;
5823 /* When trying to determine whether or not a biv increment occurs
5824 during the lifetime of the giv, we can ignore uses of the variable
5825 outside the loop because final_value is true. Hence we can not
5826 use regno_last_uid and regno_first_uid as above in record_giv. */
5828 /* Search the loop to determine whether any assignments to the
5829 biv occur during the giv's lifetime. Start with the insn
5830 that sets the giv, and search around the loop until we come
5831 back to that insn again.
5833 Also fail if there is a jump within the giv's lifetime that jumps
5834 to somewhere outside the lifetime but still within the loop. This
5835 catches spaghetti code where the execution order is not linear, and
5836 hence the above test fails. Here we assume that the giv lifetime
5837 does not extend from one iteration of the loop to the next, so as
5838 to make the test easier. Since the lifetime isn't known yet,
5839 this requires two loops. See also record_giv above. */
5841 last_giv_use = v->insn;
5843 while (1)
5845 p = NEXT_INSN (p);
5846 if (p == loop->end)
5848 before_giv_insn = 1;
5849 p = NEXT_INSN (loop->start);
5851 if (p == v->insn)
5852 break;
5854 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5855 || GET_CODE (p) == CALL_INSN)
5857 /* It is possible for the BIV increment to use the GIV if we
5858 have a cycle. Thus we must be sure to check each insn for
5859 both BIV and GIV uses, and we must check for BIV uses
5860 first. */
5862 if (! biv_increment_seen
5863 && reg_set_p (v->src_reg, PATTERN (p)))
5864 biv_increment_seen = 1;
5866 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5868 if (biv_increment_seen || before_giv_insn)
5870 v->replaceable = 0;
5871 v->not_replaceable = 1;
5872 break;
5874 last_giv_use = p;
5879 /* Now that the lifetime of the giv is known, check for branches
5880 from within the lifetime to outside the lifetime if it is still
5881 replaceable. */
5883 if (v->replaceable)
5885 p = v->insn;
5886 while (1)
5888 p = NEXT_INSN (p);
5889 if (p == loop->end)
5890 p = NEXT_INSN (loop->start);
5891 if (p == last_giv_use)
5892 break;
5894 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5895 && LABEL_NAME (JUMP_LABEL (p))
5896 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5897 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5898 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5899 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5901 v->replaceable = 0;
5902 v->not_replaceable = 1;
5904 if (loop_dump_stream)
5905 fprintf (loop_dump_stream,
5906 "Found branch outside giv lifetime.\n");
5908 break;
5913 /* If it is replaceable, then save the final value. */
5914 if (v->replaceable)
5915 v->final_value = final_value;
5918 if (loop_dump_stream && v->replaceable)
5919 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5920 INSN_UID (v->insn), REGNO (v->dest_reg));
5923 /* Update the status of whether a giv can derive other givs.
5925 We need to do something special if there is or may be an update to the biv
5926 between the time the giv is defined and the time it is used to derive
5927 another giv.
5929 In addition, a giv that is only conditionally set is not allowed to
5930 derive another giv once a label has been passed.
5932 The cases we look at are when a label or an update to a biv is passed. */
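/* Worked sketch with hypothetical values: suppose the loop body contains

       g = b * 3 + 2;           giv G computed from biv B
       b = b + 4;               biv update passed later in the body

   A giv derived from G after the update sees B already advanced, so the
   product biv->add_val * giv->mult_val = 4 * 3 = 12 is accumulated into
   G's derive_adjustment; if that product cannot be formed, or the update
   is conditional, G is marked cant_derive instead.  */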
5934 static void
5935 update_giv_derive (loop, p)
5936 const struct loop *loop;
5937 rtx p;
5939 struct loop_ivs *ivs = LOOP_IVS (loop);
5940 struct iv_class *bl;
5941 struct induction *biv, *giv;
5942 rtx tem;
5943 int dummy;
5945 /* Search all IV classes, then all bivs, and finally all givs.
5947 There are three cases we are concerned with. First we have the situation
5948 of a giv that is only updated conditionally. In that case, it may not
5949 derive any givs after a label is passed.
5951 The second case is when a biv update occurs, or may occur, after the
5952 definition of a giv. For certain biv updates (see below) that are
5953 known to occur between the giv definition and use, we can adjust the
5954 giv definition. For others, or when the biv update is conditional,
5955 we must prevent the giv from deriving any other givs. There are two
5956 sub-cases within this case.
5958 If this is a label, we are concerned with any biv update that is done
5959 conditionally, since it may be done after the giv is defined followed by
5960 a branch here (actually, we need to pass both a jump and a label, but
5961 this extra tracking doesn't seem worth it).
5963 If this is a jump, we are concerned about any biv update that may be
5964 executed multiple times. We are actually only concerned about
5965 backward jumps, but it is probably not worth performing the test
5966 on the jump again here.
5968 If this is a biv update, we must adjust the giv status to show that a
5969 subsequent biv update was performed. If this adjustment cannot be done,
5970 the giv cannot derive further givs. */
5972 for (bl = ivs->list; bl; bl = bl->next)
5973 for (biv = bl->biv; biv; biv = biv->next_iv)
5974 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5975 || biv->insn == p)
5977 for (giv = bl->giv; giv; giv = giv->next_iv)
5979 /* If cant_derive is already true, there is no point in
5980 checking all of these conditions again. */
5981 if (giv->cant_derive)
5982 continue;
5984 /* If this giv is conditionally set and we have passed a label,
5985 it cannot derive anything. */
5986 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5987 giv->cant_derive = 1;
5989 /* Skip givs that have mult_val == 0, since
5990 they are really invariants. Also skip those that are
5991 replaceable, since we know their lifetime doesn't contain
5992 any biv update. */
5993 else if (giv->mult_val == const0_rtx || giv->replaceable)
5994 continue;
5996 /* The only way we can allow this giv to derive another
5997 is if this is a biv increment and we can form the product
5998 of biv->add_val and giv->mult_val. In this case, we will
5999 be able to compute a compensation. */
6000 else if (biv->insn == p)
6002 rtx ext_val_dummy;
6004 tem = 0;
6005 if (biv->mult_val == const1_rtx)
6006 tem = simplify_giv_expr (loop,
6007 gen_rtx_MULT (giv->mode,
6008 biv->add_val,
6009 giv->mult_val),
6010 &ext_val_dummy, &dummy);
6012 if (tem && giv->derive_adjustment)
6013 tem = simplify_giv_expr
6014 (loop,
6015 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6016 &ext_val_dummy, &dummy);
6018 if (tem)
6019 giv->derive_adjustment = tem;
6020 else
6021 giv->cant_derive = 1;
6023 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6024 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6025 giv->cant_derive = 1;
6030 /* Check whether an insn is an increment legitimate for a basic induction var.
6031 X is the source of insn P, or a part of it.
6032 MODE is the mode in which X should be interpreted.
6034 DEST_REG is the putative biv, also the destination of the insn.
6035 We accept patterns of these forms:
6036 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6037 REG = INVARIANT + REG
6039 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6040 store the additive term into *INC_VAL, and store the place where
6041 we found the additive term into *LOCATION.
6043 If X is an assignment of an invariant into DEST_REG, we set
6044 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6046 We also want to detect a BIV when it corresponds to a variable
6047 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6048 of the variable may be a PLUS that adds a SUBREG of that variable to
6049 an invariant and then sign- or zero-extends the result of the PLUS
6050 into the variable.
6052 Most GIVs in such cases will be in the promoted mode, since that is
6053 probably the natural computation mode (and almost certainly the mode
6054 used for addresses) on the machine. So we view the pseudo-reg containing
6055 the variable as the BIV, as if it were simply incremented.
6057 Note that treating the entire pseudo as a BIV will result in making
6058 simple increments to any GIVs based on it. However, if the variable
6059 overflows in its declared mode but not its promoted mode, the result will
6060 be incorrect. This is acceptable if the variable is signed, since
6061 overflows in such cases are undefined, but not if it is unsigned, since
6062 those overflows are defined. So we only check for SIGN_EXTEND and
6063 not ZERO_EXTEND.
6065 If we cannot find a biv, we return 0. */
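/* Minimal sketch, not part of the pass: for a hypothetical source loop
   like the one below, the update of `i' matches the REG = REG + INVARIANT
   pattern, so we would return 1 with *MULT_VAL == const1_rtx,
   *INC_VAL == (const_int 3), and *LOCATION pointing at the addend inside
   the PLUS.  */
#if 0
static int
biv_example (n)
     int n;
{
  int i, s;

  s = 0;
  for (i = 0; i < n; i += 3)    /* i = i + 3 is the biv update.  */
    s += i;
  return s;
}
#endif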
6067 static int
6068 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6069 const struct loop *loop;
6070 rtx x;
6071 enum machine_mode mode;
6072 rtx dest_reg;
6073 rtx p;
6074 rtx *inc_val;
6075 rtx *mult_val;
6076 rtx **location;
6078 enum rtx_code code;
6079 rtx *argp, arg;
6080 rtx insn, set = 0;
6082 code = GET_CODE (x);
6083 *location = NULL;
6084 switch (code)
6086 case PLUS:
6087 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6088 || (GET_CODE (XEXP (x, 0)) == SUBREG
6089 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6090 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6092 argp = &XEXP (x, 1);
6094 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6095 || (GET_CODE (XEXP (x, 1)) == SUBREG
6096 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6097 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6099 argp = &XEXP (x, 0);
6101 else
6102 return 0;
6104 arg = *argp;
6105 if (loop_invariant_p (loop, arg) != 1)
6106 return 0;
6108 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6109 *mult_val = const1_rtx;
6110 *location = argp;
6111 return 1;
6113 case SUBREG:
6114 /* If this is a SUBREG for a promoted variable, check the inner
6115 value. */
6116 if (SUBREG_PROMOTED_VAR_P (x))
6117 return basic_induction_var (loop, SUBREG_REG (x),
6118 GET_MODE (SUBREG_REG (x)),
6119 dest_reg, p, inc_val, mult_val, location);
6120 return 0;
6122 case REG:
6123 /* If this register is assigned in a previous insn, look at its
6124 source, but don't go outside the loop or past a label. */
6126 /* If this sets a register to itself, we would repeat any previous
6127 biv increment if we applied this strategy blindly. */
6128 if (rtx_equal_p (dest_reg, x))
6129 return 0;
6131 insn = p;
6132 while (1)
6134 rtx dest;
6137 insn = PREV_INSN (insn);
6139 while (insn && GET_CODE (insn) == NOTE
6140 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6142 if (!insn)
6143 break;
6144 set = single_set (insn);
6145 if (set == 0)
6146 break;
6147 dest = SET_DEST (set);
6148 if (dest == x
6149 || (GET_CODE (dest) == SUBREG
6150 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6151 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6152 && SUBREG_REG (dest) == x))
6153 return basic_induction_var (loop, SET_SRC (set),
6154 (GET_MODE (SET_SRC (set)) == VOIDmode
6155 ? GET_MODE (x)
6156 : GET_MODE (SET_SRC (set))),
6157 dest_reg, insn,
6158 inc_val, mult_val, location);
6160 while (GET_CODE (dest) == SIGN_EXTRACT
6161 || GET_CODE (dest) == ZERO_EXTRACT
6162 || GET_CODE (dest) == SUBREG
6163 || GET_CODE (dest) == STRICT_LOW_PART)
6164 dest = XEXP (dest, 0);
6165 if (dest == x)
6166 break;
6168 /* Fall through. */
6170 /* Can accept constant setting of biv only when inside innermost loop.
6171 Otherwise, a biv of an inner loop may be incorrectly recognized
6172 as a biv of the outer loop,
6173 causing code to be moved INTO the inner loop. */
6174 case MEM:
6175 if (loop_invariant_p (loop, x) != 1)
6176 return 0;
6177 case CONST_INT:
6178 case SYMBOL_REF:
6179 case CONST:
6180 /* convert_modes aborts if we try to convert to or from CCmode, so just
6181 exclude that case. It is very unlikely that a condition code value
6182 would be a useful iterator anyway. */
6183 if (loop->level == 1
6184 && GET_MODE_CLASS (mode) != MODE_CC
6185 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
6187 /* Possible bug here? Perhaps we don't know the mode of X. */
6188 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6189 *mult_val = const0_rtx;
6190 return 1;
6192 else
6193 return 0;
6195 case SIGN_EXTEND:
6196 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6197 dest_reg, p, inc_val, mult_val, location);
6199 case ASHIFTRT:
6200 /* Similar, since this can be a sign extension. */
6201 for (insn = PREV_INSN (p);
6202 (insn && GET_CODE (insn) == NOTE
6203 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6204 insn = PREV_INSN (insn))
6207 if (insn)
6208 set = single_set (insn);
6210 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6211 && set && SET_DEST (set) == XEXP (x, 0)
6212 && GET_CODE (XEXP (x, 1)) == CONST_INT
6213 && INTVAL (XEXP (x, 1)) >= 0
6214 && GET_CODE (SET_SRC (set)) == ASHIFT
6215 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6216 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6217 GET_MODE (XEXP (x, 0)),
6218 dest_reg, insn, inc_val, mult_val,
6219 location);
6220 return 0;
6222 default:
6223 return 0;
6227 /* A general induction variable (giv) is any quantity that is a linear
6228 function of a basic induction variable,
6229 i.e. giv = biv * mult_val + add_val.
6230 The coefficients can be any loop invariant quantity.
6231 A giv need not be computed directly from the biv;
6232 it can be computed by way of other givs. */
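/* Illustrative sketch assuming 4-byte ints, not part of the pass: with
   biv `i' in the hypothetical loop below, the address of p[i + 2] is
   p + i*4 + 8, a DEST_ADDR giv with mult_val 4 and add_val p + 8, while
   `t' is a DEST_REG giv with mult_val 6 and add_val 1.  */
#if 0
static int
giv_example (p, n)
     int *p;
     int n;
{
  int i, t, s;

  s = 0;
  for (i = 0; i < n; i++)
    {
      t = i * 6 + 1;            /* DEST_REG giv.  */
      s += p[i + 2] + t;        /* The address of p[i + 2] is a DEST_ADDR
                                   giv.  */
    }
  return s;
}
#endif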
6234 /* Determine whether X computes a giv.
6235 If it does, return a nonzero value
6236 which is the benefit from eliminating the computation of X;
6237 set *SRC_REG to the register of the biv that it is computed from;
6238 set *ADD_VAL and *MULT_VAL to the coefficients,
6239 such that the value of X is biv * mult + add; */
6241 static int
6242 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6243 is_addr, pbenefit, addr_mode)
6244 const struct loop *loop;
6245 rtx x;
6246 rtx *src_reg;
6247 rtx *add_val;
6248 rtx *mult_val;
6249 rtx *ext_val;
6250 int is_addr;
6251 int *pbenefit;
6252 enum machine_mode addr_mode;
6254 struct loop_ivs *ivs = LOOP_IVS (loop);
6255 rtx orig_x = x;
6257 /* If this is an invariant, forget it, it isn't a giv. */
6258 if (loop_invariant_p (loop, x) == 1)
6259 return 0;
6261 *pbenefit = 0;
6262 *ext_val = NULL_RTX;
6263 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6264 if (x == 0)
6265 return 0;
6267 switch (GET_CODE (x))
6269 case USE:
6270 case CONST_INT:
6271 /* Since this is now an invariant and wasn't before, it must be a giv
6272 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6273 with. */
6274 *src_reg = ivs->list->biv->dest_reg;
6275 *mult_val = const0_rtx;
6276 *add_val = x;
6277 break;
6279 case REG:
6280 /* This is equivalent to a BIV. */
6281 *src_reg = x;
6282 *mult_val = const1_rtx;
6283 *add_val = const0_rtx;
6284 break;
6286 case PLUS:
6287 /* Either (plus (biv) (invar)) or
6288 (plus (mult (biv) (invar_1)) (invar_2)). */
6289 if (GET_CODE (XEXP (x, 0)) == MULT)
6291 *src_reg = XEXP (XEXP (x, 0), 0);
6292 *mult_val = XEXP (XEXP (x, 0), 1);
6294 else
6296 *src_reg = XEXP (x, 0);
6297 *mult_val = const1_rtx;
6299 *add_val = XEXP (x, 1);
6300 break;
6302 case MULT:
6303 /* ADD_VAL is zero. */
6304 *src_reg = XEXP (x, 0);
6305 *mult_val = XEXP (x, 1);
6306 *add_val = const0_rtx;
6307 break;
6309 default:
6310 abort ();
6313 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6314 one unless they are CONST_INT). */
6315 if (GET_CODE (*add_val) == USE)
6316 *add_val = XEXP (*add_val, 0);
6317 if (GET_CODE (*mult_val) == USE)
6318 *mult_val = XEXP (*mult_val, 0);
6320 if (is_addr)
6321 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6322 else
6323 *pbenefit += rtx_cost (orig_x, SET);
6325 /* Always return true if this is a giv so it will be detected as such,
6326 even if the benefit is zero or negative. This allows elimination
6327 of bivs that might otherwise not be eliminated. */
6328 return 1;
6331 /* Given an expression, X, try to form it as a linear function of a biv.
6332 We will canonicalize it to be of the form
6333 (plus (mult (BIV) (invar_1))
6334 (invar_2))
6335 with possible degeneracies.
6337 The invariant expressions must each be of a form that can be used as a
6338 machine operand. We surround them with a USE rtx (a hack, but localized
6339 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6340 routine; it is the caller's responsibility to strip them.
6342 If no such canonicalization is possible (i.e., two biv's are used or an
6343 expression that is neither invariant nor a biv or giv), this routine
6344 returns 0.
6346 For a non-zero return, the result will have a code of CONST_INT, USE,
6347 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6349 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
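/* Worked sketch with a hypothetical input: with biv `i', the expression
   (i + 2) * 3 is distributed to i*3 + 2*3 and comes back as the canonical
   (plus (mult (reg i) (const_int 3)) (const_int 6)); "i - 2" is first
   rewritten by the MINUS case below as "i + 2 * (-1)".  Invariant
   sub-terms that are not CONST_INT come back wrapped in a USE.  */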
6351 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6352 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6354 static rtx
6355 simplify_giv_expr (loop, x, ext_val, benefit)
6356 const struct loop *loop;
6357 rtx x;
6358 rtx *ext_val;
6359 int *benefit;
6361 struct loop_ivs *ivs = LOOP_IVS (loop);
6362 struct loop_regs *regs = LOOP_REGS (loop);
6363 enum machine_mode mode = GET_MODE (x);
6364 rtx arg0, arg1;
6365 rtx tem;
6367 /* If this is not an integer mode, or if we cannot do arithmetic in this
6368 mode, this can't be a giv. */
6369 if (mode != VOIDmode
6370 && (GET_MODE_CLASS (mode) != MODE_INT
6371 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6372 return NULL_RTX;
6374 switch (GET_CODE (x))
6376 case PLUS:
6377 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6378 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6379 if (arg0 == 0 || arg1 == 0)
6380 return NULL_RTX;
6382 /* Put constant last, CONST_INT last if both constant. */
6383 if ((GET_CODE (arg0) == USE
6384 || GET_CODE (arg0) == CONST_INT)
6385 && ! ((GET_CODE (arg0) == USE
6386 && GET_CODE (arg1) == USE)
6387 || GET_CODE (arg1) == CONST_INT))
6388 tem = arg0, arg0 = arg1, arg1 = tem;
6390 /* Handle addition of zero, then addition of an invariant. */
6391 if (arg1 == const0_rtx)
6392 return arg0;
6393 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6394 switch (GET_CODE (arg0))
6396 case CONST_INT:
6397 case USE:
6398 /* Adding two invariants must result in an invariant, so enclose
6399 addition operation inside a USE and return it. */
6400 if (GET_CODE (arg0) == USE)
6401 arg0 = XEXP (arg0, 0);
6402 if (GET_CODE (arg1) == USE)
6403 arg1 = XEXP (arg1, 0);
6405 if (GET_CODE (arg0) == CONST_INT)
6406 tem = arg0, arg0 = arg1, arg1 = tem;
6407 if (GET_CODE (arg1) == CONST_INT)
6408 tem = sge_plus_constant (arg0, arg1);
6409 else
6410 tem = sge_plus (mode, arg0, arg1);
6412 if (GET_CODE (tem) != CONST_INT)
6413 tem = gen_rtx_USE (mode, tem);
6414 return tem;
6416 case REG:
6417 case MULT:
6418 /* biv + invar or mult + invar. Return sum. */
6419 return gen_rtx_PLUS (mode, arg0, arg1);
6421 case PLUS:
6422 /* (a + invar_1) + invar_2. Associate. */
6423 return
6424 simplify_giv_expr (loop,
6425 gen_rtx_PLUS (mode,
6426 XEXP (arg0, 0),
6427 gen_rtx_PLUS (mode,
6428 XEXP (arg0, 1),
6429 arg1)),
6430 ext_val, benefit);
6432 default:
6433 abort ();
6436 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6437 MULT to reduce cases. */
6438 if (GET_CODE (arg0) == REG)
6439 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6440 if (GET_CODE (arg1) == REG)
6441 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6443 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6444 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6445 Recurse to associate the second PLUS. */
6446 if (GET_CODE (arg1) == MULT)
6447 tem = arg0, arg0 = arg1, arg1 = tem;
6449 if (GET_CODE (arg1) == PLUS)
6450 return
6451 simplify_giv_expr (loop,
6452 gen_rtx_PLUS (mode,
6453 gen_rtx_PLUS (mode, arg0,
6454 XEXP (arg1, 0)),
6455 XEXP (arg1, 1)),
6456 ext_val, benefit);
6458 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6459 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6460 return NULL_RTX;
6462 if (!rtx_equal_p (arg0, arg1))
6463 return NULL_RTX;
6465 return simplify_giv_expr (loop,
6466 gen_rtx_MULT (mode,
6467 XEXP (arg0, 0),
6468 gen_rtx_PLUS (mode,
6469 XEXP (arg0, 1),
6470 XEXP (arg1, 1))),
6471 ext_val, benefit);
6473 case MINUS:
6474 /* Handle "a - b" as "a + b * (-1)". */
6475 return simplify_giv_expr (loop,
6476 gen_rtx_PLUS (mode,
6477 XEXP (x, 0),
6478 gen_rtx_MULT (mode,
6479 XEXP (x, 1),
6480 constm1_rtx)),
6481 ext_val, benefit);
6483 case MULT:
6484 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6485 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6486 if (arg0 == 0 || arg1 == 0)
6487 return NULL_RTX;
6489 /* Put constant last, CONST_INT last if both constant. */
6490 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6491 && GET_CODE (arg1) != CONST_INT)
6492 tem = arg0, arg0 = arg1, arg1 = tem;
6494 /* If second argument is not now constant, not giv. */
6495 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6496 return NULL_RTX;
6498 /* Handle multiply by 0 or 1. */
6499 if (arg1 == const0_rtx)
6500 return const0_rtx;
6502 else if (arg1 == const1_rtx)
6503 return arg0;
6505 switch (GET_CODE (arg0))
6507 case REG:
6508 /* biv * invar. Done. */
6509 return gen_rtx_MULT (mode, arg0, arg1);
6511 case CONST_INT:
6512 /* Product of two constants. */
6513 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6515 case USE:
6516 /* invar * invar is a giv, but attempt to simplify it somehow. */
6517 if (GET_CODE (arg1) != CONST_INT)
6518 return NULL_RTX;
6520 arg0 = XEXP (arg0, 0);
6521 if (GET_CODE (arg0) == MULT)
6523 /* (invar_0 * invar_1) * invar_2. Associate. */
6524 return simplify_giv_expr (loop,
6525 gen_rtx_MULT (mode,
6526 XEXP (arg0, 0),
6527 gen_rtx_MULT (mode,
6528 XEXP (arg0, 1),
6530 arg1)),
6531 ext_val, benefit);
6533 /* Propagate the MULT expressions to the innermost nodes. */
6534 else if (GET_CODE (arg0) == PLUS)
6536 /* (invar_0 + invar_1) * invar_2. Distribute. */
6537 return simplify_giv_expr (loop,
6538 gen_rtx_PLUS (mode,
6539 gen_rtx_MULT (mode,
6540 XEXP (arg0, 0),
6542 arg1),
6543 gen_rtx_MULT (mode,
6544 XEXP (arg0, 1),
6546 arg1)),
6547 ext_val, benefit);
6549 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6551 case MULT:
6552 /* (a * invar_1) * invar_2. Associate. */
6553 return simplify_giv_expr (loop,
6554 gen_rtx_MULT (mode,
6555 XEXP (arg0, 0),
6556 gen_rtx_MULT (mode,
6557 XEXP (arg0, 1),
6558 arg1)),
6559 ext_val, benefit);
6561 case PLUS:
6562 /* (a + invar_1) * invar_2. Distribute. */
6563 return simplify_giv_expr (loop,
6564 gen_rtx_PLUS (mode,
6565 gen_rtx_MULT (mode,
6566 XEXP (arg0, 0),
6567 arg1),
6568 gen_rtx_MULT (mode,
6569 XEXP (arg0, 1),
6570 arg1)),
6571 ext_val, benefit);
6573 default:
6574 abort ();
6577 case ASHIFT:
6578 /* Shift by constant is multiply by power of two. */
6579 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6580 return 0;
6582 return
6583 simplify_giv_expr (loop,
6584 gen_rtx_MULT (mode,
6585 XEXP (x, 0),
6586 GEN_INT ((HOST_WIDE_INT) 1
6587 << INTVAL (XEXP (x, 1)))),
6588 ext_val, benefit);
6590 case NEG:
6591 /* "-a" is "a * (-1)" */
6592 return simplify_giv_expr (loop,
6593 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6594 ext_val, benefit);
6596 case NOT:
6597 /* "~a" is "-a - 1". Silly, but easy. */
6598 return simplify_giv_expr (loop,
6599 gen_rtx_MINUS (mode,
6600 gen_rtx_NEG (mode, XEXP (x, 0)),
6601 const1_rtx),
6602 ext_val, benefit);
6604 case USE:
6605 /* Already in proper form for invariant. */
6606 return x;
6608 case SIGN_EXTEND:
6609 case ZERO_EXTEND:
6610 case TRUNCATE:
6611 /* Conditionally recognize extensions of simple IVs. After we've
6612 computed loop traversal counts and verified the range of the
6613 source IV, we'll reevaluate this as a GIV. */
6614 if (*ext_val == NULL_RTX)
6616 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6617 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6619 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6620 return arg0;
6623 goto do_default;
6625 case REG:
6626 /* If this is a new register, we can't deal with it. */
6627 if (REGNO (x) >= max_reg_before_loop)
6628 return 0;
6630 /* Check for biv or giv. */
6631 switch (REG_IV_TYPE (ivs, REGNO (x)))
6633 case BASIC_INDUCT:
6634 return x;
6635 case GENERAL_INDUCT:
6637 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6639 /* Form expression from giv and add benefit. Ensure this giv
6640 can derive another and subtract any needed adjustment if so. */
6642 /* Increasing the benefit here is risky. The only case in which it
6643 is arguably correct is if this is the only use of V. In other
6644 cases, this will artificially inflate the benefit of the current
6645 giv, and lead to suboptimal code. Thus, it is disabled, since
6646 potentially not reducing an only marginally beneficial giv is
6647 less harmful than reducing many givs that are not really
6648 beneficial. */
6650 rtx single_use = regs->array[REGNO (x)].single_usage;
6651 if (single_use && single_use != const0_rtx)
6652 *benefit += v->benefit;
6655 if (v->cant_derive)
6656 return 0;
6658 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6659 v->src_reg, v->mult_val),
6660 v->add_val);
6662 if (v->derive_adjustment)
6663 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6664 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6665 if (*ext_val)
6667 if (!v->ext_dependent)
6668 return arg0;
6670 else
6672 *ext_val = v->ext_dependent;
6673 return arg0;
6675 return 0;
6678 default:
6679 do_default:
6680 /* If it isn't an induction variable, and it is invariant, we
6681 may be able to simplify things further by looking through
6682 the bits we just moved outside the loop. */
6683 if (loop_invariant_p (loop, x) == 1)
6685 struct movable *m;
6686 struct loop_movables *movables = LOOP_MOVABLES (loop);
6688 for (m = movables->head; m; m = m->next)
6689 if (rtx_equal_p (x, m->set_dest))
6691 /* Ok, we found a match. Substitute and simplify. */
6693 /* If we match another movable, we must use that, as
6694 this one is going away. */
6695 if (m->match)
6696 return simplify_giv_expr (loop, m->match->set_dest,
6697 ext_val, benefit);
6699 /* If consec is non-zero, this is a member of a group of
6700 instructions that were moved together. We handle this
6701 case only to the point of seeking to the last insn and
6702 looking for a REG_EQUAL. Fail if we don't find one. */
6703 if (m->consec != 0)
6705 int i = m->consec;
6706 tem = m->insn;
6709 tem = NEXT_INSN (tem);
6711 while (--i > 0);
6713 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6714 if (tem)
6715 tem = XEXP (tem, 0);
6717 else
6719 tem = single_set (m->insn);
6720 if (tem)
6721 tem = SET_SRC (tem);
6724 if (tem)
6726 /* What we are most interested in is pointer
6727 arithmetic on invariants -- only take
6728 patterns we may be able to do something with. */
6729 if (GET_CODE (tem) == PLUS
6730 || GET_CODE (tem) == MULT
6731 || GET_CODE (tem) == ASHIFT
6732 || GET_CODE (tem) == CONST_INT
6733 || GET_CODE (tem) == SYMBOL_REF)
6735 tem = simplify_giv_expr (loop, tem, ext_val,
6736 benefit);
6737 if (tem)
6738 return tem;
6740 else if (GET_CODE (tem) == CONST
6741 && GET_CODE (XEXP (tem, 0)) == PLUS
6742 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6743 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6745 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6746 ext_val, benefit);
6747 if (tem)
6748 return tem;
6751 break;
6754 break;
6757 /* Fall through to general case. */
6758 default:
6759 /* If invariant, return as USE (unless CONST_INT).
6760 Otherwise, not giv. */
6761 if (GET_CODE (x) == USE)
6762 x = XEXP (x, 0);
6764 if (loop_invariant_p (loop, x) == 1)
6766 if (GET_CODE (x) == CONST_INT)
6767 return x;
6768 if (GET_CODE (x) == CONST
6769 && GET_CODE (XEXP (x, 0)) == PLUS
6770 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6771 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6772 x = XEXP (x, 0);
6773 return gen_rtx_USE (mode, x);
6775 else
6776 return 0;
6780 /* This routine folds invariants such that there is only ever one
6781 CONST_INT in the summation. It is only used by simplify_giv_expr. */
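/* Worked sketch with hypothetical operands: folding the CONST_INT 5 into
   (plus (plus (reg a) (const_int 3)) (reg b)) recurses into the inner
   PLUS and yields (plus (plus (reg a) (const_int 8)) (reg b)), so the
   summation keeps a single CONST_INT.  */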
6783 static rtx
6784 sge_plus_constant (x, c)
6785 rtx x, c;
6787 if (GET_CODE (x) == CONST_INT)
6788 return GEN_INT (INTVAL (x) + INTVAL (c));
6789 else if (GET_CODE (x) != PLUS)
6790 return gen_rtx_PLUS (GET_MODE (x), x, c);
6791 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6793 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6794 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6796 else if (GET_CODE (XEXP (x, 0)) == PLUS
6797 || GET_CODE (XEXP (x, 1)) != PLUS)
6799 return gen_rtx_PLUS (GET_MODE (x),
6800 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6802 else
6804 return gen_rtx_PLUS (GET_MODE (x),
6805 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6809 static rtx
6810 sge_plus (mode, x, y)
6811 enum machine_mode mode;
6812 rtx x, y;
6814 while (GET_CODE (y) == PLUS)
6816 rtx a = XEXP (y, 0);
6817 if (GET_CODE (a) == CONST_INT)
6818 x = sge_plus_constant (x, a);
6819 else
6820 x = gen_rtx_PLUS (mode, x, a);
6821 y = XEXP (y, 1);
6823 if (GET_CODE (y) == CONST_INT)
6824 x = sge_plus_constant (x, y);
6825 else
6826 x = gen_rtx_PLUS (mode, x, y);
6827 return x;
6830 /* Help detect a giv that is calculated by several consecutive insns;
6831 for example,
6832 giv = biv * M
6833 giv = giv + A
6834 The caller has already identified the first insn P as having a giv as dest;
6835 we check that all other insns that set the same register follow
6836 immediately after P, that they alter nothing else,
6837 and that the result of the last is still a giv.
6839 The value is 0 if the reg set in P is not really a giv.
6840 Otherwise, the value is the amount gained by eliminating
6841 all the consecutive insns that compute the value.
6843 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6844 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6846 The coefficients of the ultimate giv value are stored in
6847 *MULT_VAL and *ADD_VAL. */
6849 static int
6850 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6851 add_val, mult_val, ext_val, last_consec_insn)
6852 const struct loop *loop;
6853 int first_benefit;
6854 rtx p;
6855 rtx src_reg;
6856 rtx dest_reg;
6857 rtx *add_val;
6858 rtx *mult_val;
6859 rtx *ext_val;
6860 rtx *last_consec_insn;
6862 struct loop_ivs *ivs = LOOP_IVS (loop);
6863 struct loop_regs *regs = LOOP_REGS (loop);
6864 int count;
6865 enum rtx_code code;
6866 int benefit;
6867 rtx temp;
6868 rtx set;
6870 /* Indicate that this is a giv so that we can update the value produced in
6871 each insn of the multi-insn sequence.
6873 This induction structure will be used only by the call to
6874 general_induction_var below, so we can allocate it on our stack.
6875 If this is a giv, our caller will replace the induct var entry with
6876 a new induction structure. */
6877 struct induction *v;
6879 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6880 return 0;
6882 v = (struct induction *) alloca (sizeof (struct induction));
6883 v->src_reg = src_reg;
6884 v->mult_val = *mult_val;
6885 v->add_val = *add_val;
6886 v->benefit = first_benefit;
6887 v->cant_derive = 0;
6888 v->derive_adjustment = 0;
6889 v->ext_dependent = NULL_RTX;
6891 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6892 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6894 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6896 while (count > 0)
6898 p = NEXT_INSN (p);
6899 code = GET_CODE (p);
6901 /* If libcall, skip to end of call sequence. */
6902 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6903 p = XEXP (temp, 0);
6905 if (code == INSN
6906 && (set = single_set (p))
6907 && GET_CODE (SET_DEST (set)) == REG
6908 && SET_DEST (set) == dest_reg
6909 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6910 add_val, mult_val, ext_val, 0,
6911 &benefit, VOIDmode)
6912 /* Giv created by equivalent expression. */
6913 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6914 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6915 add_val, mult_val, ext_val, 0,
6916 &benefit, VOIDmode)))
6917 && src_reg == v->src_reg)
6919 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6920 benefit += libcall_benefit (p);
6922 count--;
6923 v->mult_val = *mult_val;
6924 v->add_val = *add_val;
6925 v->benefit += benefit;
6927 else if (code != NOTE)
6929 /* Allow insns that set something other than this giv to a
6930 constant. Such insns are needed on machines which cannot
6931 include long constants and should not disqualify a giv. */
6932 if (code == INSN
6933 && (set = single_set (p))
6934 && SET_DEST (set) != dest_reg
6935 && CONSTANT_P (SET_SRC (set)))
6936 continue;
6938 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6939 return 0;
6943 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6944 *last_consec_insn = p;
6945 return v->benefit;
6948 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6949 represented by G1. If no such expression can be found, or it is clear that
6950 it cannot possibly be a valid address, 0 is returned.
6952 To perform the computation, we note that
6953 G1 = x * v + a and
6954 G2 = y * v + b
6955 where `v' is the biv.
6957 So G2 = (y/x) * G1 + (b - a*y/x).
6959 Note that MULT = y/x.
6961 Update: A and B are now allowed to be additive expressions such that
6962 B contains all variables in A. That is, computing B-A will not require
6963 subtracting variables. */
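/* Numeric sanity sketch, not part of the pass (all values hypothetical):
   with x = 2, a = 3, y = 4, b = 11, the multiplier is y/x = 2 and the
   addend is b - a*y/x = 11 - 6 = 5, so G2 = 4*v + 11 equals 2*G1 + 5 for
   every v, where G1 = 2*v + 3.  */
#if 0
static int
express_from_check (v)
     int v;
{
  int g1, g2;

  g1 = 2 * v + 3;
  g2 = 4 * v + 11;
  return g2 == 2 * g1 + 5;      /* Always nonzero.  */
}
#endif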
6965 static rtx
6966 express_from_1 (a, b, mult)
6967 rtx a, b, mult;
6969 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6971 if (mult == const0_rtx)
6972 return b;
6974 /* If MULT is not 1, we cannot handle A with non-constants, since we
6975 would then be required to subtract multiples of the registers in A.
6976 This is theoretically possible, and may even apply to some Fortran
6977 constructs, but it is a lot of work and we do not attempt it here. */
6979 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6980 return NULL_RTX;
6982 /* In general these structures are sorted top to bottom (down the PLUS
6983 chain), but not left to right across the PLUS. If B is a higher
6984 order giv than A, we can strip one level and recurse. If A is higher
6985 order, we'll eventually bail out, but won't know that until the end.
6986 If they are the same, we'll strip one level around this loop. */
6988 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6990 rtx ra, rb, oa, ob, tmp;
6992 ra = XEXP (a, 0), oa = XEXP (a, 1);
6993 if (GET_CODE (ra) == PLUS)
6994 tmp = ra, ra = oa, oa = tmp;
6996 rb = XEXP (b, 0), ob = XEXP (b, 1);
6997 if (GET_CODE (rb) == PLUS)
6998 tmp = rb, rb = ob, ob = tmp;
7000 if (rtx_equal_p (ra, rb))
7001 /* We matched: remove one reg completely. */
7002 a = oa, b = ob;
7003 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7004 /* An alternate match. */
7005 a = oa, b = rb;
7006 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7007 /* An alternate match. */
7008 a = ra, b = ob;
7009 else
7011 /* Indicates an extra register in B. Strip one level from B and
7012 recurse, hoping B was the higher order expression. */
7013 ob = express_from_1 (a, ob, mult);
7014 if (ob == NULL_RTX)
7015 return NULL_RTX;
7016 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7020 /* Here we are at the last level of A, go through the cases hoping to
7021 get rid of everything but a constant. */
7023 if (GET_CODE (a) == PLUS)
7025 rtx ra, oa;
7027 ra = XEXP (a, 0), oa = XEXP (a, 1);
7028 if (rtx_equal_p (oa, b))
7029 oa = ra;
7030 else if (!rtx_equal_p (ra, b))
7031 return NULL_RTX;
7033 if (GET_CODE (oa) != CONST_INT)
7034 return NULL_RTX;
7036 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7038 else if (GET_CODE (a) == CONST_INT)
7040 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7042 else if (CONSTANT_P (a))
7044 enum machine_mode mode_a = GET_MODE (a);
7045 enum machine_mode mode_b = GET_MODE (b);
7046 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7047 return simplify_gen_binary (MINUS, mode, b, a);
7049 else if (GET_CODE (b) == PLUS)
7051 if (rtx_equal_p (a, XEXP (b, 0)))
7052 return XEXP (b, 1);
7053 else if (rtx_equal_p (a, XEXP (b, 1)))
7054 return XEXP (b, 0);
7055 else
7056 return NULL_RTX;
7058 else if (rtx_equal_p (a, b))
7059 return const0_rtx;
7061 return NULL_RTX;
static rtx
7065 express_from (g1, g2)
7066 struct induction *g1, *g2;
7068 rtx mult, add;
7070 /* The value that G1 will be multiplied by must be a constant integer. Also,
7071 the only chance we have of getting a valid address is if y/x (see above
7072 for notation) is also an integer. */
7073 if (GET_CODE (g1->mult_val) == CONST_INT
7074 && GET_CODE (g2->mult_val) == CONST_INT)
7076 if (g1->mult_val == const0_rtx
7077 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7078 return NULL_RTX;
7079 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7081 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7082 mult = const1_rtx;
7083 else
7085 /* ??? Find out if one is a multiple of the other? */
7086 return NULL_RTX;
7089 add = express_from_1 (g1->add_val, g2->add_val, mult);
7090 if (add == NULL_RTX)
7092 /* Failed. If we've got a multiplication factor between G1 and G2,
7093 scale G1's addend and try again. */
7094 if (INTVAL (mult) > 1)
7096 rtx g1_add_val = g1->add_val;
7097 if (GET_CODE (g1_add_val) == MULT
7098 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7100 HOST_WIDE_INT m;
7101 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7102 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7103 XEXP (g1_add_val, 0), GEN_INT (m));
7105 else
7107 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7108 mult);
7111 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7114 if (add == NULL_RTX)
7115 return NULL_RTX;
7117 /* Form simplified final result. */
7118 if (mult == const0_rtx)
7119 return add;
7120 else if (mult == const1_rtx)
7121 mult = g1->dest_reg;
7122 else
7123 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7125 if (add == const0_rtx)
7126 return mult;
7127 else
7129 if (GET_CODE (add) == PLUS
7130 && CONSTANT_P (XEXP (add, 1)))
7132 rtx tem = XEXP (add, 1);
7133 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7134 add = tem;
7137 return gen_rtx_PLUS (g2->mode, mult, add);
7141 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7142 represented by G1. This indicates that G2 should be combined with G1 and
7143 that G2 can use (either directly or via an address expression) a register
7144 used to represent G1. */
7146 static rtx
7147 combine_givs_p (g1, g2)
7148 struct induction *g1, *g2;
7150 rtx comb, ret;
7152 /* With the introduction of ext dependent givs, we must be careful about modes.
7153 G2 must not use a wider mode than G1. */
7154 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7155 return NULL_RTX;
7157 ret = comb = express_from (g1, g2);
7158 if (comb == NULL_RTX)
7159 return NULL_RTX;
7160 if (g1->mode != g2->mode)
7161 ret = gen_lowpart (g2->mode, comb);
7163 /* If these givs are identical, they can be combined. We use the results
7164 of express_from because the addends are not in a canonical form, so
7165 rtx_equal_p is a weaker test. */
7166 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7167 combination to be the other way round. */
7168 if (comb == g1->dest_reg
7169 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7171 return ret;
7174 /* If G2 can be expressed as a function of G1 and that function is valid
7175 as an address and no more expensive than using a register for G2,
7176 the expression of G2 in terms of G1 can be used. */
7177 if (ret != NULL_RTX
7178 && g2->giv_type == DEST_ADDR
7179 && memory_address_p (GET_MODE (g2->mem), ret)
7180 /* ??? Loses, especially with -fforce-addr, where *g2->location
7181 will always be a register, and so anything more complicated
7182 gets discarded. */
7183 #if 0
7184 #ifdef ADDRESS_COST
7185 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7186 #else
7187 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7188 #endif
7189 #endif
7192 return ret;
7195 return NULL_RTX;
7198 /* Check each extension dependent giv in this class to see if its
7199 root biv is safe from wrapping in the interior mode, which would
7200 make the giv illegal. */
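/* Worked sketch with hypothetical numbers: a QImode biv that starts at
   100, is incremented by 1, and runs for 100 iterations ends at 200.
   Zero extension is safe (the values stay within 0..255, so ze_ok is
   set), but sign extension is not (the values pass 127), so SIGN_EXTEND
   and TRUNCATE givs rooted at this biv are invalidated below.  */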
7202 static void
7203 check_ext_dependent_givs (bl, loop_info)
7204 struct iv_class *bl;
7205 struct loop_info *loop_info;
7207 int ze_ok = 0, se_ok = 0, info_ok = 0;
7208 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7209 HOST_WIDE_INT start_val;
7210 unsigned HOST_WIDE_INT u_end_val = 0;
7211 unsigned HOST_WIDE_INT u_start_val = 0;
7212 rtx incr = pc_rtx;
7213 struct induction *v;
7215 /* Make sure the iteration data is available. We must have
7216 constants in order to be certain of no overflow. */
7217 /* ??? An unknown iteration count with an increment of +-1
7218 combined with friendly exit tests against an invariant
7219 value is also amenable to optimization. Not implemented. */
7220 if (loop_info->n_iterations > 0
7221 && bl->initial_value
7222 && GET_CODE (bl->initial_value) == CONST_INT
7223 && (incr = biv_total_increment (bl))
7224 && GET_CODE (incr) == CONST_INT
7225 /* Make sure the host can represent the arithmetic. */
7226 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7228 unsigned HOST_WIDE_INT abs_incr, total_incr;
7229 HOST_WIDE_INT s_end_val;
7230 int neg_incr;
7232 info_ok = 1;
7233 start_val = INTVAL (bl->initial_value);
7234 u_start_val = start_val;
7236 neg_incr = 0, abs_incr = INTVAL (incr);
7237 if (INTVAL (incr) < 0)
7238 neg_incr = 1, abs_incr = -abs_incr;
7239 total_incr = abs_incr * loop_info->n_iterations;
7241 /* Check for host arithmetic overflow. */
7242 if (total_incr / loop_info->n_iterations == abs_incr)
7244 unsigned HOST_WIDE_INT u_max;
7245 HOST_WIDE_INT s_max;
7247 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7248 s_end_val = u_end_val;
7249 u_max = GET_MODE_MASK (biv_mode);
7250 s_max = u_max >> 1;
7252 /* Check zero extension of biv ok. */
7253 if (start_val >= 0
7254 /* Check for host arithmetic overflow. */
7255 && (neg_incr
7256 ? u_end_val < u_start_val
7257 : u_end_val > u_start_val)
7258 /* Check for target arithmetic overflow. */
7259 && (neg_incr
7260 ? 1 /* taken care of with host overflow */
7261 : u_end_val <= u_max))
7263 ze_ok = 1;
7266 /* Check sign extension of biv ok. */
7267 /* ??? While it is true that overflow with signed and pointer
7268 arithmetic is undefined, I fear too many programmers don't
7269 keep this fact in mind -- myself included on occasion.
7270 So we leave the signed overflow optimizations alone. */
7271 if (start_val >= -s_max - 1
7272 /* Check for host arithmetic overflow. */
7273 && (neg_incr
7274 ? s_end_val < start_val
7275 : s_end_val > start_val)
7276 /* Check for target arithmetic overflow. */
7277 && (neg_incr
7278 ? s_end_val >= -s_max - 1
7279 : s_end_val <= s_max))
7281 se_ok = 1;
7286 /* Invalidate givs that fail the tests. */
7287 for (v = bl->giv; v; v = v->next_iv)
7288 if (v->ext_dependent)
7290 enum rtx_code code = GET_CODE (v->ext_dependent);
7291 int ok = 0;
7293 switch (code)
7295 case SIGN_EXTEND:
7296 ok = se_ok;
7297 break;
7298 case ZERO_EXTEND:
7299 ok = ze_ok;
7300 break;
7302 case TRUNCATE:
7303 /* We don't know whether this value is being used as either
7304 signed or unsigned, so to safely truncate we must satisfy
7305 both. The initial check here verifies the BIV itself;
7306 once that is successful we may check its range wrt the
7307 derived GIV. */
7308 if (se_ok && ze_ok)
7310 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7311 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7313 /* We know from the above that both endpoints are nonnegative,
7314 and that there is no wrapping. Verify that both endpoints
7315 are within the (signed) range of the outer mode. */
7316 if (u_start_val <= max && u_end_val <= max)
7317 ok = 1;
7319 break;
7321 default:
7322 abort ();
7325 if (ok)
7327 if (loop_dump_stream)
7329 fprintf (loop_dump_stream,
7330 "Verified ext dependent giv at %d of reg %d\n",
7331 INSN_UID (v->insn), bl->regno);
7334 else
7336 if (loop_dump_stream)
7338 const char *why;
7340 if (info_ok)
7341 why = "biv iteration values overflowed";
7342 else
7344 if (incr == pc_rtx)
7345 incr = biv_total_increment (bl);
7346 if (incr == const1_rtx)
7347 why = "biv iteration info incomplete; incr by 1";
7348 else
7349 why = "biv iteration info incomplete";
7352 fprintf (loop_dump_stream,
7353 "Failed ext dependent giv at %d, %s\n",
7354 INSN_UID (v->insn), why);
7356 v->ignore = 1;
7357 bl->all_reduced = 0;
7362 /* Generate a version of VALUE in a mode appropriate for initializing V. */
static rtx
7365 extend_value_for_giv (v, value)
7366 struct induction *v;
7367 rtx value;
7369 rtx ext_dep = v->ext_dependent;
7371 if (! ext_dep)
7372 return value;
7374 /* Recall that check_ext_dependent_givs verified that the known bounds
7375 of a biv did not overflow or wrap with respect to the extension for
7376 the giv. Therefore, constants need no additional adjustment. */
7377 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7378 return value;
7380 /* Otherwise, we must adjust the value to compensate for the
7381 differing modes of the biv and the giv. */
7382 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7385 struct combine_givs_stats
7387 int giv_number;
7388 int total_benefit;
7391 static int
7392 cmp_combine_givs_stats (xp, yp)
7393 const PTR xp;
7394 const PTR yp;
7396 const struct combine_givs_stats * const x =
7397 (const struct combine_givs_stats *) xp;
7398 const struct combine_givs_stats * const y =
7399 (const struct combine_givs_stats *) yp;
7400 int d;
7401 d = y->total_benefit - x->total_benefit;
7402 /* Stabilize the sort. */
7403 if (!d)
7404 d = x->giv_number - y->giv_number;
7405 return d;
7408 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7409 any other. If so, point SAME to the giv combined with and set NEW_REG to
7410 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7411 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7413 static void
7414 combine_givs (regs, bl)
7415 struct loop_regs *regs;
7416 struct iv_class *bl;
7418 /* Additional benefit to add for being combined multiple times. */
7419 const int extra_benefit = 3;
7421 struct induction *g1, *g2, **giv_array;
7422 int i, j, k, giv_count;
7423 struct combine_givs_stats *stats;
7424 rtx *can_combine;
7426 /* Count givs, because bl->giv_count is incorrect here. */
7427 giv_count = 0;
7428 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7429 if (!g1->ignore)
7430 giv_count++;
7432 giv_array
7433 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7434 i = 0;
7435 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7436 if (!g1->ignore)
7437 giv_array[i++] = g1;
7439 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7440 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7442 for (i = 0; i < giv_count; i++)
7444 int this_benefit;
7445 rtx single_use;
7447 g1 = giv_array[i];
7448 stats[i].giv_number = i;
7450 /* If a DEST_REG GIV is used only once, do not allow it to combine
7451 with anything, for in doing so we will gain nothing that cannot
7452 be had by simply letting the GIV with which we would have combined
7453 be reduced on its own. The loss shows up in particular with
7454 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7455 be seen elsewhere as well. */
7456 if (g1->giv_type == DEST_REG
7457 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7458 && single_use != const0_rtx)
7459 continue;
7461 this_benefit = g1->benefit;
7462 /* Add an additional weight for zero addends. */
7463 if (g1->no_const_addval)
7464 this_benefit += 1;
7466 for (j = 0; j < giv_count; j++)
7468 rtx this_combine;
7470 g2 = giv_array[j];
7471 if (g1 != g2
7472 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7474 can_combine[i * giv_count + j] = this_combine;
7475 this_benefit += g2->benefit + extra_benefit;
7478 stats[i].total_benefit = this_benefit;
7481 /* Iterate, combining until we can't. */
7482 restart:
7483 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7485 if (loop_dump_stream)
7487 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7488 for (k = 0; k < giv_count; k++)
7490 g1 = giv_array[stats[k].giv_number];
7491 if (!g1->combined_with && !g1->same)
7492 fprintf (loop_dump_stream, " {%d, %d}",
7493 INSN_UID (giv_array[stats[k].giv_number]->insn),
7494 stats[k].total_benefit);
7496 putc ('\n', loop_dump_stream);
7499 for (k = 0; k < giv_count; k++)
7501 int g1_add_benefit = 0;
7503 i = stats[k].giv_number;
7504 g1 = giv_array[i];
7506 /* If it has already been combined, skip. */
7507 if (g1->combined_with || g1->same)
7508 continue;
7510 for (j = 0; j < giv_count; j++)
7512 g2 = giv_array[j];
7513 if (g1 != g2 && can_combine[i * giv_count + j]
7514 /* If it has already been combined, skip. */
7515 && ! g2->same && ! g2->combined_with)
7517 int l;
7519 g2->new_reg = can_combine[i * giv_count + j];
7520 g2->same = g1;
7521 /* For destination, we now may replace by mem expression instead
7522 of register. This changes the costs considerably, so add the
7523 compensation. */
7524 if (g2->giv_type == DEST_ADDR)
7525 g2->benefit = (g2->benefit + reg_address_cost
7526 - address_cost (g2->new_reg,
7527 GET_MODE (g2->mem)));
7528 g1->combined_with++;
7529 g1->lifetime += g2->lifetime;
7531 g1_add_benefit += g2->benefit;
7533 /* ??? The new final_[bg]iv_value code does a much better job
7534 of finding replaceable givs, and hence this code may no
7535 longer be necessary. */
7536 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7537 g1_add_benefit -= copy_cost;
7539 /* To help optimize the next set of combinations, remove
7540 this giv from the benefits of other potential mates. */
7541 for (l = 0; l < giv_count; ++l)
7543 int m = stats[l].giv_number;
7544 if (can_combine[m * giv_count + j])
7545 stats[l].total_benefit -= g2->benefit + extra_benefit;
7548 if (loop_dump_stream)
7549 fprintf (loop_dump_stream,
7550 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7551 INSN_UID (g2->insn), INSN_UID (g1->insn),
7552 g1->benefit, g1_add_benefit, g1->lifetime);
7556 /* To help optimize the next set of combinations, remove
7557 this giv from the benefits of other potential mates. */
7558 if (g1->combined_with)
7560 for (j = 0; j < giv_count; ++j)
7562 int m = stats[j].giv_number;
7563 if (can_combine[m * giv_count + i])
7564 stats[j].total_benefit -= g1->benefit + extra_benefit;
7567 g1->benefit += g1_add_benefit;
7569 /* We've finished with this giv, and everything it touched.
7570 Restart the combination so that the weights for the
7571 rest of the givs are properly taken into account. */
7572 /* ??? Ideally we would compact the arrays at this point, so
7573 as to not cover old ground. But sanely compacting
7574 can_combine is tricky. */
7575 goto restart;
7579 /* Clean up. */
7580 free (stats);
7581 free (can_combine);
7584 /* Generate sequence for REG = B * M + A. */
7586 static rtx
7587 gen_add_mult (b, m, a, reg)
7588 rtx b; /* initial value of basic induction variable */
7589 rtx m; /* multiplicative constant */
7590 rtx a; /* additive constant */
7591 rtx reg; /* destination register */
7593 rtx seq;
7594 rtx result;
7596 start_sequence ();
7597 /* Use unsigned arithmetic. */
7598 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7599 if (reg != result)
7600 emit_move_insn (reg, result);
7601 seq = gen_sequence ();
7602 end_sequence ();
7604 return seq;
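/* An editorial illustration of the sequence produced above: with
   B = (reg i), M = (const_int 4), A = (const_int 8) and destination R,
   the emitted insns compute

       r = i * 4 + 8;

   where expand_mult_add will normally use a shift and add rather than a
   real multiply for such a small constant multiplier.  */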
7608 /* Update registers created in insn sequence SEQ. */
7610 static void
7611 loop_regs_update (loop, seq)
7612 const struct loop *loop ATTRIBUTE_UNUSED;
7613 rtx seq;
7615 /* Update register info for alias analysis. */
7617 if (GET_CODE (seq) == SEQUENCE)
7619 int i;
7620 for (i = 0; i < XVECLEN (seq, 0); ++i)
7622 rtx set = single_set (XVECEXP (seq, 0, i));
7623 if (set && GET_CODE (SET_DEST (set)) == REG)
7624 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7627 else
7629 rtx set = single_set (seq);
7630 if (set && GET_CODE (SET_DEST (set)) == REG)
7631 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7636 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7638 void
7639 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7640 const struct loop *loop;
7641 rtx b; /* initial value of basic induction variable */
7642 rtx m; /* multiplicative constant */
7643 rtx a; /* additive constant */
7644 rtx reg; /* destination register */
7645 basic_block before_bb;
7646 rtx before_insn;
7648 rtx seq;
7650 if (! before_insn)
7652 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7653 return;
7656 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7657 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7659 /* Increase the lifetime of any invariants moved further in code. */
7660 update_reg_last_use (a, before_insn);
7661 update_reg_last_use (b, before_insn);
7662 update_reg_last_use (m, before_insn);
7664 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7666 /* It is possible that the expansion created lots of new registers.
7667 Iterate over the sequence we just created and record them all. */
7668 loop_regs_update (loop, seq);
7672 /* Emit insns after the loop to set REG = B * M + A. */
7674 void
7675 loop_iv_add_mult_sink (loop, b, m, a, reg)
7676 const struct loop *loop;
7677 rtx b; /* initial value of basic induction variable */
7678 rtx m; /* multiplicative constant */
7679 rtx a; /* additive constant */
7680 rtx reg; /* destination register */
7682 rtx seq;
7684 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7685 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7687 /* Increase the lifetime of any invariants moved further in code.
7688 ??? Is this really necessary? */
7689 update_reg_last_use (a, loop->sink);
7690 update_reg_last_use (b, loop->sink);
7691 update_reg_last_use (m, loop->sink);
7693 loop_insn_sink (loop, seq);
7695 /* It is possible that the expansion created lots of new registers.
7696 Iterate over the sequence we just created and record them all. */
7697 loop_regs_update (loop, seq);
7701 /* Emit insns in the loop pre-header to set REG = B * M + A. */
7703 void
7704 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7705 const struct loop *loop;
7706 rtx b; /* initial value of basic induction variable */
7707 rtx m; /* multiplicative constant */
7708 rtx a; /* additive constant */
7709 rtx reg; /* destination register */
7711 rtx seq;
7713 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7714 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7716 loop_insn_hoist (loop, seq);
7718 /* It is possible that the expansion created lots of new registers.
7719 Iterate over the sequence we just created and record them all. */
7720 loop_regs_update (loop, seq);
7725 /* Similar to gen_add_mult, but compute cost rather than generating
7726 sequence. */
7728 static int
7729 iv_add_mult_cost (b, m, a, reg)
7730 rtx b; /* initial value of basic induction variable */
7731 rtx m; /* multiplicative constant */
7732 rtx a; /* additive constant */
7733 rtx reg; /* destination register */
7735 int cost = 0;
7736 rtx last, result;
7738 start_sequence ();
7739 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7740 if (reg != result)
7741 emit_move_insn (reg, result);
7742 last = get_last_insn ();
7743 while (last)
7745 rtx t = single_set (last);
7746 if (t)
7747 cost += rtx_cost (SET_SRC (t), SET);
7748 last = PREV_INSN (last);
7750 end_sequence ();
7751 return cost;
7754 /* Test whether A * B can be computed without
7755 an actual multiply insn. Value is 1 if so. */
7757 static int
7758 product_cheap_p (a, b)
7759 rtx a;
7760 rtx b;
7762 int i;
7763 rtx tmp;
7764 int win = 1;
7766 /* If only one is constant, make it B. */
7767 if (GET_CODE (a) == CONST_INT)
7768 tmp = a, a = b, b = tmp;
7770 /* If A is still a constant, both operands were constants, so we don't need a multiply. */
7771 if (GET_CODE (a) == CONST_INT)
7772 return 1;
7774 /* If B is not a constant, neither operand is, so we would need a multiply. */
7775 if (GET_CODE (b) != CONST_INT)
7776 return 0;
7778 /* One operand is constant, so we might not need a multiply insn. Generate the
7779 code for the multiply and see whether a call, a multiply insn, or a long sequence
7780 of insns is generated. */
7782 start_sequence ();
7783 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7784 tmp = gen_sequence ();
7785 end_sequence ();
7787 if (GET_CODE (tmp) == SEQUENCE)
7789 if (XVEC (tmp, 0) == 0)
7790 win = 1;
7791 else if (XVECLEN (tmp, 0) > 3)
7792 win = 0;
7793 else
7794 for (i = 0; i < XVECLEN (tmp, 0); i++)
7796 rtx insn = XVECEXP (tmp, 0, i);
7798 if (GET_CODE (insn) != INSN
7799 || (GET_CODE (PATTERN (insn)) == SET
7800 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7801 || (GET_CODE (PATTERN (insn)) == PARALLEL
7802 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7803 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7805 win = 0;
7806 break;
7810 else if (GET_CODE (tmp) == SET
7811 && GET_CODE (SET_SRC (tmp)) == MULT)
7812 win = 0;
7813 else if (GET_CODE (tmp) == PARALLEL
7814 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7815 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7816 win = 0;
7818 return win;
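/* An editorial example of the distinction drawn above: a multiply by a
   constant such as 5 typically expands to a short shift-and-add sequence,

       t = (a << 2) + a;

   which contains no MULT and only a couple of insns, so it is "cheap".
   A constant with no short addition chain, or a target lacking the needed
   patterns, yields a real mult insn or a libcall instead, and the function
   returns 0.  */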
7821 /* Check to see if loop can be terminated by a "decrement and branch until
7822 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7823 Also try reversing an increment loop to a decrement loop
7824 to see if the optimization can be performed.
7825 Value is nonzero if optimization was performed. */
7827 /* This is useful even if the architecture doesn't have such an insn,
7828 because it might change a loop which increments from 0 to n to a loop
7829 which decrements from n to 0. A loop that decrements to zero is usually
7830 faster than one that increments from zero. */
7832 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7833 such as approx_final_value, biv_total_increment, loop_iterations, and
7834 final_[bg]iv_value. */
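/* An editorial example of the reversal (not from the original sources):
   when the counter is used for nothing but counting, a loop such as

       for (i = 0; i < n; i++)
         sum += *p++;

   can be rewritten as

       for (i = n; i != 0; i--)
         sum += *p++;

   so the exit test becomes a compare against zero, or a single
   decrement-and-branch insn on targets that have one.  */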
7836 static int
7837 check_dbra_loop (loop, insn_count)
7838 struct loop *loop;
7839 int insn_count;
7841 struct loop_info *loop_info = LOOP_INFO (loop);
7842 struct loop_regs *regs = LOOP_REGS (loop);
7843 struct loop_ivs *ivs = LOOP_IVS (loop);
7844 struct iv_class *bl;
7845 rtx reg;
7846 rtx jump_label;
7847 rtx final_value;
7848 rtx start_value;
7849 rtx new_add_val;
7850 rtx comparison;
7851 rtx before_comparison;
7852 rtx p;
7853 rtx jump;
7854 rtx first_compare;
7855 int compare_and_branch;
7856 rtx loop_start = loop->start;
7857 rtx loop_end = loop->end;
7859 /* If last insn is a conditional branch, and the insn before tests a
7860 register value, try to optimize it. Otherwise, we can't do anything. */
7862 jump = PREV_INSN (loop_end);
7863 comparison = get_condition_for_loop (loop, jump);
7864 if (comparison == 0)
7865 return 0;
7866 if (!onlyjump_p (jump))
7867 return 0;
7869 /* Try to compute whether the compare/branch at the loop end is one or
7870 two instructions. */
7871 get_condition (jump, &first_compare);
7872 if (first_compare == jump)
7873 compare_and_branch = 1;
7874 else if (first_compare == prev_nonnote_insn (jump))
7875 compare_and_branch = 2;
7876 else
7877 return 0;
7880 /* If more than one condition is present to control the loop, then
7881 do not proceed, as this function does not know how to rewrite
7882 loop tests with more than one condition.
7884 Look backwards from the first insn in the last comparison
7885 sequence and see if we've got another comparison sequence. */
7887 rtx jump1;
7888 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7889 if (GET_CODE (jump1) == JUMP_INSN)
7890 return 0;
7893 /* Check all of the bivs to see if the compare uses one of them.
7894 Skip bivs set more than once because we can't guarantee that
7895 it will be zero on the last iteration. Also skip if the biv is
7896 used between its update and the test insn. */
7898 for (bl = ivs->list; bl; bl = bl->next)
7900 if (bl->biv_count == 1
7901 && ! bl->biv->maybe_multiple
7902 && bl->biv->dest_reg == XEXP (comparison, 0)
7903 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7904 first_compare))
7905 break;
7908 if (! bl)
7909 return 0;
7911 /* Look for the case where the basic induction variable is always
7912 nonnegative, and equals zero on the last iteration.
7913 In this case, add a reg_note REG_NONNEG, which allows the
7914 m68k DBRA instruction to be used. */
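/* For instance (editorial note): a biv decremented by 1, tested with
   (NE biv 0), and initialized to a positive multiple of the decrement can
   never go negative and reaches exactly zero, so the REG_NONNEG note added
   below lets decrement-and-branch-until-zero patterns such as the m68k
   dbra match.  */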
7916 if (((GET_CODE (comparison) == GT
7917 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7918 && INTVAL (XEXP (comparison, 1)) == -1)
7919 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7920 && GET_CODE (bl->biv->add_val) == CONST_INT
7921 && INTVAL (bl->biv->add_val) < 0)
7923 /* The initial value must be greater than 0, and
7924 init_val % -dec_value must be 0, to ensure that the biv equals zero on
7925 the last iteration. */
7927 if (GET_CODE (bl->initial_value) == CONST_INT
7928 && INTVAL (bl->initial_value) > 0
7929 && (INTVAL (bl->initial_value)
7930 % (-INTVAL (bl->biv->add_val))) == 0)
7932 /* register always nonnegative, add REG_NOTE to branch */
7933 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7934 REG_NOTES (jump)
7935 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7936 REG_NOTES (jump));
7937 bl->nonneg = 1;
7939 return 1;
7942 /* If the decrement is 1 and the value was tested as >= 0 before
7943 the loop, then we can safely optimize. */
7944 for (p = loop_start; p; p = PREV_INSN (p))
7946 if (GET_CODE (p) == CODE_LABEL)
7947 break;
7948 if (GET_CODE (p) != JUMP_INSN)
7949 continue;
7951 before_comparison = get_condition_for_loop (loop, p);
7952 if (before_comparison
7953 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7954 && GET_CODE (before_comparison) == LT
7955 && XEXP (before_comparison, 1) == const0_rtx
7956 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7957 && INTVAL (bl->biv->add_val) == -1)
7959 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7960 REG_NOTES (jump)
7961 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7962 REG_NOTES (jump));
7963 bl->nonneg = 1;
7965 return 1;
7969 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7970 && INTVAL (bl->biv->add_val) > 0)
7972 /* Try to change inc to dec, so can apply above optimization. */
7973 /* Can do this if:
7974 all registers modified are induction variables or invariant,
7975 all memory references have non-overlapping addresses
7976 (obviously true if only one write)
7977 allow 2 insns for the compare/jump at the end of the loop. */
7978 /* Also, we must avoid any instructions which use both the reversed
7979 biv and another biv. Such instructions will fail if the loop is
7980 reversed. We meet this condition by requiring that either
7981 no_use_except_counting is true, or else that there is only
7982 one biv. */
7983 int num_nonfixed_reads = 0;
7984 /* 1 if the iteration var is used only to count iterations. */
7985 int no_use_except_counting = 0;
7986 /* 1 if the loop has no memory store, or it has a single memory store
7987 which is reversible. */
7988 int reversible_mem_store = 1;
7990 if (bl->giv_count == 0
7991 && !loop->exit_count
7992 && !loop_info->has_multiple_exit_targets)
7994 rtx bivreg = regno_reg_rtx[bl->regno];
7995 struct iv_class *blt;
7997 /* If there are no givs for this biv, and the only exit is the
7998 fall through at the end of the loop, then
7999 see if perhaps there are no uses except to count. */
8000 no_use_except_counting = 1;
8001 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8002 if (INSN_P (p))
8004 rtx set = single_set (p);
8006 if (set && GET_CODE (SET_DEST (set)) == REG
8007 && REGNO (SET_DEST (set)) == bl->regno)
8008 /* An insn that sets the biv is okay. */
8010 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8011 || p == prev_nonnote_insn (loop_end))
8012 && reg_mentioned_p (bivreg, PATTERN (p)))
8014 /* If either of these insns uses the biv and sets a pseudo
8015 that has more than one usage, then the biv has uses
8016 other than counting since it's used to derive a value
8017 that is used more than one time. */
8018 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8019 regs);
8020 if (regs->multiple_uses)
8022 no_use_except_counting = 0;
8023 break;
8026 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8028 no_use_except_counting = 0;
8029 break;
8033 /* A biv has uses besides counting if it is used to set
8034 another biv. */
8035 for (blt = ivs->list; blt; blt = blt->next)
8036 if (blt->init_set
8037 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8039 no_use_except_counting = 0;
8040 break;
8044 if (no_use_except_counting)
8045 /* No need to worry about MEMs. */
8047 else if (loop_info->num_mem_sets <= 1)
8049 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8050 if (INSN_P (p))
8051 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8053 /* If the loop has a single store, and the destination address is
8054 invariant, then we can't reverse the loop, because this address
8055 might then have the wrong value at loop exit.
8056 This would work if the source was invariant also, however, in that
8057 case, the insn should have been moved out of the loop. */
8059 if (loop_info->num_mem_sets == 1)
8061 struct induction *v;
8063 /* If we could prove that each of the memory locations
8064 written to was different, then we could reverse the
8065 store -- but we don't presently have any way of
8066 knowing that. */
8067 reversible_mem_store = 0;
8069 /* If the store depends on a register that is set after the
8070 store, it depends on the initial value, and is thus not
8071 reversible. */
8072 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8074 if (v->giv_type == DEST_REG
8075 && reg_mentioned_p (v->dest_reg,
8076 PATTERN (loop_info->first_loop_store_insn))
8077 && loop_insn_first_p (loop_info->first_loop_store_insn,
8078 v->insn))
8079 reversible_mem_store = 0;
8083 else
8084 return 0;
8086 /* This code only acts for innermost loops. Also it simplifies
8087 the memory address check by only reversing loops with
8088 zero or one memory access.
8089 Two memory accesses could involve parts of the same array,
8090 and that can't be reversed.
8091 If the biv is used only for counting, then we don't need to worry
8092 about all these things. */
8094 if ((num_nonfixed_reads <= 1
8095 && ! loop_info->has_nonconst_call
8096 && ! loop_info->has_volatile
8097 && reversible_mem_store
8098 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8099 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8100 && (bl == ivs->list && bl->next == 0))
8101 || no_use_except_counting)
8103 rtx tem;
8105 /* Loop can be reversed. */
8106 if (loop_dump_stream)
8107 fprintf (loop_dump_stream, "Can reverse loop\n");
8109 /* Now check other conditions:
8111 The increment must be a constant, as must the initial value,
8112 and the comparison code must be LT.
8114 This test can probably be improved since +/- 1 in the constant
8115 can be obtained by changing LT to LE and vice versa; this is
8116 confusing. */
8118 if (comparison
8119 /* for constants, LE gets turned into LT */
8120 && (GET_CODE (comparison) == LT
8121 || (GET_CODE (comparison) == LE
8122 && no_use_except_counting)))
8124 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8125 rtx initial_value, comparison_value;
8126 int nonneg = 0;
8127 enum rtx_code cmp_code;
8128 int comparison_const_width;
8129 unsigned HOST_WIDE_INT comparison_sign_mask;
8131 add_val = INTVAL (bl->biv->add_val);
8132 comparison_value = XEXP (comparison, 1);
8133 if (GET_MODE (comparison_value) == VOIDmode)
8134 comparison_const_width
8135 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8136 else
8137 comparison_const_width
8138 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8139 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8140 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8141 comparison_sign_mask
8142 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8144 /* If the comparison value is not a loop invariant, then we
8145 can not reverse this loop.
8147 ??? If the insns which initialize the comparison value as
8148 a whole compute an invariant result, then we could move
8149 them out of the loop and proceed with loop reversal. */
8150 if (! loop_invariant_p (loop, comparison_value))
8151 return 0;
8153 if (GET_CODE (comparison_value) == CONST_INT)
8154 comparison_val = INTVAL (comparison_value);
8155 initial_value = bl->initial_value;
8157 /* Normalize the initial value if it is an integer and
8158 has no other use except as a counter. This will allow
8159 a few more loops to be reversed. */
8160 if (no_use_except_counting
8161 && GET_CODE (comparison_value) == CONST_INT
8162 && GET_CODE (initial_value) == CONST_INT)
8164 comparison_val = comparison_val - INTVAL (bl->initial_value);
8165 /* The code below requires comparison_val to be a multiple
8166 of add_val in order to do the loop reversal, so
8167 round up comparison_val to a multiple of add_val.
8168 Since comparison_value is constant, we know that the
8169 current comparison code is LT. */
8170 comparison_val = comparison_val + add_val - 1;
8171 comparison_val
8172 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8173 /* We postpone overflow checks for COMPARISON_VAL here;
8174 even if there is an overflow, we might still be able to
8175 reverse the loop, if converting the loop exit test to
8176 NE is possible. */
8177 initial_value = const0_rtx;
8180 /* First check if we can do a vanilla loop reversal. */
8181 if (initial_value == const0_rtx
8182 /* If we have a decrement_and_branch_on_count,
8183 prefer the NE test, since this will allow that
8184 instruction to be generated. Note that we must
8185 use a vanilla loop reversal if the biv is used to
8186 calculate a giv or has a non-counting use. */
8187 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8188 && defined (HAVE_decrement_and_branch_on_count)
8189 && (! (add_val == 1 && loop->vtop
8190 && (bl->biv_count == 0
8191 || no_use_except_counting)))
8192 #endif
8193 && GET_CODE (comparison_value) == CONST_INT
8194 /* Now do postponed overflow checks on COMPARISON_VAL. */
8195 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8196 & comparison_sign_mask))
8198 /* Register will always be nonnegative, with value
8199 0 on last iteration */
8200 add_adjust = add_val;
8201 nonneg = 1;
8202 cmp_code = GE;
8204 else if (add_val == 1 && loop->vtop
8205 && (bl->biv_count == 0
8206 || no_use_except_counting))
8208 add_adjust = 0;
8209 cmp_code = NE;
8211 else
8212 return 0;
8214 if (GET_CODE (comparison) == LE)
8215 add_adjust -= add_val;
8217 /* If the initial value is not zero, or if the comparison
8218 value is not an exact multiple of the increment, then we
8219 can not reverse this loop. */
8220 if (initial_value == const0_rtx
8221 && GET_CODE (comparison_value) == CONST_INT)
8223 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8224 return 0;
8226 else
8228 if (! no_use_except_counting || add_val != 1)
8229 return 0;
8232 final_value = comparison_value;
8234 /* Reset these in case we normalized the initial value
8235 and comparison value above. */
8236 if (GET_CODE (comparison_value) == CONST_INT
8237 && GET_CODE (initial_value) == CONST_INT)
8239 comparison_value = GEN_INT (comparison_val);
8240 final_value
8241 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8243 bl->initial_value = initial_value;
8245 /* Save some info needed to produce the new insns. */
8246 reg = bl->biv->dest_reg;
8247 jump_label = condjump_label (PREV_INSN (loop_end));
8248 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8250 /* Set start_value; if this is not a CONST_INT, we need
8251 to generate a SUB.
8252 Initialize biv to start_value before loop start.
8253 The old initializing insn will be deleted as a
8254 dead store by flow.c. */
8255 if (initial_value == const0_rtx
8256 && GET_CODE (comparison_value) == CONST_INT)
8258 start_value = GEN_INT (comparison_val - add_adjust);
8259 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8261 else if (GET_CODE (initial_value) == CONST_INT)
8263 enum machine_mode mode = GET_MODE (reg);
8264 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8265 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8267 if (add_insn == 0)
8268 return 0;
8270 start_value
8271 = gen_rtx_PLUS (mode, comparison_value, offset);
8272 loop_insn_hoist (loop, add_insn);
8273 if (GET_CODE (comparison) == LE)
8274 final_value = gen_rtx_PLUS (mode, comparison_value,
8275 GEN_INT (add_val));
8277 else if (! add_adjust)
8279 enum machine_mode mode = GET_MODE (reg);
8280 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8281 initial_value);
8283 if (sub_insn == 0)
8284 return 0;
8285 start_value
8286 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8287 loop_insn_hoist (loop, sub_insn);
8289 else
8290 /* We could handle the other cases too, but it'll be
8291 better to have a testcase first. */
8292 return 0;
8294 /* We may not have a single insn which can increment a reg, so
8295 create a sequence to hold all the insns from expand_inc. */
8296 start_sequence ();
8297 expand_inc (reg, new_add_val);
8298 tem = gen_sequence ();
8299 end_sequence ();
8301 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8302 delete_insn (bl->biv->insn);
8304 /* Update biv info to reflect its new status. */
8305 bl->biv->insn = p;
8306 bl->initial_value = start_value;
8307 bl->biv->add_val = new_add_val;
8309 /* Update loop info. */
8310 loop_info->initial_value = reg;
8311 loop_info->initial_equiv_value = reg;
8312 loop_info->final_value = const0_rtx;
8313 loop_info->final_equiv_value = const0_rtx;
8314 loop_info->comparison_value = const0_rtx;
8315 loop_info->comparison_code = cmp_code;
8316 loop_info->increment = new_add_val;
8318 /* Inc LABEL_NUSES so that delete_insn will
8319 not delete the label. */
8320 LABEL_NUSES (XEXP (jump_label, 0))++;
8322 /* Emit an insn after the end of the loop to set the biv's
8323 proper exit value if it is used anywhere outside the loop. */
8324 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8325 || ! bl->init_insn
8326 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8327 loop_insn_sink (loop, gen_move_insn (reg, final_value));
8329 /* Delete compare/branch at end of loop. */
8330 delete_related_insns (PREV_INSN (loop_end));
8331 if (compare_and_branch == 2)
8332 delete_related_insns (first_compare);
8334 /* Add new compare/branch insn at end of loop. */
8335 start_sequence ();
8336 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8337 GET_MODE (reg), 0,
8338 XEXP (jump_label, 0));
8339 tem = gen_sequence ();
8340 end_sequence ();
8341 emit_jump_insn_before (tem, loop_end);
8343 for (tem = PREV_INSN (loop_end);
8344 tem && GET_CODE (tem) != JUMP_INSN;
8345 tem = PREV_INSN (tem))
8348 if (tem)
8349 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8351 if (nonneg)
8353 if (tem)
8355 /* Increment of LABEL_NUSES done above. */
8356 /* Register is now always nonnegative,
8357 so add REG_NONNEG note to the branch. */
8358 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8359 REG_NOTES (tem));
8361 bl->nonneg = 1;
8364 /* No insn may reference both the reversed and another biv or it
8365 will fail (see comment near the top of the loop reversal
8366 code).
8367 Earlier on, we have verified that the biv has no use except
8368 counting, or it is the only biv in this function.
8369 However, the code that computes no_use_except_counting does
8370 not verify reg notes. It's possible to have an insn that
8371 references another biv, and has a REG_EQUAL note with an
8372 expression based on the reversed biv. To avoid this case,
8373 remove all REG_EQUAL notes based on the reversed biv
8374 here. */
8375 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8376 if (INSN_P (p))
8378 rtx *pnote;
8379 rtx set = single_set (p);
8380 /* If this is a set of a GIV based on the reversed biv, any
8381 REG_EQUAL notes should still be correct. */
8382 if (! set
8383 || GET_CODE (SET_DEST (set)) != REG
8384 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8385 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8386 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8387 for (pnote = &REG_NOTES (p); *pnote;)
8389 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8390 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8391 XEXP (*pnote, 0)))
8392 *pnote = XEXP (*pnote, 1);
8393 else
8394 pnote = &XEXP (*pnote, 1);
8398 /* Mark that this biv has been reversed. Each giv which depends
8399 on this biv, and which is also live past the end of the loop
8400 will have to be fixed up. */
8402 bl->reversed = 1;
8404 if (loop_dump_stream)
8406 fprintf (loop_dump_stream, "Reversed loop");
8407 if (bl->nonneg)
8408 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8409 else
8410 fprintf (loop_dump_stream, "\n");
8413 return 1;
8418 return 0;
8421 /* Verify whether the biv BL appears to be eliminable,
8422 based on the insns in the loop that refer to it.
8424 If ELIMINATE_P is non-zero, actually do the elimination.
8426 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8427 determine whether invariant insns should be placed inside or at the
8428 start of the loop. */
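/* An editorial example of a successful elimination: in

       for (i = 0; i < n; i++)
         a[i] = 0;

   the address giv P = &a[i] is strength-reduced, after which the only
   remaining use of the biv I is the exit test i < n.  maybe_eliminate_biv_1
   can rewrite that test in terms of P (compare P against &a[n]), leaving no
   uses of I at all, so the biv and its increment can be deleted.  */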
8430 static int
8431 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8432 const struct loop *loop;
8433 struct iv_class *bl;
8434 int eliminate_p;
8435 int threshold, insn_count;
8437 struct loop_ivs *ivs = LOOP_IVS (loop);
8438 rtx reg = bl->biv->dest_reg;
8439 rtx p;
8441 /* Scan all insns in the loop, stopping if we find one that uses the
8442 biv in a way that we cannot eliminate. */
8444 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8446 enum rtx_code code = GET_CODE (p);
8447 basic_block where_bb = 0;
8448 rtx where_insn = threshold >= insn_count ? 0 : p;
8450 /* If this is a libcall that sets a giv, skip ahead to its end. */
8451 if (GET_RTX_CLASS (code) == 'i')
8453 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8455 if (note)
8457 rtx last = XEXP (note, 0);
8458 rtx set = single_set (last);
8460 if (set && GET_CODE (SET_DEST (set)) == REG)
8462 unsigned int regno = REGNO (SET_DEST (set));
8464 if (regno < ivs->n_regs
8465 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8466 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8467 p = last;
8471 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8472 && reg_mentioned_p (reg, PATTERN (p))
8473 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8474 eliminate_p, where_bb, where_insn))
8476 if (loop_dump_stream)
8477 fprintf (loop_dump_stream,
8478 "Cannot eliminate biv %d: biv used in insn %d.\n",
8479 bl->regno, INSN_UID (p));
8480 break;
8484 if (p == loop->end)
8486 if (loop_dump_stream)
8487 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8488 bl->regno, eliminate_p ? "was" : "can be");
8489 return 1;
8492 return 0;
8495 /* INSN and REFERENCE are instructions in the same insn chain.
8496 Return non-zero if INSN is first. */
8499 loop_insn_first_p (insn, reference)
8500 rtx insn, reference;
8502 rtx p, q;
8504 for (p = insn, q = reference;;)
8506 /* Start with test for not first so that INSN == REFERENCE yields not
8507 first. */
8508 if (q == insn || ! p)
8509 return 0;
8510 if (p == reference || ! q)
8511 return 1;
8513 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8514 previous insn, hence the <= comparison below does not work if
8515 P is a note. */
8516 if (INSN_UID (p) < max_uid_for_loop
8517 && INSN_UID (q) < max_uid_for_loop
8518 && GET_CODE (p) != NOTE)
8519 return INSN_LUID (p) <= INSN_LUID (q);
8521 if (INSN_UID (p) >= max_uid_for_loop
8522 || GET_CODE (p) == NOTE)
8523 p = NEXT_INSN (p);
8524 if (INSN_UID (q) >= max_uid_for_loop)
8525 q = NEXT_INSN (q);
8529 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8530 the offset that we have to take into account due to auto-increment /
8531 giv derivation is zero. */
8532 static int
8533 biv_elimination_giv_has_0_offset (biv, giv, insn)
8534 struct induction *biv, *giv;
8535 rtx insn;
8537 /* If the giv V had the auto-inc address optimization applied
8538 to it, and INSN occurs between the giv insn and the biv
8539 insn, then we'd have to adjust the value used here.
8540 This is rare, so we don't bother to make this possible. */
8541 if (giv->auto_inc_opt
8542 && ((loop_insn_first_p (giv->insn, insn)
8543 && loop_insn_first_p (insn, biv->insn))
8544 || (loop_insn_first_p (biv->insn, insn)
8545 && loop_insn_first_p (insn, giv->insn))))
8546 return 0;
8548 return 1;
8551 /* If BL appears in X (part of the pattern of INSN), see if we can
8552 eliminate its use. If so, return 1. If not, return 0.
8554 If BIV does not appear in X, return 1.
8556 If ELIMINATE_P is non-zero, actually do the elimination.
8557 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8558 Depending on how many items have been moved out of the loop, it
8559 will either be before INSN (when WHERE_INSN is non-zero) or at the
8560 start of the loop (when WHERE_INSN is zero). */
8562 static int
8563 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8564 const struct loop *loop;
8565 rtx x, insn;
8566 struct iv_class *bl;
8567 int eliminate_p;
8568 basic_block where_bb;
8569 rtx where_insn;
8571 enum rtx_code code = GET_CODE (x);
8572 rtx reg = bl->biv->dest_reg;
8573 enum machine_mode mode = GET_MODE (reg);
8574 struct induction *v;
8575 rtx arg, tem;
8576 #ifdef HAVE_cc0
8577 rtx new;
8578 #endif
8579 int arg_operand;
8580 const char *fmt;
8581 int i, j;
8583 switch (code)
8585 case REG:
8586 /* If we haven't already been able to do something with this BIV,
8587 we can't eliminate it. */
8588 if (x == reg)
8589 return 0;
8590 return 1;
8592 case SET:
8593 /* If this sets the BIV, it is not a problem. */
8594 if (SET_DEST (x) == reg)
8595 return 1;
8597 /* If this is an insn that defines a giv, it is also ok because
8598 it will go away when the giv is reduced. */
8599 for (v = bl->giv; v; v = v->next_iv)
8600 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8601 return 1;
8603 #ifdef HAVE_cc0
8604 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8606 /* Can replace with any giv that was reduced and
8607 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8608 Require a constant for MULT_VAL, so we know it's nonzero.
8609 ??? We disable this optimization to avoid potential
8610 overflows. */
8612 for (v = bl->giv; v; v = v->next_iv)
8613 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8614 && v->add_val == const0_rtx
8615 && ! v->ignore && ! v->maybe_dead && v->always_computable
8616 && v->mode == mode
8617 && 0)
8619 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8620 continue;
8622 if (! eliminate_p)
8623 return 1;
8625 /* If the giv has the opposite direction of change,
8626 then reverse the comparison. */
8627 if (INTVAL (v->mult_val) < 0)
8628 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8629 const0_rtx, v->new_reg);
8630 else
8631 new = v->new_reg;
8633 /* We can probably test that giv's reduced reg. */
8634 if (validate_change (insn, &SET_SRC (x), new, 0))
8635 return 1;
8638 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8639 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8640 Require a constant for MULT_VAL, so we know it's nonzero.
8641 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8642 overflow problem. */
8644 for (v = bl->giv; v; v = v->next_iv)
8645 if (GET_CODE (v->mult_val) == CONST_INT
8646 && v->mult_val != const0_rtx
8647 && ! v->ignore && ! v->maybe_dead && v->always_computable
8648 && v->mode == mode
8649 && (GET_CODE (v->add_val) == SYMBOL_REF
8650 || GET_CODE (v->add_val) == LABEL_REF
8651 || GET_CODE (v->add_val) == CONST
8652 || (GET_CODE (v->add_val) == REG
8653 && REG_POINTER (v->add_val))))
8655 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8656 continue;
8658 if (! eliminate_p)
8659 return 1;
8661 /* If the giv has the opposite direction of change,
8662 then reverse the comparison. */
8663 if (INTVAL (v->mult_val) < 0)
8664 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8665 v->new_reg);
8666 else
8667 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8668 copy_rtx (v->add_val));
8670 /* Replace biv with the giv's reduced register. */
8671 update_reg_last_use (v->add_val, insn);
8672 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8673 return 1;
8675 /* Insn doesn't support that constant or invariant. Copy it
8676 into a register (it will be a loop invariant). */
8677 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8679 loop_insn_emit_before (loop, 0, where_insn,
8680 gen_move_insn (tem,
8681 copy_rtx (v->add_val)));
8683 /* Substitute the new register for its invariant value in
8684 the compare expression. */
8685 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8686 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8687 return 1;
8690 #endif
8691 break;
8693 case COMPARE:
8694 case EQ: case NE:
8695 case GT: case GE: case GTU: case GEU:
8696 case LT: case LE: case LTU: case LEU:
8697 /* See if either argument is the biv. */
8698 if (XEXP (x, 0) == reg)
8699 arg = XEXP (x, 1), arg_operand = 1;
8700 else if (XEXP (x, 1) == reg)
8701 arg = XEXP (x, 0), arg_operand = 0;
8702 else
8703 break;
8705 if (CONSTANT_P (arg))
8707 /* First try to replace with any giv that has constant positive
8708 mult_val and constant add_val. We might be able to support
8709 negative mult_val, but it seems complex to do it in general. */
8711 for (v = bl->giv; v; v = v->next_iv)
8712 if (GET_CODE (v->mult_val) == CONST_INT
8713 && INTVAL (v->mult_val) > 0
8714 && (GET_CODE (v->add_val) == SYMBOL_REF
8715 || GET_CODE (v->add_val) == LABEL_REF
8716 || GET_CODE (v->add_val) == CONST
8717 || (GET_CODE (v->add_val) == REG
8718 && REG_POINTER (v->add_val)))
8719 && ! v->ignore && ! v->maybe_dead && v->always_computable
8720 && v->mode == mode)
8722 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8723 continue;
8725 if (! eliminate_p)
8726 return 1;
8728 /* Replace biv with the giv's reduced reg. */
8729 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8731 /* If all constants are actually constant integers and
8732 the derived constant can be directly placed in the COMPARE,
8733 do so. */
8734 if (GET_CODE (arg) == CONST_INT
8735 && GET_CODE (v->mult_val) == CONST_INT
8736 && GET_CODE (v->add_val) == CONST_INT)
8738 validate_change (insn, &XEXP (x, arg_operand),
8739 GEN_INT (INTVAL (arg)
8740 * INTVAL (v->mult_val)
8741 + INTVAL (v->add_val)), 1);
8743 else
8745 /* Otherwise, load it into a register. */
8746 tem = gen_reg_rtx (mode);
8747 loop_iv_add_mult_emit_before (loop, arg,
8748 v->mult_val, v->add_val,
8749 tem, where_bb, where_insn);
8750 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8752 if (apply_change_group ())
8753 return 1;
8756 /* Look for giv with positive constant mult_val and nonconst add_val.
8757 Insert insns to calculate new compare value.
8758 ??? Turn this off due to possible overflow. */
8760 for (v = bl->giv; v; v = v->next_iv)
8761 if (GET_CODE (v->mult_val) == CONST_INT
8762 && INTVAL (v->mult_val) > 0
8763 && ! v->ignore && ! v->maybe_dead && v->always_computable
8764 && v->mode == mode
8765 && 0)
8767 rtx tem;
8769 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8770 continue;
8772 if (! eliminate_p)
8773 return 1;
8775 tem = gen_reg_rtx (mode);
8777 /* Replace biv with giv's reduced register. */
8778 validate_change (insn, &XEXP (x, 1 - arg_operand),
8779 v->new_reg, 1);
8781 /* Compute value to compare against. */
8782 loop_iv_add_mult_emit_before (loop, arg,
8783 v->mult_val, v->add_val,
8784 tem, where_bb, where_insn);
8785 /* Use it in this insn. */
8786 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8787 if (apply_change_group ())
8788 return 1;
8791 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8793 if (loop_invariant_p (loop, arg) == 1)
8795 /* Look for giv with constant positive mult_val and nonconst
8796 add_val. Insert insns to compute new compare value.
8797 ??? Turn this off due to possible overflow. */
8799 for (v = bl->giv; v; v = v->next_iv)
8800 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8801 && ! v->ignore && ! v->maybe_dead && v->always_computable
8802 && v->mode == mode
8803 && 0)
8805 rtx tem;
8807 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8808 continue;
8810 if (! eliminate_p)
8811 return 1;
8813 tem = gen_reg_rtx (mode);
8815 /* Replace biv with giv's reduced register. */
8816 validate_change (insn, &XEXP (x, 1 - arg_operand),
8817 v->new_reg, 1);
8819 /* Compute value to compare against. */
8820 loop_iv_add_mult_emit_before (loop, arg,
8821 v->mult_val, v->add_val,
8822 tem, where_bb, where_insn);
8823 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8824 if (apply_change_group ())
8825 return 1;
8829 /* This code has problems. Basically, when deciding whether we will
8830 eliminate BL, we can't know whether a particular giv
8831 of ARG will be reduced. If it isn't going to be reduced,
8832 we can't eliminate BL. We can try forcing it to be reduced,
8833 but that can generate poor code.
8835 The problem is that the benefit of reducing TV, below, should
8836 be increased if BL can actually be eliminated, but this means
8837 we might have to do a topological sort of the order in which
8838 we try to process bivs. It doesn't seem worthwhile to do
8839 this sort of thing now. */
8841 #if 0
8842 /* Otherwise the reg compared with had better be a biv. */
8843 if (GET_CODE (arg) != REG
8844 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8845 return 0;
8847 /* Look for a pair of givs, one for each biv,
8848 with identical coefficients. */
8849 for (v = bl->giv; v; v = v->next_iv)
8851 struct induction *tv;
8853 if (v->ignore || v->maybe_dead || v->mode != mode)
8854 continue;
8856 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8857 tv = tv->next_iv)
8858 if (! tv->ignore && ! tv->maybe_dead
8859 && rtx_equal_p (tv->mult_val, v->mult_val)
8860 && rtx_equal_p (tv->add_val, v->add_val)
8861 && tv->mode == mode)
8863 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8864 continue;
8866 if (! eliminate_p)
8867 return 1;
8869 /* Replace biv with its giv's reduced reg. */
8870 XEXP (x, 1 - arg_operand) = v->new_reg;
8871 /* Replace other operand with the other giv's
8872 reduced reg. */
8873 XEXP (x, arg_operand) = tv->new_reg;
8874 return 1;
8877 #endif
8880 /* If we get here, the biv can't be eliminated. */
8881 return 0;
8883 case MEM:
8884 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8885 biv is used in it, since it will be replaced. */
8886 for (v = bl->giv; v; v = v->next_iv)
8887 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8888 return 1;
8889 break;
8891 default:
8892 break;
8895 /* See if any subexpression fails elimination. */
8896 fmt = GET_RTX_FORMAT (code);
8897 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8899 switch (fmt[i])
8901 case 'e':
8902 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8903 eliminate_p, where_bb, where_insn))
8904 return 0;
8905 break;
8907 case 'E':
8908 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8909 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8910 eliminate_p, where_bb, where_insn))
8911 return 0;
8912 break;
8916 return 1;
8919 /* Return nonzero if the last use of REG
8920 is in an insn following INSN in the same basic block. */
8922 static int
8923 last_use_this_basic_block (reg, insn)
8924 rtx reg;
8925 rtx insn;
8927 rtx n;
8928 for (n = insn;
8929 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8930 n = NEXT_INSN (n))
8932 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8933 return 1;
8935 return 0;
8938 /* Called via `note_stores' to record the initial value of a biv. Here we
8939 just record the location of the set and process it later. */
8941 static void
8942 record_initial (dest, set, data)
8943 rtx dest;
8944 rtx set;
8945 void *data ATTRIBUTE_UNUSED;
8947 struct loop_ivs *ivs = (struct loop_ivs *) data;
8948 struct iv_class *bl;
8950 if (GET_CODE (dest) != REG
8951 || REGNO (dest) >= ivs->n_regs
8952 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
8953 return;
8955 bl = REG_IV_CLASS (ivs, REGNO (dest));
8957 /* If this is the first set found, record it. */
8958 if (bl->init_insn == 0)
8960 bl->init_insn = note_insn;
8961 bl->init_set = set;
8965 /* If any of the registers in X are "old" and currently have a last use earlier
8966 than INSN, update them to have a last use of INSN. Their actual last use
8967 will be the previous insn but it will not have a valid uid_luid so we can't
8968 use it. X must be a source expression only. */
8970 static void
8971 update_reg_last_use (x, insn)
8972 rtx x;
8973 rtx insn;
8975 /* Check for the case where INSN does not have a valid luid. In this case,
8976 there is no need to modify the regno_last_uid, as this can only happen
8977 when code is inserted after the loop_end to set a pseudo's final value,
8978 and hence this insn will never be the last use of x.
8979 ??? This comment is not correct. See for example loop_givs_reduce.
8980 This may insert an insn before another new insn. */
8981 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8982 && INSN_UID (insn) < max_uid_for_loop
8983 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
8985 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8987 else
8989 int i, j;
8990 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
8991 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8993 if (fmt[i] == 'e')
8994 update_reg_last_use (XEXP (x, i), insn);
8995 else if (fmt[i] == 'E')
8996 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8997 update_reg_last_use (XVECEXP (x, i, j), insn);
9002 /* Given an insn INSN and condition COND, return the condition in a
9003 canonical form to simplify testing by callers. Specifically:
9005 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9006 (2) Both operands will be machine operands; (cc0) will have been replaced.
9007 (3) If an operand is a constant, it will be the second operand.
9008 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9009 for GE, GEU, and LEU.
9011 If the condition cannot be understood, or is an inequality floating-point
9012 comparison which needs to be reversed, 0 will be returned.
9014 If REVERSE is non-zero, then reverse the condition prior to canonizing it.
9016 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9017 insn used in locating the condition was found. If a replacement test
9018 of the condition is desired, it should be placed in front of that
9019 insn and we will be sure that the inputs are still valid.
9021 If WANT_REG is non-zero, we wish the condition to be relative to that
9022 register, if possible. Therefore, do not canonicalize the condition
9023 further. */
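/* Some editorial examples restating rules (1)-(4) above:
   (LE (reg x) (const_int 4)) comes back as (LT (reg x) (const_int 5)),
   (GEU (reg x) (const_int 1)) as (GTU (reg x) (const_int 0)), and a
   condition written with the constant first, e.g.
   (GT (const_int 0) (reg x)), is returned as (LT (reg x) (const_int 0)).  */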
9026 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9027 rtx insn;
9028 rtx cond;
9029 int reverse;
9030 rtx *earliest;
9031 rtx want_reg;
9033 enum rtx_code code;
9034 rtx prev = insn;
9035 rtx set;
9036 rtx tem;
9037 rtx op0, op1;
9038 int reverse_code = 0;
9039 enum machine_mode mode;
9041 code = GET_CODE (cond);
9042 mode = GET_MODE (cond);
9043 op0 = XEXP (cond, 0);
9044 op1 = XEXP (cond, 1);
9046 if (reverse)
9047 code = reversed_comparison_code (cond, insn);
9048 if (code == UNKNOWN)
9049 return 0;
9051 if (earliest)
9052 *earliest = insn;
9054 /* If we are comparing a register with zero, see if the register is set
9055 in the previous insn to a COMPARE or a comparison operation. Perform
9056 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9057 in cse.c */
9059 while (GET_RTX_CLASS (code) == '<'
9060 && op1 == CONST0_RTX (GET_MODE (op0))
9061 && op0 != want_reg)
9063 /* Set non-zero when we find something of interest. */
9064 rtx x = 0;
9066 #ifdef HAVE_cc0
9067 /* If comparison with cc0, import actual comparison from compare
9068 insn. */
9069 if (op0 == cc0_rtx)
9071 if ((prev = prev_nonnote_insn (prev)) == 0
9072 || GET_CODE (prev) != INSN
9073 || (set = single_set (prev)) == 0
9074 || SET_DEST (set) != cc0_rtx)
9075 return 0;
9077 op0 = SET_SRC (set);
9078 op1 = CONST0_RTX (GET_MODE (op0));
9079 if (earliest)
9080 *earliest = prev;
9082 #endif
9084 /* If this is a COMPARE, pick up the two things being compared. */
9085 if (GET_CODE (op0) == COMPARE)
9087 op1 = XEXP (op0, 1);
9088 op0 = XEXP (op0, 0);
9089 continue;
9091 else if (GET_CODE (op0) != REG)
9092 break;
9094 /* Go back to the previous insn. Stop if it is not an INSN. We also
9095 stop if it isn't a single set or if it has a REG_INC note because
9096 we don't want to bother dealing with it. */
9098 if ((prev = prev_nonnote_insn (prev)) == 0
9099 || GET_CODE (prev) != INSN
9100 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9101 break;
9103 set = set_of (op0, prev);
9105 if (set
9106 && (GET_CODE (set) != SET
9107 || !rtx_equal_p (SET_DEST (set), op0)))
9108 break;
9110 /* If this is setting OP0, get what it sets it to if it looks
9111 relevant. */
9112 if (set)
9114 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9116 /* ??? We may not combine comparisons done in a CCmode with
9117 comparisons not done in a CCmode. This is to aid targets
9118 like Alpha that have an IEEE compliant EQ instruction, and
9119 a non-IEEE compliant BEQ instruction. The use of CCmode is
9120 actually artificial, simply to prevent the combination, but
9121 should not affect other platforms.
9123 However, we must allow VOIDmode comparisons to match either
9124 CCmode or non-CCmode comparison, because some ports have
9125 modeless comparisons inside branch patterns.
9127 ??? This mode check should perhaps look more like the mode check
9128 in simplify_comparison in combine. */
9130 if ((GET_CODE (SET_SRC (set)) == COMPARE
9131 || (((code == NE
9132 || (code == LT
9133 && GET_MODE_CLASS (inner_mode) == MODE_INT
9134 && (GET_MODE_BITSIZE (inner_mode)
9135 <= HOST_BITS_PER_WIDE_INT)
9136 && (STORE_FLAG_VALUE
9137 & ((HOST_WIDE_INT) 1
9138 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9139 #ifdef FLOAT_STORE_FLAG_VALUE
9140 || (code == LT
9141 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9142 && (REAL_VALUE_NEGATIVE
9143 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9144 #endif
9146 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9147 && (((GET_MODE_CLASS (mode) == MODE_CC)
9148 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9149 || mode == VOIDmode || inner_mode == VOIDmode))
9150 x = SET_SRC (set);
9151 else if (((code == EQ
9152 || (code == GE
9153 && (GET_MODE_BITSIZE (inner_mode)
9154 <= HOST_BITS_PER_WIDE_INT)
9155 && GET_MODE_CLASS (inner_mode) == MODE_INT
9156 && (STORE_FLAG_VALUE
9157 & ((HOST_WIDE_INT) 1
9158 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9159 #ifdef FLOAT_STORE_FLAG_VALUE
9160 || (code == GE
9161 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9162 && (REAL_VALUE_NEGATIVE
9163 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9164 #endif
9166 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9167 && (((GET_MODE_CLASS (mode) == MODE_CC)
9168 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9169 || mode == VOIDmode || inner_mode == VOIDmode))
9172 reverse_code = 1;
9173 x = SET_SRC (set);
9175 else
9176 break;
9179 else if (reg_set_p (op0, prev))
9180 /* If this sets OP0, but not directly, we have to give up. */
9181 break;
9183 if (x)
9185 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9186 code = GET_CODE (x);
9187 if (reverse_code)
9189 code = reversed_comparison_code (x, prev);
9190 if (code == UNKNOWN)
9191 return 0;
9192 reverse_code = 0;
9195 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9196 if (earliest)
9197 *earliest = prev;
9201 /* If constant is first, put it last. */
9202 if (CONSTANT_P (op0))
9203 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9205 /* If OP0 is the result of a comparison, we weren't able to find what
9206 was really being compared, so fail. */
9207 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9208 return 0;
9210 /* Canonicalize any ordered comparison with integers involving equality
9211 if we can do computations in the relevant mode and we do not
9212 overflow. */
9214 if (GET_CODE (op1) == CONST_INT
9215 && GET_MODE (op0) != VOIDmode
9216 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9218 HOST_WIDE_INT const_val = INTVAL (op1);
9219 unsigned HOST_WIDE_INT uconst_val = const_val;
9220 unsigned HOST_WIDE_INT max_val
9221 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9223 switch (code)
9225 case LE:
9226 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9227 code = LT, op1 = GEN_INT (const_val + 1);
9228 break;
9230 /* When cross-compiling, const_val might be sign-extended from
9231 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
9232 case GE:
9233 if ((HOST_WIDE_INT) (const_val & max_val)
9234 != (((HOST_WIDE_INT) 1
9235 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9236 code = GT, op1 = GEN_INT (const_val - 1);
9237 break;
9239 case LEU:
9240 if (uconst_val < max_val)
9241 code = LTU, op1 = GEN_INT (uconst_val + 1);
9242 break;
9244 case GEU:
9245 if (uconst_val != 0)
9246 code = GTU, op1 = GEN_INT (uconst_val - 1);
9247 break;
9249 default:
9250 break;
9254 #ifdef HAVE_cc0
9255 /* Never return CC0; return zero instead. */
9256 if (op0 == cc0_rtx)
9257 return 0;
9258 #endif
9260 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9263 /* Given a jump insn JUMP, return the condition that will cause it to branch
9264 to its JUMP_LABEL. If the condition cannot be understood, or is an
9265 inequality floating-point comparison which needs to be reversed, 0 will
9266 be returned.
9268 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9269 insn used in locating the condition was found. If a replacement test
9270 of the condition is desired, it should be placed in front of that
9271 insn and we will be sure that the inputs are still valid. */
9274 get_condition (jump, earliest)
9275 rtx jump;
9276 rtx *earliest;
9278 rtx cond;
9279 int reverse;
9280 rtx set;
9282 /* If this is not a standard conditional jump, we can't parse it. */
9283 if (GET_CODE (jump) != JUMP_INSN
9284 || ! any_condjump_p (jump))
9285 return 0;
9286 set = pc_set (jump);
9288 cond = XEXP (SET_SRC (set), 0);
9290 /* If this branches to JUMP_LABEL when the condition is false, reverse
9291 the condition. */
9292 reverse
9293 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9294 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9296 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9299 /* Similar to above routine, except that we also put an invariant last
9300 unless both operands are invariants. */
9303 get_condition_for_loop (loop, x)
9304 const struct loop *loop;
9305 rtx x;
9307 rtx comparison = get_condition (x, (rtx*) 0);
9309 if (comparison == 0
9310 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9311 || loop_invariant_p (loop, XEXP (comparison, 1)))
9312 return comparison;
9314 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9315 XEXP (comparison, 1), XEXP (comparison, 0));
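/* Illustrative note (editorial sketch; the register numbers are
   hypothetical): if get_condition returns

       (gt (reg:SI 60) (reg:SI 61))

   and only (reg:SI 60) is loop-invariant, the routine above returns

       (lt (reg:SI 61) (reg:SI 60))

   so that the invariant operand ends up last.  */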
9318 /* Scan the function and determine whether it has indirect (computed) jumps.
9320 This is taken mostly from flow.c; similar code exists elsewhere
9321 in the compiler. It may be useful to put this into rtlanal.c. */
9322 static int
9323 indirect_jump_in_function_p (start)
9324 rtx start;
9326 rtx insn;
9328 for (insn = start; insn; insn = NEXT_INSN (insn))
9329 if (computed_jump_p (insn))
9330 return 1;
9332 return 0;
9335 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9336 documentation for LOOP_MEMS for the definition of `appropriate'.
9337 This function is called from prescan_loop via for_each_rtx. */
9339 static int
9340 insert_loop_mem (mem, data)
9341 rtx *mem;
9342 void *data ATTRIBUTE_UNUSED;
9344 struct loop_info *loop_info = data;
9345 int i;
9346 rtx m = *mem;
9348 if (m == NULL_RTX)
9349 return 0;
9351 switch (GET_CODE (m))
9353 case MEM:
9354 break;
9356 case CLOBBER:
9357 /* We're not interested in MEMs that are only clobbered. */
9358 return -1;
9360 case CONST_DOUBLE:
9361 /* We're not interested in the MEM associated with a
9362 CONST_DOUBLE, so there's no need to traverse into this. */
9363 return -1;
9365 case EXPR_LIST:
9366 /* We're not interested in any MEMs that only appear in notes. */
9367 return -1;
9369 default:
9370 /* This is not a MEM. */
9371 return 0;
9374 /* See if we've already seen this MEM. */
9375 for (i = 0; i < loop_info->mems_idx; ++i)
9376 if (rtx_equal_p (m, loop_info->mems[i].mem))
9378 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9379 /* The modes of the two memory accesses are different. If
9380 this happens, something tricky is going on, and we just
9381 don't optimize accesses to this MEM. */
9382 loop_info->mems[i].optimize = 0;
9384 return 0;
9387 /* Resize the array, if necessary. */
9388 if (loop_info->mems_idx == loop_info->mems_allocated)
9390 if (loop_info->mems_allocated != 0)
9391 loop_info->mems_allocated *= 2;
9392 else
9393 loop_info->mems_allocated = 32;
9395 loop_info->mems = (loop_mem_info *)
9396 xrealloc (loop_info->mems,
9397 loop_info->mems_allocated * sizeof (loop_mem_info));
9400 /* Actually insert the MEM. */
9401 loop_info->mems[loop_info->mems_idx].mem = m;
9402 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9403 because we can't put it in a register. We still store it in the
9404 table, though, so that if we see the same address later, but in a
9405 non-BLK mode, we'll not think we can optimize it at that point. */
9406 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9407 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9408 ++loop_info->mems_idx;
9410 return 0;
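/* Editorial sketch of the for_each_rtx callback protocol assumed by
   insert_loop_mem above and by replace_loop_mem, replace_loop_reg and
   replace_label later in this file: returning 0 lets for_each_rtx keep
   walking into sub-expressions, returning -1 skips the sub-expressions of
   the current rtx but continues with the rest, and any other nonzero value
   stops the traversal and is returned to the caller.  That is why the
   CLOBBER, CONST_DOUBLE and EXPR_LIST cases return -1: their contents
   cannot supply a MEM we want to record.  */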
9414 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9416 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9417 register that is modified by an insn between FROM and TO. If the
9418 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9419 more, stop incrementing it, to avoid overflow.
9421 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9422 register I is used, if it is only used once. Otherwise, it is set
9423 to 0 (for no uses) or const0_rtx for more than one use. This
9424 parameter may be zero, in which case this processing is not done.
9426 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9427 optimize register I. */
9429 static void
9430 loop_regs_scan (loop, extra_size)
9431 const struct loop *loop;
9432 int extra_size;
9434 struct loop_regs *regs = LOOP_REGS (loop);
9435 int old_nregs;
9436 /* last_set[n] is nonzero iff reg n has been set in the current
9437 basic block. In that case, it is the insn that last set reg n. */
9438 rtx *last_set;
9439 rtx insn;
9440 int i;
9442 old_nregs = regs->num;
9443 regs->num = max_reg_num ();
9445 /* Grow the regs array if not allocated or too small. */
9446 if (regs->num >= regs->size)
9448 regs->size = regs->num + extra_size;
9450 regs->array = (struct loop_reg *)
9451 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9453 /* Zero the new elements. */
9454 memset (regs->array + old_nregs, 0,
9455 (regs->size - old_nregs) * sizeof (*regs->array));
9458 /* Clear previously scanned fields but do not clear n_times_set. */
9459 for (i = 0; i < old_nregs; i++)
9461 regs->array[i].set_in_loop = 0;
9462 regs->array[i].may_not_optimize = 0;
9463 regs->array[i].single_usage = NULL_RTX;
9466 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9468 /* Scan the loop, recording register usage. */
9469 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9470 insn = NEXT_INSN (insn))
9472 if (INSN_P (insn))
9474 /* Record registers that have exactly one use. */
9475 find_single_use_in_loop (regs, insn, PATTERN (insn));
9477 /* Include uses in REG_EQUAL notes. */
9478 if (REG_NOTES (insn))
9479 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9481 if (GET_CODE (PATTERN (insn)) == SET
9482 || GET_CODE (PATTERN (insn)) == CLOBBER)
9483 count_one_set (regs, insn, PATTERN (insn), last_set);
9484 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9486 int i;
9487 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9488 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9489 last_set);
9493 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9494 memset (last_set, 0, regs->num * sizeof (rtx));
9497 /* Invalidate all hard registers clobbered by calls. With one exception:
9498 a call-clobbered PIC register is still function-invariant for our
9499 purposes, since we can hoist any PIC calculations out of the loop.
9500 Thus the call to rtx_varies_p. */
9501 if (LOOP_INFO (loop)->has_call)
9502 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9503 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9504 && rtx_varies_p (gen_rtx_REG (Pmode, i), /*for_alias=*/1))
9506 regs->array[i].may_not_optimize = 1;
9507 regs->array[i].set_in_loop = 1;
9510 #ifdef AVOID_CCMODE_COPIES
9511 /* Don't try to move insns which set CC registers if we should not
9512 create CCmode register copies. */
9513 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9514 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9515 regs->array[i].may_not_optimize = 1;
9516 #endif
9518 /* Set regs->array[I].n_times_set for the new registers. */
9519 for (i = old_nregs; i < regs->num; i++)
9520 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9522 free (last_set);
9525 /* Returns the number of real INSNs in the LOOP. */
9527 static int
9528 count_insns_in_loop (loop)
9529 const struct loop *loop;
9531 int count = 0;
9532 rtx insn;
9534 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9535 insn = NEXT_INSN (insn))
9536 if (INSN_P (insn))
9537 ++count;
9539 return count;
9542 /* Move MEMs into registers for the duration of the loop. */
9544 static void
9545 load_mems (loop)
9546 const struct loop *loop;
9548 struct loop_info *loop_info = LOOP_INFO (loop);
9549 struct loop_regs *regs = LOOP_REGS (loop);
9550 int maybe_never = 0;
9551 int i;
9552 rtx p, prev_ebb_head;
9553 rtx label = NULL_RTX;
9554 rtx end_label;
9555 /* Nonzero if the next instruction may never be executed. */
9556 int next_maybe_never = 0;
9557 unsigned int last_max_reg = max_reg_num ();
9559 if (loop_info->mems_idx == 0)
9560 return;
9562 /* We cannot use next_label here because it skips over normal insns. */
9563 end_label = next_nonnote_insn (loop->end);
9564 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9565 end_label = NULL_RTX;
9567 /* Check to see if it's possible that some instructions in the loop are
9568 never executed. Also check if there is a goto out of the loop other
9569 than right after the end of the loop. */
9570 for (p = next_insn_in_loop (loop, loop->scan_start);
9571 p != NULL_RTX;
9572 p = next_insn_in_loop (loop, p))
9574 if (GET_CODE (p) == CODE_LABEL)
9575 maybe_never = 1;
9576 else if (GET_CODE (p) == JUMP_INSN
9577 /* If we enter the loop in the middle, and scan
9578 around to the beginning, don't set maybe_never
9579 for that. This must be an unconditional jump,
9580 otherwise the code at the top of the loop might
9581 never be executed. Unconditional jumps are
9582 followed by a barrier and then the loop end. */
9583 && ! (GET_CODE (p) == JUMP_INSN
9584 && JUMP_LABEL (p) == loop->top
9585 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9586 && any_uncondjump_p (p)))
9588 /* If this is a jump outside of the loop but not right
9589 after the end of the loop, we would have to emit new fixup
9590 sequences for each such label. */
9591 if (/* If we can't tell where control might go when this
9592 JUMP_INSN is executed, we must be conservative. */
9593 !JUMP_LABEL (p)
9594 || (JUMP_LABEL (p) != end_label
9595 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9596 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9597 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9598 return;
9600 if (!any_condjump_p (p))
9601 /* Something complicated. */
9602 maybe_never = 1;
9603 else
9604 /* If there are any more instructions in the loop, they
9605 might not be reached. */
9606 next_maybe_never = 1;
9608 else if (next_maybe_never)
9609 maybe_never = 1;
9612 /* Find start of the extended basic block that enters the loop. */
9613 for (p = loop->start;
9614 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9615 p = PREV_INSN (p))
9617 prev_ebb_head = p;
9619 cselib_init ();
9621 /* Build table of mems that get set to constant values before the
9622 loop. */
9623 for (; p != loop->start; p = NEXT_INSN (p))
9624 cselib_process_insn (p);
9626 /* Actually move the MEMs. */
9627 for (i = 0; i < loop_info->mems_idx; ++i)
9629 regset_head load_copies;
9630 regset_head store_copies;
9631 int written = 0;
9632 rtx reg;
9633 rtx mem = loop_info->mems[i].mem;
9634 rtx mem_list_entry;
9636 if (MEM_VOLATILE_P (mem)
9637 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9638 /* There's no telling whether or not MEM is modified. */
9639 loop_info->mems[i].optimize = 0;
9641 /* Go through the MEMs written to in the loop to see if this
9642 one is aliased by one of them. */
9643 mem_list_entry = loop_info->store_mems;
9644 while (mem_list_entry)
9646 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9647 written = 1;
9648 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9649 mem, rtx_varies_p))
9651 /* MEM is indeed aliased by this store. */
9652 loop_info->mems[i].optimize = 0;
9653 break;
9655 mem_list_entry = XEXP (mem_list_entry, 1);
9658 if (flag_float_store && written
9659 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9660 loop_info->mems[i].optimize = 0;
9662 /* If this MEM is written to, we must be sure that there
9663 are no reads from another MEM that aliases this one. */
9664 if (loop_info->mems[i].optimize && written)
9666 int j;
9668 for (j = 0; j < loop_info->mems_idx; ++j)
9670 if (j == i)
9671 continue;
9672 else if (true_dependence (mem,
9673 VOIDmode,
9674 loop_info->mems[j].mem,
9675 rtx_varies_p))
9677 /* It's not safe to hoist loop_info->mems[i] out of
9678 the loop because writes to it might not be
9679 seen by reads from loop_info->mems[j]. */
9680 loop_info->mems[i].optimize = 0;
9681 break;
9686 if (maybe_never && may_trap_p (mem))
9687 /* We can't access the MEM outside the loop; it might
9688 cause a trap that wouldn't have happened otherwise. */
9689 loop_info->mems[i].optimize = 0;
9691 if (!loop_info->mems[i].optimize)
9692 /* We thought we were going to lift this MEM out of the
9693 loop, but later discovered that we could not. */
9694 continue;
9696 INIT_REG_SET (&load_copies);
9697 INIT_REG_SET (&store_copies);
9699 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9700 order to keep scan_loop from moving stores to this MEM
9701 out of the loop just because this REG is neither a
9702 user-variable nor used in the loop test. */
9703 reg = gen_reg_rtx (GET_MODE (mem));
9704 REG_USERVAR_P (reg) = 1;
9705 loop_info->mems[i].reg = reg;
9707 /* Now, replace all references to the MEM with the
9708 corresponding pseudos. */
9709 maybe_never = 0;
9710 for (p = next_insn_in_loop (loop, loop->scan_start);
9711 p != NULL_RTX;
9712 p = next_insn_in_loop (loop, p))
9714 if (INSN_P (p))
9716 rtx set;
9718 set = single_set (p);
9720 /* See if this copies the mem into a register that isn't
9721 modified afterwards. We'll try to do copy propagation
9722 a little further on. */
9723 if (set
9724 /* @@@ This test is _way_ too conservative. */
9725 && ! maybe_never
9726 && GET_CODE (SET_DEST (set)) == REG
9727 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9728 && REGNO (SET_DEST (set)) < last_max_reg
9729 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9730 && rtx_equal_p (SET_SRC (set), mem))
9731 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9733 /* See if this copies the mem from a register that isn't
9734 modified afterwards. We'll try to remove the
9735 redundant copy later on by doing a little register
9736 renaming and copy propagation. This will help
9737 to untangle things for the BIV detection code. */
9738 if (set
9739 && ! maybe_never
9740 && GET_CODE (SET_SRC (set)) == REG
9741 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9742 && REGNO (SET_SRC (set)) < last_max_reg
9743 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9744 && rtx_equal_p (SET_DEST (set), mem))
9745 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9747 /* Replace the memory reference with the shadow register. */
9748 replace_loop_mems (p, loop_info->mems[i].mem,
9749 loop_info->mems[i].reg);
9752 if (GET_CODE (p) == CODE_LABEL
9753 || GET_CODE (p) == JUMP_INSN)
9754 maybe_never = 1;
9757 if (! apply_change_group ())
9758 /* We couldn't replace all occurrences of the MEM. */
9759 loop_info->mems[i].optimize = 0;
9760 else
9762 /* Load the memory immediately before LOOP->START, which is
9763 the NOTE_LOOP_BEG. */
9764 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9765 rtx set;
9766 rtx best = mem;
9767 int j;
9768 struct elt_loc_list *const_equiv = 0;
9770 if (e)
9772 struct elt_loc_list *equiv;
9773 struct elt_loc_list *best_equiv = 0;
9774 for (equiv = e->locs; equiv; equiv = equiv->next)
9776 if (CONSTANT_P (equiv->loc))
9777 const_equiv = equiv;
9778 else if (GET_CODE (equiv->loc) == REG
9779 /* Extending hard register lifetimes causes a crash
9780 on SRC targets. Doing so on non-SRC targets is
9781 probably not a good idea either, since we most
9782 likely have a pseudo-register equivalence as
9783 well. */
9784 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9785 best_equiv = equiv;
9787 /* Use the constant equivalence if that is cheap enough. */
9788 if (! best_equiv)
9789 best_equiv = const_equiv;
9790 else if (const_equiv
9791 && (rtx_cost (const_equiv->loc, SET)
9792 <= rtx_cost (best_equiv->loc, SET)))
9794 best_equiv = const_equiv;
9795 const_equiv = 0;
9798 /* If best_equiv is nonzero, we know that MEM is set to a
9799 constant or register before the loop. We will use this
9800 knowledge to initialize the shadow register with that
9801 constant or reg rather than by loading from MEM. */
9802 if (best_equiv)
9803 best = copy_rtx (best_equiv->loc);
9806 set = gen_move_insn (reg, best);
9807 set = loop_insn_hoist (loop, set);
9808 if (REG_P (best))
9810 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9811 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9813 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9814 break;
9818 if (const_equiv)
9819 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9821 if (written)
9823 if (label == NULL_RTX)
9825 label = gen_label_rtx ();
9826 emit_label_after (label, loop->end);
9829 /* Store the memory immediately after END, which is
9830 the NOTE_LOOP_END. */
9831 set = gen_move_insn (copy_rtx (mem), reg);
9832 loop_insn_emit_after (loop, 0, label, set);
9835 if (loop_dump_stream)
9837 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9838 REGNO (reg), (written ? "r/w" : "r/o"));
9839 print_rtl (loop_dump_stream, mem);
9840 fputc ('\n', loop_dump_stream);
9843 /* Attempt a bit of copy propagation. This helps untangle the
9844 data flow, and enables {basic,general}_induction_var to find
9845 more bivs/givs. */
9846 EXECUTE_IF_SET_IN_REG_SET
9847 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9849 try_copy_prop (loop, reg, j);
9851 CLEAR_REG_SET (&load_copies);
9853 EXECUTE_IF_SET_IN_REG_SET
9854 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9856 try_swap_copy_prop (loop, reg, j);
9858 CLEAR_REG_SET (&store_copies);
9862 if (label != NULL_RTX && end_label != NULL_RTX)
9864 /* Now, we need to replace all references to the previous exit
9865 label with the new one. */
9866 rtx_pair rr;
9867 rr.r1 = end_label;
9868 rr.r2 = label;
9870 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9872 for_each_rtx (&p, replace_label, &rr);
9874 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9875 field. This is not handled by for_each_rtx because it doesn't
9876 handle unprinted ('0') fields. We need to update JUMP_LABEL
9877 because the immediately following unroll pass will use it.
9878 replace_label would not work anyway, because it only handles
9879 LABEL_REFs. */
9880 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9881 JUMP_LABEL (p) = label;
9885 cselib_finish ();
9888 /* For communication between note_reg_stored and its caller. */
9889 struct note_reg_stored_arg
9891 int set_seen;
9892 rtx reg;
9895 /* Called via note_stores; record in ARG->SET_SEEN whether X, which is
9896 written, is equal to ARG->REG. */
9897 static void
9898 note_reg_stored (x, setter, arg)
9899 rtx x, setter ATTRIBUTE_UNUSED;
9900 void *arg;
9902 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9903 if (t->reg == x)
9904 t->set_seen = 1;
9907 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9908 There must be exactly one insn that sets this pseudo; it will be
9909 deleted if all replacements succeed and we can prove that the register
9910 is not used after the loop. */
9912 static void
9913 try_copy_prop (loop, replacement, regno)
9914 const struct loop *loop;
9915 rtx replacement;
9916 unsigned int regno;
9918 /* This is the reg that we are copying from. */
9919 rtx reg_rtx = regno_reg_rtx[regno];
9920 rtx init_insn = 0;
9921 rtx insn;
9922 /* These help keep track of whether we replaced all uses of the reg. */
9923 int replaced_last = 0;
9924 int store_is_first = 0;
9926 for (insn = next_insn_in_loop (loop, loop->scan_start);
9927 insn != NULL_RTX;
9928 insn = next_insn_in_loop (loop, insn))
9930 rtx set;
9932 /* Only substitute within one extended basic block from the initializing
9933 insn. */
9934 if (GET_CODE (insn) == CODE_LABEL && init_insn)
9935 break;
9937 if (! INSN_P (insn))
9938 continue;
9940 /* Is this the initializing insn? */
9941 set = single_set (insn);
9942 if (set
9943 && GET_CODE (SET_DEST (set)) == REG
9944 && REGNO (SET_DEST (set)) == regno)
9946 if (init_insn)
9947 abort ();
9949 init_insn = insn;
9950 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
9951 store_is_first = 1;
9954 /* Only substitute after seeing the initializing insn. */
9955 if (init_insn && insn != init_insn)
9957 struct note_reg_stored_arg arg;
9959 replace_loop_regs (insn, reg_rtx, replacement);
9960 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
9961 replaced_last = 1;
9963 /* Stop replacing when REPLACEMENT is modified. */
9964 arg.reg = replacement;
9965 arg.set_seen = 0;
9966 note_stores (PATTERN (insn), note_reg_stored, &arg);
9967 if (arg.set_seen)
9969 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
9971 /* It is possible that we've turned a previously valid REG_EQUAL note
9972 into an invalid one: we change REGNO to REPLACEMENT and, unlike REGNO,
9973 REPLACEMENT is modified here, so the note's meaning changes. */
9974 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
9975 remove_note (insn, note);
9976 break;
9980 if (! init_insn)
9981 abort ();
9982 if (apply_change_group ())
9984 if (loop_dump_stream)
9985 fprintf (loop_dump_stream, " Replaced reg %d", regno);
9986 if (store_is_first && replaced_last)
9988 rtx first;
9989 rtx retval_note;
9991 /* Assume we're just deleting INIT_INSN. */
9992 first = init_insn;
9993 /* Look for REG_RETVAL note. If we're deleting the end of
9994 the libcall sequence, the whole sequence can go. */
9995 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
9996 /* If we found a REG_RETVAL note, find the first instruction
9997 in the sequence. */
9998 if (retval_note)
9999 first = XEXP (retval_note, 0);
10001 /* Delete the instructions. */
10002 loop_delete_insns (first, init_insn);
10004 if (loop_dump_stream)
10005 fprintf (loop_dump_stream, ".\n");
10009 /* Replace all the instructions from FIRST up to and including LAST
10010 with NOTE_INSN_DELETED notes. */
10012 static void
10013 loop_delete_insns (first, last)
10014 rtx first;
10015 rtx last;
10017 while (1)
10019 if (loop_dump_stream)
10020 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10021 INSN_UID (first));
10022 delete_insn (first);
10024 /* If this was the LAST instruction we're supposed to delete,
10025 we're done. */
10026 if (first == last)
10027 break;
10029 first = NEXT_INSN (first);
10033 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10034 loop LOOP if the order of the sets of these registers can be
10035 swapped. There must be exactly one insn within the loop that sets
10036 this pseudo followed immediately by a move insn that sets
10037 REPLACEMENT with REGNO. */
10038 static void
10039 try_swap_copy_prop (loop, replacement, regno)
10040 const struct loop *loop;
10041 rtx replacement;
10042 unsigned int regno;
10044 rtx insn;
10045 rtx set = NULL_RTX;
10046 unsigned int new_regno;
10048 new_regno = REGNO (replacement);
10050 for (insn = next_insn_in_loop (loop, loop->scan_start);
10051 insn != NULL_RTX;
10052 insn = next_insn_in_loop (loop, insn))
10054 /* Search for the insn that copies REGNO to NEW_REGNO. */
10055 if (INSN_P (insn)
10056 && (set = single_set (insn))
10057 && GET_CODE (SET_DEST (set)) == REG
10058 && REGNO (SET_DEST (set)) == new_regno
10059 && GET_CODE (SET_SRC (set)) == REG
10060 && REGNO (SET_SRC (set)) == regno)
10061 break;
10064 if (insn != NULL_RTX)
10066 rtx prev_insn;
10067 rtx prev_set;
10069 /* Some DEF-USE info would come in handy here to make this
10070 function more general. For now, just check the previous insn
10071 which is the most likely candidate for setting REGNO. */
10073 prev_insn = PREV_INSN (insn);
10075 if (INSN_P (prev_insn)
10076 && (prev_set = single_set (prev_insn))
10077 && GET_CODE (SET_DEST (prev_set)) == REG
10078 && REGNO (SET_DEST (prev_set)) == regno)
10080 /* We have:
10081 (set (reg regno) (expr))
10082 (set (reg new_regno) (reg regno))
10084 so try converting this to:
10085 (set (reg new_regno) (expr))
10086 (set (reg regno) (reg new_regno))
10088 The former construct is often generated when a global
10089 variable used for an induction variable is shadowed by a
10090 register (NEW_REGNO). The latter construct improves the
10091 chances of GIV replacement and BIV elimination. */
10093 validate_change (prev_insn, &SET_DEST (prev_set),
10094 replacement, 1);
10095 validate_change (insn, &SET_DEST (set),
10096 SET_SRC (set), 1);
10097 validate_change (insn, &SET_SRC (set),
10098 replacement, 1);
10100 if (apply_change_group ())
10102 if (loop_dump_stream)
10103 fprintf (loop_dump_stream,
10104 " Swapped set of reg %d at %d with reg %d at %d.\n",
10105 regno, INSN_UID (insn),
10106 new_regno, INSN_UID (prev_insn));
10108 /* Update first use of REGNO. */
10109 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10110 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10112 /* Now perform copy propagation to hopefully
10113 remove all uses of REGNO within the loop. */
10114 try_copy_prop (loop, replacement, regno);
10120 /* Replace MEM with its associated pseudo register. This function is
10121 called from load_mems via for_each_rtx. DATA is actually a pointer
10122 to a structure describing the instruction currently being scanned
10123 and the MEM we are currently replacing. */
10125 static int
10126 replace_loop_mem (mem, data)
10127 rtx *mem;
10128 void *data;
10130 loop_replace_args *args = (loop_replace_args *) data;
10131 rtx m = *mem;
10133 if (m == NULL_RTX)
10134 return 0;
10136 switch (GET_CODE (m))
10138 case MEM:
10139 break;
10141 case CONST_DOUBLE:
10142 /* We're not interested in the MEM associated with a
10143 CONST_DOUBLE, so there's no need to traverse into one. */
10144 return -1;
10146 default:
10147 /* This is not a MEM. */
10148 return 0;
10151 if (!rtx_equal_p (args->match, m))
10152 /* This is not the MEM we are currently replacing. */
10153 return 0;
10155 /* Actually replace the MEM. */
10156 validate_change (args->insn, mem, args->replacement, 1);
10158 return 0;
10161 static void
10162 replace_loop_mems (insn, mem, reg)
10163 rtx insn;
10164 rtx mem;
10165 rtx reg;
10167 loop_replace_args args;
10169 args.insn = insn;
10170 args.match = mem;
10171 args.replacement = reg;
10173 for_each_rtx (&insn, replace_loop_mem, &args);
10176 /* Replace one register with another. Called through for_each_rtx; PX points
10177 to the rtx being scanned. DATA is actually a pointer to
10178 a structure of arguments. */
10180 static int
10181 replace_loop_reg (px, data)
10182 rtx *px;
10183 void *data;
10185 rtx x = *px;
10186 loop_replace_args *args = (loop_replace_args *) data;
10188 if (x == NULL_RTX)
10189 return 0;
10191 if (x == args->match)
10192 validate_change (args->insn, px, args->replacement, 1);
10194 return 0;
10197 static void
10198 replace_loop_regs (insn, reg, replacement)
10199 rtx insn;
10200 rtx reg;
10201 rtx replacement;
10203 loop_replace_args args;
10205 args.insn = insn;
10206 args.match = reg;
10207 args.replacement = replacement;
10209 for_each_rtx (&insn, replace_loop_reg, &args);
10212 /* Replace occurrences of the old exit label for the loop with the new
10213 one. DATA is an rtx_pair containing the old and new labels,
10214 respectively. */
10216 static int
10217 replace_label (x, data)
10218 rtx *x;
10219 void *data;
10221 rtx l = *x;
10222 rtx old_label = ((rtx_pair *) data)->r1;
10223 rtx new_label = ((rtx_pair *) data)->r2;
10225 if (l == NULL_RTX)
10226 return 0;
10228 if (GET_CODE (l) != LABEL_REF)
10229 return 0;
10231 if (XEXP (l, 0) != old_label)
10232 return 0;
10234 XEXP (l, 0) = new_label;
10235 ++LABEL_NUSES (new_label);
10236 --LABEL_NUSES (old_label);
10238 return 0;
10241 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10242 (ignored in the interim). */
10244 static rtx
10245 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10246 const struct loop *loop ATTRIBUTE_UNUSED;
10247 basic_block where_bb ATTRIBUTE_UNUSED;
10248 rtx where_insn;
10249 rtx pattern;
10251 return emit_insn_after (pattern, where_insn);
10255 /* If WHERE_INSN is non-zero, emit insn for PATTERN before WHERE_INSN
10256 in basic block WHERE_BB (ignored in the interim) within the loop;
10257 otherwise, hoist PATTERN into the loop pre-header. */
10260 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10261 const struct loop *loop;
10262 basic_block where_bb ATTRIBUTE_UNUSED;
10263 rtx where_insn;
10264 rtx pattern;
10266 if (! where_insn)
10267 return loop_insn_hoist (loop, pattern);
10268 return emit_insn_before (pattern, where_insn);
10272 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10273 WHERE_BB (ignored in the interim) within the loop. */
10275 static rtx
10276 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10277 const struct loop *loop ATTRIBUTE_UNUSED;
10278 basic_block where_bb ATTRIBUTE_UNUSED;
10279 rtx where_insn;
10280 rtx pattern;
10282 return emit_call_insn_before (pattern, where_insn);
10286 /* Hoist insn for PATTERN into the loop pre-header. */
10289 loop_insn_hoist (loop, pattern)
10290 const struct loop *loop;
10291 rtx pattern;
10293 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10297 /* Hoist call insn for PATTERN into the loop pre-header. */
10299 static rtx
10300 loop_call_insn_hoist (loop, pattern)
10301 const struct loop *loop;
10302 rtx pattern;
10304 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10308 /* Sink insn for PATTERN after the loop end. */
10311 loop_insn_sink (loop, pattern)
10312 const struct loop *loop;
10313 rtx pattern;
10315 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10319 /* If the loop has multiple exits, emit insn for PATTERN before the
10320 loop to ensure that it will always be executed no matter how the
10321 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10322 since this is slightly more efficient. */
10324 static rtx
10325 loop_insn_sink_or_swim (loop, pattern)
10326 const struct loop *loop;
10327 rtx pattern;
10329 if (loop->exit_count)
10330 return loop_insn_hoist (loop, pattern);
10331 else
10332 return loop_insn_sink (loop, pattern);
10335 static void
10336 loop_ivs_dump (loop, file, verbose)
10337 const struct loop *loop;
10338 FILE *file;
10339 int verbose;
10341 struct iv_class *bl;
10342 int iv_num = 0;
10344 if (! loop || ! file)
10345 return;
10347 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10348 iv_num++;
10350 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10352 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10354 loop_iv_class_dump (bl, file, verbose);
10355 fputc ('\n', file);
10360 static void
10361 loop_iv_class_dump (bl, file, verbose)
10362 const struct iv_class *bl;
10363 FILE *file;
10364 int verbose ATTRIBUTE_UNUSED;
10366 struct induction *v;
10367 rtx incr;
10368 int i;
10370 if (! bl || ! file)
10371 return;
10373 fprintf (file, "IV class for reg %d, benefit %d\n",
10374 bl->regno, bl->total_benefit);
10376 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10377 if (bl->initial_value)
10379 fprintf (file, ", init val: ");
10380 print_simple_rtl (file, bl->initial_value);
10382 if (bl->initial_test)
10384 fprintf (file, ", init test: ");
10385 print_simple_rtl (file, bl->initial_test);
10387 fputc ('\n', file);
10389 if (bl->final_value)
10391 fprintf (file, " Final val: ");
10392 print_simple_rtl (file, bl->final_value);
10393 fputc ('\n', file);
10396 if ((incr = biv_total_increment (bl)))
10398 fprintf (file, " Total increment: ");
10399 print_simple_rtl (file, incr);
10400 fputc ('\n', file);
10403 /* List the increments. */
10404 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10406 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10407 print_simple_rtl (file, v->add_val);
10408 fputc ('\n', file);
10411 /* List the givs. */
10412 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10414 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10415 i, INSN_UID (v->insn), v->benefit);
10416 if (v->giv_type == DEST_ADDR)
10417 print_simple_rtl (file, v->mem);
10418 else
10419 print_simple_rtl (file, single_set (v->insn));
10420 fputc ('\n', file);
10425 static void
10426 loop_biv_dump (v, file, verbose)
10427 const struct induction *v;
10428 FILE *file;
10429 int verbose;
10431 if (! v || ! file)
10432 return;
10434 fprintf (file,
10435 "Biv %d: insn %d",
10436 REGNO (v->dest_reg), INSN_UID (v->insn));
10437 fprintf (file, " const ");
10438 print_simple_rtl (file, v->add_val);
10440 if (verbose && v->final_value)
10442 fputc ('\n', file);
10443 fprintf (file, " final ");
10444 print_simple_rtl (file, v->final_value);
10447 fputc ('\n', file);
10451 static void
10452 loop_giv_dump (v, file, verbose)
10453 const struct induction *v;
10454 FILE *file;
10455 int verbose;
10457 if (! v || ! file)
10458 return;
10460 if (v->giv_type == DEST_REG)
10461 fprintf (file, "Giv %d: insn %d",
10462 REGNO (v->dest_reg), INSN_UID (v->insn));
10463 else
10464 fprintf (file, "Dest address: insn %d",
10465 INSN_UID (v->insn));
10467 fprintf (file, " src reg %d benefit %d",
10468 REGNO (v->src_reg), v->benefit);
10469 fprintf (file, " lifetime %d",
10470 v->lifetime);
10472 if (v->replaceable)
10473 fprintf (file, " replaceable");
10475 if (v->no_const_addval)
10476 fprintf (file, " ncav");
10478 if (v->ext_dependent)
10480 switch (GET_CODE (v->ext_dependent))
10482 case SIGN_EXTEND:
10483 fprintf (file, " ext se");
10484 break;
10485 case ZERO_EXTEND:
10486 fprintf (file, " ext ze");
10487 break;
10488 case TRUNCATE:
10489 fprintf (file, " ext tr");
10490 break;
10491 default:
10492 abort ();
10496 fputc ('\n', file);
10497 fprintf (file, " mult ");
10498 print_simple_rtl (file, v->mult_val);
10500 fputc ('\n', file);
10501 fprintf (file, " add ");
10502 print_simple_rtl (file, v->add_val);
10504 if (verbose && v->final_value)
10506 fputc ('\n', file);
10507 fprintf (file, " final ");
10508 print_simple_rtl (file, v->final_value);
10511 fputc ('\n', file);
10515 void
10516 debug_ivs (loop)
10517 const struct loop *loop;
10519 loop_ivs_dump (loop, stderr, 1);
10523 void
10524 debug_iv_class (bl)
10525 const struct iv_class *bl;
10527 loop_iv_class_dump (bl, stderr, 1);
10531 void
10532 debug_biv (v)
10533 const struct induction *v;
10535 loop_biv_dump (v, stderr, 1);
10539 void
10540 debug_giv (v)
10541 const struct induction *v;
10543 loop_giv_dump (v, stderr, 1);
10547 #define LOOP_BLOCK_NUM_1(INSN) \
10548 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10550 /* The notes do not have an assigned block, so look at the next insn. */
10551 #define LOOP_BLOCK_NUM(INSN) \
10552 ((INSN) ? (GET_CODE (INSN) == NOTE \
10553 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10554 : LOOP_BLOCK_NUM_1 (INSN)) \
10555 : -1)
10557 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10559 static void
10560 loop_dump_aux (loop, file, verbose)
10561 const struct loop *loop;
10562 FILE *file;
10563 int verbose ATTRIBUTE_UNUSED;
10565 rtx label;
10567 if (! loop || ! file)
10568 return;
10570 /* Print diagnostics to compare our concept of a loop with
10571 what the loop notes say. */
10572 if (! PREV_INSN (loop->first->head)
10573 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10574 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10575 != NOTE_INSN_LOOP_BEG)
10576 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10577 INSN_UID (PREV_INSN (loop->first->head)));
10578 if (! NEXT_INSN (loop->last->end)
10579 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10580 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10581 != NOTE_INSN_LOOP_END)
10582 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10583 INSN_UID (NEXT_INSN (loop->last->end)));
10585 if (loop->start)
10587 fprintf (file,
10588 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10589 LOOP_BLOCK_NUM (loop->start),
10590 LOOP_INSN_UID (loop->start),
10591 LOOP_BLOCK_NUM (loop->cont),
10592 LOOP_INSN_UID (loop->cont),
10593 LOOP_BLOCK_NUM (loop->cont),
10594 LOOP_INSN_UID (loop->cont),
10595 LOOP_BLOCK_NUM (loop->vtop),
10596 LOOP_INSN_UID (loop->vtop),
10597 LOOP_BLOCK_NUM (loop->end),
10598 LOOP_INSN_UID (loop->end));
10599 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10600 LOOP_BLOCK_NUM (loop->top),
10601 LOOP_INSN_UID (loop->top),
10602 LOOP_BLOCK_NUM (loop->scan_start),
10603 LOOP_INSN_UID (loop->scan_start));
10604 fprintf (file, ";; exit_count %d", loop->exit_count);
10605 if (loop->exit_count)
10607 fputs (", labels:", file);
10608 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10610 fprintf (file, " %d ",
10611 LOOP_INSN_UID (XEXP (label, 0)));
10614 fputs ("\n", file);
10616 /* This can happen when a marked loop appears as two nested loops,
10617 say from while (a || b) {}. The inner loop won't match
10618 the loop markers but the outer one will. */
10619 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10620 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10624 /* Call this function from the debugger to dump LOOP. */
10626 void
10627 debug_loop (loop)
10628 const struct loop *loop;
10630 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10633 /* Call this function from the debugger to dump LOOPS. */
10635 void
10636 debug_loops (loops)
10637 const struct loops *loops;
10639 flow_loops_dump (loops, stderr, loop_dump_aux, 1);