/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
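
/* For illustration only (this is not code from the pass): at the source
   level, the transformations described above would take

       for (i = 0; i < n; i++)
         x[i] = a * b + i * 4;

   hoist the invariant product A * B out of the loop, strength-reduce the
   general induction variable I * 4 into a running sum, and eliminate the
   basic induction variable I in favor of a pointer:

       t = a * b;
       for (p = x, s = 0; p < x + n; p++, s += 4)
         *p = t + s;

   The pass itself performs these transformations on RTL, not on source
   code.  */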
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "obstack.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif
/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
/* The minimal number of prefetch blocks that a loop must consume to make
   emitting prefetch instructions in the body of the loop worthwhile.  */
#define PREFETCH_BLOCKS_IN_LOOP_MIN 6
/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores: this value divided by 256
   is the minimum fraction of memory references that are worth prefetching
   (the default of 220 corresponds to roughly 86%).  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is not always executed.  */
#ifndef PREFETCH_NOT_ALWAYS
#define PREFETCH_NOT_ALWAYS 0
#endif

/* If the loop requires more prefetches than the target can process in
   parallel then don't prefetch anything in that loop.  */
#ifndef PREFETCH_LIMIT_TO_SIMULTANEOUS
#define PREFETCH_LIMIT_TO_SIMULTANEOUS 1
#endif
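
/* A target can override any of the PREFETCH_* parameters above in its
   target-dependent files so that they arrive here already defined.  The
   values below are a hypothetical example for a machine with 64-byte
   cache lines and eight prefetch slots, not settings from an existing
   port:

       #define PREFETCH_BLOCK 64
       #define SIMULTANEOUS_PREFETCHES 8
       #define PREFETCH_LOW_LOOPCNT 64

   When a target is silent, the #ifndef defaults above apply.  */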

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
|| REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
((REGNO) < FIRST_PSEUDO_REGISTER \
? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
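
/* An illustrative, target-dependent example of LOOP_REGNO_NREGS: on a
   32-bit machine where a DImode value occupies two hard registers,
   applying the macro to a hard register whose SET_DEST has DImode
   yields 2 via HARD_REGNO_NREGS, while a pseudo register always counts
   as 1, whatever its mode.  */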

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};


FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label PARAMS ((rtx));
static void find_and_verify_loops PARAMS ((rtx, struct loops *));
static void mark_loop_jump PARAMS ((rtx, struct loop *));
static void prescan_loop PARAMS ((struct loop *));
static int reg_in_basic_block_p PARAMS ((rtx, rtx));
static int consec_sets_invariant_p PARAMS ((const struct loop *,
					    rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static void scan_loop PARAMS ((struct loop*, int));
#if 0
static void replace_call_address PARAMS ((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PARAMS ((rtx, int));
static int libcall_benefit PARAMS ((rtx));
static void ignore_some_movables PARAMS ((struct loop_movables *));
static void force_movables PARAMS ((struct loop_movables *));
static void combine_movables PARAMS ((struct loop_movables *,
				      struct loop_regs *));
static int num_unmoved_movables PARAMS ((const struct loop *));
static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
					 struct loop_regs *));
static void add_label_notes PARAMS ((rtx, rtx));
static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
				   int, int));
static void loop_movables_add PARAMS((struct loop_movables *,
				      struct movable *));
static void loop_movables_free PARAMS((struct loop_movables *));
static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
static void loop_bivs_find PARAMS((struct loop *));
static void loop_bivs_init_find PARAMS((struct loop *));
static void loop_bivs_check PARAMS((struct loop *));
static void loop_givs_find PARAMS((struct loop *));
static void loop_givs_check PARAMS((struct loop *));
static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
					 int, int));
static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
					   struct induction *, rtx));
static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
				     rtx *));
static void loop_ivs_free PARAMS((struct loop *));
static void strength_reduce PARAMS ((struct loop *, int));
static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
static void record_biv PARAMS ((struct loop *, struct induction *,
				rtx, rtx, rtx, rtx, rtx *,
				int, int));
static void check_final_value PARAMS ((const struct loop *,
				       struct induction *));
static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
static void record_giv PARAMS ((const struct loop *, struct induction *,
				rtx, rtx, rtx, rtx, rtx, rtx, int,
				enum g_types, int, int, rtx *));
static void update_giv_derive PARAMS ((const struct loop *, rtx));
static void check_ext_dependent_givs PARAMS ((struct iv_class *,
					      struct loop_info *));
static int basic_induction_var PARAMS ((const struct loop *, rtx,
					enum machine_mode, rtx, rtx,
					rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
					  rtx *, rtx *, rtx *, int, int *,
					  enum machine_mode));
static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
				    rtx, rtx, rtx *, rtx *, rtx *, rtx *));
static int check_dbra_loop PARAMS ((struct loop *, int));
static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
static int product_cheap_p PARAMS ((rtx, rtx));
static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
					int, int, int));
static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
					  struct iv_class *, int,
					  basic_block, rtx));
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
static void loop_regs_scan PARAMS ((const struct loop *, int));
static int count_insns_in_loop PARAMS ((const struct loop *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
static int replace_loop_reg PARAMS ((rtx *, void *));
static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
static void note_reg_stored PARAMS ((rtx, rtx, void *));
static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
					unsigned int));
static int replace_label PARAMS ((rtx *, void *));
static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
static void loop_regs_update PARAMS ((const struct loop *, rtx));
static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));

static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
					rtx, rtx));
static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
					      basic_block, rtx, rtx));
static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));

static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
static void loop_delete_insns PARAMS ((rtx, rtx));
static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
void debug_ivs PARAMS ((const struct loop *));
void debug_iv_class PARAMS ((const struct iv_class *));
void debug_biv PARAMS ((const struct induction *));
void debug_giv PARAMS ((const struct induction *));
void debug_loop PARAMS ((const struct loop *));
void debug_loops PARAMS ((const struct loops *));

typedef struct rtx_pair
{
  rtx r1;
  rtx r2;
} rtx_pair;

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))
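
/* Note the INSN_UID guard above: an insn created during loop
   optimization can have a uid of max_uid_for_loop or more and thus no
   valid luid, so INSN_IN_RANGE_P conservatively answers "no" for such
   an insn instead of indexing past the end of uid_luid.  */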

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p PARAMS ((rtx));

static int compute_luids PARAMS ((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
						     struct induction *,
						     rtx));

/* Benefit penalty if a giv is not replaceable, i.e. we must emit an insn to
   copy the value of the strength-reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop ()
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}

/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
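
/* A sketch of the numbering, for illustration: given the insn stream

       insn A, line-number NOTE, insn B

   compute_luids assigns A the luid N, gives the NOTE that same luid N,
   and assigns B the luid N + 1, so the luid distance between A and B is
   the same whether or not -g emitted the NOTE between them.  */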

/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, flags)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int flags;
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
				       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	scan_loop (loop, flags);
    }

  /* If there were lexical blocks inside the loop, they have been
     replicated.  We will now have more than one NOTE_INSN_BLOCK_BEG
     and NOTE_INSN_BLOCK_END for each such block.  We must duplicate
     the BLOCKs as well.  */
  if (write_symbols != NO_DEBUG)
    reorder_blocks ();

  end_alias_analysis ();

  /* Clean up.  */
  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}

/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (loop, insn)
     const struct loop *loop;
     rtx insn;
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
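
/* For example, for a loop entered near the bottom,

       NOTE_INSN_LOOP_BEG, jump L2, L1: body, L2: exit test, jump L1,
       NOTE_INSN_LOOP_END

   scanning with next_insn_in_loop from LOOP->SCAN_START (L2) proceeds
   to LOOP->END, wraps around to LOOP->TOP (L1), and returns NULL_RTX on
   reaching SCAN_START again, so every insn is visited exactly once in
   execution order.  */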

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop, flags)
     struct loop *loop;
     int flags;
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
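
  /* For instance, with illustrative numbers only: on a target where
     n_non_fixed_regs is 15, THRESHOLD is 2 * (1 + 15) = 32 for a loop
     without calls but only 1 * (1 + 15) = 16 for a loop that contains a
     call, since fewer registers are free across calls and saving an
     insn there justifies less extension of a register's life span.  */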

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be an unconditional jump (and not a RETURN).  */
      if (any_uncondjump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
	{
	  loop->top = next_label (loop->scan_start);
	  loop->scan_start = JUMP_LABEL (p);
	}
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop->cont));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	  && SET_DEST (set) != pic_offset_table_rtx
#endif
	  && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;

	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }

	  /* For parallels, add any possible uses to the dependencies, as
	     we can't move the insn without resolving them first.  */
	  if (GET_CODE (PATTERN (p)) == PARALLEL)
	    {
	      for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		{
		  rtx x = XVECEXP (PATTERN (p), 0, i);
		  if (GET_CODE (x) == USE)
		    dependencies
		      = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					   dependencies);
		}
	    }

	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  else if (/* The register is used in basic blocks other
		      than the one where it is set (meaning that
		      something after this point in the loop might
		      depend on its value before the set).  */
		   ! reg_in_basic_block_p (p, SET_DEST (set))
		   /* And the set is not guaranteed to be executed once
		      the loop starts, or the value before the set is
		      needed before the set occurs...

		      ??? Note we have quadratic behaviour here, mitigated
		      by the fact that the previous test will often fail for
		      large loops.  Rather than re-scanning the entire loop
		      each time for register usage, we should build tables
		      of the register usage and use them here instead.  */
		   && (maybe_never
		       || loop_reg_used_before_p (loop, set, p)))
	    /* It is unsafe to move the set.

	       This code used to consider it OK to move a set of a variable
	       which was not created by the user and not used in an exit
	       test.  That behavior is incorrect and was removed.  */
	    ;
	  else if ((tem = loop_invariant_p (loop, src))
		   && (dependencies == 0
		       || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
		   && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
		       || (tem1
			   = consec_sets_invariant_p
			   (loop, SET_DEST (set),
			    regs->array[REGNO (SET_DEST (set))].set_in_loop,
			    p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      struct movable *m;
	      int regno = REGNO (SET_DEST (set));

	      /* A potential lossage is the case where two insns can be
		 combined as long as they are both in the loop, but we move
		 one of them outside the loop.  For large loops, this can
		 lose.  The most common case of this is the address of a
		 function being called.

		 Therefore, if this register is marked as being used
		 exactly once and we are in a loop with calls (a "large
		 loop"), see if we can replace the usage of this register
		 with the source of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

	      if (loop_info->has_call
		  && regs->array[regno].single_usage != 0
		  && regs->array[regno].single_usage != const0_rtx
		  && REGNO_FIRST_UID (regno) == INSN_UID (p)
		  && (REGNO_LAST_UID (regno)
		      == INSN_UID (regs->array[regno].single_usage))
		  && regs->array[regno].set_in_loop == 1
		  && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		  && (! SMALL_REGISTER_CLASSES
		      || (! (GET_CODE (SET_SRC (set)) == REG
			     && REGNO (SET_SRC (set))
				< FIRST_PSEUDO_REGISTER)))
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   regs->array[regno].single_usage)
		  && no_labels_between_p (p, regs->array[regno].single_usage)
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   regs->array[regno].single_usage))
		{
		  /* Replace any usage in a REG_EQUAL note.  Must copy the
		     new source, so that we don't get rtx sharing between the
		     SET_SOURCE and REG_NOTES of insn p.  */
		  REG_NOTES (regs->array[regno].single_usage)
		    = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
				   SET_DEST (set), copy_rtx (SET_SRC (set)));

		  delete_insn (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
		       i++)
		    regs->array[regno + i].set_in_loop = 0;
		  continue;
		}

	      m = (struct movable *) xmalloc (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->move_insn_first = 0;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either loop_invariant_p
		 or consec_sets_invariant_p returned 2
		 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = LOOP_REG_GLOBAL_P (loop, regno);
	      m->match = 0;
	      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
	      m->savings = regs->array[regno].n_times_set;
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		regs->array[regno + i].set_in_loop = move_insn ? -2 : -1;
	      /* Add M to the end of the chain MOVABLES.  */
	      loop_movables_add (movables, m);

	      if (m->consec > 0)
		{
		  /* It is possible for the first instruction to have a
		     REG_EQUAL note but a non-invariant SET_SRC, so we must
		     remember the status of the first instruction in case
		     the last instruction doesn't have a REG_EQUAL note.  */
		  m->move_insn_first = m->move_insn;

		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);

		  /* We must now reset m->move_insn, m->is_equiv, and
		     possibly m->set_src to correspond to the effects of
		     all the insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      int regno = REGNO (SET_DEST (set));
	      if (regs->array[regno].set_in_loop == 2)
		{
		  struct movable *m;
		  m = (struct movable *) xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->move_insn_first = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || LOOP_REG_GLOBAL_P (loop, regno)
			       || (labels_in_range_p
				   (p, REGNO_FIRST_LUID (regno))));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = 1;
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
		       i++)
		    regs->array[regno + i].set_in_loop = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop_end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads leads, when it
     dies, right into another conditionally movable insn.  If so, record
     that the second insn "forces" the first one, since the second can be
     moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    {
      move_movables (loop, movables, threshold, insn_count);

      /* Recalculate regs->array if move_movables has created new
	 registers.  */
      if (max_reg_num () > regs->num)
	{
	  loop_regs_scan (loop, 0);
	  for (update_start = loop_start;
	       PREV_INSN (update_start)
		 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
	       update_start = PREV_INSN (update_start))
	    ;
	  update_end = NEXT_INSN (loop_end);

	  reg_scan_update (update_start, update_end, loop_max_reg);
	  loop_max_reg = max_reg_num ();
	}
    }

  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not
     actually invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
	 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
	/* Ensure our label doesn't go away.  */
	LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
	  && --LABEL_NUSES (update_end) == 0)
	delete_related_insns (update_end);
    }

  /* The movable information is required for strength reduction; now
     that strength reduction is done, free it.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}

/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}

/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}

/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;

	default:
	  break;
	}
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}

/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a
				   library routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}

/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do
	insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct loop_movables *movables;
{
  struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}

/* For each movable insn, see if the reg that it loads leads, when it
   dies, right into another conditionally movable insn.  If so, record
   that the second insn "forces" the first one, since the second can be
   moved only if the first is.  */

static void
force_movables (movables)
     struct loop_movables *movables;
{
  struct movable *m, *m1;

  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m->forces = m1;
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}

/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, regs)
     struct loop_movables *movables;
     struct loop_regs *regs;
{
  struct movable *m;
  char *matched_regs = (char *) xmalloc (regs->num);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Only pseudo registers are allowed to match or be matched,
     since move_movables does not validate the change.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables->head; m; m = m->next)
    if (m->match == 0 && regs->array[m->regno].n_times_set == 1
	&& m->regno >= FIRST_PSEUDO_REGISTER
	&& !m->partial)
      {
	struct movable *m1;
	int regno = m->regno;

	memset (matched_regs, 0, regs->num);
	matched_regs[regno] = 1;

	/* We want later insns to match the first one.  Don't make the first
	   one match any later ones.  So start this loop at m->next.  */
	for (m1 = m->next; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0
	      && regs->array[m1->regno].n_times_set == 1
	      && m1->regno >= FIRST_PSEUDO_REGISTER
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables, regs))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->done = 1;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables->head; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    struct movable *m1;

	    int first = REGNO_FIRST_LUID (m->regno);
	    int last = REGNO_LAST_LUID (m->regno);

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
		continue;
	      }

	    /* Make sure they extend to the same mode.
	       (Almost always true.)  */
	    if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
	      continue;

	    /* We already have one: check for overlap with those
	       already combined together.  */
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1 == m0 || (m1->partial && m1->match == m0))
		if (! (REGNO_FIRST_LUID (m1->regno) > last
		       || REGNO_LAST_LUID (m1->regno) < first))
		  goto overlap;

	    /* No overlap: we can combine this with the others.  */
	    m0->lifetime += m->lifetime;
	    m0->savings += m->savings;
	    m->done = 1;
	    m->match = m0;

	  overlap:
	    ;
	  }
    }

  /* Clean up.  */
  free (matched_regs);
}

/* Returns the number of movable instructions in LOOP that were not
   moved outside the loop.  */

static int
num_unmoved_movables (loop)
     const struct loop *loop;
{
  int num = 0;
  struct movable *m;

  for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
    if (!m->done)
      ++num;

  return num;
}

/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct loop_movables *movables;
{
  unsigned int xn = REGNO (x);
  unsigned int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables->head; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables->head; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}
1605 /* Return 1 if X and Y are identical-looking rtx's.
1606 This is the Lisp function EQUAL for rtx arguments.
1608 If two registers are matching movables or a movable register and an
1609 equivalent constant, consider them equal. */
1611 static int
1612 rtx_equal_for_loop_p (x, y, movables, regs)
1613 rtx x, y;
1614 struct loop_movables *movables;
1615 struct loop_regs *regs;
1617 int i;
1618 int j;
1619 struct movable *m;
1620 enum rtx_code code;
1621 const char *fmt;
1623 if (x == y)
1624 return 1;
1625 if (x == 0 || y == 0)
1626 return 0;
1628 code = GET_CODE (x);
1630 /* If we have a register and a constant, they may sometimes be
1631 equal. */
1632 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1633 && CONSTANT_P (y))
1635 for (m = movables->head; m; m = m->next)
1636 if (m->move_insn && m->regno == REGNO (x)
1637 && rtx_equal_p (m->set_src, y))
1638 return 1;
1640 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1641 && CONSTANT_P (x))
1643 for (m = movables->head; m; m = m->next)
1644 if (m->move_insn && m->regno == REGNO (y)
1645 && rtx_equal_p (m->set_src, x))
1646 return 1;
1649 /* Otherwise, rtx's of different codes cannot be equal. */
1650 if (code != GET_CODE (y))
1651 return 0;
1653 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1654 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1656 if (GET_MODE (x) != GET_MODE (y))
1657 return 0;
1659 /* These three types of rtx's can be compared nonrecursively. */
1660 if (code == REG)
1661 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1663 if (code == LABEL_REF)
1664 return XEXP (x, 0) == XEXP (y, 0);
1665 if (code == SYMBOL_REF)
1666 return XSTR (x, 0) == XSTR (y, 0);
1668 /* Compare the elements. If any pair of corresponding elements
1669 fails to match, return 0 for the whole thing. */
1671 fmt = GET_RTX_FORMAT (code);
1672 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1674 switch (fmt[i])
1676 case 'w':
1677 if (XWINT (x, i) != XWINT (y, i))
1678 return 0;
1679 break;
1681 case 'i':
1682 if (XINT (x, i) != XINT (y, i))
1683 return 0;
1684 break;
1686 case 'E':
1687 /* Two vectors must have the same length. */
1688 if (XVECLEN (x, i) != XVECLEN (y, i))
1689 return 0;
1691 /* And the corresponding elements must match. */
1692 for (j = 0; j < XVECLEN (x, i); j++)
1693 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1694 movables, regs) == 0)
1695 return 0;
1696 break;
1698 case 'e':
1699 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1700 == 0)
1701 return 0;
1702 break;
1704 case 's':
1705 if (strcmp (XSTR (x, i), XSTR (y, i)))
1706 return 0;
1707 break;
1709 case 'u':
1710 /* These are just backpointers, so they don't matter. */
1711 break;
1713 case '0':
1714 break;
1716 /* It is believed that rtx's at this level will never
1717 contain anything but integers and other rtx's,
1718 except for within LABEL_REFs and SYMBOL_REFs. */
1719 default:
1720 abort ();
1723 return 1;
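/* Illustrative example (hypothetical regnos): if (reg 105) is a movable
   with move_insn set, set_src == (const_int 4) and set_in_loop == -2,
   then

      rtx_equal_for_loop_p ((plus:SI (reg 100) (reg 105)),
                            (plus:SI (reg 100) (const_int 4)),
                            movables, regs)

   returns 1: the movable register and its equivalent constant are
   considered equal, so insns computing either form can be combined.  */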
1726 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1727 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1728 references is incremented once for each added note. */
1730 static void
1731 add_label_notes (x, insns)
1732 rtx x;
1733 rtx insns;
1735 enum rtx_code code = GET_CODE (x);
1736 int i, j;
1737 const char *fmt;
1738 rtx insn;
1740 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1742 /* This code used to ignore labels that referred to dispatch tables to
1743 avoid flow generating (slightly) worse code.
1745 We no longer ignore such label references (see LABEL_REF handling in
1746 mark_jump_label for additional information). */
1747 for (insn = insns; insn; insn = NEXT_INSN (insn))
1748 if (reg_mentioned_p (XEXP (x, 0), insn))
1750 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1751 REG_NOTES (insn));
1752 if (LABEL_P (XEXP (x, 0)))
1753 LABEL_NUSES (XEXP (x, 0))++;
1757 fmt = GET_RTX_FORMAT (code);
1758 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1760 if (fmt[i] == 'e')
1761 add_label_notes (XEXP (x, i), insns);
1762 else if (fmt[i] == 'E')
1763 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1764 add_label_notes (XVECEXP (x, i, j), insns);
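/* Illustrative example: if X contains (label_ref L) and some insn in
   INSNS mentions that label's CODE_LABEL, the insn gains a note

      (insn_list:REG_LABEL L <old notes>)

   and LABEL_NUSES (L) is incremented, keeping the label alive for later
   passes that count references.  */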
1768 /* Scan MOVABLES, and move the insns that deserve to be moved.
1769 If two matching movables are combined, replace one reg with the
1770 other throughout. */
1772 static void
1773 move_movables (loop, movables, threshold, insn_count)
1774 struct loop *loop;
1775 struct loop_movables *movables;
1776 int threshold;
1777 int insn_count;
1779 struct loop_regs *regs = LOOP_REGS (loop);
1780 int nregs = regs->num;
1781 rtx new_start = 0;
1782 struct movable *m;
1783 rtx p;
1784 rtx loop_start = loop->start;
1785 rtx loop_end = loop->end;
1786 /* Map of pseudo-register replacements to handle combining
1787 when we move several insns that load the same value
1788 into different pseudo-registers. */
1789 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1790 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1792 for (m = movables->head; m; m = m->next)
1794 /* Describe this movable insn. */
1796 if (loop_dump_stream)
1798 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1799 INSN_UID (m->insn), m->regno, m->lifetime);
1800 if (m->consec > 0)
1801 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1802 if (m->cond)
1803 fprintf (loop_dump_stream, "cond ");
1804 if (m->force)
1805 fprintf (loop_dump_stream, "force ");
1806 if (m->global)
1807 fprintf (loop_dump_stream, "global ");
1808 if (m->done)
1809 fprintf (loop_dump_stream, "done ");
1810 if (m->move_insn)
1811 fprintf (loop_dump_stream, "move-insn ");
1812 if (m->match)
1813 fprintf (loop_dump_stream, "matches %d ",
1814 INSN_UID (m->match->insn));
1815 if (m->forces)
1816 fprintf (loop_dump_stream, "forces %d ",
1817 INSN_UID (m->forces->insn));
1820 /* Ignore the insn if it's already done (it matched something else).
1821 Otherwise, see if it is now safe to move. */
1823 if (!m->done
1824 && (! m->cond
1825 || (1 == loop_invariant_p (loop, m->set_src)
1826 && (m->dependencies == 0
1827 || 1 == loop_invariant_p (loop, m->dependencies))
1828 && (m->consec == 0
1829 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1830 m->consec + 1,
1831 m->insn))))
1832 && (! m->forces || m->forces->done))
1834 int regno;
1835 rtx p;
1836 int savings = m->savings;
1838 /* We have an insn that is safe to move.
1839 Compute its desirability. */
1841 p = m->insn;
1842 regno = m->regno;
1844 if (loop_dump_stream)
1845 fprintf (loop_dump_stream, "savings %d ", savings);
1847 if (regs->array[regno].moved_once && loop_dump_stream)
1848 fprintf (loop_dump_stream, "halved since already moved ");
1850 /* An insn MUST be moved if we already moved something else
1851 which is safe only if this one is moved too: that is,
1852 if already_moved[REGNO] is nonzero. */
1854 /* An insn is desirable to move if the new lifetime of the
1855 register is no more than THRESHOLD times the old lifetime.
1856 If it's not desirable, it means the loop is so big
1857 that moving won't speed things up much,
1858 and it is liable to make register usage worse. */
1860 /* It is also desirable to move if it can be moved at no
1861 extra cost because something else was already moved. */
1863 if (already_moved[regno]
1864 || flag_move_all_movables
1865 || (threshold * savings * m->lifetime) >=
1866 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1867 || (m->forces && m->forces->done
1868 && regs->array[m->forces->regno].n_times_set == 1))
1870 int count;
1871 struct movable *m1;
1872 rtx first = NULL_RTX;
1874 /* Now move the insns that set the reg. */
1876 if (m->partial && m->match)
1878 rtx newpat, i1;
1879 rtx r1, r2;
1880 /* Find the end of this chain of matching regs.
1881 Thus, we load each reg in the chain from that one reg.
1882 And that reg is loaded with 0 directly,
1883 since it has ->match == 0. */
1884 for (m1 = m; m1->match; m1 = m1->match);
1885 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1886 SET_DEST (PATTERN (m1->insn)));
1887 i1 = loop_insn_hoist (loop, newpat);
1889 /* Mark the moved, invariant reg as being allowed to
1890 share a hard reg with the other matching invariant. */
1891 REG_NOTES (i1) = REG_NOTES (m->insn);
1892 r1 = SET_DEST (PATTERN (m->insn));
1893 r2 = SET_DEST (PATTERN (m1->insn));
1894 regs_may_share
1895 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1896 gen_rtx_EXPR_LIST (VOIDmode, r2,
1897 regs_may_share));
1898 delete_insn (m->insn);
1900 if (new_start == 0)
1901 new_start = i1;
1903 if (loop_dump_stream)
1904 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1906 /* If we are to re-generate the item being moved with a
1907 new move insn, first delete what we have and then emit
1908 the move insn before the loop. */
1909 else if (m->move_insn)
1911 rtx i1, temp, seq;
1913 for (count = m->consec; count >= 0; count--)
1915 /* If this is the first insn of a library call sequence,
1916 skip to the end. */
1917 if (GET_CODE (p) != NOTE
1918 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1919 p = XEXP (temp, 0);
1921 /* If this is the last insn of a libcall sequence, then
1922 delete every insn in the sequence except the last.
1923 The last insn is handled in the normal manner. */
1924 if (GET_CODE (p) != NOTE
1925 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1927 temp = XEXP (temp, 0);
1928 while (temp != p)
1929 temp = delete_insn (temp);
1932 temp = p;
1933 p = delete_insn (p);
1935 /* simplify_giv_expr expects that it can walk the insns
1936 at m->insn forwards and see this old sequence we are
1937 tossing here. delete_insn does preserve the next
1938 pointers, but when we skip over a NOTE we must fix
1939 it up. Otherwise that code walks into the non-deleted
1940 insn stream. */
1941 while (p && GET_CODE (p) == NOTE)
1942 p = NEXT_INSN (temp) = NEXT_INSN (p);
1945 start_sequence ();
1946 emit_move_insn (m->set_dest, m->set_src);
1947 temp = get_insns ();
1948 seq = gen_sequence ();
1949 end_sequence ();
1951 add_label_notes (m->set_src, temp);
1953 i1 = loop_insn_hoist (loop, seq);
1954 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1955 set_unique_reg_note (i1,
1956 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1957 m->set_src);
1959 if (loop_dump_stream)
1960 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1962 /* The more regs we move, the less we like moving them. */
1963 threshold -= 3;
1965 else
1967 for (count = m->consec; count >= 0; count--)
1969 rtx i1, temp;
1971 /* If first insn of libcall sequence, skip to end. */
1972 /* Do this at start of loop, since p is guaranteed to
1973 be an insn here. */
1974 if (GET_CODE (p) != NOTE
1975 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1976 p = XEXP (temp, 0);
1978 /* If last insn of libcall sequence, move all
1979 insns except the last before the loop. The last
1980 insn is handled in the normal manner. */
1981 if (GET_CODE (p) != NOTE
1982 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1984 rtx fn_address = 0;
1985 rtx fn_reg = 0;
1986 rtx fn_address_insn = 0;
1988 first = 0;
1989 for (temp = XEXP (temp, 0); temp != p;
1990 temp = NEXT_INSN (temp))
1992 rtx body;
1993 rtx n;
1994 rtx next;
1996 if (GET_CODE (temp) == NOTE)
1997 continue;
1999 body = PATTERN (temp);
2001 /* Find the next insn after TEMP,
2002 not counting USE or NOTE insns. */
2003 for (next = NEXT_INSN (temp); next != p;
2004 next = NEXT_INSN (next))
2005 if (! (GET_CODE (next) == INSN
2006 && GET_CODE (PATTERN (next)) == USE)
2007 && GET_CODE (next) != NOTE)
2008 break;
2010 /* If that is the call, this may be the insn
2011 that loads the function address.
2013 Extract the function address from the insn
2014 that loads it into a register.
2015 If this insn was cse'd, we get incorrect code.
2017 So emit a new move insn that copies the
2018 function address into the register that the
2019 call insn will use. flow.c will delete any
2020 redundant stores that we have created. */
2021 if (GET_CODE (next) == CALL_INSN
2022 && GET_CODE (body) == SET
2023 && GET_CODE (SET_DEST (body)) == REG
2024 && (n = find_reg_note (temp, REG_EQUAL,
2025 NULL_RTX)))
2027 fn_reg = SET_SRC (body);
2028 if (GET_CODE (fn_reg) != REG)
2029 fn_reg = SET_DEST (body);
2030 fn_address = XEXP (n, 0);
2031 fn_address_insn = temp;
2033 /* We have the call insn.
2034 If it uses the register we suspect it might,
2035 load it with the correct address directly. */
2036 if (GET_CODE (temp) == CALL_INSN
2037 && fn_address != 0
2038 && reg_referenced_p (fn_reg, body))
2039 loop_insn_emit_after (loop, 0, fn_address_insn,
2040 gen_move_insn
2041 (fn_reg, fn_address));
2043 if (GET_CODE (temp) == CALL_INSN)
2045 i1 = loop_call_insn_hoist (loop, body);
2046 /* Because the USAGE information potentially
2047 contains objects other than hard registers
2048 we need to copy it. */
2049 if (CALL_INSN_FUNCTION_USAGE (temp))
2050 CALL_INSN_FUNCTION_USAGE (i1)
2051 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2053 else
2054 i1 = loop_insn_hoist (loop, body);
2055 if (first == 0)
2056 first = i1;
2057 if (temp == fn_address_insn)
2058 fn_address_insn = i1;
2059 REG_NOTES (i1) = REG_NOTES (temp);
2060 REG_NOTES (temp) = NULL;
2061 delete_insn (temp);
2063 if (new_start == 0)
2064 new_start = first;
2066 if (m->savemode != VOIDmode)
2068 /* P sets REG to zero; but we should clear only
2069 the bits that are not covered by the mode
2070 m->savemode. */
2071 rtx reg = m->set_dest;
2072 rtx sequence;
2073 rtx tem;
2075 start_sequence ();
2076 tem = expand_simple_binop
2077 (GET_MODE (reg), AND, reg,
2078 GEN_INT ((((HOST_WIDE_INT) 1
2079 << GET_MODE_BITSIZE (m->savemode)))
2080 - 1),
2081 reg, 1, OPTAB_LIB_WIDEN);
2082 if (tem == 0)
2083 abort ();
2084 if (tem != reg)
2085 emit_move_insn (reg, tem);
2086 sequence = gen_sequence ();
2087 end_sequence ();
2088 i1 = loop_insn_hoist (loop, sequence);
2090 else if (GET_CODE (p) == CALL_INSN)
2092 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2093 /* Because the USAGE information potentially
2094 contains objects other than hard registers
2095 we need to copy it. */
2096 if (CALL_INSN_FUNCTION_USAGE (p))
2097 CALL_INSN_FUNCTION_USAGE (i1)
2098 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2100 else if (count == m->consec && m->move_insn_first)
2102 rtx seq;
2103 /* The SET_SRC might not be invariant, so we must
2104 use the REG_EQUAL note. */
2105 start_sequence ();
2106 emit_move_insn (m->set_dest, m->set_src);
2107 temp = get_insns ();
2108 seq = gen_sequence ();
2109 end_sequence ();
2111 add_label_notes (m->set_src, temp);
2113 i1 = loop_insn_hoist (loop, seq);
2114 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2115 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2116 : REG_EQUAL, m->set_src);
2118 else
2119 i1 = loop_insn_hoist (loop, PATTERN (p));
2121 if (REG_NOTES (i1) == 0)
2123 REG_NOTES (i1) = REG_NOTES (p);
2124 REG_NOTES (p) = NULL;
2126 /* If there is a REG_EQUAL note present whose value
2127 is not loop invariant, then delete it, since it
2128 may cause problems with later optimization passes.
2129 It is possible for cse to create such notes
2130 like this as a result of record_jump_cond. */
2132 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2133 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2134 remove_note (i1, temp);
2137 if (new_start == 0)
2138 new_start = i1;
2140 if (loop_dump_stream)
2141 fprintf (loop_dump_stream, " moved to %d",
2142 INSN_UID (i1));
2144 /* If library call, now fix the REG_NOTES that contain
2145 insn pointers, namely REG_LIBCALL on FIRST
2146 and REG_RETVAL on I1. */
2147 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2149 XEXP (temp, 0) = first;
2150 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2151 XEXP (temp, 0) = i1;
2154 temp = p;
2155 delete_insn (p);
2156 p = NEXT_INSN (p);
2158 /* simplify_giv_expr expects that it can walk the insns
2159 at m->insn forwards and see this old sequence we are
2160 tossing here. delete_insn does preserve the next
2161 pointers, but when we skip over a NOTE we must fix
2162 it up. Otherwise that code walks into the non-deleted
2163 insn stream. */
2164 while (p && GET_CODE (p) == NOTE)
2165 p = NEXT_INSN (temp) = NEXT_INSN (p);
2168 /* The more regs we move, the less we like moving them. */
2169 threshold -= 3;
2172 /* Any other movable that loads the same register
2173 MUST be moved. */
2174 already_moved[regno] = 1;
2176 /* This reg has been moved out of one loop. */
2177 regs->array[regno].moved_once = 1;
2179 /* The reg set here is now invariant. */
2180 if (! m->partial)
2182 int i;
2183 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2184 regs->array[regno+i].set_in_loop = 0;
2187 m->done = 1;
2189 /* Change the length-of-life info for the register
2190 to say it lives at least the full length of this loop.
2191 This will help guide optimizations in outer loops. */
2193 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2194 /* This is the old insn before all the moved insns.
2195 We can't use the moved insn because it is out of range
2196 in uid_luid. Only the old insns have luids. */
2197 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2198 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2199 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2201 /* Combine with this moved insn any other matching movables. */
2203 if (! m->partial)
2204 for (m1 = movables->head; m1; m1 = m1->next)
2205 if (m1->match == m)
2207 rtx temp;
2209 /* Schedule the reg loaded by M1
2210 for replacement so that it shares the reg of M.
2211 If the modes differ (only possible in restricted
2212 circumstances), make a SUBREG.
2214 Note this assumes that the target dependent files
2215 treat REG and SUBREG equally, including within
2216 GO_IF_LEGITIMATE_ADDRESS and in all the
2217 predicates since we never verify that replacing the
2218 original register with a SUBREG results in a
2219 recognizable insn. */
2220 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2221 reg_map[m1->regno] = m->set_dest;
2222 else
2223 reg_map[m1->regno]
2224 = gen_lowpart_common (GET_MODE (m1->set_dest),
2225 m->set_dest);
2227 /* Get rid of the matching insn
2228 and prevent further processing of it. */
2229 m1->done = 1;
2231 /* if library call, delete all insns. */
2232 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2233 NULL_RTX)))
2234 delete_insn_chain (XEXP (temp, 0), m1->insn);
2235 else
2236 delete_insn (m1->insn);
2238 /* Any other movable that loads the same register
2239 MUST be moved. */
2240 already_moved[m1->regno] = 1;
2242 /* The reg merged here is now invariant,
2243 if the reg it matches is invariant. */
2244 if (! m->partial)
2246 int i;
2247 for (i = 0;
2248 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2249 i++)
2250 regs->array[m1->regno+i].set_in_loop = 0;
2254 else if (loop_dump_stream)
2255 fprintf (loop_dump_stream, "not desirable");
2257 else if (loop_dump_stream && !m->match)
2258 fprintf (loop_dump_stream, "not safe");
2260 if (loop_dump_stream)
2261 fprintf (loop_dump_stream, "\n");
2264 if (new_start == 0)
2265 new_start = loop_start;
2267 /* Go through all the instructions in the loop, making
2268 all the register substitutions scheduled in REG_MAP. */
2269 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2270 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2271 || GET_CODE (p) == CALL_INSN)
2273 replace_regs (PATTERN (p), reg_map, nregs, 0);
2274 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2275 INSN_CODE (p) = -1;
2278 /* Clean up. */
2279 free (reg_map);
2280 free (already_moved);
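/* A before/after sketch of the basic transformation performed above
   (the load and register number are illustrative):

      before:
         L:  (set (reg 100) (mem (symbol_ref "x")))   ;; invariant
             ... uses of (reg 100) ...
             (jump L)

      after:
             (set (reg 100) (mem (symbol_ref "x")))   ;; hoisted copy
         L:  ... uses of (reg 100) ...
             (jump L)

   The invariant set is emitted once in the loop preheader by
   loop_insn_hoist, the copy inside the loop is deleted, and any matching
   movables are rewritten through reg_map to share (reg 100).  */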
2284 static void
2285 loop_movables_add (movables, m)
2286 struct loop_movables *movables;
2287 struct movable *m;
2289 if (movables->head == 0)
2290 movables->head = m;
2291 else
2292 movables->last->next = m;
2293 movables->last = m;
2297 static void
2298 loop_movables_free (movables)
2299 struct loop_movables *movables;
2301 struct movable *m;
2302 struct movable *m_next;
2304 for (m = movables->head; m; m = m_next)
2306 m_next = m->next;
2307 free (m);
2311 #if 0
2312 /* Scan X and replace the address of any MEM in it with ADDR.
2313 REG is the address that MEM should have before the replacement. */
2315 static void
2316 replace_call_address (x, reg, addr)
2317 rtx x, reg, addr;
2319 enum rtx_code code;
2320 int i;
2321 const char *fmt;
2323 if (x == 0)
2324 return;
2325 code = GET_CODE (x);
2326 switch (code)
2328 case PC:
2329 case CC0:
2330 case CONST_INT:
2331 case CONST_DOUBLE:
2332 case CONST:
2333 case SYMBOL_REF:
2334 case LABEL_REF:
2335 case REG:
2336 return;
2338 case SET:
2339 /* Short cut for very common case. */
2340 replace_call_address (XEXP (x, 1), reg, addr);
2341 return;
2343 case CALL:
2344 /* Short cut for very common case. */
2345 replace_call_address (XEXP (x, 0), reg, addr);
2346 return;
2348 case MEM:
2349 /* If this MEM uses a reg other than the one we expected,
2350 something is wrong. */
2351 if (XEXP (x, 0) != reg)
2352 abort ();
2353 XEXP (x, 0) = addr;
2354 return;
2356 default:
2357 break;
2360 fmt = GET_RTX_FORMAT (code);
2361 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2363 if (fmt[i] == 'e')
2364 replace_call_address (XEXP (x, i), reg, addr);
2365 else if (fmt[i] == 'E')
2367 int j;
2368 for (j = 0; j < XVECLEN (x, i); j++)
2369 replace_call_address (XVECEXP (x, i, j), reg, addr);
2373 #endif
2375 /* Return the number of memory refs to addresses that vary
2376 in the rtx X. */
2378 static int
2379 count_nonfixed_reads (loop, x)
2380 const struct loop *loop;
2381 rtx x;
2383 enum rtx_code code;
2384 int i;
2385 const char *fmt;
2386 int value;
2388 if (x == 0)
2389 return 0;
2391 code = GET_CODE (x);
2392 switch (code)
2394 case PC:
2395 case CC0:
2396 case CONST_INT:
2397 case CONST_DOUBLE:
2398 case CONST:
2399 case SYMBOL_REF:
2400 case LABEL_REF:
2401 case REG:
2402 return 0;
2404 case MEM:
2405 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2406 + count_nonfixed_reads (loop, XEXP (x, 0)));
2408 default:
2409 break;
2412 value = 0;
2413 fmt = GET_RTX_FORMAT (code);
2414 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2416 if (fmt[i] == 'e')
2417 value += count_nonfixed_reads (loop, XEXP (x, i));
2418 if (fmt[i] == 'E')
2420 int j;
2421 for (j = 0; j < XVECLEN (x, i); j++)
2422 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2425 return value;
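/* Illustrative example: for

      (mem:SI (plus:SI (reg 100) (const_int 4)))

   where (reg 100) is modified inside LOOP, loop_invariant_p on the
   address does not return 1, so this MEM counts as one nonfixed read;
   a read through a loop-invariant address such as
   (mem:SI (symbol_ref "a")) contributes 0.  */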
2428 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2429 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2430 `unknown_address_altered', `unknown_constant_address_altered', and
2431 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2432 list `store_mems' in LOOP. */
2434 static void
2435 prescan_loop (loop)
2436 struct loop *loop;
2438 int level = 1;
2439 rtx insn;
2440 struct loop_info *loop_info = LOOP_INFO (loop);
2441 rtx start = loop->start;
2442 rtx end = loop->end;
2443 /* The label after END. Jumping here is just like falling off the
2444 end of the loop. We use next_nonnote_insn instead of next_label
2445 as a hedge against the (pathological) case where some actual insn
2446 might end up between the two. */
2447 rtx exit_target = next_nonnote_insn (end);
2449 loop_info->has_indirect_jump = indirect_jump_in_function;
2450 loop_info->pre_header_has_call = 0;
2451 loop_info->has_call = 0;
2452 loop_info->has_nonconst_call = 0;
2453 loop_info->has_volatile = 0;
2454 loop_info->has_tablejump = 0;
2455 loop_info->has_multiple_exit_targets = 0;
2456 loop->level = 1;
2458 loop_info->unknown_address_altered = 0;
2459 loop_info->unknown_constant_address_altered = 0;
2460 loop_info->store_mems = NULL_RTX;
2461 loop_info->first_loop_store_insn = NULL_RTX;
2462 loop_info->mems_idx = 0;
2463 loop_info->num_mem_sets = 0;
2466 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2467 insn = PREV_INSN (insn))
2469 if (GET_CODE (insn) == CALL_INSN)
2471 loop_info->pre_header_has_call = 1;
2472 break;
2476 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2477 insn = NEXT_INSN (insn))
2479 switch (GET_CODE (insn))
2481 case NOTE:
2482 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2484 ++level;
2485 /* Count number of loops contained in this one. */
2486 loop->level++;
2488 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2489 --level;
2490 break;
2492 case CALL_INSN:
2493 if (! CONST_OR_PURE_CALL_P (insn))
2495 loop_info->unknown_address_altered = 1;
2496 loop_info->has_nonconst_call = 1;
2498 else if (pure_call_p (insn))
2499 loop_info->has_nonconst_call = 1;
2500 loop_info->has_call = 1;
2501 if (can_throw_internal (insn))
2502 loop_info->has_multiple_exit_targets = 1;
2503 break;
2505 case JUMP_INSN:
2506 if (! loop_info->has_multiple_exit_targets)
2508 rtx set = pc_set (insn);
2510 if (set)
2512 rtx src = SET_SRC (set);
2513 rtx label1, label2;
2515 if (GET_CODE (src) == IF_THEN_ELSE)
2517 label1 = XEXP (src, 1);
2518 label2 = XEXP (src, 2);
2520 else
2522 label1 = src;
2523 label2 = NULL_RTX;
2528 if (label1 && label1 != pc_rtx)
2530 if (GET_CODE (label1) != LABEL_REF)
2532 /* Something tricky. */
2533 loop_info->has_multiple_exit_targets = 1;
2534 break;
2536 else if (XEXP (label1, 0) != exit_target
2537 && LABEL_OUTSIDE_LOOP_P (label1))
2539 /* A jump outside the current loop. */
2540 loop_info->has_multiple_exit_targets = 1;
2541 break;
2545 label1 = label2;
2546 label2 = NULL_RTX;
2548 while (label1);
2550 else
2552 /* A return, or something tricky. */
2553 loop_info->has_multiple_exit_targets = 1;
2556 /* FALLTHRU */
2558 case INSN:
2559 if (volatile_refs_p (PATTERN (insn)))
2560 loop_info->has_volatile = 1;
2562 if (GET_CODE (insn) == JUMP_INSN
2563 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2564 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2565 loop_info->has_tablejump = 1;
2567 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2568 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2569 loop_info->first_loop_store_insn = insn;
2571 if (flag_non_call_exceptions && can_throw_internal (insn))
2572 loop_info->has_multiple_exit_targets = 1;
2573 break;
2575 default:
2576 break;
2580 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2581 if (/* An exception thrown by a called function might land us
2582 anywhere. */
2583 ! loop_info->has_nonconst_call
2584 /* We don't want loads for MEMs moved to a location before the
2585 one at which their stack memory becomes allocated. (Note
2586 that this is not a problem for malloc, etc., since those
2587 require actual function calls.) */
2588 && ! current_function_calls_alloca
2589 /* There are ways to leave the loop other than falling off the
2590 end. */
2591 && ! loop_info->has_multiple_exit_targets)
2592 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2593 insn = NEXT_INSN (insn))
2594 for_each_rtx (&insn, insert_loop_mem, loop_info);
2596 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2597 that loop_invariant_p and load_mems can use true_dependence
2598 to determine what is really clobbered. */
2599 if (loop_info->unknown_address_altered)
2601 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2603 loop_info->store_mems
2604 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2606 if (loop_info->unknown_constant_address_altered)
2608 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2610 RTX_UNCHANGING_P (mem) = 1;
2611 loop_info->store_mems
2612 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
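/* Illustrative summary: after prescan_loop runs on a simple copy loop
   containing one store through a pseudo and no calls or jumps out, one
   would expect loop_info->num_mem_sets == 1, store_mems to hold the
   stored MEM, and has_call == has_multiple_exit_targets == 0, which is
   what later lets load_mems and loop_invariant_p reason precisely about
   memory.  */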
2616 /* Invalidate all loops containing LABEL. */
2618 static void
2619 invalidate_loops_containing_label (label)
2620 rtx label;
2622 struct loop *loop;
2623 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2624 loop->invalid = 1;
2627 /* Scan the function looking for loops. Record the start and end of each loop.
2628 Also mark as invalid loops any loops that contain a setjmp or are branched
2629 to from outside the loop. */
2631 static void
2632 find_and_verify_loops (f, loops)
2633 rtx f;
2634 struct loops *loops;
2636 rtx insn;
2637 rtx label;
2638 int num_loops;
2639 struct loop *current_loop;
2640 struct loop *next_loop;
2641 struct loop *loop;
2643 num_loops = loops->num;
2645 compute_luids (f, NULL_RTX, 0);
2647 /* If there are jumps to undefined labels,
2648 treat them as jumps out of any/all loops.
2649 This also avoids writing past end of tables when there are no loops. */
2650 uid_loop[0] = NULL;
2652 /* Find boundaries of loops, mark which loops are contained within
2653 loops, and invalidate loops that have setjmp. */
2655 num_loops = 0;
2656 current_loop = NULL;
2657 for (insn = f; insn; insn = NEXT_INSN (insn))
2659 if (GET_CODE (insn) == NOTE)
2660 switch (NOTE_LINE_NUMBER (insn))
2662 case NOTE_INSN_LOOP_BEG:
2663 next_loop = loops->array + num_loops;
2664 next_loop->num = num_loops;
2665 num_loops++;
2666 next_loop->start = insn;
2667 next_loop->outer = current_loop;
2668 current_loop = next_loop;
2669 break;
2671 case NOTE_INSN_LOOP_CONT:
2672 current_loop->cont = insn;
2673 break;
2675 case NOTE_INSN_LOOP_VTOP:
2676 current_loop->vtop = insn;
2677 break;
2679 case NOTE_INSN_LOOP_END:
2680 if (! current_loop)
2681 abort ();
2683 current_loop->end = insn;
2684 current_loop = current_loop->outer;
2685 break;
2687 default:
2688 break;
2691 if (GET_CODE (insn) == CALL_INSN
2692 && find_reg_note (insn, REG_SETJMP, NULL))
2694 /* In this case, we must invalidate our current loop and any
2695 enclosing loop. */
2696 for (loop = current_loop; loop; loop = loop->outer)
2698 loop->invalid = 1;
2699 if (loop_dump_stream)
2700 fprintf (loop_dump_stream,
2701 "\nLoop at %d ignored due to setjmp.\n",
2702 INSN_UID (loop->start));
2706 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2707 enclosing loop, but this doesn't matter. */
2708 uid_loop[INSN_UID (insn)] = current_loop;
2711 /* Any loop containing a label used in an initializer must be invalidated,
2712 because it can be jumped into from anywhere. */
2713 for (label = forced_labels; label; label = XEXP (label, 1))
2714 invalidate_loops_containing_label (XEXP (label, 0));
2716 /* Any loop containing a label used for an exception handler must be
2717 invalidated, because it can be jumped into from anywhere. */
2718 for_each_eh_label (invalidate_loops_containing_label);
2720 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2721 loop that it is not contained within, that loop is marked invalid.
2722 If any INSN or CALL_INSN uses a label's address, then the loop containing
2723 that label is marked invalid, because it could be jumped into from
2724 anywhere.
2726 Also look for blocks of code ending in an unconditional branch that
2727 exits the loop. If such a block is surrounded by a conditional
2728 branch around the block, move the block elsewhere (see below) and
2729 invert the jump to point to the code block. This may eliminate a
2730 label in our loop and will simplify processing by both us and a
2731 possible second cse pass. */
2733 for (insn = f; insn; insn = NEXT_INSN (insn))
2734 if (INSN_P (insn))
2736 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2738 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2740 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2741 if (note)
2742 invalidate_loops_containing_label (XEXP (note, 0));
2745 if (GET_CODE (insn) != JUMP_INSN)
2746 continue;
2748 mark_loop_jump (PATTERN (insn), this_loop);
2750 /* See if this is an unconditional branch outside the loop. */
2751 if (this_loop
2752 && (GET_CODE (PATTERN (insn)) == RETURN
2753 || (any_uncondjump_p (insn)
2754 && onlyjump_p (insn)
2755 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2756 != this_loop)))
2757 && get_max_uid () < max_uid_for_loop)
2759 rtx p;
2760 rtx our_next = next_real_insn (insn);
2761 rtx last_insn_to_move = NEXT_INSN (insn);
2762 struct loop *dest_loop;
2763 struct loop *outer_loop = NULL;
2765 /* Go backwards until we reach the start of the loop, a label,
2766 or a JUMP_INSN. */
2767 for (p = PREV_INSN (insn);
2768 GET_CODE (p) != CODE_LABEL
2769 && ! (GET_CODE (p) == NOTE
2770 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2771 && GET_CODE (p) != JUMP_INSN;
2772 p = PREV_INSN (p))
2775 /* Check for the case where we have a jump to an inner nested
2776 loop, and do not perform the optimization in that case. */
2778 if (JUMP_LABEL (insn))
2780 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2781 if (dest_loop)
2783 for (outer_loop = dest_loop; outer_loop;
2784 outer_loop = outer_loop->outer)
2785 if (outer_loop == this_loop)
2786 break;
2790 /* Make sure that the target of P is within the current loop. */
2792 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2793 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2794 outer_loop = this_loop;
2796 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2797 we have a block of code to try to move.
2799 We look backward and then forward from the target of INSN
2800 to find a BARRIER at the same loop depth as the target.
2801 If we find such a BARRIER, we make a new label for the start
2802 of the block, invert the jump in P and point it to that label,
2803 and move the block of code to the spot we found. */
2805 if (! outer_loop
2806 && GET_CODE (p) == JUMP_INSN
2807 && JUMP_LABEL (p) != 0
2808 /* Just ignore jumps to labels that were never emitted.
2809 These always indicate compilation errors. */
2810 && INSN_UID (JUMP_LABEL (p)) != 0
2811 && any_condjump_p (p) && onlyjump_p (p)
2812 && next_real_insn (JUMP_LABEL (p)) == our_next
2813 /* If it's not safe to move the sequence, then we
2814 mustn't try. */
2815 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2816 &last_insn_to_move))
2818 rtx target
2819 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2820 struct loop *target_loop = uid_loop[INSN_UID (target)];
2821 rtx loc, loc2;
2822 rtx tmp;
2824 /* Search for possible garbage past the conditional jumps
2825 and look for the last barrier. */
2826 for (tmp = last_insn_to_move;
2827 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2828 if (GET_CODE (tmp) == BARRIER)
2829 last_insn_to_move = tmp;
2831 for (loc = target; loc; loc = PREV_INSN (loc))
2832 if (GET_CODE (loc) == BARRIER
2833 /* Don't move things inside a tablejump. */
2834 && ((loc2 = next_nonnote_insn (loc)) == 0
2835 || GET_CODE (loc2) != CODE_LABEL
2836 || (loc2 = next_nonnote_insn (loc2)) == 0
2837 || GET_CODE (loc2) != JUMP_INSN
2838 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2839 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2840 && uid_loop[INSN_UID (loc)] == target_loop)
2841 break;
2843 if (loc == 0)
2844 for (loc = target; loc; loc = NEXT_INSN (loc))
2845 if (GET_CODE (loc) == BARRIER
2846 /* Don't move things inside a tablejump. */
2847 && ((loc2 = next_nonnote_insn (loc)) == 0
2848 || GET_CODE (loc2) != CODE_LABEL
2849 || (loc2 = next_nonnote_insn (loc2)) == 0
2850 || GET_CODE (loc2) != JUMP_INSN
2851 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2852 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2853 && uid_loop[INSN_UID (loc)] == target_loop)
2854 break;
2856 if (loc)
2858 rtx cond_label = JUMP_LABEL (p);
2859 rtx new_label = get_label_after (p);
2861 /* Ensure our label doesn't go away. */
2862 LABEL_NUSES (cond_label)++;
2864 /* Verify that uid_loop is large enough and that
2865 we can invert P. */
2866 if (invert_jump (p, new_label, 1))
2868 rtx q, r;
2870 /* If no suitable BARRIER was found, create a suitable
2871 one before TARGET. Since TARGET is a fall through
2872 path, we'll need to insert a jump around our block
2873 and add a BARRIER before TARGET.
2875 This creates an extra unconditional jump outside
2876 the loop. However, the benefits of removing rarely
2877 executed instructions from inside the loop usually
2878 outweigh the cost of the extra unconditional jump
2879 outside the loop. */
2880 if (loc == 0)
2882 rtx temp;
2884 temp = gen_jump (JUMP_LABEL (insn));
2885 temp = emit_jump_insn_before (temp, target);
2886 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2887 LABEL_NUSES (JUMP_LABEL (insn))++;
2888 loc = emit_barrier_before (target);
2891 /* Include the BARRIER after INSN and copy the
2892 block after LOC. */
2893 if (squeeze_notes (&new_label, &last_insn_to_move))
2894 abort ();
2895 reorder_insns (new_label, last_insn_to_move, loc);
2897 /* All those insns are now in TARGET_LOOP. */
2898 for (q = new_label;
2899 q != NEXT_INSN (last_insn_to_move);
2900 q = NEXT_INSN (q))
2901 uid_loop[INSN_UID (q)] = target_loop;
2903 /* The label jumped to by INSN is no longer a loop
2904 exit. Unless INSN does not have a label (e.g.,
2905 it is a RETURN insn), search loop->exit_labels
2906 to find its label_ref, and remove it. Also turn
2907 off LABEL_OUTSIDE_LOOP_P bit. */
2908 if (JUMP_LABEL (insn))
2910 for (q = 0, r = this_loop->exit_labels;
2911 r;
2912 q = r, r = LABEL_NEXTREF (r))
2913 if (XEXP (r, 0) == JUMP_LABEL (insn))
2915 LABEL_OUTSIDE_LOOP_P (r) = 0;
2916 if (q)
2917 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2918 else
2919 this_loop->exit_labels = LABEL_NEXTREF (r);
2920 break;
2923 for (loop = this_loop; loop && loop != target_loop;
2924 loop = loop->outer)
2925 loop->exit_count--;
2927 /* If we didn't find it, then something is
2928 wrong. */
2929 if (! r)
2930 abort ();
2933 /* P is now a jump outside the loop, so it must be put
2934 in loop->exit_labels, and marked as such.
2935 The easiest way to do this is to just call
2936 mark_loop_jump again for P. */
2937 mark_loop_jump (PATTERN (p), this_loop);
2939 /* If INSN now jumps to the insn after it,
2940 delete INSN. */
2941 if (JUMP_LABEL (insn) != 0
2942 && (next_real_insn (JUMP_LABEL (insn))
2943 == next_real_insn (insn)))
2944 delete_related_insns (insn);
2947 /* Continue the loop after where the conditional
2948 branch used to jump, since the only branch insn
2949 in the block (if it still remains) is an inter-loop
2950 branch and hence needs no processing. */
2951 insn = NEXT_INSN (cond_label);
2953 if (--LABEL_NUSES (cond_label) == 0)
2954 delete_related_insns (cond_label);
2956 /* This loop will be continued with NEXT_INSN (insn). */
2957 insn = PREV_INSN (insn);
2964 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2965 loops it is contained in, mark the target loop invalid.
2967 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2969 static void
2970 mark_loop_jump (x, loop)
2971 rtx x;
2972 struct loop *loop;
2974 struct loop *dest_loop;
2975 struct loop *outer_loop;
2976 int i;
2978 switch (GET_CODE (x))
2980 case PC:
2981 case USE:
2982 case CLOBBER:
2983 case REG:
2984 case MEM:
2985 case CONST_INT:
2986 case CONST_DOUBLE:
2987 case RETURN:
2988 return;
2990 case CONST:
2991 /* There could be a label reference in here. */
2992 mark_loop_jump (XEXP (x, 0), loop);
2993 return;
2995 case PLUS:
2996 case MINUS:
2997 case MULT:
2998 mark_loop_jump (XEXP (x, 0), loop);
2999 mark_loop_jump (XEXP (x, 1), loop);
3000 return;
3002 case LO_SUM:
3003 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3004 mark_loop_jump (XEXP (x, 1), loop);
3005 return;
3007 case SIGN_EXTEND:
3008 case ZERO_EXTEND:
3009 mark_loop_jump (XEXP (x, 0), loop);
3010 return;
3012 case LABEL_REF:
3013 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3015 /* Link together all labels that branch outside the loop. This
3016 is used by final_[bg]iv_value and the loop unrolling code. Also
3017 mark this LABEL_REF so we know that this branch should predict
3018 false. */
3020 /* A check to make sure the label is not in an inner nested loop,
3021 since this does not count as a loop exit. */
3022 if (dest_loop)
3024 for (outer_loop = dest_loop; outer_loop;
3025 outer_loop = outer_loop->outer)
3026 if (outer_loop == loop)
3027 break;
3029 else
3030 outer_loop = NULL;
3032 if (loop && ! outer_loop)
3034 LABEL_OUTSIDE_LOOP_P (x) = 1;
3035 LABEL_NEXTREF (x) = loop->exit_labels;
3036 loop->exit_labels = x;
3038 for (outer_loop = loop;
3039 outer_loop && outer_loop != dest_loop;
3040 outer_loop = outer_loop->outer)
3041 outer_loop->exit_count++;
3044 /* If this is inside a loop, but not in the current loop or one enclosed
3045 by it, it invalidates at least one loop. */
3047 if (! dest_loop)
3048 return;
3050 /* We must invalidate every nested loop containing the target of this
3051 label, except those that also contain the jump insn. */
3053 for (; dest_loop; dest_loop = dest_loop->outer)
3055 /* Stop when we reach a loop that also contains the jump insn. */
3056 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3057 if (dest_loop == outer_loop)
3058 return;
3060 /* If we get here, we know we need to invalidate a loop. */
3061 if (loop_dump_stream && ! dest_loop->invalid)
3062 fprintf (loop_dump_stream,
3063 "\nLoop at %d ignored due to multiple entry points.\n",
3064 INSN_UID (dest_loop->start));
3066 dest_loop->invalid = 1;
3068 return;
3070 case SET:
3071 /* If this is not setting pc, ignore. */
3072 if (SET_DEST (x) == pc_rtx)
3073 mark_loop_jump (SET_SRC (x), loop);
3074 return;
3076 case IF_THEN_ELSE:
3077 mark_loop_jump (XEXP (x, 1), loop);
3078 mark_loop_jump (XEXP (x, 2), loop);
3079 return;
3081 case PARALLEL:
3082 case ADDR_VEC:
3083 for (i = 0; i < XVECLEN (x, 0); i++)
3084 mark_loop_jump (XVECEXP (x, 0, i), loop);
3085 return;
3087 case ADDR_DIFF_VEC:
3088 for (i = 0; i < XVECLEN (x, 1); i++)
3089 mark_loop_jump (XVECEXP (x, 1, i), loop);
3090 return;
3092 default:
3093 /* Strictly speaking this is not a jump into the loop, only a possible
3094 jump out of the loop. However, we have no way to link the destination
3095 of this jump onto the list of exit labels. To be safe we mark this
3096 loop and any containing loops as invalid. */
3097 if (loop)
3099 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3101 if (loop_dump_stream && ! outer_loop->invalid)
3102 fprintf (loop_dump_stream,
3103 "\nLoop at %d ignored due to unknown exit jump.\n",
3104 INSN_UID (outer_loop->start));
3105 outer_loop->invalid = 1;
3108 return;
3112 /* Return nonzero if there is a label in the range from
3113 insn INSN to and including the insn whose luid is END.
3114 INSN must have an assigned luid (i.e., it must not have
3115 been previously created by loop.c). */
3117 static int
3118 labels_in_range_p (insn, end)
3119 rtx insn;
3120 int end;
3122 while (insn && INSN_LUID (insn) <= end)
3124 if (GET_CODE (insn) == CODE_LABEL)
3125 return 1;
3126 insn = NEXT_INSN (insn);
3129 return 0;
3132 /* Record that a memory reference X is being set. */
3134 static void
3135 note_addr_stored (x, y, data)
3136 rtx x;
3137 rtx y ATTRIBUTE_UNUSED;
3138 void *data;
3140 struct loop_info *loop_info = data;
3142 if (x == 0 || GET_CODE (x) != MEM)
3143 return;
3145 /* Count number of memory writes.
3146 This affects heuristics in strength_reduce. */
3147 loop_info->num_mem_sets++;
3149 /* BLKmode MEM means all memory is clobbered. */
3150 if (GET_MODE (x) == BLKmode)
3152 if (RTX_UNCHANGING_P (x))
3153 loop_info->unknown_constant_address_altered = 1;
3154 else
3155 loop_info->unknown_address_altered = 1;
3157 return;
3160 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3161 loop_info->store_mems);
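/* Illustrative example: a store

      (set (mem:SI (reg 100)) (reg 101))

   inside the loop adds (mem:SI (reg 100)) to loop_info->store_mems and
   increments num_mem_sets, while a BLKmode store (e.g. from a block
   move) instead sets unknown_address_altered (or the _constant_ variant
   when the MEM is RTX_UNCHANGING_P).  */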
3164 /* X is a value modified by an INSN that references a biv inside a loop
3165 exit test (i.e., X is somehow related to the value of the biv). If X
3166 is a pseudo that is used more than once, then the biv is (effectively)
3167 used more than once. DATA is a pointer to a loop_regs structure. */
3169 static void
3170 note_set_pseudo_multiple_uses (x, y, data)
3171 rtx x;
3172 rtx y ATTRIBUTE_UNUSED;
3173 void *data;
3175 struct loop_regs *regs = (struct loop_regs *) data;
3177 if (x == 0)
3178 return;
3180 while (GET_CODE (x) == STRICT_LOW_PART
3181 || GET_CODE (x) == SIGN_EXTRACT
3182 || GET_CODE (x) == ZERO_EXTRACT
3183 || GET_CODE (x) == SUBREG)
3184 x = XEXP (x, 0);
3186 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3187 return;
3189 /* If we do not have usage information, or if we know the register
3190 is used more than once, note that fact for check_dbra_loop. */
3191 if (REGNO (x) >= max_reg_before_loop
3192 || ! regs->array[REGNO (x)].single_usage
3193 || regs->array[REGNO (x)].single_usage == const0_rtx)
3194 regs->multiple_uses = 1;
3197 /* Return nonzero if the rtx X is invariant over the current loop.
3199 The value is 2 if we refer to something only conditionally invariant.
3201 A memory ref is invariant if it is not volatile and does not conflict
3202 with anything stored in `loop_info->store_mems'. */
3204 int
3205 loop_invariant_p (loop, x)
3206 const struct loop *loop;
3207 rtx x;
3209 struct loop_info *loop_info = LOOP_INFO (loop);
3210 struct loop_regs *regs = LOOP_REGS (loop);
3211 int i;
3212 enum rtx_code code;
3213 const char *fmt;
3214 int conditional = 0;
3215 rtx mem_list_entry;
3217 if (x == 0)
3218 return 1;
3219 code = GET_CODE (x);
3220 switch (code)
3222 case CONST_INT:
3223 case CONST_DOUBLE:
3224 case SYMBOL_REF:
3225 case CONST:
3226 return 1;
3228 case LABEL_REF:
3229 /* A LABEL_REF is normally invariant, however, if we are unrolling
3230 loops, and this label is inside the loop, then it isn't invariant.
3231 This is because each unrolled copy of the loop body will have
3232 a copy of this label. If this was invariant, then an insn loading
3233 the address of this label into a register might get moved outside
3234 the loop, and then each loop body would end up using the same label.
3236 We don't know the loop bounds here though, so just fail for all
3237 labels. */
3238 if (flag_unroll_loops)
3239 return 0;
3240 else
3241 return 1;
3243 case PC:
3244 case CC0:
3245 case UNSPEC_VOLATILE:
3246 return 0;
3248 case REG:
3249 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3250 since the reg might be set by initialization within the loop. */
3252 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3253 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3254 && ! current_function_has_nonlocal_goto)
3255 return 1;
3257 if (LOOP_INFO (loop)->has_call
3258 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3259 return 0;
3261 if (regs->array[REGNO (x)].set_in_loop < 0)
3262 return 2;
3264 return regs->array[REGNO (x)].set_in_loop == 0;
3266 case MEM:
3267 /* Volatile memory references must be rejected. Do this before
3268 checking for read-only items, so that volatile read-only items
3269 will be rejected also. */
3270 if (MEM_VOLATILE_P (x))
3271 return 0;
3273 /* See if there is any dependence between a store and this load. */
3274 mem_list_entry = loop_info->store_mems;
3275 while (mem_list_entry)
3277 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3278 x, rtx_varies_p))
3279 return 0;
3281 mem_list_entry = XEXP (mem_list_entry, 1);
3284 /* It's not invalidated by a store in memory
3285 but we must still verify the address is invariant. */
3286 break;
3288 case ASM_OPERANDS:
3289 /* Don't mess with insns declared volatile. */
3290 if (MEM_VOLATILE_P (x))
3291 return 0;
3292 break;
3294 default:
3295 break;
3298 fmt = GET_RTX_FORMAT (code);
3299 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3301 if (fmt[i] == 'e')
3303 int tem = loop_invariant_p (loop, XEXP (x, i));
3304 if (tem == 0)
3305 return 0;
3306 if (tem == 2)
3307 conditional = 1;
3309 else if (fmt[i] == 'E')
3311 int j;
3312 for (j = 0; j < XVECLEN (x, i); j++)
3314 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3315 if (tem == 0)
3316 return 0;
3317 if (tem == 2)
3318 conditional = 1;
3324 return 1 + conditional;
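/* Illustrative examples of the return values: with (reg 100) never set
   inside LOOP, loop_invariant_p (loop, (plus:SI (reg 100) (const_int 4)))
   returns 1; if (reg 100) instead has set_in_loop < 0 (its set is a
   candidate being moved), the result is 2, meaning invariant only
   conditionally; and any MEM that true_dependence says may conflict with
   an entry of store_mems yields 0.  */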
3327 /* Return nonzero if all the insns in the loop that set REG
3328 are INSN and the immediately following insns,
3329 and if each of those insns sets REG in an invariant way
3330 (not counting uses of REG in them).
3332 The value is 2 if some of these insns are only conditionally invariant.
3334 We assume that INSN itself is the first set of REG
3335 and that its source is invariant. */
3337 static int
3338 consec_sets_invariant_p (loop, reg, n_sets, insn)
3339 const struct loop *loop;
3340 int n_sets;
3341 rtx reg, insn;
3343 struct loop_regs *regs = LOOP_REGS (loop);
3344 rtx p = insn;
3345 unsigned int regno = REGNO (reg);
3346 rtx temp;
3347 /* Number of sets we have to insist on finding after INSN. */
3348 int count = n_sets - 1;
3349 int old = regs->array[regno].set_in_loop;
3350 int value = 0;
3351 int this;
3353 /* If N_SETS hit the limit, we can't rely on its value. */
3354 if (n_sets == 127)
3355 return 0;
3357 regs->array[regno].set_in_loop = 0;
3359 while (count > 0)
3361 enum rtx_code code;
3362 rtx set;
3364 p = NEXT_INSN (p);
3365 code = GET_CODE (p);
3367 /* If library call, skip to end of it. */
3368 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3369 p = XEXP (temp, 0);
3371 this = 0;
3372 if (code == INSN
3373 && (set = single_set (p))
3374 && GET_CODE (SET_DEST (set)) == REG
3375 && REGNO (SET_DEST (set)) == regno)
3377 this = loop_invariant_p (loop, SET_SRC (set));
3378 if (this != 0)
3379 value |= this;
3380 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3382 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3383 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3384 notes are OK. */
3385 this = (CONSTANT_P (XEXP (temp, 0))
3386 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3387 && loop_invariant_p (loop, XEXP (temp, 0))));
3388 if (this != 0)
3389 value |= this;
3392 if (this != 0)
3393 count--;
3394 else if (code != NOTE)
3396 regs->array[regno].set_in_loop = old;
3397 return 0;
3401 regs->array[regno].set_in_loop = old;
3402 /* If loop_invariant_p ever returned 2, we return 2. */
3403 return 1 + (value & 2);
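/* Illustrative example: a register initialized by the consecutive pair

      (set (reg 100) (const_int 0))
      (set (reg 100) (ior:SI (reg 100) (symbol_ref "x")))

   can be recognized here with n_sets == 2; uses of (reg 100) itself do
   not disqualify the sequence (set_in_loop is temporarily zeroed above),
   so move_movables can hoist both insns as a unit via m->consec.  */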
3406 #if 0
3407 /* I don't think this condition is sufficient to allow INSN
3408 to be moved, so we no longer test it. */
3410 /* Return 1 if all insns in the basic block of INSN and following INSN
3411 that set REG are invariant according to TABLE. */
3413 static int
3414 all_sets_invariant_p (reg, insn, table)
3415 rtx reg, insn;
3416 short *table;
3418 rtx p = insn;
3419 int regno = REGNO (reg);
3421 while (1)
3423 enum rtx_code code;
3424 p = NEXT_INSN (p);
3425 code = GET_CODE (p);
3426 if (code == CODE_LABEL || code == JUMP_INSN)
3427 return 1;
3428 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3429 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3430 && REGNO (SET_DEST (PATTERN (p))) == regno)
3432 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3433 return 0;
3437 #endif /* 0 */
3439 /* Look at all uses (not sets) of registers in X. For each, if it is
3440 the single use, set regs->array[REGNO].single_usage to INSN; if there
3441 was a previous use in a different insn, set it to const0_rtx. */
3443 static void
3444 find_single_use_in_loop (regs, insn, x)
3445 struct loop_regs *regs;
3446 rtx insn;
3447 rtx x;
3449 enum rtx_code code = GET_CODE (x);
3450 const char *fmt = GET_RTX_FORMAT (code);
3451 int i, j;
3453 if (code == REG)
3454 regs->array[REGNO (x)].single_usage
3455 = (regs->array[REGNO (x)].single_usage != 0
3456 && regs->array[REGNO (x)].single_usage != insn)
3457 ? const0_rtx : insn;
3459 else if (code == SET)
3461 /* Don't count SET_DEST if it is a REG; otherwise count things
3462 in SET_DEST because if a register is partially modified, it won't
3463 show up as a potential movable so we don't care how USAGE is set
3464 for it. */
3465 if (GET_CODE (SET_DEST (x)) != REG)
3466 find_single_use_in_loop (regs, insn, SET_DEST (x));
3467 find_single_use_in_loop (regs, insn, SET_SRC (x));
3469 else
3470 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3472 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3473 find_single_use_in_loop (regs, insn, XEXP (x, i));
3474 else if (fmt[i] == 'E')
3475 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3476 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
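/* Illustrative example: if (reg 100) is used by exactly one insn I in
   the loop, regs->array[100].single_usage ends up == I; a second use in
   a different insn demotes it to const0_rtx, which downstream code reads
   as "used more than once".  */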
3480 /* Count and record any set in X which is contained in INSN. Update
3481 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3482 in X. */
3484 static void
3485 count_one_set (regs, insn, x, last_set)
3486 struct loop_regs *regs;
3487 rtx insn, x;
3488 rtx *last_set;
3490 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3491 /* Don't move a reg that has an explicit clobber.
3492 It's not worth the pain to try to do it correctly. */
3493 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3495 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3497 rtx dest = SET_DEST (x);
3498 while (GET_CODE (dest) == SUBREG
3499 || GET_CODE (dest) == ZERO_EXTRACT
3500 || GET_CODE (dest) == SIGN_EXTRACT
3501 || GET_CODE (dest) == STRICT_LOW_PART)
3502 dest = XEXP (dest, 0);
3503 if (GET_CODE (dest) == REG)
3505 int i;
3506 int regno = REGNO (dest);
3507 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3509 /* If this is the first setting of this reg
3510 in current basic block, and it was set before,
3511 it must be set in two basic blocks, so it cannot
3512 be moved out of the loop. */
3513 if (regs->array[regno].set_in_loop > 0
3514 && last_set[regno] == 0)
3515 regs->array[regno+i].may_not_optimize = 1;
3516 /* If this is not first setting in current basic block,
3517 see if reg was used in between previous one and this.
3518 If so, neither one can be moved. */
3519 if (last_set[regno] != 0
3520 && reg_used_between_p (dest, last_set[regno], insn))
3521 regs->array[regno+i].may_not_optimize = 1;
3522 if (regs->array[regno+i].set_in_loop < 127)
3523 ++regs->array[regno+i].set_in_loop;
3524 last_set[regno+i] = insn;
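/* Note on the saturation at 127 above: once set_in_loop reaches 127 it
   is no longer incremented, so the count can no longer be trusted as
   exact; consec_sets_invariant_p correspondingly refuses any movable
   whose n_sets is 127.  */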
3530 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3531 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3532 contained in insn INSN is used by any insn that precedes INSN in
3533 cyclic order starting from the loop entry point.
3535 We don't want to use INSN_LUID here because if we restrict INSN to those
3536 that have a valid INSN_LUID, it means we cannot move an invariant out
3537 from an inner loop past two loops. */
3539 static int
3540 loop_reg_used_before_p (loop, set, insn)
3541 const struct loop *loop;
3542 rtx set, insn;
3544 rtx reg = SET_DEST (set);
3545 rtx p;
3547 /* Scan forward checking for register usage. If we hit INSN, we
3548 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3549 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3551 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3552 return 1;
3554 if (p == loop->end)
3555 p = loop->start;
3558 return 0;
3562 /* Information we collect about arrays that we might want to prefetch. */
3563 struct prefetch_info
3565 struct iv_class *class; /* Class this prefetch is based on. */
3566 struct induction *giv; /* GIV this prefetch is based on. */
3567 rtx base_address; /* Start prefetching from this address plus
3568 index. */
3569 HOST_WIDE_INT index;
3570 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3571 iteration. */
3572 unsigned int bytes_accesed; /* Sum of sizes of all accesses to this
3573 prefetch area in one iteration. */
3574 unsigned int total_bytes; /* Total bytes loop will access in this block.
3575 This is set only for loops with known
3576 iteration counts and is 0xffffffff
3577 otherwise. */
3578 unsigned int write : 1; /* 1 for read/write prefetches. */
3579 unsigned int prefetch_in_loop : 1;
3580 /* 1 for those chosen for prefetching within the loop. */
3581 unsigned int prefetch_before_loop : 1;
3582 /* 1 for those chosen for prefetching before the loop. */
3585 /* Data used by check_store function. */
3586 struct check_store_data
3588 rtx mem_address;
3589 int mem_write;
3592 static void check_store PARAMS ((rtx, rtx, void *));
3593 static void emit_prefetch_instructions PARAMS ((struct loop *));
3594 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3596 /* Set mem_write when mem_address is found. Used as callback to
3597 note_stores. */
3598 static void
3599 check_store (x, pat, data)
3600 rtx x, pat ATTRIBUTE_UNUSED;
3601 void *data;
3603 struct check_store_data *d = (struct check_store_data *) data;
3605 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3606 d->mem_write = 1;
3609 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3610 important to get some addresses combined. Later more sophisticated
3611 transformations can be added when necessary.
3613 ??? Same trick with swapping operand is done at several other places.
3614 It can be nice to develop some common way to handle this. */
3616 static int
3617 rtx_equal_for_prefetch_p (x, y)
3618 rtx x, y;
3620 int i;
3621 int j;
3622 enum rtx_code code = GET_CODE (x);
3623 const char *fmt;
3625 if (x == y)
3626 return 1;
3627 if (code != GET_CODE (y))
3628 return 0;
3630 code = GET_CODE (x);
3632 if (GET_RTX_CLASS (code) == 'c')
3634 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3635 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3636 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3637 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3639 /* Compare the elements. If any pair of corresponding elements fails to
3640 match, return 0 for the whole thing. */
3642 fmt = GET_RTX_FORMAT (code);
3643 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3645 switch (fmt[i])
3647 case 'w':
3648 if (XWINT (x, i) != XWINT (y, i))
3649 return 0;
3650 break;
3652 case 'i':
3653 if (XINT (x, i) != XINT (y, i))
3654 return 0;
3655 break;
3657 case 'E':
3658 /* Two vectors must have the same length. */
3659 if (XVECLEN (x, i) != XVECLEN (y, i))
3660 return 0;
3662 /* And the corresponding elements must match. */
3663 for (j = 0; j < XVECLEN (x, i); j++)
3664 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3665 XVECEXP (y, i, j)) == 0)
3666 return 0;
3667 break;
3669 case 'e':
3670 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3671 return 0;
3672 break;
3674 case 's':
3675 if (strcmp (XSTR (x, i), XSTR (y, i)))
3676 return 0;
3677 break;
3679 case 'u':
3680 /* These are just backpointers, so they don't matter. */
3681 break;
3683 case '0':
3684 break;
3686 /* It is believed that rtx's at this level will never
3687 contain anything but integers and other rtx's,
3688 except for within LABEL_REFs and SYMBOL_REFs. */
3689 default:
3690 abort ();
3693 return 1;
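/* For example, this function considers the addresses
   (plus (reg 100) (symbol_ref "a")) and (plus (symbol_ref "a") (reg 100))
   equal, because PLUS is commutative and the operands match when swapped;
   plain rtx_equal_p would treat them as different.  */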
3696 /* Remove constant addition value from the expression X (when present)
3697 and return it. */
3699 static HOST_WIDE_INT
3700 remove_constant_addition (x)
3701 rtx *x;
3703 HOST_WIDE_INT addval = 0;
3704 rtx exp = *x;
3706 /* Avoid clobbering a shared CONST expression. */
3707 if (GET_CODE (exp) == CONST)
3709 if (GET_CODE (XEXP (exp, 0)) == PLUS
3710 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3711 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3713 *x = XEXP (XEXP (exp, 0), 0);
3714 return INTVAL (XEXP (XEXP (exp, 0), 1));
3716 return 0;
3719 if (GET_CODE (exp) == CONST_INT)
3721 addval = INTVAL (exp);
3722 *x = const0_rtx;
3725 /* For a PLUS expression, recurse on both operands. */
3726 else if (GET_CODE (exp) == PLUS)
3728 addval += remove_constant_addition (&XEXP (exp, 0));
3729 addval += remove_constant_addition (&XEXP (exp, 1));
3731 /* If one of the operands was reduced to zero, remove the extra
3732 zero from the expression. */
3733 if (XEXP (exp, 0) == const0_rtx)
3734 *x = XEXP (exp, 1);
3735 else if (XEXP (exp, 1) == const0_rtx)
3736 *x = XEXP (exp, 0);
3739 return addval;
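/* For example, given *X == (plus (reg 100) (const_int 16)) this returns 16
   and rewrites *X to (reg 100); given *X == (const_int 8) it returns 8 and
   rewrites *X to const0_rtx.  */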
3742 /* Attempt to identify accesses to arrays that are most likely to cause cache
3743 misses, and emit prefetch instructions a few prefetch blocks forward.
3745 To detect the arrays we use the GIV information that was collected by the
3746 strength reduction pass.
3748 The prefetch instructions are generated after the GIV information has been
3749 collected and before the strength reduction process. The new GIVs are
3750 injected into the strength reduction tables, so the prefetch addresses are
3751 optimized as well.
3753 GIVs are split into base address, stride, and constant addition values.
3754 GIVs with the same address, stride and close addition values are combined
3755 into a single prefetch. Writes to GIVs are also detected, so that prefetch
3756 for write instructions can be used for the blocks we write to, on machines
3757 that support write prefetches.
3759 Several heuristics are used to determine when to prefetch. They are
3760 controlled by defined symbols that can be overridden for each target. */
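/* For instance, in the C loop

       for (i = 0; i < n; i++)
         sum += a[i] + a[i + 2];

   the two accesses share the same base and stride, and their constant
   indexes differ by only 8 bytes (for 4-byte elements), so they are
   merged into a single prefetch stream based at A with stride 4.  */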
3762 static void
3763 emit_prefetch_instructions (loop)
3764 struct loop *loop;
3766 int num_prefetches = 0;
3767 int num_real_prefetches = 0;
3768 int num_real_write_prefetches = 0;
3769 int ahead;
3770 int i;
3771 struct iv_class *bl;
3772 struct induction *iv;
3773 struct prefetch_info info[MAX_PREFETCHES];
3774 struct loop_ivs *ivs = LOOP_IVS (loop);
3776 if (!HAVE_prefetch)
3777 return;
3779 /* Consider only loops without calls. When a call is made, the loop
3780 is probably slow enough that the memory latency is hidden anyway. */
3781 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3783 if (loop_dump_stream)
3784 fprintf (loop_dump_stream, "Prefetch: ignoring loop - has call.\n");
3786 return;
3789 if (PREFETCH_NO_LOW_LOOPCNT
3790 && LOOP_INFO (loop)->n_iterations
3791 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3793 if (loop_dump_stream)
3794 fprintf (loop_dump_stream,
3795 "Prefetch: ignoring loop - not enought iterations.\n");
3796 return;
3799 /* Search all induction variables and pick those interesting for the prefetch
3800 machinery. */
3801 for (bl = ivs->list; bl; bl = bl->next)
3803 struct induction *biv = bl->biv, *biv1;
3804 int basestride = 0;
3806 biv1 = biv;
3808 /* Expect all BIVs to be executed in each iteration. This makes our
3809 analysis more conservative. */
3810 while (biv1)
3812 /* Discard non-constant additions that we can't handle well yet, and
3813 BIVs that are executed multiple times; such BIVs ought to be
3814 handled in the nested loop. We accept not_every_iteration BIVs,
3815 since these only result in larger strides and make our
3816 heuristics more conservative.
3817 ??? What does the last sentence mean? */
3818 if (GET_CODE (biv1->add_val) != CONST_INT)
3820 if (loop_dump_stream)
3822 fprintf (loop_dump_stream,
3823 "Prefetch: biv %i ignored: non-constant addition at insn %i:",
3824 REGNO (biv1->src_reg), INSN_UID (biv1->insn));
3825 print_rtl (loop_dump_stream, biv1->add_val);
3826 fprintf (loop_dump_stream, "\n");
3828 break;
3831 if (biv1->maybe_multiple)
3833 if (loop_dump_stream)
3835 fprintf (loop_dump_stream,
3836 "Prefetch: biv %i ignored: maybe_multiple at insn %i:",
3837 REGNO (biv1->src_reg), INSN_UID (biv1->insn));
3838 print_rtl (loop_dump_stream, biv1->add_val);
3839 fprintf (loop_dump_stream, "\n");
3841 break;
3844 basestride += INTVAL (biv1->add_val);
3845 biv1 = biv1->next_iv;
3848 if (biv1 || !basestride)
3849 continue;
3851 for (iv = bl->giv; iv; iv = iv->next_iv)
3853 rtx address;
3854 rtx temp;
3855 HOST_WIDE_INT index = 0;
3856 int add = 1;
3857 HOST_WIDE_INT stride;
3858 struct check_store_data d;
3859 int size = GET_MODE_SIZE (iv->mode);
3861 /* There are several reasons why an induction variable is not
3862 interesting to us. */
3863 if (iv->giv_type != DEST_ADDR
3864 /* We are interested only in constant stride memory references
3865 in order to be able to compute density easily. */
3866 || GET_CODE (iv->mult_val) != CONST_INT
3867 /* Don't handle reversed order prefetches, since they are usually
3868 ineffective. Later we may be able to reverse such BIVs. */
3869 || ((stride = INTVAL (iv->mult_val) * basestride) < 0
3870 && PREFETCH_NO_REVERSE_ORDER)
3871 /* Prefetching of accesses with such an extreme stride is probably
3872 not worthwhile, either. */
3873 || (PREFETCH_NO_EXTREME_STRIDE
3874 && stride > PREFETCH_EXTREME_STRIDE)
3875 /* Ignore GIVs with varying add values; we can't predict the
3876 value for the next iteration. */
3877 || !loop_invariant_p (loop, iv->add_val)
3878 /* Ignore GIVs in the nested loops; they ought to have been
3879 handled already. */
3880 || iv->maybe_multiple)
3882 if (loop_dump_stream)
3883 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %i\n",
3884 INSN_UID (iv->insn));
3885 continue;
3888 /* Determine the pointer to the basic array we are examining. It is
3889 the sum of the BIV's initial value and the GIV's add_val. */
3890 index = 0;
3892 address = copy_rtx (iv->add_val);
3893 temp = copy_rtx (bl->initial_value);
3895 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3896 index = remove_constant_addition (&address);
3898 index += size;
3899 d.mem_write = 0;
3900 d.mem_address = *iv->location;
3902 /* When the GIV is not always executed, we might be better off by
3903 not dirtying the cache pages. */
3904 if (PREFETCH_NOT_ALWAYS || iv->always_executed)
3905 note_stores (PATTERN (iv->insn), check_store, &d);
3907 /* Attempt to find another prefetch to the same array and see if we
3908 can merge this one. */
3909 for (i = 0; i < num_prefetches; i++)
3910 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3911 && stride == info[i].stride)
3913 /* If both access the same array (the same location, just with
3914 a small difference in the constant indexes), merge the
3915 prefetches. Prefetch only the later one; the earlier one will
3916 be covered by the previous iteration.
3917 The threshold of 4096 is arbitrary. It should not be too small,
3918 but also no bigger than the small portion of memory usually
3919 traversed by a single loop. */
3920 if (index >= info[i].index && index - info[i].index < 4096)
3922 info[i].write |= d.mem_write;
3923 info[i].bytes_accessed += size;
3924 info[i].index = index;
3925 info[i].giv = iv;
3926 info[i].class = bl;
3927 info[i].base_address = address;
3928 add = 0;
3929 break;
3932 if (index < info[i].index && info[i].index - index < 4096)
3934 info[i].write |= d.mem_write;
3935 info[i].bytes_accessed += size;
3936 add = 0;
3937 break;
3941 /* Merging failed. */
3942 if (add)
3944 info[num_prefetches].giv = iv;
3945 info[num_prefetches].class = bl;
3946 info[num_prefetches].index = index;
3947 info[num_prefetches].stride = stride;
3948 info[num_prefetches].base_address = address;
3949 info[num_prefetches].write = d.mem_write;
3950 info[num_prefetches].bytes_accessed = size;
3951 num_prefetches++;
3952 if (num_prefetches >= MAX_PREFETCHES)
3954 if (loop_dump_stream)
3955 fprintf (loop_dump_stream,
3956 "Maximal number of prefetches exceeded.\n");
3957 return;
3963 for (i = 0; i < num_prefetches; i++)
3965 /* Attempt to calculate the number of bytes fetched by the loop.
3966 Avoid overflow. */
3967 if (LOOP_INFO (loop)->n_iterations
3968 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
3969 >= LOOP_INFO (loop)->n_iterations))
3970 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
3971 else
3972 info[i].total_bytes = 0xffffffff;
3974 /* Prefetch is worthwhile only when the loads/stores are dense. */
3975 if (PREFETCH_ONLY_DENSE_MEM
3976 && info[i].bytes_accesed * 256 / info[i].stride > PREFETCH_DENSE_MEM
3977 && (info[i].total_bytes / PREFETCH_BLOCK
3978 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
3980 info[i].prefetch_before_loop = 1;
3981 info[i].prefetch_in_loop
3982 = (info[i].total_bytes / PREFETCH_BLOCK
3983 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
3985 else
3986 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
3988 if (info[i].prefetch_in_loop)
3990 num_real_prefetches += ((info[i].stride + PREFETCH_BLOCK - 1)
3991 / PREFETCH_BLOCK);
3992 if (info[i].write)
3993 num_real_write_prefetches
3994 += (info[i].stride + PREFETCH_BLOCK - 1) / PREFETCH_BLOCK;
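/* A worked example of the density test above: with a 16-byte stride and
   8 bytes actually accessed per iteration, the scaled density is
   8 * 256 / 16 == 128, i.e. half of each 16-byte stretch, which is then
   compared against PREFETCH_DENSE_MEM.  With the fallback PREFETCH_BLOCK
   of 32, that stride also counts as one real prefetch per iteration,
   since (16 + 32 - 1) / 32 == 1.  */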
3998 if (loop_dump_stream)
4000 for (i = 0; i < num_prefetches; i++)
4002 fprintf (loop_dump_stream, "Prefetch insn %i address: ",
4003 INSN_UID (info[i].giv->insn));
4004 print_rtl (loop_dump_stream, info[i].base_address);
4005 fprintf (loop_dump_stream, " Index: ");
4006 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4007 fprintf (loop_dump_stream, " stride: ");
4008 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4009 fprintf (loop_dump_stream,
4010 " density: %i%% total_bytes: %u%sin loop: %s before: %s\n",
4011 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4012 info[i].total_bytes,
4013 info[i].write ? " read/write " : " read only ",
4014 info[i].prefetch_in_loop ? "yes" : "no",
4015 info[i].prefetch_before_loop ? "yes" : "no");
4018 fprintf (loop_dump_stream, "Real prefetches needed: %i (write: %i)\n",
4019 num_real_prefetches, num_real_write_prefetches);
4022 if (!num_real_prefetches)
4023 return;
4025 ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches;
4027 if (!ahead)
4028 return;
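/* E.g. with the fallback SIMULTANEOUS_PREFETCHES of 3, a loop needing one
   real prefetch is prefetched three blocks ahead, while a loop needing
   four real prefetches gets AHEAD == 0 and is given up on, since the
   simultaneous prefetch capacity would be oversubscribed.  */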
4030 for (i = 0; i < num_prefetches; i++)
4032 if (info[i].prefetch_in_loop)
4034 int y;
4036 for (y = 0; y < ((info[i].stride + PREFETCH_BLOCK - 1)
4037 / PREFETCH_BLOCK); y++)
4039 rtx loc = copy_rtx (*info[i].giv->location);
4040 rtx insn;
4041 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4042 rtx before_insn = info[i].giv->insn;
4043 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4045 /* We can save some effort by offsetting the address on
4046 architectures with offsettable memory references. */
4047 if (offsettable_address_p (0, VOIDmode, loc))
4048 loc = plus_constant (loc, bytes_ahead);
4049 else
4051 rtx reg = gen_reg_rtx (Pmode);
4052 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4053 GEN_INT (bytes_ahead), reg,
4054 0, before_insn);
4055 loc = reg;
4058 /* Make sure the address operand is valid for prefetch. */
4059 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4060 (loc,
4061 insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4062 loc = force_reg (Pmode, loc);
4063 emit_insn_before (gen_prefetch (loc, GEN_INT (info[i].write),
4064 GEN_INT (3)),
4065 before_insn);
4067 /* Check all insns emitted and record the new GIV
4068 information. */
4069 insn = NEXT_INSN (prev_insn);
4070 while (insn != before_insn)
4072 insn = check_insn_for_givs (loop, insn,
4073 info[i].giv->always_executed,
4074 info[i].giv->maybe_multiple);
4075 insn = NEXT_INSN (insn);
4080 if (info[i].prefetch_before_loop)
4082 int y;
4084 /* Emit INSNs before the loop to fetch the first cache lines. */
4085 for (y = 0;
4086 (!info[i].prefetch_in_loop || y < ahead)
4087 && y * PREFETCH_BLOCK < (int) info[i].total_bytes; y ++)
4089 rtx reg = gen_reg_rtx (Pmode);
4090 rtx loop_start = loop->start;
4091 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4092 info[i].giv->add_val,
4093 GEN_INT (y * PREFETCH_BLOCK));
4095 loop_iv_add_mult_emit_before (loop, info[i].class->initial_value,
4096 info[i].giv->mult_val,
4097 add_val, reg, 0, loop_start);
4098 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4099 GEN_INT (3)),
4100 loop_start);
4105 return;
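/* Each prefetch insn emitted above has the form
   (prefetch ADDRESS RW LOCALITY), where RW is 1 for a prefetch-for-write
   and the locality constant 3 requests maximal temporal locality, per the
   standard `prefetch' pattern.  */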
4108 /* A "basic induction variable" or biv is a pseudo reg that is set
4109 (within this loop) only by incrementing or decrementing it. */
4110 /* A "general induction variable" or giv is a pseudo reg whose
4111 value is a linear function of a biv. */
4113 /* Bivs are recognized by `basic_induction_var';
4114 Givs by `general_induction_var'. */
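/* For example, in

       for (i = 0; i < n; i++)
         a[i] = 0;

   the counter I is a biv, since it is set only by incrementing it, and
   the address of a[i], which is (char *) a + 4 * i for 4-byte elements,
   is a giv, since it is a linear function of the biv I.  */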
4116 /* Communication with routines called via `note_stores'. */
4118 static rtx note_insn;
4120 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4122 static rtx addr_placeholder;
4124 /* ??? Unfinished optimizations, and possible future optimizations,
4125 for the strength reduction code. */
4127 /* ??? The interaction of biv elimination, and recognition of 'constant'
4128 bivs, may cause problems. */
4130 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4131 performance problems.
4133 Perhaps don't eliminate things that can be combined with an addressing
4134 mode. Find all givs that have the same biv, mult_val, and add_val;
4135 then for each giv, check to see if its only use dies in a following
4136 memory address. If so, generate a new memory address and check to see
4137 if it is valid. If it is valid, then store the modified memory address,
4138 otherwise, mark the giv as not done so that it will get its own iv. */
4140 /* ??? Could try to optimize branches when it is known that a biv is always
4141 positive. */
4143 /* ??? When replacing a biv in a compare insn, we should replace it with the
4144 closest giv so that an optimized branch can still be recognized by the
4145 combiner, e.g. the VAX acb insn. */
4147 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4148 was rerun in loop_optimize whenever a register was added or moved.
4149 Also, some of the optimizations could be a little less conservative. */
4151 /* Scan the loop body and call FNCALL for each insn. In addition to the
4152 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
4153 the callback.
4155 NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
4156 for every loop iteration except for the last one.
4158 MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every
4159 loop iteration.
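/* Both induction variable scans below use this walker, e.g.

       for_each_insn_in_loop (loop, check_insn_for_bivs);
       for_each_insn_in_loop (loop, check_insn_for_givs);

   Each callback returns the insn it last consumed, so a callback can skip
   over consecutive insns it has already processed.  */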
4161 void
4162 for_each_insn_in_loop (loop, fncall)
4163 struct loop *loop;
4164 loop_insn_callback fncall;
4166 /* This is 1 if current insn is not executed at least once for every loop
4167 iteration. */
4168 int not_every_iteration = 0;
4169 int maybe_multiple = 0;
4170 int past_loop_latch = 0;
4171 int loop_depth = 0;
4172 rtx p;
4174 /* If loop_scan_start points to the loop exit test, we have to be wary of
4175 subversive use of gotos inside expression statements. */
4176 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4177 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4179 /* Scan through loop to find all possible bivs. */
4181 for (p = next_insn_in_loop (loop, loop->scan_start);
4182 p != NULL_RTX;
4183 p = next_insn_in_loop (loop, p))
4185 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4187 /* Past CODE_LABEL, we get to insns that may be executed multiple
4188 times. The only way we can be sure that they can't is if every
4189 jump insn between here and the end of the loop either
4190 returns, exits the loop, is a jump to a location that is still
4191 behind the label, or is a jump to the loop start. */
4193 if (GET_CODE (p) == CODE_LABEL)
4195 rtx insn = p;
4197 maybe_multiple = 0;
4199 while (1)
4201 insn = NEXT_INSN (insn);
4202 if (insn == loop->scan_start)
4203 break;
4204 if (insn == loop->end)
4206 if (loop->top != 0)
4207 insn = loop->top;
4208 else
4209 break;
4210 if (insn == loop->scan_start)
4211 break;
4214 if (GET_CODE (insn) == JUMP_INSN
4215 && GET_CODE (PATTERN (insn)) != RETURN
4216 && (!any_condjump_p (insn)
4217 || (JUMP_LABEL (insn) != 0
4218 && JUMP_LABEL (insn) != loop->scan_start
4219 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4221 maybe_multiple = 1;
4222 break;
4227 /* Past a jump, we get to insns for which we can't count
4228 on whether they will be executed during each iteration. */
4229 /* This code appears twice in strength_reduce. There is also similar
4230 code in scan_loop. */
4231 if (GET_CODE (p) == JUMP_INSN
4232 /* If we enter the loop in the middle, and scan around to the
4233 beginning, don't set not_every_iteration for that.
4234 This can be any kind of jump, since we want to know if insns
4235 will be executed if the loop is executed. */
4236 && !(JUMP_LABEL (p) == loop->top
4237 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4238 && any_uncondjump_p (p))
4239 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4241 rtx label = 0;
4243 /* If this is a jump outside the loop, then it also doesn't
4244 matter. Check to see if the target of this branch is on the
4245 loop->exits_labels list. */
4247 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4248 if (XEXP (label, 0) == JUMP_LABEL (p))
4249 break;
4251 if (!label)
4252 not_every_iteration = 1;
4255 else if (GET_CODE (p) == NOTE)
4257 /* At the virtual top of a converted loop, insns are again known to
4258 be executed each iteration: logically, the loop begins here
4259 even though the exit code has been duplicated.
4261 Insns are also again known to be executed each iteration at
4262 the LOOP_CONT note. */
4263 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4264 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4265 && loop_depth == 0)
4266 not_every_iteration = 0;
4267 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4268 loop_depth++;
4269 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4270 loop_depth--;
4273 /* Note if we pass a loop latch. If we do, then we cannot clear
4274 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4275 a loop since a jump before the last CODE_LABEL may have started
4276 a new loop iteration.
4278 Note that LOOP_TOP is only set for rotated loops and we need
4279 this check for all loops, so compare against the CODE_LABEL
4280 which immediately follows LOOP_START. */
4281 if (GET_CODE (p) == JUMP_INSN
4282 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4283 past_loop_latch = 1;
4285 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4286 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4287 or not an insn is known to be executed each iteration of the
4288 loop, whether or not any iterations are known to occur.
4290 Therefore, if we have just passed a label and have no more labels
4291 between here and the test insn of the loop, and we have not passed
4292 a jump to the top of the loop, then we know these insns will be
4293 executed each iteration. */
4295 if (not_every_iteration
4296 && !past_loop_latch
4297 && GET_CODE (p) == CODE_LABEL
4298 && no_labels_between_p (p, loop->end)
4299 && loop_insn_first_p (p, loop->cont))
4300 not_every_iteration = 0;
4304 static void
4305 loop_bivs_find (loop)
4306 struct loop *loop;
4308 struct loop_regs *regs = LOOP_REGS (loop);
4309 struct loop_ivs *ivs = LOOP_IVS (loop);
4310 /* Temporary list pointers for traversing ivs->list. */
4311 struct iv_class *bl, **backbl;
4313 ivs->list = 0;
4315 for_each_insn_in_loop (loop, check_insn_for_bivs);
4317 /* Scan ivs->list to remove all regs that proved not to be bivs.
4318 Make a sanity check against regs->n_times_set. */
4319 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4321 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4322 /* The above happens if the register is modified by a SUBREG, etc. */
4323 /* Make sure it is not recognized as a basic induction var: */
4324 || regs->array[bl->regno].n_times_set != bl->biv_count
4325 /* If never incremented, it is an invariant that we decided not to
4326 move. So leave it alone. */
4327 || ! bl->incremented)
4329 if (loop_dump_stream)
4330 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4331 bl->regno,
4332 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4333 ? "not induction variable"
4334 : (! bl->incremented ? "never incremented"
4335 : "count error")));
4337 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4338 *backbl = bl->next;
4340 else
4342 backbl = &bl->next;
4344 if (loop_dump_stream)
4345 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4351 /* Determine how BIVs are initialized by looking through the pre-header
4352 extended basic block. */
4353 static void
4354 loop_bivs_init_find (loop)
4355 struct loop *loop;
4357 struct loop_ivs *ivs = LOOP_IVS (loop);
4358 /* Temporary list pointers for traversing ivs->list. */
4359 struct iv_class *bl;
4360 int call_seen;
4361 rtx p;
4363 /* Find initial value for each biv by searching backwards from loop_start,
4364 halting at first label. Also record any test condition. */
4366 call_seen = 0;
4367 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4369 rtx test;
4371 note_insn = p;
4373 if (GET_CODE (p) == CALL_INSN)
4374 call_seen = 1;
4376 if (INSN_P (p))
4377 note_stores (PATTERN (p), record_initial, ivs);
4379 /* Record any test of a biv that branches around the loop if there is no
4380 store between it and the start of the loop. We only care about tests with
4381 constants and registers and only certain of those. */
4382 if (GET_CODE (p) == JUMP_INSN
4383 && JUMP_LABEL (p) != 0
4384 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4385 && (test = get_condition_for_loop (loop, p)) != 0
4386 && GET_CODE (XEXP (test, 0)) == REG
4387 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4388 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4389 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4390 && bl->init_insn == 0)
4392 /* If an NE test, we have an initial value! */
4393 if (GET_CODE (test) == NE)
4395 bl->init_insn = p;
4396 bl->init_set = gen_rtx_SET (VOIDmode,
4397 XEXP (test, 0), XEXP (test, 1));
4399 else
4400 bl->initial_test = test;
4406 /* Look at each biv and see if we can say anything better about its
4407 initial value from any initializing insns set up above. (This is done
4408 in two passes to avoid missing SETs in a PARALLEL.) */
4409 static void
4410 loop_bivs_check (loop)
4411 struct loop *loop;
4413 struct loop_ivs *ivs = LOOP_IVS (loop);
4414 /* Temporary list pointers for traversing ivs->list. */
4415 struct iv_class *bl;
4416 struct iv_class **backbl;
4418 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4420 rtx src;
4421 rtx note;
4423 if (! bl->init_insn)
4424 continue;
4426 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4427 is a constant, use the value of that. */
4428 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4429 && CONSTANT_P (XEXP (note, 0)))
4430 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4431 && CONSTANT_P (XEXP (note, 0))))
4432 src = XEXP (note, 0);
4433 else
4434 src = SET_SRC (bl->init_set);
4436 if (loop_dump_stream)
4437 fprintf (loop_dump_stream,
4438 "Biv %d: initialized at insn %d: initial value ",
4439 bl->regno, INSN_UID (bl->init_insn));
4441 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4442 || GET_MODE (src) == VOIDmode)
4443 && valid_initial_value_p (src, bl->init_insn,
4444 LOOP_INFO (loop)->pre_header_has_call,
4445 loop->start))
4447 bl->initial_value = src;
4449 if (loop_dump_stream)
4451 print_simple_rtl (loop_dump_stream, src);
4452 fputc ('\n', loop_dump_stream);
4455 /* If we can't make it a giv,
4456 let the biv keep its initial value of "itself". */
4457 else if (loop_dump_stream)
4458 fprintf (loop_dump_stream, "is complex\n");
4463 /* Search the loop for general induction variables. */
4465 static void
4466 loop_givs_find (loop)
4467 struct loop* loop;
4469 for_each_insn_in_loop (loop, check_insn_for_givs);
4473 /* For each giv for which we still don't know whether or not it is
4474 replaceable, check to see if it is replaceable because its final value
4475 can be calculated. */
4477 static void
4478 loop_givs_check (loop)
4479 struct loop *loop;
4481 struct loop_ivs *ivs = LOOP_IVS (loop);
4482 struct iv_class *bl;
4484 for (bl = ivs->list; bl; bl = bl->next)
4486 struct induction *v;
4488 for (v = bl->giv; v; v = v->next_iv)
4489 if (! v->replaceable && ! v->not_replaceable)
4490 check_final_value (loop, v);
4495 /* Return non-zero if it is possible to eliminate the biv BL provided
4496 all givs are reduced. This is possible if either the reg is not
4497 used outside the loop, or we can compute what its final value will
4498 be. */
4500 static int
4501 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4502 struct loop *loop;
4503 struct iv_class *bl;
4504 int threshold;
4505 int insn_count;
4507 /* For architectures with a decrement_and_branch_until_zero insn,
4508 don't do this if we put a REG_NONNEG note on the endtest for this
4509 biv. */
4511 #ifdef HAVE_decrement_and_branch_until_zero
4512 if (bl->nonneg)
4514 if (loop_dump_stream)
4515 fprintf (loop_dump_stream,
4516 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4517 return 0;
4519 #endif
4521 /* Check whether the biv is used outside the loop or has a final value.
4522 Compare against bl->init_insn rather than loop->start. We aren't
4523 concerned with any uses of the biv between init_insn and
4524 loop->start since these won't be affected by the value of the biv
4525 elsewhere in the function, so long as init_insn doesn't use the
4526 biv itself. */
4528 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4529 && bl->init_insn
4530 && INSN_UID (bl->init_insn) < max_uid_for_loop
4531 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4532 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4533 || (bl->final_value = final_biv_value (loop, bl)))
4534 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4536 if (loop_dump_stream)
4538 fprintf (loop_dump_stream,
4539 "Cannot eliminate biv %d.\n",
4540 bl->regno);
4541 fprintf (loop_dump_stream,
4542 "First use: insn %d, last use: insn %d.\n",
4543 REGNO_FIRST_UID (bl->regno),
4544 REGNO_LAST_UID (bl->regno));
4546 return 0;
4550 /* Reduce each giv of BL that we have decided to reduce. */
4552 static void
4553 loop_givs_reduce (loop, bl)
4554 struct loop *loop;
4555 struct iv_class *bl;
4557 struct induction *v;
4559 for (v = bl->giv; v; v = v->next_iv)
4561 struct induction *tv;
4562 if (! v->ignore && v->same == 0)
4564 int auto_inc_opt = 0;
4566 /* If the code for derived givs immediately below has already
4567 allocated a new_reg, we must keep it. */
4568 if (! v->new_reg)
4569 v->new_reg = gen_reg_rtx (v->mode);
4571 #ifdef AUTO_INC_DEC
4572 /* If the target has auto-increment addressing modes, and
4573 this is an address giv, then try to put the increment
4574 immediately after its use, so that flow can create an
4575 auto-increment addressing mode. */
4576 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4577 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4578 /* We don't handle reversed biv's because bl->biv->insn
4579 does not have a valid INSN_LUID. */
4580 && ! bl->reversed
4581 && v->always_executed && ! v->maybe_multiple
4582 && INSN_UID (v->insn) < max_uid_for_loop)
4584 /* If other giv's have been combined with this one, then
4585 this will work only if all uses of the other giv's occur
4586 before this giv's insn. This is difficult to check.
4588 We simplify this by looking for the common case where
4589 there is one DEST_REG giv, and this giv's insn is the
4590 last use of the dest_reg of that DEST_REG giv. If the
4591 increment occurs after the address giv, then we can
4592 perform the optimization. (Otherwise, the increment
4593 would have to go before other_giv, and we would not be
4594 able to combine it with the address giv to get an
4595 auto-inc address.) */
4596 if (v->combined_with)
4598 struct induction *other_giv = 0;
4600 for (tv = bl->giv; tv; tv = tv->next_iv)
4601 if (tv->same == v)
4603 if (other_giv)
4604 break;
4605 else
4606 other_giv = tv;
4608 if (! tv && other_giv
4609 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4610 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4611 == INSN_UID (v->insn))
4612 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4613 auto_inc_opt = 1;
4615 /* Check for case where increment is before the address
4616 giv. Do this test in "loop order". */
4617 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4618 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4619 || (INSN_LUID (bl->biv->insn)
4620 > INSN_LUID (loop->scan_start))))
4621 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4622 && (INSN_LUID (loop->scan_start)
4623 < INSN_LUID (bl->biv->insn))))
4624 auto_inc_opt = -1;
4625 else
4626 auto_inc_opt = 1;
4628 #ifdef HAVE_cc0
4630 rtx prev;
4632 /* We can't put an insn immediately after one setting
4633 cc0, or immediately before one using cc0. */
4634 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4635 || (auto_inc_opt == -1
4636 && (prev = prev_nonnote_insn (v->insn)) != 0
4637 && INSN_P (prev)
4638 && sets_cc0_p (PATTERN (prev))))
4639 auto_inc_opt = 0;
4641 #endif
4643 if (auto_inc_opt)
4644 v->auto_inc_opt = 1;
4646 #endif
4648 /* For each place where the biv is incremented, add an insn
4649 to increment the new, reduced reg for the giv. */
4650 for (tv = bl->biv; tv; tv = tv->next_iv)
4652 rtx insert_before;
4654 if (! auto_inc_opt)
4655 insert_before = tv->insn;
4656 else if (auto_inc_opt == 1)
4657 insert_before = NEXT_INSN (v->insn);
4658 else
4659 insert_before = v->insn;
4661 if (tv->mult_val == const1_rtx)
4662 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4663 v->new_reg, v->new_reg,
4664 0, insert_before);
4665 else /* tv->mult_val == const0_rtx */
4666 /* A multiply is acceptable here
4667 since this is presumed to be seldom executed. */
4668 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4669 v->add_val, v->new_reg,
4670 0, insert_before);
4673 /* Add code at loop start to initialize giv's reduced reg. */
4675 loop_iv_add_mult_hoist (loop,
4676 extend_value_for_giv (v, bl->initial_value),
4677 v->mult_val, v->add_val, v->new_reg);
4683 /* Check for givs whose first use is their definition and whose
4684 last use is the definition of another giv. If so, it is likely
4685 dead and should not be used to derive another giv nor to
4686 eliminate a biv. */
4688 static void
4689 loop_givs_dead_check (loop, bl)
4690 struct loop *loop ATTRIBUTE_UNUSED;
4691 struct iv_class *bl;
4693 struct induction *v;
4695 for (v = bl->giv; v; v = v->next_iv)
4697 if (v->ignore
4698 || (v->same && v->same->ignore))
4699 continue;
4701 if (v->giv_type == DEST_REG
4702 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4704 struct induction *v1;
4706 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4707 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4708 v->maybe_dead = 1;
4714 static void
4715 loop_givs_rescan (loop, bl, reg_map)
4716 struct loop *loop;
4717 struct iv_class *bl;
4718 rtx *reg_map;
4720 struct induction *v;
4722 for (v = bl->giv; v; v = v->next_iv)
4724 if (v->same && v->same->ignore)
4725 v->ignore = 1;
4727 if (v->ignore)
4728 continue;
4730 /* Update expression if this was combined, in case other giv was
4731 replaced. */
4732 if (v->same)
4733 v->new_reg = replace_rtx (v->new_reg,
4734 v->same->dest_reg, v->same->new_reg);
4736 /* See if this register is known to be a pointer to something. If
4737 so, see if we can find the alignment. First see if there is a
4738 destination register that is a pointer. If so, this shares the
4739 alignment too. Next see if we can deduce anything from the
4740 computational information. If not, and this is a DEST_ADDR
4741 giv, at least we know that it's a pointer, though we don't know
4742 the alignment. */
4743 if (GET_CODE (v->new_reg) == REG
4744 && v->giv_type == DEST_REG
4745 && REG_POINTER (v->dest_reg))
4746 mark_reg_pointer (v->new_reg,
4747 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4748 else if (GET_CODE (v->new_reg) == REG
4749 && REG_POINTER (v->src_reg))
4751 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4753 if (align == 0
4754 || GET_CODE (v->add_val) != CONST_INT
4755 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4756 align = 0;
4758 mark_reg_pointer (v->new_reg, align);
4760 else if (GET_CODE (v->new_reg) == REG
4761 && GET_CODE (v->add_val) == REG
4762 && REG_POINTER (v->add_val))
4764 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4766 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4767 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4768 align = 0;
4770 mark_reg_pointer (v->new_reg, align);
4772 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4773 mark_reg_pointer (v->new_reg, 0);
4775 if (v->giv_type == DEST_ADDR)
4776 /* Store reduced reg as the address in the memref where we found
4777 this giv. */
4778 validate_change (v->insn, v->location, v->new_reg, 0);
4779 else if (v->replaceable)
4781 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4783 else
4785 /* Not replaceable; emit an insn to set the original giv reg from
4786 the reduced giv, same as above. */
4787 loop_insn_emit_after (loop, 0, v->insn,
4788 gen_move_insn (v->dest_reg, v->new_reg));
4791 /* When a loop is reversed, givs which depend on the reversed
4792 biv, and which are live outside the loop, must be set to their
4793 correct final value. This insn is only needed if the giv is
4794 not replaceable. The correct final value is the same as the
4795 value that the giv starts the reversed loop with. */
4796 if (bl->reversed && ! v->replaceable)
4797 loop_iv_add_mult_sink (loop,
4798 extend_value_for_giv (v, bl->initial_value),
4799 v->mult_val, v->add_val, v->dest_reg);
4800 else if (v->final_value)
4801 loop_insn_sink_or_swim (loop,
4802 gen_load_of_final_value (v->dest_reg,
4803 v->final_value));
4805 if (loop_dump_stream)
4807 fprintf (loop_dump_stream, "giv at %d reduced to ",
4808 INSN_UID (v->insn));
4809 print_simple_rtl (loop_dump_stream, v->new_reg);
4810 fprintf (loop_dump_stream, "\n");
4816 static int
4817 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4818 struct loop *loop ATTRIBUTE_UNUSED;
4819 struct iv_class *bl;
4820 struct induction *v;
4821 rtx test_reg;
4823 int add_cost;
4824 int benefit;
4826 benefit = v->benefit;
4827 PUT_MODE (test_reg, v->mode);
4828 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4829 test_reg, test_reg);
4831 /* Reduce benefit if not replaceable, since we will insert a
4832 move-insn to replace the insn that calculates this giv. Don't do
4833 this unless the giv is a user variable, since it will often be
4834 marked non-replaceable because of the duplication of the exit
4835 code outside the loop. In such a case, the copies we insert are
4836 dead and will be deleted. So they don't have a cost. Similar
4837 situations exist. */
4838 /* ??? The new final_[bg]iv_value code does a much better job of
4839 finding replaceable giv's, and hence this code may no longer be
4840 necessary. */
4841 if (! v->replaceable && ! bl->eliminable
4842 && REG_USERVAR_P (v->dest_reg))
4843 benefit -= copy_cost;
4845 /* Decrease the benefit to count the add-insns that we will insert
4846 to increment the reduced reg for the giv. ??? This can
4847 overestimate the run-time cost of the additional insns, e.g. if
4848 there are multiple basic blocks that increment the biv, but only
4849 one of these blocks is executed during each iteration. There is
4850 no good way to detect cases like this with the current structure
4851 of the loop optimizer. This code is more accurate for
4852 determining code size than run-time benefits. */
4853 benefit -= add_cost * bl->biv_count;
4855 /* Decide whether to strength-reduce this giv or to leave the code
4856 unchanged (recompute it from the biv each time it is used). This
4857 decision can be made independently for each giv. */
4859 #ifdef AUTO_INC_DEC
4860 /* Attempt to guess whether autoincrement will handle some of the
4861 new add insns; if so, increase BENEFIT (undo the subtraction of
4862 add_cost that was done above). */
4863 if (v->giv_type == DEST_ADDR
4864 /* Increasing the benefit is risky, since this is only a guess.
4865 Avoid increasing register pressure in cases where there would
4866 be no other benefit from reducing this giv. */
4867 && benefit > 0
4868 && GET_CODE (v->mult_val) == CONST_INT)
4870 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4872 if (HAVE_POST_INCREMENT
4873 && INTVAL (v->mult_val) == size)
4874 benefit += add_cost * bl->biv_count;
4875 else if (HAVE_PRE_INCREMENT
4876 && INTVAL (v->mult_val) == size)
4877 benefit += add_cost * bl->biv_count;
4878 else if (HAVE_POST_DECREMENT
4879 && -INTVAL (v->mult_val) == size)
4880 benefit += add_cost * bl->biv_count;
4881 else if (HAVE_PRE_DECREMENT
4882 && -INTVAL (v->mult_val) == size)
4883 benefit += add_cost * bl->biv_count;
4885 #endif
4887 return benefit;
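/* A sketch of the accounting above, with made-up costs: a giv with
   benefit 10, copy_cost 4 and add_cost 2 for a biv incremented in two
   places scores 10 - 4 - 2 * 2 == 2 if it is a non-replaceable user
   variable; if it is also a DEST_ADDR giv whose mult_val equals the
   access size on a target with post-increment, the 2 * 2 is added back,
   giving 6.  */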
4891 /* Free IV structures for LOOP. */
4893 static void
4894 loop_ivs_free (loop)
4895 struct loop *loop;
4897 struct loop_ivs *ivs = LOOP_IVS (loop);
4898 struct iv_class *iv = ivs->list;
4900 free (ivs->regs);
4902 while (iv)
4904 struct iv_class *next = iv->next;
4905 struct induction *induction;
4906 struct induction *next_induction;
4908 for (induction = iv->biv; induction; induction = next_induction)
4910 next_induction = induction->next_iv;
4911 free (induction);
4913 for (induction = iv->giv; induction; induction = next_induction)
4915 next_induction = induction->next_iv;
4916 free (induction);
4919 free (iv);
4920 iv = next;
4925 /* Perform strength reduction and induction variable elimination.
4927 Pseudo registers created during this function will be beyond the
4928 last valid index in several tables including
4929 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4930 problem here, because the added registers cannot be givs outside of
4931 their loop, and hence will never be reconsidered. But scan_loop
4932 must check regnos to make sure they are in bounds. */
4934 static void
4935 strength_reduce (loop, flags)
4936 struct loop *loop;
4937 int flags;
4939 struct loop_info *loop_info = LOOP_INFO (loop);
4940 struct loop_regs *regs = LOOP_REGS (loop);
4941 struct loop_ivs *ivs = LOOP_IVS (loop);
4942 rtx p;
4943 /* Temporary list pointer for traversing ivs->list. */
4944 struct iv_class *bl;
4945 /* Ratio of extra register life span we can justify
4946 for saving an instruction. More if loop doesn't call subroutines
4947 since in that case saving an insn makes more difference
4948 and more registers are available. */
4949 /* ??? could set this to last value of threshold in move_movables */
4950 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
4951 /* Map of pseudo-register replacements. */
4952 rtx *reg_map = NULL;
4953 int reg_map_size;
4954 int unrolled_insn_copies = 0;
4955 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
4956 int insn_count = count_insns_in_loop (loop);
4958 addr_placeholder = gen_reg_rtx (Pmode);
4960 ivs->n_regs = max_reg_before_loop;
4961 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
4963 /* Find all BIVs in loop. */
4964 loop_bivs_find (loop);
4966 /* Exit if there are no bivs. */
4967 if (! ivs->list)
4969 /* Can still unroll the loop anyway, but indicate that there is no
4970 strength reduction info available. */
4971 if (flags & LOOP_UNROLL)
4972 unroll_loop (loop, insn_count, 0);
4974 loop_ivs_free (loop);
4975 return;
4978 /* Determine how BIVs are initialized by looking through the pre-header
4979 extended basic block. */
4980 loop_bivs_init_find (loop);
4982 /* Look at each biv and see if we can say anything better about its
4983 initial value from any initializing insns set up above. */
4984 loop_bivs_check (loop);
4986 /* Search the loop for general induction variables. */
4987 loop_givs_find (loop);
4989 /* Try to calculate and save the number of loop iterations. This is
4990 set to zero if the actual number cannot be calculated. This must
4991 be called after all giv's have been identified, since otherwise it may
4992 fail if the iteration variable is a giv. */
4993 loop_iterations (loop);
4995 #ifdef HAVE_prefetch
4996 if (flags & LOOP_PREFETCH)
4997 emit_prefetch_instructions (loop);
4998 #endif
5000 /* Now for each giv for which we still don't know whether or not it is
5001 replaceable, check to see if it is replaceable because its final value
5002 can be calculated. This must be done after loop_iterations is called,
5003 so that final_giv_value will work correctly. */
5004 loop_givs_check (loop);
5006 /* Try to prove that the loop counter variable (if any) is always
5007 nonnegative; if so, record that fact with a REG_NONNEG note
5008 so that "decrement and branch until zero" insn can be used. */
5009 check_dbra_loop (loop, insn_count);
5011 /* Create reg_map to hold substitutions for replaceable giv regs.
5012 Some givs might have been made from biv increments, so look at
5013 ivs->reg_iv_type for a suitable size. */
5014 reg_map_size = ivs->n_regs;
5015 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
5017 /* Examine each iv class for feasibility of strength reduction/induction
5018 variable elimination. */
5020 for (bl = ivs->list; bl; bl = bl->next)
5022 struct induction *v;
5023 int benefit;
5025 /* Test whether it will be possible to eliminate this biv
5026 provided all givs are reduced. */
5027 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5029 /* This will be true at the end, if all givs which depend on this
5030 biv have been strength reduced.
5031 We can't (currently) eliminate the biv unless this is so. */
5032 bl->all_reduced = 1;
5034 /* Check each extension dependent giv in this class to see if its
5035 root biv is safe from wrapping in the interior mode. */
5036 check_ext_dependent_givs (bl, loop_info);
5038 /* Combine all giv's for this iv_class. */
5039 combine_givs (regs, bl);
5041 for (v = bl->giv; v; v = v->next_iv)
5043 struct induction *tv;
5045 if (v->ignore || v->same)
5046 continue;
5048 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5050 /* If an insn is not to be strength reduced, then set its ignore
5051 flag, and clear bl->all_reduced. */
5053 /* A giv that depends on a reversed biv must be reduced if it is
5054 used after the loop exit, otherwise, it would have the wrong
5055 value after the loop exit. To make it simple, just reduce all
5056 such giv's whether or not we know they are used after the loop
5057 exit. */
5059 if (! flag_reduce_all_givs
5060 && v->lifetime * threshold * benefit < insn_count
5061 && ! bl->reversed)
5063 if (loop_dump_stream)
5064 fprintf (loop_dump_stream,
5065 "giv of insn %d not worth while, %d vs %d.\n",
5066 INSN_UID (v->insn),
5067 v->lifetime * threshold * benefit, insn_count);
5068 v->ignore = 1;
5069 bl->all_reduced = 0;
5071 else
5073 /* Check that we can increment the reduced giv without a
5074 multiply insn. If not, reject it. */
5076 for (tv = bl->biv; tv; tv = tv->next_iv)
5077 if (tv->mult_val == const1_rtx
5078 && ! product_cheap_p (tv->add_val, v->mult_val))
5080 if (loop_dump_stream)
5081 fprintf (loop_dump_stream,
5082 "giv of insn %d: would need a multiply.\n",
5083 INSN_UID (v->insn));
5084 v->ignore = 1;
5085 bl->all_reduced = 0;
5086 break;
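/* The worthwhileness test above weighs total saving against loop size:
   e.g. a giv with benefit 2 and lifetime 3 under a threshold of 16
   scores 3 * 16 * 2 == 96, so it is reduced whenever the loop body has
   at most 96 insns (or flag_reduce_all_givs is in effect).  */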
5091 /* Check for givs whose first use is their definition and whose
5092 last use is the definition of another giv. If so, it is likely
5093 dead and should not be used to derive another giv nor to
5094 eliminate a biv. */
5095 loop_givs_dead_check (loop, bl);
5097 /* Reduce each giv that we decided to reduce. */
5098 loop_givs_reduce (loop, bl);
5100 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5101 as not reduced.
5103 For each giv register that can be reduced now: if replaceable,
5104 substitute reduced reg wherever the old giv occurs;
5105 else add new move insn "giv_reg = reduced_reg". */
5106 loop_givs_rescan (loop, bl, reg_map);
5108 /* All the givs based on the biv bl have been reduced if they
5109 merit it. */
5111 /* For each giv not marked as maybe dead that has been combined with a
5112 second giv, clear any "maybe dead" mark on that second giv.
5113 v->new_reg will either be or refer to the register of the giv it
5114 combined with.
5116 Doing this clearing avoids problems in biv elimination where
5117 a giv's new_reg is a complex value that can't be put in the
5118 insn but the giv combined with (with a reg as new_reg) is
5119 marked maybe_dead. Since the register will be used in either
5120 case, we'd prefer it be used from the simpler giv. */
5122 for (v = bl->giv; v; v = v->next_iv)
5123 if (! v->maybe_dead && v->same)
5124 v->same->maybe_dead = 0;
5126 /* Try to eliminate the biv, if it is a candidate.
5127 This won't work if ! bl->all_reduced,
5128 since the givs we planned to use might not have been reduced.
5130 We have to be careful that we didn't initially think we could
5131 eliminate this biv because of a giv that we now think may be
5132 dead and shouldn't be used as a biv replacement.
5134 Also, there is the possibility that we may have a giv that looks
5135 like it can be used to eliminate a biv, but the resulting insn
5136 isn't valid. This can happen, for example, on the 88k, where a
5137 JUMP_INSN can compare a register only with zero. Attempts to
5138 replace it with a compare with a constant will fail.
5140 Note that in cases where this call fails, we may have replaced some
5141 of the occurrences of the biv with a giv, but no harm was done in
5142 doing so in the rare cases where it can occur. */
5144 if (bl->all_reduced == 1 && bl->eliminable
5145 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5147 /* ?? If we created a new test to bypass the loop entirely,
5148 or otherwise drop straight in, based on this test, then
5149 we might want to rewrite it also. This way some later
5150 pass has more hope of removing the initialization of this
5151 biv entirely. */
5153 /* If final_value != 0, then the biv may be used after loop end
5154 and we must emit an insn to set it just in case.
5156 Reversed bivs already have an insn after the loop setting their
5157 value, so we don't need another one. We can't calculate the
5158 proper final value for such a biv here anyway. */
5159 if (bl->final_value && ! bl->reversed)
5160 loop_insn_sink_or_swim (loop,
5161 gen_load_of_final_value (bl->biv->dest_reg,
5162 bl->final_value));
5164 if (loop_dump_stream)
5165 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5166 bl->regno);
5168 /* See above note wrt final_value. But since we couldn't eliminate
5169 the biv, we must set the value after the loop instead of before. */
5170 else if (bl->final_value && ! bl->reversed)
5171 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5172 bl->final_value));
5175 /* Go through all the instructions in the loop, making all the
5176 register substitutions scheduled in REG_MAP. */
5178 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5179 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5180 || GET_CODE (p) == CALL_INSN)
5182 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5183 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5184 INSN_CODE (p) = -1;
5187 if (loop_info->n_iterations > 0)
5189 /* When we completely unroll a loop we will likely not need the increment
5190 of the loop BIV and we will not need the conditional branch at the
5191 end of the loop. */
5192 unrolled_insn_copies = insn_count - 2;
5194 #ifdef HAVE_cc0
5195 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5196 need the comparison before the conditional branch at the end of the
5197 loop. */
5198 unrolled_insn_copies -= 1;
5199 #endif
5201 /* We'll need one copy for each loop iteration. */
5202 unrolled_insn_copies *= loop_info->n_iterations;
5204 /* A little slop to account for the ability to remove initialization
5205 code, better CSE, and other secondary benefits of completely
5206 unrolling some loops. */
5207 unrolled_insn_copies -= 1;
5209 /* Clamp the value. */
5210 if (unrolled_insn_copies < 0)
5211 unrolled_insn_copies = 0;
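/* E.g. an 8-insn loop known to iterate 3 times estimates
   (8 - 2) * 3 - 1 == 17 copies (14 on a HAVE_cc0 machine, which also
   drops the comparison), so on the second pass it would not be unrolled
   automatically, 17 being larger than the original 8 insns.  */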
5214 /* Unroll loops from within strength reduction so that we can use the
5215 induction variable information that strength_reduce has already
5216 collected. Always unroll loops that would be as small or smaller
5217 unrolled than when rolled. */
5218 if ((flags & LOOP_UNROLL)
5219 || (!(flags & LOOP_FIRST_PASS)
5220 && loop_info->n_iterations > 0
5221 && unrolled_insn_copies <= insn_count))
5222 unroll_loop (loop, insn_count, 1);
5224 #ifdef HAVE_doloop_end
5225 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5226 doloop_optimize (loop);
5227 #endif /* HAVE_doloop_end */
5229 /* If the number of iterations is known, attach a branch prediction note
5230 to the branch. Do that only in the second loop pass, as loop unrolling
5231 may change the number of iterations performed. */
5232 if (flags & LOOP_BCT)
5234 unsigned HOST_WIDE_INT n
5235 = loop_info->n_iterations / loop_info->unroll_number;
5236 if (n > 1)
5237 predict_insn (PREV_INSN (loop->end), PRED_LOOP_ITERATIONS,
5238 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
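/* E.g. a loop iterating 10 times gets its ending branch predicted taken
   with probability REG_BR_PROB_BASE - REG_BR_PROB_BASE / 10, i.e. 90%,
   matching an exit taken once in ten executions.  */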
5241 if (loop_dump_stream)
5242 fprintf (loop_dump_stream, "\n");
5244 loop_ivs_free (loop);
5245 if (reg_map)
5246 free (reg_map);
5249 /* Record all basic induction variables calculated in the insn. */
5250 static rtx
5251 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5252 struct loop *loop;
5253 rtx p;
5254 int not_every_iteration;
5255 int maybe_multiple;
5257 struct loop_ivs *ivs = LOOP_IVS (loop);
5258 rtx set;
5259 rtx dest_reg;
5260 rtx inc_val;
5261 rtx mult_val;
5262 rtx *location;
5264 if (GET_CODE (p) == INSN
5265 && (set = single_set (p))
5266 && GET_CODE (SET_DEST (set)) == REG)
5268 dest_reg = SET_DEST (set);
5269 if (REGNO (dest_reg) < max_reg_before_loop
5270 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5271 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5273 if (basic_induction_var (loop, SET_SRC (set),
5274 GET_MODE (SET_SRC (set)),
5275 dest_reg, p, &inc_val, &mult_val,
5276 &location))
5278 /* It is a possible basic induction variable.
5279 Create and initialize an induction structure for it. */
5281 struct induction *v
5282 = (struct induction *) xmalloc (sizeof (struct induction));
5284 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5285 not_every_iteration, maybe_multiple);
5286 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5288 else if (REGNO (dest_reg) < ivs->n_regs)
5289 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5292 return p;
5295 /* Record all givs calculated in the insn.
5296 A register is a giv if: it is only set once, it is a function of a
5297 biv and a constant (or invariant), and it is not a biv. */
5298 static rtx
5299 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5300 struct loop *loop;
5301 rtx p;
5302 int not_every_iteration;
5303 int maybe_multiple;
5305 struct loop_regs *regs = LOOP_REGS (loop);
5307 rtx set;
5308 /* Look for a general induction variable in a register. */
5309 if (GET_CODE (p) == INSN
5310 && (set = single_set (p))
5311 && GET_CODE (SET_DEST (set)) == REG
5312 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5314 rtx src_reg;
5315 rtx dest_reg;
5316 rtx add_val;
5317 rtx mult_val;
5318 rtx ext_val;
5319 int benefit;
5320 rtx regnote = 0;
5321 rtx last_consec_insn;
5323 dest_reg = SET_DEST (set);
5324 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5325 return p;
5327 if (/* SET_SRC is a giv. */
5328 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5329 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5330 /* Equivalent expression is a giv. */
5331 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5332 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5333 &add_val, &mult_val, &ext_val, 0,
5334 &benefit, VOIDmode)))
5335 /* Don't try to handle any regs made by loop optimization.
5336 We have nothing on them in regno_first_uid, etc. */
5337 && REGNO (dest_reg) < max_reg_before_loop
5338 /* Don't recognize a BASIC_INDUCT_VAR here. */
5339 && dest_reg != src_reg
5340 /* This must be the only place where the register is set. */
5341 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5342 /* or all sets must be consecutive and make a giv. */
5343 || (benefit = consec_sets_giv (loop, benefit, p,
5344 src_reg, dest_reg,
5345 &add_val, &mult_val, &ext_val,
5346 &last_consec_insn))))
5348 struct induction *v
5349 = (struct induction *) xmalloc (sizeof (struct induction));
5351 /* If this is a library call, increase benefit. */
5352 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5353 benefit += libcall_benefit (p);
5355 /* Skip the consecutive insns, if there are any. */
5356 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5357 p = last_consec_insn;
5359 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5360 ext_val, benefit, DEST_REG, not_every_iteration,
5361 maybe_multiple, (rtx*) 0);
5366 #ifndef DONT_REDUCE_ADDR
5367 /* Look for givs which are memory addresses. */
5368 /* This resulted in worse code on a VAX 8600. I wonder if it
5369 still does. */
5370 if (GET_CODE (p) == INSN)
5371 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5372 maybe_multiple);
5373 #endif
5375 /* Update the status of whether giv can derive other givs. This can
5376 change when we pass a label or an insn that updates a biv. */
5377 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5378 || GET_CODE (p) == CODE_LABEL)
5379 update_giv_derive (loop, p);
5380 return p;
5383 /* Return 1 if X is a valid source for an initial value (or as value being
5384 compared against in an initial test).
5386 X must be either a register or constant and must not be clobbered between
5387 the current insn and the start of the loop.
5389 INSN is the insn containing X. */
5391 static int
5392 valid_initial_value_p (x, insn, call_seen, loop_start)
5393 rtx x;
5394 rtx insn;
5395 int call_seen;
5396 rtx loop_start;
5398 if (CONSTANT_P (x))
5399 return 1;
5401 /* Only consider pseudos we know about initialized in insns whose luids
5402 we know. */
5403 if (GET_CODE (x) != REG
5404 || REGNO (x) >= max_reg_before_loop)
5405 return 0;
5407 /* Don't use call-clobbered registers across a call which clobbers it. On
5408 some machines, don't use any hard registers at all. */
5409 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5410 && (SMALL_REGISTER_CLASSES
5411 || (call_used_regs[REGNO (x)] && call_seen)))
5412 return 0;
5414 /* Don't use registers that have been clobbered before the start of the
5415 loop. */
5416 if (reg_set_between_p (x, insn, loop_start))
5417 return 0;
5419 return 1;
5422 /* Scan X for memory refs and check each memory address
5423 as a possible giv. INSN is the insn whose pattern X comes from.
5424 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5425 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5426 more than once in each loop iteration. */
5428 static void
5429 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5430 const struct loop *loop;
5431 rtx x;
5432 rtx insn;
5433 int not_every_iteration, maybe_multiple;
5435 int i, j;
5436 enum rtx_code code;
5437 const char *fmt;
5439 if (x == 0)
5440 return;
5442 code = GET_CODE (x);
5443 switch (code)
5445 case REG:
5446 case CONST_INT:
5447 case CONST:
5448 case CONST_DOUBLE:
5449 case SYMBOL_REF:
5450 case LABEL_REF:
5451 case PC:
5452 case CC0:
5453 case ADDR_VEC:
5454 case ADDR_DIFF_VEC:
5455 case USE:
5456 case CLOBBER:
5457 return;
5459 case MEM:
5461 rtx src_reg;
5462 rtx add_val;
5463 rtx mult_val;
5464 rtx ext_val;
5465 int benefit;
5467 /* This code used to disable creating GIVs with mult_val == 1 and
5468 add_val == 0. However, this leads to lost optimizations when
5469 it comes time to combine a set of related DEST_ADDR GIVs, since
5470 this one would not be seen. */
5472 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5473 &mult_val, &ext_val, 1, &benefit,
5474 GET_MODE (x)))
5476 /* Found one; record it. */
5477 struct induction *v
5478 = (struct induction *) xmalloc (sizeof (struct induction));
5480 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5481 add_val, ext_val, benefit, DEST_ADDR,
5482 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5484 v->mem = x;
5487 return;
5489 default:
5490 break;
5493 /* Recursively scan the subexpressions for other mem refs. */
5495 fmt = GET_RTX_FORMAT (code);
5496 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5497 if (fmt[i] == 'e')
5498 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5499 maybe_multiple);
5500 else if (fmt[i] == 'E')
5501 for (j = 0; j < XVECLEN (x, i); j++)
5502 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5503 maybe_multiple);
5506 /* Fill in the data about one biv update.
5507 V is the `struct induction' in which we record the biv. (It is
5508 allocated by the caller, with alloca.)
5509 INSN is the insn that sets it.
5510 DEST_REG is the biv's reg.
5512 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5513 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5514 being set to INC_VAL.
5516 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5517 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5518 can be executed more than once per iteration. If MAYBE_MULTIPLE
5519 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5520 executed exactly once per iteration. */
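/* Editorial illustration (not from the original sources): for a biv
   register `i', an update `i = i + 4' is recorded with MULT_VAL ==
   const1_rtx and INC_VAL == (const_int 4), while an assignment `i = n'
   of a loop-invariant `n' is recorded with MULT_VAL == const0_rtx and
   INC_VAL == the rtx for `n'.  */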
5522 static void
5523 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5524 not_every_iteration, maybe_multiple)
5525 struct loop *loop;
5526 struct induction *v;
5527 rtx insn;
5528 rtx dest_reg;
5529 rtx inc_val;
5530 rtx mult_val;
5531 rtx *location;
5532 int not_every_iteration;
5533 int maybe_multiple;
5535 struct loop_ivs *ivs = LOOP_IVS (loop);
5536 struct iv_class *bl;
5538 v->insn = insn;
5539 v->src_reg = dest_reg;
5540 v->dest_reg = dest_reg;
5541 v->mult_val = mult_val;
5542 v->add_val = inc_val;
5543 v->ext_dependent = NULL_RTX;
5544 v->location = location;
5545 v->mode = GET_MODE (dest_reg);
5546 v->always_computable = ! not_every_iteration;
5547 v->always_executed = ! not_every_iteration;
5548 v->maybe_multiple = maybe_multiple;
5550 /* Add this to the reg's iv_class, creating a class
5551 if this is the first incrementation of the reg. */
5553 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5554 if (bl == 0)
5556 /* Create and initialize new iv_class. */
5558 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5560 bl->regno = REGNO (dest_reg);
5561 bl->biv = 0;
5562 bl->giv = 0;
5563 bl->biv_count = 0;
5564 bl->giv_count = 0;
5566 /* Set initial value to the reg itself. */
5567 bl->initial_value = dest_reg;
5568 bl->final_value = 0;
5569 /* We haven't seen the initializing insn yet */
5570 bl->init_insn = 0;
5571 bl->init_set = 0;
5572 bl->initial_test = 0;
5573 bl->incremented = 0;
5574 bl->eliminable = 0;
5575 bl->nonneg = 0;
5576 bl->reversed = 0;
5577 bl->total_benefit = 0;
5579 /* Add this class to ivs->list. */
5580 bl->next = ivs->list;
5581 ivs->list = bl;
5583 /* Put it in the array of biv register classes. */
5584 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5587 /* Update IV_CLASS entry for this biv. */
5588 v->next_iv = bl->biv;
5589 bl->biv = v;
5590 bl->biv_count++;
5591 if (mult_val == const1_rtx)
5592 bl->incremented = 1;
5594 if (loop_dump_stream)
5595 loop_biv_dump (v, loop_dump_stream, 0);
5598 /* Fill in the data about one giv.
5599 V is the `struct induction' in which we record the giv. (It is
5600 allocated by the caller, with alloca.)
5601 INSN is the insn that sets it.
5602 BENEFIT estimates the savings from deleting this insn.
5603 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5604 into a register or is used as a memory address.
5606 SRC_REG is the biv reg which the giv is computed from.
5607 DEST_REG is the giv's reg (if the giv is stored in a reg).
5608 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5609 LOCATION points to the place where this giv's value appears in INSN. */
5611 static void
5612 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5613 benefit, type, not_every_iteration, maybe_multiple, location)
5614 const struct loop *loop;
5615 struct induction *v;
5616 rtx insn;
5617 rtx src_reg;
5618 rtx dest_reg;
5619 rtx mult_val, add_val, ext_val;
5620 int benefit;
5621 enum g_types type;
5622 int not_every_iteration, maybe_multiple;
5623 rtx *location;
5625 struct loop_ivs *ivs = LOOP_IVS (loop);
5626 struct induction *b;
5627 struct iv_class *bl;
5628 rtx set = single_set (insn);
5629 rtx temp;
5631 /* Attempt to prove constancy of the values. Don't let simplify_rtx
5632 undo the MULT canonicalization that we performed earlier. */
5633 temp = simplify_rtx (add_val);
5634 if (temp
5635 && ! (GET_CODE (add_val) == MULT
5636 && GET_CODE (temp) == ASHIFT))
5637 add_val = temp;
5639 v->insn = insn;
5640 v->src_reg = src_reg;
5641 v->giv_type = type;
5642 v->dest_reg = dest_reg;
5643 v->mult_val = mult_val;
5644 v->add_val = add_val;
5645 v->ext_dependent = ext_val;
5646 v->benefit = benefit;
5647 v->location = location;
5648 v->cant_derive = 0;
5649 v->combined_with = 0;
5650 v->maybe_multiple = maybe_multiple;
5651 v->maybe_dead = 0;
5652 v->derive_adjustment = 0;
5653 v->same = 0;
5654 v->ignore = 0;
5655 v->new_reg = 0;
5656 v->final_value = 0;
5657 v->same_insn = 0;
5658 v->auto_inc_opt = 0;
5659 v->unrolled = 0;
5660 v->shared = 0;
5662 /* The v->always_computable field is used in update_giv_derive, to
5663 determine whether a giv can be used to derive another giv. For a
5664 DEST_REG giv, INSN computes a new value for the giv, so its value
5665 isn't computable if INSN isn't executed every iteration.
5666 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5667 it does not compute a new value. Hence the value is always computable
5668 regardless of whether INSN is executed each iteration. */
5670 if (type == DEST_ADDR)
5671 v->always_computable = 1;
5672 else
5673 v->always_computable = ! not_every_iteration;
5675 v->always_executed = ! not_every_iteration;
5677 if (type == DEST_ADDR)
5679 v->mode = GET_MODE (*location);
5680 v->lifetime = 1;
5682 else /* type == DEST_REG */
5684 v->mode = GET_MODE (SET_DEST (set));
5686 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5688 /* If the lifetime is zero, it means that this register is
5689 really a dead store. So mark this as a giv that can be
5690 ignored. This will not prevent the biv from being eliminated. */
5691 if (v->lifetime == 0)
5692 v->ignore = 1;
5694 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5695 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5698 /* Add the giv to the class of givs computed from one biv. */
5700 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5701 if (bl)
5703 v->next_iv = bl->giv;
5704 bl->giv = v;
5705 /* Don't count DEST_ADDR. This is supposed to count the number of
5706 insns that calculate givs. */
5707 if (type == DEST_REG)
5708 bl->giv_count++;
5709 bl->total_benefit += benefit;
5711 else
5712 /* Fatal error, biv missing for this giv? */
5713 abort ();
5715 if (type == DEST_ADDR)
5716 v->replaceable = 1;
5717 else
5719 /* The giv can be replaced outright by the reduced register only if all
5720 of the following conditions are true:
5721 - the insn that sets the giv is always executed on any iteration
5722 on which the giv is used at all
5723 (there are two ways to deduce this:
5724 either the insn is executed on every iteration,
5725 or all uses follow that insn in the same basic block),
5726 - the giv is not used outside the loop
5727 - no assignments to the biv occur during the giv's lifetime. */
5729 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5730 /* Previous line always fails if INSN was moved by loop opt. */
5731 && REGNO_LAST_LUID (REGNO (dest_reg))
5732 < INSN_LUID (loop->end)
5733 && (! not_every_iteration
5734 || last_use_this_basic_block (dest_reg, insn)))
5736 /* Now check that there are no assignments to the biv within the
5737 giv's lifetime. This requires two separate checks. */
5739 /* Check each biv update, and fail if any are between the first
5740 and last use of the giv.
5742 If this loop contains an inner loop that was unrolled, then
5743 the insn modifying the biv may have been emitted by the loop
5744 unrolling code, and hence does not have a valid luid. Just
5745 mark the biv as not replaceable in this case. It is not very
5746 useful as a biv, because it is used in two different loops.
5747 It is very unlikely that we would be able to optimize the giv
5748 using this biv anyway. */
5750 v->replaceable = 1;
5751 for (b = bl->biv; b; b = b->next_iv)
5753 if (INSN_UID (b->insn) >= max_uid_for_loop
5754 || ((INSN_LUID (b->insn)
5755 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5756 && (INSN_LUID (b->insn)
5757 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5759 v->replaceable = 0;
5760 v->not_replaceable = 1;
5761 break;
5765 /* If there are any backwards branches that go from after the
5766 biv update to before it, then this giv is not replaceable. */
5767 if (v->replaceable)
5768 for (b = bl->biv; b; b = b->next_iv)
5769 if (back_branch_in_range_p (loop, b->insn))
5771 v->replaceable = 0;
5772 v->not_replaceable = 1;
5773 break;
5776 else
5778 /* May still be replaceable, we don't have enough info here to
5779 decide. */
5780 v->replaceable = 0;
5781 v->not_replaceable = 0;
5785 /* Record whether the add_val contains a const_int, for later use by
5786 combine_givs. */
5788 rtx tem = add_val;
5790 v->no_const_addval = 1;
5791 if (tem == const0_rtx)
5792 ;
5793 else if (CONSTANT_P (add_val))
5794 v->no_const_addval = 0;
5795 if (GET_CODE (tem) == PLUS)
5797 while (1)
5799 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5800 tem = XEXP (tem, 0);
5801 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5802 tem = XEXP (tem, 1);
5803 else
5804 break;
5806 if (CONSTANT_P (XEXP (tem, 1)))
5807 v->no_const_addval = 0;
5811 if (loop_dump_stream)
5812 loop_giv_dump (v, loop_dump_stream, 0);
5815 /* All this does is determine whether a giv can be made replaceable because
5816 its final value can be calculated. This code can not be part of record_giv
5817 above, because final_giv_value requires that the number of loop iterations
5818 be known, and that can not be accurately calculated until after all givs
5819 have been identified. */
5821 static void
5822 check_final_value (loop, v)
5823 const struct loop *loop;
5824 struct induction *v;
5826 struct loop_ivs *ivs = LOOP_IVS (loop);
5827 struct iv_class *bl;
5828 rtx final_value = 0;
5830 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5832 /* DEST_ADDR givs will never reach here, because they are always marked
5833 replaceable above in record_giv. */
5835 /* The giv can be replaced outright by the reduced register only if all
5836 of the following conditions are true:
5837 - the insn that sets the giv is always executed on any iteration
5838 on which the giv is used at all
5839 (there are two ways to deduce this:
5840 either the insn is executed on every iteration,
5841 or all uses follow that insn in the same basic block),
5842 - its final value can be calculated (this condition is different
5843 from the one above in record_giv)
5844 - it's not used before it's set
5845 - no assignments to the biv occur during the giv's lifetime. */
5847 #if 0
5848 /* This is only called now when replaceable is known to be false. */
5849 /* Clear replaceable, so that it won't confuse final_giv_value. */
5850 v->replaceable = 0;
5851 #endif
5853 if ((final_value = final_giv_value (loop, v))
5854 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5856 int biv_increment_seen = 0, before_giv_insn = 0;
5857 rtx p = v->insn;
5858 rtx last_giv_use;
5860 v->replaceable = 1;
5862 /* When trying to determine whether or not a biv increment occurs
5863 during the lifetime of the giv, we can ignore uses of the variable
5864 outside the loop because final_value is true. Hence we can not
5865 use regno_last_uid and regno_first_uid as above in record_giv. */
5867 /* Search the loop to determine whether any assignments to the
5868 biv occur during the giv's lifetime. Start with the insn
5869 that sets the giv, and search around the loop until we come
5870 back to that insn again.
5872 Also fail if there is a jump within the giv's lifetime that jumps
5873 to somewhere outside the lifetime but still within the loop. This
5874 catches spaghetti code where the execution order is not linear, and
5875 hence the above test fails. Here we assume that the giv lifetime
5876 does not extend from one iteration of the loop to the next, so as
5877 to make the test easier. Since the lifetime isn't known yet,
5878 this requires two loops. See also record_giv above. */
5880 last_giv_use = v->insn;
5882 while (1)
5884 p = NEXT_INSN (p);
5885 if (p == loop->end)
5887 before_giv_insn = 1;
5888 p = NEXT_INSN (loop->start);
5890 if (p == v->insn)
5891 break;
5893 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5894 || GET_CODE (p) == CALL_INSN)
5896 /* It is possible for the BIV increment to use the GIV if we
5897 have a cycle. Thus we must be sure to check each insn for
5898 both BIV and GIV uses, and we must check for BIV uses
5899 first. */
5901 if (! biv_increment_seen
5902 && reg_set_p (v->src_reg, PATTERN (p)))
5903 biv_increment_seen = 1;
5905 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5907 if (biv_increment_seen || before_giv_insn)
5909 v->replaceable = 0;
5910 v->not_replaceable = 1;
5911 break;
5913 last_giv_use = p;
5918 /* Now that the lifetime of the giv is known, check for branches
5919 from within the lifetime to outside the lifetime if it is still
5920 replaceable. */
5922 if (v->replaceable)
5924 p = v->insn;
5925 while (1)
5927 p = NEXT_INSN (p);
5928 if (p == loop->end)
5929 p = NEXT_INSN (loop->start);
5930 if (p == last_giv_use)
5931 break;
5933 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5934 && LABEL_NAME (JUMP_LABEL (p))
5935 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5936 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5937 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5938 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5940 v->replaceable = 0;
5941 v->not_replaceable = 1;
5943 if (loop_dump_stream)
5944 fprintf (loop_dump_stream,
5945 "Found branch outside giv lifetime.\n");
5947 break;
5952 /* If it is replaceable, then save the final value. */
5953 if (v->replaceable)
5954 v->final_value = final_value;
5957 if (loop_dump_stream && v->replaceable)
5958 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5959 INSN_UID (v->insn), REGNO (v->dest_reg));
5962 /* Update the status of whether a giv can derive other givs.
5964 We need to do something special if there is or may be an update to the biv
5965 between the time the giv is defined and the time it is used to derive
5966 another giv.
5968 In addition, a giv that is only conditionally set is not allowed to
5969 derive another giv once a label has been passed.
5971 The cases we look at are when a label or an update to a biv is passed. */
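/* Editorial illustration (not from the original sources): suppose a giv
   g = b * 3 is computed, and the biv b is then incremented by 4. A use
   of `b * 3' after the increment really needs g + 4*3 = g + 12; that
   product of the biv's add_val and the giv's mult_val is the
   compensation (derive_adjustment) computed below.  */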
5973 static void
5974 update_giv_derive (loop, p)
5975 const struct loop *loop;
5976 rtx p;
5978 struct loop_ivs *ivs = LOOP_IVS (loop);
5979 struct iv_class *bl;
5980 struct induction *biv, *giv;
5981 rtx tem;
5982 int dummy;
5984 /* Search all IV classes, then all bivs, and finally all givs.
5986 There are three cases we are concerned with. First we have the situation
5987 of a giv that is only updated conditionally. In that case, it may not
5988 derive any givs after a label is passed.
5990 The second case is when a biv update occurs, or may occur, after the
5991 definition of a giv. For certain biv updates (see below) that are
5992 known to occur between the giv definition and use, we can adjust the
5993 giv definition. For others, or when the biv update is conditional,
5994 we must prevent the giv from deriving any other givs. There are two
5995 sub-cases within this case.
5997 If this is a label, we are concerned with any biv update that is done
5998 conditionally, since it may be done after the giv is defined followed by
5999 a branch here (actually, we need to pass both a jump and a label, but
6000 this extra tracking doesn't seem worth it).
6002 If this is a jump, we are concerned about any biv update that may be
6003 executed multiple times. We are actually only concerned about
6004 backward jumps, but it is probably not worth performing the test
6005 on the jump again here.
6007 If this is a biv update, we must adjust the giv status to show that a
6008 subsequent biv update was performed. If this adjustment cannot be done,
6009 the giv cannot derive further givs. */
6011 for (bl = ivs->list; bl; bl = bl->next)
6012 for (biv = bl->biv; biv; biv = biv->next_iv)
6013 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6014 || biv->insn == p)
6016 for (giv = bl->giv; giv; giv = giv->next_iv)
6018 /* If cant_derive is already true, there is no point in
6019 checking all of these conditions again. */
6020 if (giv->cant_derive)
6021 continue;
6023 /* If this giv is conditionally set and we have passed a label,
6024 it cannot derive anything. */
6025 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6026 giv->cant_derive = 1;
6028 /* Skip givs that have mult_val == 0, since
6029 they are really invariants. Also skip those that are
6030 replaceable, since we know their lifetime doesn't contain
6031 any biv update. */
6032 else if (giv->mult_val == const0_rtx || giv->replaceable)
6033 continue;
6035 /* The only way we can allow this giv to derive another
6036 is if this is a biv increment and we can form the product
6037 of biv->add_val and giv->mult_val. In this case, we will
6038 be able to compute a compensation. */
6039 else if (biv->insn == p)
6041 rtx ext_val_dummy;
6043 tem = 0;
6044 if (biv->mult_val == const1_rtx)
6045 tem = simplify_giv_expr (loop,
6046 gen_rtx_MULT (giv->mode,
6047 biv->add_val,
6048 giv->mult_val),
6049 &ext_val_dummy, &dummy);
6051 if (tem && giv->derive_adjustment)
6052 tem = simplify_giv_expr
6053 (loop,
6054 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6055 &ext_val_dummy, &dummy);
6057 if (tem)
6058 giv->derive_adjustment = tem;
6059 else
6060 giv->cant_derive = 1;
6062 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6063 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6064 giv->cant_derive = 1;
6069 /* Check whether an insn is an increment legitimate for a basic induction var.
6070 X is the source of insn P, or a part of it.
6071 MODE is the mode in which X should be interpreted.
6073 DEST_REG is the putative biv, also the destination of the insn.
6074 We accept patterns of these forms:
6075 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6076 REG = INVARIANT + REG
6078 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6079 store the additive term into *INC_VAL, and store the place where
6080 we found the additive term into *LOCATION.
6082 If X is an assignment of an invariant into DEST_REG, we set
6083 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6085 We also want to detect a BIV when it corresponds to a variable
6086 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6087 of the variable may be a PLUS that adds a SUBREG of that variable to
6088 an invariant and then sign- or zero-extends the result of the PLUS
6089 into the variable.
6091 Most GIVs in such cases will be in the promoted mode, since that is
6092 probably the natural computation mode (and almost certainly the mode
6093 used for addresses) on the machine. So we view the pseudo-reg containing
6094 the variable as the BIV, as if it were simply incremented.
6096 Note that treating the entire pseudo as a BIV will result in making
6097 simple increments to any GIVs based on it. However, if the variable
6098 overflows in its declared mode but not its promoted mode, the result will
6099 be incorrect. This is acceptable if the variable is signed, since
6100 overflows in such cases are undefined, but not if it is unsigned, since
6101 those overflows are defined. So we only check for SIGN_EXTEND and
6102 not ZERO_EXTEND.
6104 If we cannot find a biv, we return 0. */
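/* Editorial illustration (not from the original sources): for a biv
   candidate `i', the accepted patterns look like

       (set (reg i) (plus (reg i) (const_int 4)))
           => *MULT_VAL = const1_rtx, *INC_VAL = (const_int 4)
       (set (reg i) (reg n))
           => *MULT_VAL = const0_rtx, *INC_VAL = (reg n)

   where (reg n) is assumed loop-invariant.  */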
6106 static int
6107 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6108 const struct loop *loop;
6109 rtx x;
6110 enum machine_mode mode;
6111 rtx dest_reg;
6112 rtx p;
6113 rtx *inc_val;
6114 rtx *mult_val;
6115 rtx **location;
6117 enum rtx_code code;
6118 rtx *argp, arg;
6119 rtx insn, set = 0;
6121 code = GET_CODE (x);
6122 *location = NULL;
6123 switch (code)
6125 case PLUS:
6126 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6127 || (GET_CODE (XEXP (x, 0)) == SUBREG
6128 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6129 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6131 argp = &XEXP (x, 1);
6133 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6134 || (GET_CODE (XEXP (x, 1)) == SUBREG
6135 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6136 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6138 argp = &XEXP (x, 0);
6140 else
6141 return 0;
6143 arg = *argp;
6144 if (loop_invariant_p (loop, arg) != 1)
6145 return 0;
6147 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6148 *mult_val = const1_rtx;
6149 *location = argp;
6150 return 1;
6152 case SUBREG:
6153 /* If what's inside the SUBREG is a BIV, then the SUBREG is one too.
6154 This will handle addition of promoted variables.
6155 ??? The comment at the start of this function is wrong: promoted
6156 variable increments don't look like it says they do. */
6157 return basic_induction_var (loop, SUBREG_REG (x),
6158 GET_MODE (SUBREG_REG (x)),
6159 dest_reg, p, inc_val, mult_val, location);
6161 case REG:
6162 /* If this register is assigned in a previous insn, look at its
6163 source, but don't go outside the loop or past a label. */
6165 /* If this sets a register to itself, we would repeat any previous
6166 biv increment if we applied this strategy blindly. */
6167 if (rtx_equal_p (dest_reg, x))
6168 return 0;
6170 insn = p;
6171 while (1)
6173 rtx dest;
6175 do
6176 insn = PREV_INSN (insn);
6178 while (insn && GET_CODE (insn) == NOTE
6179 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6181 if (!insn)
6182 break;
6183 set = single_set (insn);
6184 if (set == 0)
6185 break;
6186 dest = SET_DEST (set);
6187 if (dest == x
6188 || (GET_CODE (dest) == SUBREG
6189 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6190 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6191 && SUBREG_REG (dest) == x))
6192 return basic_induction_var (loop, SET_SRC (set),
6193 (GET_MODE (SET_SRC (set)) == VOIDmode
6194 ? GET_MODE (x)
6195 : GET_MODE (SET_SRC (set))),
6196 dest_reg, insn,
6197 inc_val, mult_val, location);
6199 while (GET_CODE (dest) == SIGN_EXTRACT
6200 || GET_CODE (dest) == ZERO_EXTRACT
6201 || GET_CODE (dest) == SUBREG
6202 || GET_CODE (dest) == STRICT_LOW_PART)
6203 dest = XEXP (dest, 0);
6204 if (dest == x)
6205 break;
6207 /* Fall through. */
6209 /* Can accept constant setting of biv only when inside inner most loop.
6210 Otherwise, a biv of an inner loop may be incorrectly recognized
6211 as a biv of the outer loop,
6212 causing code to be moved INTO the inner loop. */
6213 case MEM:
6214 if (loop_invariant_p (loop, x) != 1)
6215 return 0;
6216 case CONST_INT:
6217 case SYMBOL_REF:
6218 case CONST:
6219 /* convert_modes aborts if we try to convert to or from CCmode, so just
6220 exclude that case. It is very unlikely that a condition code value
6221 would be a useful iterator anyway. convert_modes aborts if we try to
6222 convert a float mode to non-float or vice versa too. */
6223 if (loop->level == 1
6224 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6225 && GET_MODE_CLASS (mode) != MODE_CC)
6227 /* Possible bug here? Perhaps we don't know the mode of X. */
6228 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6229 *mult_val = const0_rtx;
6230 return 1;
6232 else
6233 return 0;
6235 case SIGN_EXTEND:
6236 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6237 dest_reg, p, inc_val, mult_val, location);
6239 case ASHIFTRT:
6240 /* Similar, since this can be a sign extension. */
6241 for (insn = PREV_INSN (p);
6242 (insn && GET_CODE (insn) == NOTE
6243 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6244 insn = PREV_INSN (insn))
6245 ;
6247 if (insn)
6248 set = single_set (insn);
6250 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6251 && set && SET_DEST (set) == XEXP (x, 0)
6252 && GET_CODE (XEXP (x, 1)) == CONST_INT
6253 && INTVAL (XEXP (x, 1)) >= 0
6254 && GET_CODE (SET_SRC (set)) == ASHIFT
6255 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6256 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6257 GET_MODE (XEXP (x, 0)),
6258 dest_reg, insn, inc_val, mult_val,
6259 location);
6260 return 0;
6262 default:
6263 return 0;
6267 /* A general induction variable (giv) is any quantity that is a linear
6268 function of a basic induction variable,
6269 i.e. giv = biv * mult_val + add_val.
6270 The coefficients can be any loop invariant quantity.
6271 A giv need not be computed directly from the biv;
6272 it can be computed by way of other givs. */
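/* Editorial illustration (not from the original sources): in
   `for (i = 0; i < n; i++) a[i] = 0;' the counter `i' is a biv, and the
   address of a[i], i.e. `a + 4*i' assuming 4-byte elements, is a giv
   with mult_val == 4 and add_val == `a'.  */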
6274 /* Determine whether X computes a giv.
6275 If it does, return a nonzero value
6276 which is the benefit from eliminating the computation of X;
6277 set *SRC_REG to the register of the biv that it is computed from;
6278 set *ADD_VAL and *MULT_VAL to the coefficients,
6279 such that the value of X is biv * mult + add; */
6281 static int
6282 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6283 is_addr, pbenefit, addr_mode)
6284 const struct loop *loop;
6285 rtx x;
6286 rtx *src_reg;
6287 rtx *add_val;
6288 rtx *mult_val;
6289 rtx *ext_val;
6290 int is_addr;
6291 int *pbenefit;
6292 enum machine_mode addr_mode;
6294 struct loop_ivs *ivs = LOOP_IVS (loop);
6295 rtx orig_x = x;
6297 /* If this is an invariant, forget it, it isn't a giv. */
6298 if (loop_invariant_p (loop, x) == 1)
6299 return 0;
6301 *pbenefit = 0;
6302 *ext_val = NULL_RTX;
6303 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6304 if (x == 0)
6305 return 0;
6307 switch (GET_CODE (x))
6309 case USE:
6310 case CONST_INT:
6311 /* Since this is now an invariant and wasn't before, it must be a giv
6312 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6313 with. */
6314 *src_reg = ivs->list->biv->dest_reg;
6315 *mult_val = const0_rtx;
6316 *add_val = x;
6317 break;
6319 case REG:
6320 /* This is equivalent to a BIV. */
6321 *src_reg = x;
6322 *mult_val = const1_rtx;
6323 *add_val = const0_rtx;
6324 break;
6326 case PLUS:
6327 /* Either (plus (biv) (invar)) or
6328 (plus (mult (biv) (invar_1)) (invar_2)). */
6329 if (GET_CODE (XEXP (x, 0)) == MULT)
6331 *src_reg = XEXP (XEXP (x, 0), 0);
6332 *mult_val = XEXP (XEXP (x, 0), 1);
6334 else
6336 *src_reg = XEXP (x, 0);
6337 *mult_val = const1_rtx;
6339 *add_val = XEXP (x, 1);
6340 break;
6342 case MULT:
6343 /* ADD_VAL is zero. */
6344 *src_reg = XEXP (x, 0);
6345 *mult_val = XEXP (x, 1);
6346 *add_val = const0_rtx;
6347 break;
6349 default:
6350 abort ();
6353 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6354 one unless they are CONST_INT). */
6355 if (GET_CODE (*add_val) == USE)
6356 *add_val = XEXP (*add_val, 0);
6357 if (GET_CODE (*mult_val) == USE)
6358 *mult_val = XEXP (*mult_val, 0);
6360 if (is_addr)
6361 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6362 else
6363 *pbenefit += rtx_cost (orig_x, SET);
6365 /* Always return true if this is a giv so it will be detected as such,
6366 even if the benefit is zero or negative. This allows elimination
6367 of bivs that might otherwise not be eliminated. */
6368 return 1;
6371 /* Given an expression, X, try to form it as a linear function of a biv.
6372 We will canonicalize it to be of the form
6373 (plus (mult (BIV) (invar_1))
6374 (invar_2))
6375 with possible degeneracies.
6377 The invariant expressions must each be of a form that can be used as a
6378 machine operand. We surround them with a USE rtx (a hack, but localized
6379 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6380 routine; it is the caller's responsibility to strip them.
6382 If no such canonicalization is possible (i.e., two biv's are used or an
6383 expression that is neither invariant nor a biv or giv), this routine
6384 returns 0.
6386 For a non-zero return, the result will have a code of CONST_INT, USE,
6387 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6389 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
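/* Editorial illustration (not from the original sources): with a biv
   `b' and loop-invariant registers `p' and `q', the expression
   (b + p) * 8 + q would be canonicalized to

       (plus (mult (reg b) (const_int 8))
             (use (plus (mult (reg p) (const_int 8)) (reg q))))

   with the USE wrapping the invariant part as described above.  */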
6391 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6392 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6394 static rtx
6395 simplify_giv_expr (loop, x, ext_val, benefit)
6396 const struct loop *loop;
6397 rtx x;
6398 rtx *ext_val;
6399 int *benefit;
6401 struct loop_ivs *ivs = LOOP_IVS (loop);
6402 struct loop_regs *regs = LOOP_REGS (loop);
6403 enum machine_mode mode = GET_MODE (x);
6404 rtx arg0, arg1;
6405 rtx tem;
6407 /* If this is not an integer mode, or if we cannot do arithmetic in this
6408 mode, this can't be a giv. */
6409 if (mode != VOIDmode
6410 && (GET_MODE_CLASS (mode) != MODE_INT
6411 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6412 return NULL_RTX;
6414 switch (GET_CODE (x))
6416 case PLUS:
6417 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6418 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6419 if (arg0 == 0 || arg1 == 0)
6420 return NULL_RTX;
6422 /* Put constant last, CONST_INT last if both constant. */
6423 if ((GET_CODE (arg0) == USE
6424 || GET_CODE (arg0) == CONST_INT)
6425 && ! ((GET_CODE (arg0) == USE
6426 && GET_CODE (arg1) == USE)
6427 || GET_CODE (arg1) == CONST_INT))
6428 tem = arg0, arg0 = arg1, arg1 = tem;
6430 /* Handle addition of zero, then addition of an invariant. */
6431 if (arg1 == const0_rtx)
6432 return arg0;
6433 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6434 switch (GET_CODE (arg0))
6436 case CONST_INT:
6437 case USE:
6438 /* Adding two invariants must result in an invariant, so enclose
6439 addition operation inside a USE and return it. */
6440 if (GET_CODE (arg0) == USE)
6441 arg0 = XEXP (arg0, 0);
6442 if (GET_CODE (arg1) == USE)
6443 arg1 = XEXP (arg1, 0);
6445 if (GET_CODE (arg0) == CONST_INT)
6446 tem = arg0, arg0 = arg1, arg1 = tem;
6447 if (GET_CODE (arg1) == CONST_INT)
6448 tem = sge_plus_constant (arg0, arg1);
6449 else
6450 tem = sge_plus (mode, arg0, arg1);
6452 if (GET_CODE (tem) != CONST_INT)
6453 tem = gen_rtx_USE (mode, tem);
6454 return tem;
6456 case REG:
6457 case MULT:
6458 /* biv + invar or mult + invar. Return sum. */
6459 return gen_rtx_PLUS (mode, arg0, arg1);
6461 case PLUS:
6462 /* (a + invar_1) + invar_2. Associate. */
6463 return
6464 simplify_giv_expr (loop,
6465 gen_rtx_PLUS (mode,
6466 XEXP (arg0, 0),
6467 gen_rtx_PLUS (mode,
6468 XEXP (arg0, 1),
6469 arg1)),
6470 ext_val, benefit);
6472 default:
6473 abort ();
6476 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6477 MULT to reduce cases. */
6478 if (GET_CODE (arg0) == REG)
6479 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6480 if (GET_CODE (arg1) == REG)
6481 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6483 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6484 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6485 Recurse to associate the second PLUS. */
6486 if (GET_CODE (arg1) == MULT)
6487 tem = arg0, arg0 = arg1, arg1 = tem;
6489 if (GET_CODE (arg1) == PLUS)
6490 return
6491 simplify_giv_expr (loop,
6492 gen_rtx_PLUS (mode,
6493 gen_rtx_PLUS (mode, arg0,
6494 XEXP (arg1, 0)),
6495 XEXP (arg1, 1)),
6496 ext_val, benefit);
6498 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6499 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6500 return NULL_RTX;
6502 if (!rtx_equal_p (arg0, arg1))
6503 return NULL_RTX;
6505 return simplify_giv_expr (loop,
6506 gen_rtx_MULT (mode,
6507 XEXP (arg0, 0),
6508 gen_rtx_PLUS (mode,
6509 XEXP (arg0, 1),
6510 XEXP (arg1, 1))),
6511 ext_val, benefit);
6513 case MINUS:
6514 /* Handle "a - b" as "a + b * (-1)". */
6515 return simplify_giv_expr (loop,
6516 gen_rtx_PLUS (mode,
6517 XEXP (x, 0),
6518 gen_rtx_MULT (mode,
6519 XEXP (x, 1),
6520 constm1_rtx)),
6521 ext_val, benefit);
6523 case MULT:
6524 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6525 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6526 if (arg0 == 0 || arg1 == 0)
6527 return NULL_RTX;
6529 /* Put constant last, CONST_INT last if both constant. */
6530 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6531 && GET_CODE (arg1) != CONST_INT)
6532 tem = arg0, arg0 = arg1, arg1 = tem;
6534 /* If second argument is not now constant, not giv. */
6535 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6536 return NULL_RTX;
6538 /* Handle multiply by 0 or 1. */
6539 if (arg1 == const0_rtx)
6540 return const0_rtx;
6542 else if (arg1 == const1_rtx)
6543 return arg0;
6545 switch (GET_CODE (arg0))
6547 case REG:
6548 /* biv * invar. Done. */
6549 return gen_rtx_MULT (mode, arg0, arg1);
6551 case CONST_INT:
6552 /* Product of two constants. */
6553 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6555 case USE:
6556 /* invar * invar is a giv, but attempt to simplify it somehow. */
6557 if (GET_CODE (arg1) != CONST_INT)
6558 return NULL_RTX;
6560 arg0 = XEXP (arg0, 0);
6561 if (GET_CODE (arg0) == MULT)
6563 /* (invar_0 * invar_1) * invar_2. Associate. */
6564 return simplify_giv_expr (loop,
6565 gen_rtx_MULT (mode,
6566 XEXP (arg0, 0),
6567 gen_rtx_MULT (mode,
6568 XEXP (arg0,
6569 1),
6570 arg1)),
6571 ext_val, benefit);
6573 /* Propagate the MULT expressions to the innermost nodes. */
6574 else if (GET_CODE (arg0) == PLUS)
6576 /* (invar_0 + invar_1) * invar_2. Distribute. */
6577 return simplify_giv_expr (loop,
6578 gen_rtx_PLUS (mode,
6579 gen_rtx_MULT (mode,
6580 XEXP (arg0,
6581 0),
6582 arg1),
6583 gen_rtx_MULT (mode,
6584 XEXP (arg0,
6585 1),
6586 arg1)),
6587 ext_val, benefit);
6589 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6591 case MULT:
6592 /* (a * invar_1) * invar_2. Associate. */
6593 return simplify_giv_expr (loop,
6594 gen_rtx_MULT (mode,
6595 XEXP (arg0, 0),
6596 gen_rtx_MULT (mode,
6597 XEXP (arg0, 1),
6598 arg1)),
6599 ext_val, benefit);
6601 case PLUS:
6602 /* (a + invar_1) * invar_2. Distribute. */
6603 return simplify_giv_expr (loop,
6604 gen_rtx_PLUS (mode,
6605 gen_rtx_MULT (mode,
6606 XEXP (arg0, 0),
6607 arg1),
6608 gen_rtx_MULT (mode,
6609 XEXP (arg0, 1),
6610 arg1)),
6611 ext_val, benefit);
6613 default:
6614 abort ();
6617 case ASHIFT:
6618 /* Shift by constant is multiply by power of two. */
6619 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6620 return 0;
6622 return
6623 simplify_giv_expr (loop,
6624 gen_rtx_MULT (mode,
6625 XEXP (x, 0),
6626 GEN_INT ((HOST_WIDE_INT) 1
6627 << INTVAL (XEXP (x, 1)))),
6628 ext_val, benefit);
6630 case NEG:
6631 /* "-a" is "a * (-1)" */
6632 return simplify_giv_expr (loop,
6633 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6634 ext_val, benefit);
6636 case NOT:
6637 /* "~a" is "-a - 1". Silly, but easy. */
6638 return simplify_giv_expr (loop,
6639 gen_rtx_MINUS (mode,
6640 gen_rtx_NEG (mode, XEXP (x, 0)),
6641 const1_rtx),
6642 ext_val, benefit);
6644 case USE:
6645 /* Already in proper form for invariant. */
6646 return x;
6648 case SIGN_EXTEND:
6649 case ZERO_EXTEND:
6650 case TRUNCATE:
6651 /* Conditionally recognize extensions of simple IVs. After we've
6652 computed loop traversal counts and verified the range of the
6653 source IV, we'll reevaluate this as a GIV. */
6654 if (*ext_val == NULL_RTX)
6656 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6657 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6659 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6660 return arg0;
6663 goto do_default;
6665 case REG:
6666 /* If this is a new register, we can't deal with it. */
6667 if (REGNO (x) >= max_reg_before_loop)
6668 return 0;
6670 /* Check for biv or giv. */
6671 switch (REG_IV_TYPE (ivs, REGNO (x)))
6673 case BASIC_INDUCT:
6674 return x;
6675 case GENERAL_INDUCT:
6677 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6679 /* Form expression from giv and add benefit. Ensure this giv
6680 can derive another and subtract any needed adjustment if so. */
6682 /* Increasing the benefit here is risky. The only case in which it
6683 is arguably correct is if this is the only use of V. In other
6684 cases, this will artificially inflate the benefit of the current
6685 giv, and lead to suboptimal code. Thus, it is disabled, since
6686 potentially not reducing an only marginally beneficial giv is
6687 less harmful than reducing many givs that are not really
6688 beneficial. */
6690 rtx single_use = regs->array[REGNO (x)].single_usage;
6691 if (single_use && single_use != const0_rtx)
6692 *benefit += v->benefit;
6695 if (v->cant_derive)
6696 return 0;
6698 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6699 v->src_reg, v->mult_val),
6700 v->add_val);
6702 if (v->derive_adjustment)
6703 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6704 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6705 if (*ext_val)
6707 if (!v->ext_dependent)
6708 return arg0;
6710 else
6712 *ext_val = v->ext_dependent;
6713 return arg0;
6715 return 0;
6718 default:
6719 do_default:
6720 /* If it isn't an induction variable, and it is invariant, we
6721 may be able to simplify things further by looking through
6722 the bits we just moved outside the loop. */
6723 if (loop_invariant_p (loop, x) == 1)
6725 struct movable *m;
6726 struct loop_movables *movables = LOOP_MOVABLES (loop);
6728 for (m = movables->head; m; m = m->next)
6729 if (rtx_equal_p (x, m->set_dest))
6731 /* Ok, we found a match. Substitute and simplify. */
6733 /* If we match another movable, we must use that, as
6734 this one is going away. */
6735 if (m->match)
6736 return simplify_giv_expr (loop, m->match->set_dest,
6737 ext_val, benefit);
6739 /* If consec is non-zero, this is a member of a group of
6740 instructions that were moved together. We handle this
6741 case only to the point of seeking to the last insn and
6742 looking for a REG_EQUAL. Fail if we don't find one. */
6743 if (m->consec != 0)
6745 int i = m->consec;
6746 tem = m->insn;
6748 do
6749 tem = NEXT_INSN (tem);
6751 while (--i > 0);
6753 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6754 if (tem)
6755 tem = XEXP (tem, 0);
6757 else
6759 tem = single_set (m->insn);
6760 if (tem)
6761 tem = SET_SRC (tem);
6764 if (tem)
6766 /* What we are most interested in is pointer
6767 arithmetic on invariants -- only take
6768 patterns we may be able to do something with. */
6769 if (GET_CODE (tem) == PLUS
6770 || GET_CODE (tem) == MULT
6771 || GET_CODE (tem) == ASHIFT
6772 || GET_CODE (tem) == CONST_INT
6773 || GET_CODE (tem) == SYMBOL_REF)
6775 tem = simplify_giv_expr (loop, tem, ext_val,
6776 benefit);
6777 if (tem)
6778 return tem;
6780 else if (GET_CODE (tem) == CONST
6781 && GET_CODE (XEXP (tem, 0)) == PLUS
6782 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6783 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6785 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6786 ext_val, benefit);
6787 if (tem)
6788 return tem;
6791 break;
6794 break;
6797 /* Fall through to general case. */
6798 default:
6799 /* If invariant, return as USE (unless CONST_INT).
6800 Otherwise, not giv. */
6801 if (GET_CODE (x) == USE)
6802 x = XEXP (x, 0);
6804 if (loop_invariant_p (loop, x) == 1)
6806 if (GET_CODE (x) == CONST_INT)
6807 return x;
6808 if (GET_CODE (x) == CONST
6809 && GET_CODE (XEXP (x, 0)) == PLUS
6810 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6811 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6812 x = XEXP (x, 0);
6813 return gen_rtx_USE (mode, x);
6815 else
6816 return 0;
6820 /* This routine folds invariants such that there is only ever one
6821 CONST_INT in the summation. It is only used by simplify_giv_expr. */
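/* Editorial illustration (not from the original sources): folding
   (plus (reg r) (const_int 3)) with (const_int 4) yields
   (plus (reg r) (const_int 7)) rather than a sum containing two
   separate CONST_INTs.  */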
6823 static rtx
6824 sge_plus_constant (x, c)
6825 rtx x, c;
6827 if (GET_CODE (x) == CONST_INT)
6828 return GEN_INT (INTVAL (x) + INTVAL (c));
6829 else if (GET_CODE (x) != PLUS)
6830 return gen_rtx_PLUS (GET_MODE (x), x, c);
6831 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6833 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6834 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6836 else if (GET_CODE (XEXP (x, 0)) == PLUS
6837 || GET_CODE (XEXP (x, 1)) != PLUS)
6839 return gen_rtx_PLUS (GET_MODE (x),
6840 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6842 else
6844 return gen_rtx_PLUS (GET_MODE (x),
6845 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6849 static rtx
6850 sge_plus (mode, x, y)
6851 enum machine_mode mode;
6852 rtx x, y;
6854 while (GET_CODE (y) == PLUS)
6856 rtx a = XEXP (y, 0);
6857 if (GET_CODE (a) == CONST_INT)
6858 x = sge_plus_constant (x, a);
6859 else
6860 x = gen_rtx_PLUS (mode, x, a);
6861 y = XEXP (y, 1);
6863 if (GET_CODE (y) == CONST_INT)
6864 x = sge_plus_constant (x, y);
6865 else
6866 x = gen_rtx_PLUS (mode, x, y);
6867 return x;
6870 /* Help detect a giv that is calculated by several consecutive insns;
6871 for example,
6872 giv = biv * M
6873 giv = giv + A
6874 The caller has already identified the first insn P as having a giv as dest;
6875 we check that all other insns that set the same register follow
6876 immediately after P, that they alter nothing else,
6877 and that the result of the last is still a giv.
6879 The value is 0 if the reg set in P is not really a giv.
6880 Otherwise, the value is the amount gained by eliminating
6881 all the consecutive insns that compute the value.
6883 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6884 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6886 The coefficients of the ultimate giv value are stored in
6887 *MULT_VAL and *ADD_VAL. */
6889 static int
6890 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6891 add_val, mult_val, ext_val, last_consec_insn)
6892 const struct loop *loop;
6893 int first_benefit;
6894 rtx p;
6895 rtx src_reg;
6896 rtx dest_reg;
6897 rtx *add_val;
6898 rtx *mult_val;
6899 rtx *ext_val;
6900 rtx *last_consec_insn;
6902 struct loop_ivs *ivs = LOOP_IVS (loop);
6903 struct loop_regs *regs = LOOP_REGS (loop);
6904 int count;
6905 enum rtx_code code;
6906 int benefit;
6907 rtx temp;
6908 rtx set;
6910 /* Indicate that this is a giv so that we can update the value produced in
6911 each insn of the multi-insn sequence.
6913 This induction structure will be used only by the call to
6914 general_induction_var below, so we can allocate it on our stack.
6915 If this is a giv, our caller will replace the induct var entry with
6916 a new induction structure. */
6917 struct induction *v;
6919 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6920 return 0;
6922 v = (struct induction *) alloca (sizeof (struct induction));
6923 v->src_reg = src_reg;
6924 v->mult_val = *mult_val;
6925 v->add_val = *add_val;
6926 v->benefit = first_benefit;
6927 v->cant_derive = 0;
6928 v->derive_adjustment = 0;
6929 v->ext_dependent = NULL_RTX;
6931 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6932 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6934 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6936 while (count > 0)
6938 p = NEXT_INSN (p);
6939 code = GET_CODE (p);
6941 /* If libcall, skip to end of call sequence. */
6942 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6943 p = XEXP (temp, 0);
6945 if (code == INSN
6946 && (set = single_set (p))
6947 && GET_CODE (SET_DEST (set)) == REG
6948 && SET_DEST (set) == dest_reg
6949 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6950 add_val, mult_val, ext_val, 0,
6951 &benefit, VOIDmode)
6952 /* Giv created by equivalent expression. */
6953 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6954 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6955 add_val, mult_val, ext_val, 0,
6956 &benefit, VOIDmode)))
6957 && src_reg == v->src_reg)
6959 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6960 benefit += libcall_benefit (p);
6962 count--;
6963 v->mult_val = *mult_val;
6964 v->add_val = *add_val;
6965 v->benefit += benefit;
6967 else if (code != NOTE)
6969 /* Allow insns that set something other than this giv to a
6970 constant. Such insns are needed on machines which cannot
6971 include long constants and should not disqualify a giv. */
6972 if (code == INSN
6973 && (set = single_set (p))
6974 && SET_DEST (set) != dest_reg
6975 && CONSTANT_P (SET_SRC (set)))
6976 continue;
6978 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6979 return 0;
6983 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6984 *last_consec_insn = p;
6985 return v->benefit;
6988 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6989 represented by G1. If no such expression can be found, or it is clear that
6990 it cannot possibly be a valid address, 0 is returned.
6992 To perform the computation, we note that
6993 G1 = x * v + a and
6994 G2 = y * v + b
6995 where `v' is the biv.
6999 So G2 = (y/x) * G1 + (b - a*y/x).
6999 Note that MULT = y/x.
7001 Update: A and B are now allowed to be additive expressions such that
7002 B contains all variables in A. That is, computing B-A will not require
7003 subtracting variables. */
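/* Editorial worked example (not from the original sources): with
   G1 = 2*v + 1 and G2 = 4*v + 6, MULT = y/x = 4/2 = 2, and the addend
   is b - a*MULT = 6 - 1*2 = 4, giving G2 = 2*G1 + 4.  */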
7005 static rtx
7006 express_from_1 (a, b, mult)
7007 rtx a, b, mult;
7009 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7011 if (mult == const0_rtx)
7012 return b;
7014 /* If MULT is not 1, we cannot handle A with non-constants, since we
7015 would then be required to subtract multiples of the registers in A.
7016 This is theoretically possible, and may even apply to some Fortran
7017 constructs, but it is a lot of work and we do not attempt it here. */
7019 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7020 return NULL_RTX;
7022 /* In general these structures are sorted top to bottom (down the PLUS
7023 chain), but not left to right across the PLUS. If B is a higher
7024 order giv than A, we can strip one level and recurse. If A is higher
7025 order, we'll eventually bail out, but won't know that until the end.
7026 If they are the same, we'll strip one level around this loop. */
7028 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7030 rtx ra, rb, oa, ob, tmp;
7032 ra = XEXP (a, 0), oa = XEXP (a, 1);
7033 if (GET_CODE (ra) == PLUS)
7034 tmp = ra, ra = oa, oa = tmp;
7036 rb = XEXP (b, 0), ob = XEXP (b, 1);
7037 if (GET_CODE (rb) == PLUS)
7038 tmp = rb, rb = ob, ob = tmp;
7040 if (rtx_equal_p (ra, rb))
7041 /* We matched: remove one reg completely. */
7042 a = oa, b = ob;
7043 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7044 /* An alternate match. */
7045 a = oa, b = rb;
7046 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7047 /* An alternate match. */
7048 a = ra, b = ob;
7049 else
7051 /* Indicates an extra register in B. Strip one level from B and
7052 recurse, hoping B was the higher order expression. */
7053 ob = express_from_1 (a, ob, mult);
7054 if (ob == NULL_RTX)
7055 return NULL_RTX;
7056 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7060 /* Here we are at the last level of A, go through the cases hoping to
7061 get rid of everything but a constant. */
7063 if (GET_CODE (a) == PLUS)
7065 rtx ra, oa;
7067 ra = XEXP (a, 0), oa = XEXP (a, 1);
7068 if (rtx_equal_p (oa, b))
7069 oa = ra;
7070 else if (!rtx_equal_p (ra, b))
7071 return NULL_RTX;
7073 if (GET_CODE (oa) != CONST_INT)
7074 return NULL_RTX;
7076 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7078 else if (GET_CODE (a) == CONST_INT)
7080 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7082 else if (CONSTANT_P (a))
7084 enum machine_mode mode_a = GET_MODE (a);
7085 enum machine_mode mode_b = GET_MODE (b);
7086 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7087 return simplify_gen_binary (MINUS, mode, b, a);
7089 else if (GET_CODE (b) == PLUS)
7091 if (rtx_equal_p (a, XEXP (b, 0)))
7092 return XEXP (b, 1);
7093 else if (rtx_equal_p (a, XEXP (b, 1)))
7094 return XEXP (b, 0);
7095 else
7096 return NULL_RTX;
7098 else if (rtx_equal_p (a, b))
7099 return const0_rtx;
7101 return NULL_RTX;
7104 rtx
7105 express_from (g1, g2)
7106 struct induction *g1, *g2;
7108 rtx mult, add;
7110 /* The value that G1 will be multiplied by must be a constant integer. Also,
7111 the only chance we have of getting a valid address is if b*c/a (see above
7112 for notation) is also an integer. */
7113 if (GET_CODE (g1->mult_val) == CONST_INT
7114 && GET_CODE (g2->mult_val) == CONST_INT)
7116 if (g1->mult_val == const0_rtx
7117 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7118 return NULL_RTX;
7119 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7121 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7122 mult = const1_rtx;
7123 else
7125 /* ??? Find out if the one is a multiple of the other? */
7126 return NULL_RTX;
7129 add = express_from_1 (g1->add_val, g2->add_val, mult);
7130 if (add == NULL_RTX)
7132 /* Failed. If we've got a multiplication factor between G1 and G2,
7133 scale G1's addend and try again. */
7134 if (INTVAL (mult) > 1)
7136 rtx g1_add_val = g1->add_val;
7137 if (GET_CODE (g1_add_val) == MULT
7138 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7140 HOST_WIDE_INT m;
7141 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7142 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7143 XEXP (g1_add_val, 0), GEN_INT (m));
7145 else
7147 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7148 mult);
7151 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7154 if (add == NULL_RTX)
7155 return NULL_RTX;
7157 /* Form simplified final result. */
7158 if (mult == const0_rtx)
7159 return add;
7160 else if (mult == const1_rtx)
7161 mult = g1->dest_reg;
7162 else
7163 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7165 if (add == const0_rtx)
7166 return mult;
7167 else
7169 if (GET_CODE (add) == PLUS
7170 && CONSTANT_P (XEXP (add, 1)))
7172 rtx tem = XEXP (add, 1);
7173 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7174 add = tem;
7177 return gen_rtx_PLUS (g2->mode, mult, add);
7181 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7182 represented by G1. This indicates that G2 should be combined with G1 and
7183 that G2 can use (either directly or via an address expression) a register
7184 used to represent G1. */
7186 static rtx
7187 combine_givs_p (g1, g2)
7188 struct induction *g1, *g2;
7190 rtx comb, ret;
7192 /* With the introduction of ext dependent givs, we must be careful about modes.
7193 G2 must not use a wider mode than G1. */
7194 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7195 return NULL_RTX;
7197 ret = comb = express_from (g1, g2);
7198 if (comb == NULL_RTX)
7199 return NULL_RTX;
7200 if (g1->mode != g2->mode)
7201 ret = gen_lowpart (g2->mode, comb);
7203 /* If these givs are identical, they can be combined. We use the results
7204 of express_from because the addends are not in a canonical form, so
7205 rtx_equal_p is a weaker test. */
7206 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7207 combination to be the other way round. */
7208 if (comb == g1->dest_reg
7209 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7211 return ret;
7214 /* If G2 can be expressed as a function of G1 and that function is valid
7215 as an address and no more expensive than using a register for G2,
7216 the expression of G2 in terms of G1 can be used. */
7217 if (ret != NULL_RTX
7218 && g2->giv_type == DEST_ADDR
7219 && memory_address_p (GET_MODE (g2->mem), ret)
7220 /* ??? Loses, especially with -fforce-addr, where *g2->location
7221 will always be a register, and so anything more complicated
7222 gets discarded. */
7223 #if 0
7224 #ifdef ADDRESS_COST
7225 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7226 #else
7227 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7228 #endif
7229 #endif
7232 return ret;
7235 return NULL_RTX;
7238 /* Check each extension dependent giv in this class to see if its
7239 root biv is safe from wrapping in the interior mode, which would
7240 make the giv illegal. */
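/* Editorial illustration (not from the original sources): a QImode biv
   stepping by 1 from 0 through 200 stays within the unsigned range of
   QImode, so a giv that zero-extends it is safe; if it ran for 300
   iterations it would wrap past 255, and such a giv must be
   invalidated.  */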
7242 static void
7243 check_ext_dependent_givs (bl, loop_info)
7244 struct iv_class *bl;
7245 struct loop_info *loop_info;
7247 int ze_ok = 0, se_ok = 0, info_ok = 0;
7248 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7249 HOST_WIDE_INT start_val;
7250 unsigned HOST_WIDE_INT u_end_val = 0;
7251 unsigned HOST_WIDE_INT u_start_val = 0;
7252 rtx incr = pc_rtx;
7253 struct induction *v;
7255 /* Make sure the iteration data is available. We must have
7256 constants in order to be certain of no overflow. */
7257 /* ??? An unknown iteration count with an increment of +-1
7258 combined with friendly exit tests against an invariant
7259 value is also amenable to optimization. Not implemented. */
7260 if (loop_info->n_iterations > 0
7261 && bl->initial_value
7262 && GET_CODE (bl->initial_value) == CONST_INT
7263 && (incr = biv_total_increment (bl))
7264 && GET_CODE (incr) == CONST_INT
7265 /* Make sure the host can represent the arithmetic. */
7266 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7268 unsigned HOST_WIDE_INT abs_incr, total_incr;
7269 HOST_WIDE_INT s_end_val;
7270 int neg_incr;
7272 info_ok = 1;
7273 start_val = INTVAL (bl->initial_value);
7274 u_start_val = start_val;
7276 neg_incr = 0, abs_incr = INTVAL (incr);
7277 if (INTVAL (incr) < 0)
7278 neg_incr = 1, abs_incr = -abs_incr;
7279 total_incr = abs_incr * loop_info->n_iterations;
7281 /* Check for host arithmetic overflow. */
7282 if (total_incr / loop_info->n_iterations == abs_incr)
7284 unsigned HOST_WIDE_INT u_max;
7285 HOST_WIDE_INT s_max;
7287 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7288 s_end_val = u_end_val;
7289 u_max = GET_MODE_MASK (biv_mode);
7290 s_max = u_max >> 1;
7292 /* Check zero extension of biv ok. */
7293 if (start_val >= 0
7294 /* Check for host arithmetic overflow. */
7295 && (neg_incr
7296 ? u_end_val < u_start_val
7297 : u_end_val > u_start_val)
7298 /* Check for target arithmetic overflow. */
7299 && (neg_incr
7300 ? 1 /* taken care of with host overflow */
7301 : u_end_val <= u_max))
7303 ze_ok = 1;
7306 /* Check sign extension of biv ok. */
7307 /* ??? While it is true that overflow with signed and pointer
7308 arithmetic is undefined, I fear too many programmers don't
7309 keep this fact in mind -- myself included on occasion.
7310 So leave the signed overflow optimizations alone. */
7311 if (start_val >= -s_max - 1
7312 /* Check for host arithmetic overflow. */
7313 && (neg_incr
7314 ? s_end_val < start_val
7315 : s_end_val > start_val)
7316 /* Check for target arithmetic overflow. */
7317 && (neg_incr
7318 ? s_end_val >= -s_max - 1
7319 : s_end_val <= s_max))
7321 se_ok = 1;
7326 /* Invalidate givs that fail the tests. */
7327 for (v = bl->giv; v; v = v->next_iv)
7328 if (v->ext_dependent)
7330 enum rtx_code code = GET_CODE (v->ext_dependent);
7331 int ok = 0;
7333 switch (code)
7335 case SIGN_EXTEND:
7336 ok = se_ok;
7337 break;
7338 case ZERO_EXTEND:
7339 ok = ze_ok;
7340 break;
7342 case TRUNCATE:
7343 /* We don't know whether this value is being used as either
7344 signed or unsigned, so to safely truncate we must satisfy
7345 both. The initial check here verifies the BIV itself;
7346 once that is successful we may check its range wrt the
7347 derived GIV. */
7348 if (se_ok && ze_ok)
7350 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7351 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7353 /* We know from the above that both endpoints are nonnegative,
7354 and that there is no wrapping. Verify that both endpoints
7355 are within the (signed) range of the outer mode. */
7356 if (u_start_val <= max && u_end_val <= max)
7357 ok = 1;
7359 break;
7361 default:
7362 abort ();
7365 if (ok)
7367 if (loop_dump_stream)
7369 fprintf (loop_dump_stream,
7370 "Verified ext dependent giv at %d of reg %d\n",
7371 INSN_UID (v->insn), bl->regno);
7374 else
7376 if (loop_dump_stream)
7378 const char *why;
7380 if (info_ok)
7381 why = "biv iteration values overflowed";
7382 else
7384 if (incr == pc_rtx)
7385 incr = biv_total_increment (bl);
7386 if (incr == const1_rtx)
7387 why = "biv iteration info incomplete; incr by 1";
7388 else
7389 why = "biv iteration info incomplete";
7392 fprintf (loop_dump_stream,
7393 "Failed ext dependent giv at %d, %s\n",
7394 INSN_UID (v->insn), why);
7396 v->ignore = 1;
7397 bl->all_reduced = 0;
7402 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7404 rtx
7405 extend_value_for_giv (v, value)
7406 struct induction *v;
7407 rtx value;
7409 rtx ext_dep = v->ext_dependent;
7411 if (! ext_dep)
7412 return value;
7414 /* Recall that check_ext_dependent_givs verified that the known bounds
7415 of a biv did not overflow or wrap with respect to the extension for
7416 the giv. Therefore, constants need no additional adjustment. */
7417 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7418 return value;
7420 /* Otherwise, we must adjust the value to compensate for the
7421 differing modes of the biv and the giv. */
7422 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
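/* For example (register numbers invented for illustration): if
   V->ext_dependent is (sign_extend:DI (reg:SI 100)) and VALUE is
   (reg:SI 105), the call above yields (sign_extend:DI (reg:SI 105)),
   i.e. VALUE wrapped in the same extension code and outer mode.  */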
7425 struct combine_givs_stats
7427 int giv_number;
7428 int total_benefit;
7431 static int
7432 cmp_combine_givs_stats (xp, yp)
7433 const PTR xp;
7434 const PTR yp;
7436 const struct combine_givs_stats * const x =
7437 (const struct combine_givs_stats *) xp;
7438 const struct combine_givs_stats * const y =
7439 (const struct combine_givs_stats *) yp;
7440 int d;
7441 d = y->total_benefit - x->total_benefit;
7442 /* Stabilize the sort. */
7443 if (!d)
7444 d = x->giv_number - y->giv_number;
7445 return d;
7448 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7449 any other. If so, point SAME to the giv combined with and set NEW_REG to
7450 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7451 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
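/* A sketch of what combining means here (values invented): given
   givs g1 = 4*i + 16 and g2 = 4*i + 20 of the same biv i,
   combine_givs_p can express g2 in terms of g1's DEST_REG as
   (plus g1 (const_int 4)), so only g1 needs to be strength-reduced
   and g2 becomes a cheap offset from it.  */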
7453 static void
7454 combine_givs (regs, bl)
7455 struct loop_regs *regs;
7456 struct iv_class *bl;
7458 /* Additional benefit to add for being combined multiple times. */
7459 const int extra_benefit = 3;
7461 struct induction *g1, *g2, **giv_array;
7462 int i, j, k, giv_count;
7463 struct combine_givs_stats *stats;
7464 rtx *can_combine;
7466 /* Count givs, because bl->giv_count is incorrect here. */
7467 giv_count = 0;
7468 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7469 if (!g1->ignore)
7470 giv_count++;
7472 giv_array
7473 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7474 i = 0;
7475 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7476 if (!g1->ignore)
7477 giv_array[i++] = g1;
7479 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7480 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7482 for (i = 0; i < giv_count; i++)
7484 int this_benefit;
7485 rtx single_use;
7487 g1 = giv_array[i];
7488 stats[i].giv_number = i;
7490 /* If a DEST_REG GIV is used only once, do not allow it to combine
7491 with anything, for in doing so we will gain nothing that cannot
7492 be had by simply letting the GIV with which we would have combined
7493 be reduced on its own. The lossage shows up in particular with
7494 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7495 be seen elsewhere as well. */
7496 if (g1->giv_type == DEST_REG
7497 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7498 && single_use != const0_rtx)
7499 continue;
7501 this_benefit = g1->benefit;
7502 /* Add an additional weight for zero addends. */
7503 if (g1->no_const_addval)
7504 this_benefit += 1;
7506 for (j = 0; j < giv_count; j++)
7508 rtx this_combine;
7510 g2 = giv_array[j];
7511 if (g1 != g2
7512 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7514 can_combine[i * giv_count + j] = this_combine;
7515 this_benefit += g2->benefit + extra_benefit;
7518 stats[i].total_benefit = this_benefit;
7521 /* Iterate, combining until we can't. */
7522 restart:
7523 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7525 if (loop_dump_stream)
7527 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7528 for (k = 0; k < giv_count; k++)
7530 g1 = giv_array[stats[k].giv_number];
7531 if (!g1->combined_with && !g1->same)
7532 fprintf (loop_dump_stream, " {%d, %d}",
7533 INSN_UID (giv_array[stats[k].giv_number]->insn),
7534 stats[k].total_benefit);
7536 putc ('\n', loop_dump_stream);
7539 for (k = 0; k < giv_count; k++)
7541 int g1_add_benefit = 0;
7543 i = stats[k].giv_number;
7544 g1 = giv_array[i];
7546 /* If it has already been combined, skip. */
7547 if (g1->combined_with || g1->same)
7548 continue;
7550 for (j = 0; j < giv_count; j++)
7552 g2 = giv_array[j];
7553 if (g1 != g2 && can_combine[i * giv_count + j]
7554 /* If it has already been combined, skip. */
7555 && ! g2->same && ! g2->combined_with)
7557 int l;
7559 g2->new_reg = can_combine[i * giv_count + j];
7560 g2->same = g1;
7561 /* For a DEST_ADDR giv, we may now replace a register by a mem
7562 expression. This changes the costs considerably, so add the
7563 compensation. */
7564 if (g2->giv_type == DEST_ADDR)
7565 g2->benefit = (g2->benefit + reg_address_cost
7566 - address_cost (g2->new_reg,
7567 GET_MODE (g2->mem)));
7568 g1->combined_with++;
7569 g1->lifetime += g2->lifetime;
7571 g1_add_benefit += g2->benefit;
7573 /* ??? The new final_[bg]iv_value code does a much better job
7574 of finding replaceable giv's, and hence this code may no
7575 longer be necessary. */
7576 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7577 g1_add_benefit -= copy_cost;
7579 /* To help optimize the next set of combinations, remove
7580 this giv from the benefits of other potential mates. */
7581 for (l = 0; l < giv_count; ++l)
7583 int m = stats[l].giv_number;
7584 if (can_combine[m * giv_count + j])
7585 stats[l].total_benefit -= g2->benefit + extra_benefit;
7588 if (loop_dump_stream)
7589 fprintf (loop_dump_stream,
7590 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7591 INSN_UID (g2->insn), INSN_UID (g1->insn),
7592 g1->benefit, g1_add_benefit, g1->lifetime);
7596 /* To help optimize the next set of combinations, remove
7597 this giv from the benefits of other potential mates. */
7598 if (g1->combined_with)
7600 for (j = 0; j < giv_count; ++j)
7602 int m = stats[j].giv_number;
7603 if (can_combine[m * giv_count + i])
7604 stats[j].total_benefit -= g1->benefit + extra_benefit;
7607 g1->benefit += g1_add_benefit;
7609 /* We've finished with this giv, and everything it touched.
7610 Restart the combination so that proper weights for the
7611 rest of the givs are properly taken into account. */
7612 /* ??? Ideally we would compact the arrays at this point, so
7613 as to not cover old ground. But sanely compacting
7614 can_combine is tricky. */
7615 goto restart;
7619 /* Clean up. */
7620 free (stats);
7621 free (can_combine);
7624 /* Generate sequence for REG = B * M + A. */
7626 static rtx
7627 gen_add_mult (b, m, a, reg)
7628 rtx b; /* initial value of basic induction variable */
7629 rtx m; /* multiplicative constant */
7630 rtx a; /* additive constant */
7631 rtx reg; /* destination register */
7633 rtx seq;
7634 rtx result;
7636 start_sequence ();
7637 /* Use unsigned arithmetic. */
7638 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7639 if (reg != result)
7640 emit_move_insn (reg, result);
7641 seq = gen_sequence ();
7642 end_sequence ();
7644 return seq;
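/* A minimal usage sketch (register numbers hypothetical): for
   B = (reg:SI 100), M = (const_int 4), A = (const_int 8) and
   REG = (reg:SI 200), the returned sequence computes
   reg 200 = reg 100 * 4 + 8.  On most targets expand_mult_add will
   open-code the multiplication, e.g. as (ashift (reg 100) 2)
   followed by an add, rather than emitting a mult insn.  */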
7648 /* Update registers created in insn sequence SEQ. */
7650 static void
7651 loop_regs_update (loop, seq)
7652 const struct loop *loop ATTRIBUTE_UNUSED;
7653 rtx seq;
7655 /* Update register info for alias analysis. */
7657 if (GET_CODE (seq) == SEQUENCE)
7659 int i;
7660 for (i = 0; i < XVECLEN (seq, 0); ++i)
7662 rtx set = single_set (XVECEXP (seq, 0, i));
7663 if (set && GET_CODE (SET_DEST (set)) == REG)
7664 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7667 else
7669 if (GET_CODE (seq) == SET
7670 && GET_CODE (SET_DEST (seq)) == REG)
7671 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7676 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7678 void
7679 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7680 const struct loop *loop;
7681 rtx b; /* initial value of basic induction variable */
7682 rtx m; /* multiplicative constant */
7683 rtx a; /* additive constant */
7684 rtx reg; /* destination register */
7685 basic_block before_bb;
7686 rtx before_insn;
7688 rtx seq;
7690 if (! before_insn)
7692 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7693 return;
7696 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7697 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7699 /* Increase the lifetime of any invariants moved further in code. */
7700 update_reg_last_use (a, before_insn);
7701 update_reg_last_use (b, before_insn);
7702 update_reg_last_use (m, before_insn);
7704 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7706 /* It is possible that the expansion created lots of new registers.
7707 Iterate over the sequence we just created and record them all. */
7708 loop_regs_update (loop, seq);
7712 /* Emit insns after the loop (at the loop sink) to set REG = B * M + A. */
7714 void
7715 loop_iv_add_mult_sink (loop, b, m, a, reg)
7716 const struct loop *loop;
7717 rtx b; /* initial value of basic induction variable */
7718 rtx m; /* multiplicative constant */
7719 rtx a; /* additive constant */
7720 rtx reg; /* destination register */
7722 rtx seq;
7724 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7725 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7727 /* Increase the lifetime of any invariants moved further in code.
7728 ??? Is this really necessary? */
7729 update_reg_last_use (a, loop->sink);
7730 update_reg_last_use (b, loop->sink);
7731 update_reg_last_use (m, loop->sink);
7733 loop_insn_sink (loop, seq);
7735 /* It is possible that the expansion created lots of new registers.
7736 Iterate over the sequence we just created and record them all. */
7737 loop_regs_update (loop, seq);
7741 /* Emit insns in the loop pre-header to set REG = B * M + A. */
7743 void
7744 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7745 const struct loop *loop;
7746 rtx b; /* initial value of basic induction variable */
7747 rtx m; /* multiplicative constant */
7748 rtx a; /* additive constant */
7749 rtx reg; /* destination register */
7751 rtx seq;
7753 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7754 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7756 loop_insn_hoist (loop, seq);
7758 /* It is possible that the expansion created lots of new registers.
7759 Iterate over the sequence we just created and record them all. */
7760 loop_regs_update (loop, seq);
7765 /* Similar to gen_add_mult, but compute cost rather than generating
7766 sequence. */
7768 static int
7769 iv_add_mult_cost (b, m, a, reg)
7770 rtx b; /* initial value of basic induction variable */
7771 rtx m; /* multiplicative constant */
7772 rtx a; /* additive constant */
7773 rtx reg; /* destination register */
7775 int cost = 0;
7776 rtx last, result;
7778 start_sequence ();
7779 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7780 if (reg != result)
7781 emit_move_insn (reg, result);
7782 last = get_last_insn ();
7783 while (last)
7785 rtx t = single_set (last);
7786 if (t)
7787 cost += rtx_cost (SET_SRC (t), SET);
7788 last = PREV_INSN (last);
7790 end_sequence ();
7791 return cost;
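/* Costing the same hypothetical B * M + A as above: the scratch
   sequence might consist of a shift and an add, so the returned cost
   is the sum of rtx_cost over their SET_SRCs.  Nothing is emitted;
   the start_sequence/end_sequence pair discards the insns once the
   cost has been accumulated.  */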
7794 /* Test whether A * B can be computed without
7795 an actual multiply insn. Value is 1 if so. */
7797 static int
7798 product_cheap_p (a, b)
7799 rtx a;
7800 rtx b;
7802 int i;
7803 rtx tmp;
7804 int win = 1;
7806 /* If only one is constant, make it B. */
7807 if (GET_CODE (a) == CONST_INT)
7808 tmp = a, a = b, b = tmp;
7810 /* If the first is constant, both are constant, so we don't need a multiply. */
7811 if (GET_CODE (a) == CONST_INT)
7812 return 1;
7814 /* If the second is not constant, neither is, so we would need a multiply. */
7815 if (GET_CODE (b) != CONST_INT)
7816 return 0;
7818 /* One operand is constant, so we might not need a multiply insn. Generate
7819 the code for the multiply and see if a call, a multiply, or a long
7820 sequence of insns is generated. */
7822 start_sequence ();
7823 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7824 tmp = gen_sequence ();
7825 end_sequence ();
7827 if (GET_CODE (tmp) == SEQUENCE)
7829 if (XVEC (tmp, 0) == 0)
7830 win = 1;
7831 else if (XVECLEN (tmp, 0) > 3)
7832 win = 0;
7833 else
7834 for (i = 0; i < XVECLEN (tmp, 0); i++)
7836 rtx insn = XVECEXP (tmp, 0, i);
7838 if (GET_CODE (insn) != INSN
7839 || (GET_CODE (PATTERN (insn)) == SET
7840 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7841 || (GET_CODE (PATTERN (insn)) == PARALLEL
7842 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7843 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7845 win = 0;
7846 break;
7850 else if (GET_CODE (tmp) == SET
7851 && GET_CODE (SET_SRC (tmp)) == MULT)
7852 win = 0;
7853 else if (GET_CODE (tmp) == PARALLEL
7854 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7855 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7856 win = 0;
7858 return win;
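/* Illustration: a multiply by (const_int 6) usually expands to a
   short shift-and-add sequence (x*6 == (x << 2) + (x << 1)) with no
   MULT rtx, so it is considered cheap.  A multiply by a constant with
   no convenient shift/add decomposition either emits a real mult insn
   or a sequence longer than three insns, and win becomes 0.  */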
7861 /* Check to see if loop can be terminated by a "decrement and branch until
7862 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7863 Also try reversing an increment loop to a decrement loop
7864 to see if the optimization can be performed.
7865 Value is nonzero if optimization was performed. */
7867 /* This is useful even if the architecture doesn't have such an insn,
7868 because it might change a loop which increments from 0 to n into a loop
7869 which decrements from n to 0. A loop that decrements to zero is usually
7870 faster than one that increments from zero. */
7872 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7873 such as approx_final_value, biv_total_increment, loop_iterations, and
7874 final_[bg]iv_value. */
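/* In source terms, the transformation attempted below is roughly
   (a sketch only; the restrictions on biv uses are checked in the
   code that follows):

	for (i = 0; i < n; i++) body;
   becomes
	i = n; do { body; } while (--i != 0);

   where BODY must not use i except as a loop counter, so that the
   exit test becomes a compare against zero or a single
   decrement-and-branch insn on targets that have one.  */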
7876 static int
7877 check_dbra_loop (loop, insn_count)
7878 struct loop *loop;
7879 int insn_count;
7881 struct loop_info *loop_info = LOOP_INFO (loop);
7882 struct loop_regs *regs = LOOP_REGS (loop);
7883 struct loop_ivs *ivs = LOOP_IVS (loop);
7884 struct iv_class *bl;
7885 rtx reg;
7886 rtx jump_label;
7887 rtx final_value;
7888 rtx start_value;
7889 rtx new_add_val;
7890 rtx comparison;
7891 rtx before_comparison;
7892 rtx p;
7893 rtx jump;
7894 rtx first_compare;
7895 int compare_and_branch;
7896 rtx loop_start = loop->start;
7897 rtx loop_end = loop->end;
7899 /* If last insn is a conditional branch, and the insn before tests a
7900 register value, try to optimize it. Otherwise, we can't do anything. */
7902 jump = PREV_INSN (loop_end);
7903 comparison = get_condition_for_loop (loop, jump);
7904 if (comparison == 0)
7905 return 0;
7906 if (!onlyjump_p (jump))
7907 return 0;
7909 /* Try to compute whether the compare/branch at the loop end is one or
7910 two instructions. */
7911 get_condition (jump, &first_compare);
7912 if (first_compare == jump)
7913 compare_and_branch = 1;
7914 else if (first_compare == prev_nonnote_insn (jump))
7915 compare_and_branch = 2;
7916 else
7917 return 0;
7920 /* If more than one condition is present to control the loop, then
7921 do not proceed, as this function does not know how to rewrite
7922 loop tests with more than one condition.
7924 Look backwards from the first insn in the last comparison
7925 sequence and see if we've got another comparison sequence. */
7927 rtx jump1;
7928 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7929 if (GET_CODE (jump1) == JUMP_INSN)
7930 return 0;
7933 /* Check all of the bivs to see if the compare uses one of them.
7934 Skip biv's set more than once because we can't guarantee that
7935 it will be zero on the last iteration. Also skip if the biv is
7936 used between its update and the test insn. */
7938 for (bl = ivs->list; bl; bl = bl->next)
7940 if (bl->biv_count == 1
7941 && ! bl->biv->maybe_multiple
7942 && bl->biv->dest_reg == XEXP (comparison, 0)
7943 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7944 first_compare))
7945 break;
7948 if (! bl)
7949 return 0;
7951 /* Look for the case where the basic induction variable is always
7952 nonnegative, and equals zero on the last iteration.
7953 In this case, add a reg_note REG_NONNEG, which allows the
7954 m68k DBRA instruction to be used. */
7956 if (((GET_CODE (comparison) == GT
7957 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7958 && INTVAL (XEXP (comparison, 1)) == -1)
7959 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7960 && GET_CODE (bl->biv->add_val) == CONST_INT
7961 && INTVAL (bl->biv->add_val) < 0)
7963 /* The initial value must be greater than 0, and
7964 init_val % -dec_value == 0 to ensure that it equals zero on
7965 the last iteration. */
7967 if (GET_CODE (bl->initial_value) == CONST_INT
7968 && INTVAL (bl->initial_value) > 0
7969 && (INTVAL (bl->initial_value)
7970 % (-INTVAL (bl->biv->add_val))) == 0)
7972 /* The register is always nonnegative, so add a REG_NONNEG note to the branch. */
7973 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7974 REG_NOTES (jump)
7975 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7976 REG_NOTES (jump));
7977 bl->nonneg = 1;
7979 return 1;
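/* Example of the case just handled (numbers invented): a biv with
   initial value 8 and add_val -2, exiting on NE 0, takes the values
   8, 6, 4, 2, 0 -- always nonnegative and exactly zero on the last
   iteration, since 8 % 2 == 0 as verified above -- so the REG_NONNEG
   note is safe and m68k's dbra can implement the branch.  */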
7982 /* If the decrement is 1 and the value was tested as >= 0 before
7983 the loop, then we can safely optimize. */
7984 for (p = loop_start; p; p = PREV_INSN (p))
7986 if (GET_CODE (p) == CODE_LABEL)
7987 break;
7988 if (GET_CODE (p) != JUMP_INSN)
7989 continue;
7991 before_comparison = get_condition_for_loop (loop, p);
7992 if (before_comparison
7993 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7994 && GET_CODE (before_comparison) == LT
7995 && XEXP (before_comparison, 1) == const0_rtx
7996 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7997 && INTVAL (bl->biv->add_val) == -1)
7999 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8000 REG_NOTES (jump)
8001 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8002 REG_NOTES (jump));
8003 bl->nonneg = 1;
8005 return 1;
8009 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8010 && INTVAL (bl->biv->add_val) > 0)
8012 /* Try to change inc to dec, so can apply above optimization. */
8013 /* Can do this if:
8014 all registers modified are induction variables or invariant,
8015 all memory references have non-overlapping addresses
8016 (obviously true if only one write)
8017 allow 2 insns for the compare/jump at the end of the loop. */
8018 /* Also, we must avoid any instructions which use both the reversed
8019 biv and another biv. Such instructions will fail if the loop is
8020 reversed. We meet this condition by requiring that either
8021 no_use_except_counting is true, or else that there is only
8022 one biv. */
8023 int num_nonfixed_reads = 0;
8024 /* 1 if the iteration var is used only to count iterations. */
8025 int no_use_except_counting = 0;
8026 /* 1 if the loop has no memory store, or it has a single memory store
8027 which is reversible. */
8028 int reversible_mem_store = 1;
8030 if (bl->giv_count == 0
8031 && !loop->exit_count
8032 && !loop_info->has_multiple_exit_targets)
8034 rtx bivreg = regno_reg_rtx[bl->regno];
8035 struct iv_class *blt;
8037 /* If there are no givs for this biv, and the only exit is the
8038 fall through at the end of the loop, then
8039 see if perhaps there are no uses except to count. */
8040 no_use_except_counting = 1;
8041 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8042 if (INSN_P (p))
8044 rtx set = single_set (p);
8046 if (set && GET_CODE (SET_DEST (set)) == REG
8047 && REGNO (SET_DEST (set)) == bl->regno)
8048 /* An insn that sets the biv is okay. */
8050 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8051 || p == prev_nonnote_insn (loop_end))
8052 && reg_mentioned_p (bivreg, PATTERN (p)))
8054 /* If either of these insns uses the biv and sets a pseudo
8055 that has more than one usage, then the biv has uses
8056 other than counting since it's used to derive a value
8057 that is used more than one time. */
8058 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8059 regs);
8060 if (regs->multiple_uses)
8062 no_use_except_counting = 0;
8063 break;
8066 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8068 no_use_except_counting = 0;
8069 break;
8073 /* A biv has uses besides counting if it is used to set
8074 another biv. */
8075 for (blt = ivs->list; blt; blt = blt->next)
8076 if (blt->init_set
8077 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8079 no_use_except_counting = 0;
8080 break;
8084 if (no_use_except_counting)
8085 /* No need to worry about MEMs. */
8087 else if (loop_info->num_mem_sets <= 1)
8089 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8090 if (INSN_P (p))
8091 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8093 /* If the loop has a single store, and the destination address is
8094 invariant, then we can't reverse the loop, because this address
8095 might then have the wrong value at loop exit.
8096 This would work if the source was invariant also, however, in that
8097 case, the insn should have been moved out of the loop. */
8099 if (loop_info->num_mem_sets == 1)
8101 struct induction *v;
8103 /* If we could prove that each of the memory locations
8104 written to was different, then we could reverse the
8105 store -- but we don't presently have any way of
8106 knowing that. */
8107 reversible_mem_store = 0;
8109 /* If the store depends on a register that is set after the
8110 store, it depends on the initial value, and is thus not
8111 reversible. */
8112 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8114 if (v->giv_type == DEST_REG
8115 && reg_mentioned_p (v->dest_reg,
8116 PATTERN (loop_info->first_loop_store_insn))
8117 && loop_insn_first_p (loop_info->first_loop_store_insn,
8118 v->insn))
8119 reversible_mem_store = 0;
8123 else
8124 return 0;
8126 /* This code only acts for innermost loops. Also it simplifies
8127 the memory address check by only reversing loops with
8128 zero or one memory access.
8129 Two memory accesses could involve parts of the same array,
8130 and that can't be reversed.
8131 If the biv is used only for counting, then we don't need to worry
8132 about all these things. */
8134 if ((num_nonfixed_reads <= 1
8135 && ! loop_info->has_nonconst_call
8136 && ! loop_info->has_volatile
8137 && reversible_mem_store
8138 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8139 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8140 && (bl == ivs->list && bl->next == 0))
8141 || no_use_except_counting)
8143 rtx tem;
8145 /* Loop can be reversed. */
8146 if (loop_dump_stream)
8147 fprintf (loop_dump_stream, "Can reverse loop\n");
8149 /* Now check other conditions:
8151 The increment must be a constant, as must the initial value,
8152 and the comparison code must be LT.
8154 This test can probably be improved since +/- 1 in the constant
8155 can be obtained by changing LT to LE and vice versa; this is
8156 confusing. */
8158 if (comparison
8159 /* for constants, LE gets turned into LT */
8160 && (GET_CODE (comparison) == LT
8161 || (GET_CODE (comparison) == LE
8162 && no_use_except_counting)))
8164 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8165 rtx initial_value, comparison_value;
8166 int nonneg = 0;
8167 enum rtx_code cmp_code;
8168 int comparison_const_width;
8169 unsigned HOST_WIDE_INT comparison_sign_mask;
8171 add_val = INTVAL (bl->biv->add_val);
8172 comparison_value = XEXP (comparison, 1);
8173 if (GET_MODE (comparison_value) == VOIDmode)
8174 comparison_const_width
8175 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8176 else
8177 comparison_const_width
8178 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8179 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8180 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8181 comparison_sign_mask
8182 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8184 /* If the comparison value is not a loop invariant, then we
8185 cannot reverse this loop.
8187 ??? If the insns which initialize the comparison value as
8188 a whole compute an invariant result, then we could move
8189 them out of the loop and proceed with loop reversal. */
8190 if (! loop_invariant_p (loop, comparison_value))
8191 return 0;
8193 if (GET_CODE (comparison_value) == CONST_INT)
8194 comparison_val = INTVAL (comparison_value);
8195 initial_value = bl->initial_value;
8197 /* Normalize the initial value if it is an integer and
8198 has no other use except as a counter. This will allow
8199 a few more loops to be reversed. */
8200 if (no_use_except_counting
8201 && GET_CODE (comparison_value) == CONST_INT
8202 && GET_CODE (initial_value) == CONST_INT)
8204 comparison_val = comparison_val - INTVAL (bl->initial_value);
8205 /* The code below requires comparison_val to be a multiple
8206 of add_val in order to do the loop reversal, so
8207 round up comparison_val to a multiple of add_val.
8208 Since comparison_value is constant, we know that the
8209 current comparison code is LT. */
8210 comparison_val = comparison_val + add_val - 1;
8211 comparison_val
8212 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8213 /* We postpone overflow checks for COMPARISON_VAL here;
8214 even if there is an overflow, we might still be able to
8215 reverse the loop, if converting the loop exit test to
8216 NE is possible. */
8217 initial_value = const0_rtx;
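/* Worked example of the normalization above (numbers invented):
   for (i = 3; i < 10; i += 4) iterates with i in {3, 7}.  Here
   comparison_val becomes 10 - 3 == 7, rounded up to a multiple of
   add_val: 7 + 4 - 1 == 10, minus 10 % 4 gives 8.  The normalized
   loop (i' = 0; i' < 8; i' += 4) runs the same two iterations.  */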
8220 /* First check if we can do a vanilla loop reversal. */
8221 if (initial_value == const0_rtx
8222 /* If we have a decrement_and_branch_on_count,
8223 prefer the NE test, since this will allow that
8224 instruction to be generated. Note that we must
8225 use a vanilla loop reversal if the biv is used to
8226 calculate a giv or has a non-counting use. */
8227 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8228 && defined (HAVE_decrement_and_branch_on_count)
8229 && (! (add_val == 1 && loop->vtop
8230 && (bl->biv_count == 0
8231 || no_use_except_counting)))
8232 #endif
8233 && GET_CODE (comparison_value) == CONST_INT
8234 /* Now do postponed overflow checks on COMPARISON_VAL. */
8235 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8236 & comparison_sign_mask))
8238 /* Register will always be nonnegative, with value
8239 0 on last iteration. */
8240 add_adjust = add_val;
8241 nonneg = 1;
8242 cmp_code = GE;
8244 else if (add_val == 1 && loop->vtop
8245 && (bl->biv_count == 0
8246 || no_use_except_counting))
8248 add_adjust = 0;
8249 cmp_code = NE;
8251 else
8252 return 0;
8254 if (GET_CODE (comparison) == LE)
8255 add_adjust -= add_val;
8257 /* If the initial value is not zero, or if the comparison
8258 value is not an exact multiple of the increment, then we
8259 cannot reverse this loop. */
8260 if (initial_value == const0_rtx
8261 && GET_CODE (comparison_value) == CONST_INT)
8263 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8264 return 0;
8266 else
8268 if (! no_use_except_counting || add_val != 1)
8269 return 0;
8272 final_value = comparison_value;
8274 /* Reset these in case we normalized the initial value
8275 and comparison value above. */
8276 if (GET_CODE (comparison_value) == CONST_INT
8277 && GET_CODE (initial_value) == CONST_INT)
8279 comparison_value = GEN_INT (comparison_val);
8280 final_value
8281 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8283 bl->initial_value = initial_value;
8285 /* Save some info needed to produce the new insns. */
8286 reg = bl->biv->dest_reg;
8287 jump_label = condjump_label (PREV_INSN (loop_end));
8288 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8290 /* Set start_value; if this is not a CONST_INT, we need
8291 to generate a SUB.
8292 Initialize biv to start_value before loop start.
8293 The old initializing insn will be deleted as a
8294 dead store by flow.c. */
8295 if (initial_value == const0_rtx
8296 && GET_CODE (comparison_value) == CONST_INT)
8298 start_value = GEN_INT (comparison_val - add_adjust);
8299 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8301 else if (GET_CODE (initial_value) == CONST_INT)
8303 enum machine_mode mode = GET_MODE (reg);
8304 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8305 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8307 if (add_insn == 0)
8308 return 0;
8310 start_value
8311 = gen_rtx_PLUS (mode, comparison_value, offset);
8312 loop_insn_hoist (loop, add_insn);
8313 if (GET_CODE (comparison) == LE)
8314 final_value = gen_rtx_PLUS (mode, comparison_value,
8315 GEN_INT (add_val));
8317 else if (! add_adjust)
8319 enum machine_mode mode = GET_MODE (reg);
8320 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8321 initial_value);
8323 if (sub_insn == 0)
8324 return 0;
8325 start_value
8326 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8327 loop_insn_hoist (loop, sub_insn);
8329 else
8330 /* We could handle the other cases too, but it'll be
8331 better to have a testcase first. */
8332 return 0;
8334 /* We may not have a single insn which can increment a reg, so
8335 create a sequence to hold all the insns from expand_inc. */
8336 start_sequence ();
8337 expand_inc (reg, new_add_val);
8338 tem = gen_sequence ();
8339 end_sequence ();
8341 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8342 delete_insn (bl->biv->insn);
8344 /* Update biv info to reflect its new status. */
8345 bl->biv->insn = p;
8346 bl->initial_value = start_value;
8347 bl->biv->add_val = new_add_val;
8349 /* Update loop info. */
8350 loop_info->initial_value = reg;
8351 loop_info->initial_equiv_value = reg;
8352 loop_info->final_value = const0_rtx;
8353 loop_info->final_equiv_value = const0_rtx;
8354 loop_info->comparison_value = const0_rtx;
8355 loop_info->comparison_code = cmp_code;
8356 loop_info->increment = new_add_val;
8358 /* Inc LABEL_NUSES so that delete_insn will
8359 not delete the label. */
8360 LABEL_NUSES (XEXP (jump_label, 0))++;
8362 /* Emit an insn after the end of the loop to set the biv's
8363 proper exit value if it is used anywhere outside the loop. */
8364 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8365 || ! bl->init_insn
8366 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8367 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8369 /* Delete compare/branch at end of loop. */
8370 delete_related_insns (PREV_INSN (loop_end));
8371 if (compare_and_branch == 2)
8372 delete_related_insns (first_compare);
8374 /* Add new compare/branch insn at end of loop. */
8375 start_sequence ();
8376 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8377 GET_MODE (reg), 0,
8378 XEXP (jump_label, 0));
8379 tem = gen_sequence ();
8380 end_sequence ();
8381 emit_jump_insn_before (tem, loop_end);
8383 for (tem = PREV_INSN (loop_end);
8384 tem && GET_CODE (tem) != JUMP_INSN;
8385 tem = PREV_INSN (tem))
8388 if (tem)
8389 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8391 if (nonneg)
8393 if (tem)
8395 /* Increment of LABEL_NUSES done above. */
8396 /* Register is now always nonnegative,
8397 so add REG_NONNEG note to the branch. */
8398 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8399 REG_NOTES (tem));
8401 bl->nonneg = 1;
8404 /* No insn may reference both the reversed and another biv or it
8405 will fail (see comment near the top of the loop reversal
8406 code).
8407 Earlier on, we have verified that the biv has no use except
8408 counting, or it is the only biv in this function.
8409 However, the code that computes no_use_except_counting does
8410 not verify reg notes. It's possible to have an insn that
8411 references another biv, and has a REG_EQUAL note with an
8412 expression based on the reversed biv. To avoid this case,
8413 remove all REG_EQUAL notes based on the reversed biv
8414 here. */
8415 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8416 if (INSN_P (p))
8418 rtx *pnote;
8419 rtx set = single_set (p);
8420 /* If this is a set of a GIV based on the reversed biv, any
8421 REG_EQUAL notes should still be correct. */
8422 if (! set
8423 || GET_CODE (SET_DEST (set)) != REG
8424 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8425 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8426 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8427 for (pnote = &REG_NOTES (p); *pnote;)
8429 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8430 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8431 XEXP (*pnote, 0)))
8432 *pnote = XEXP (*pnote, 1);
8433 else
8434 pnote = &XEXP (*pnote, 1);
8438 /* Mark that this biv has been reversed. Each giv which depends
8439 on this biv, and which is also live past the end of the loop
8440 will have to be fixed up. */
8442 bl->reversed = 1;
8444 if (loop_dump_stream)
8446 fprintf (loop_dump_stream, "Reversed loop");
8447 if (bl->nonneg)
8448 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8449 else
8450 fprintf (loop_dump_stream, "\n");
8453 return 1;
8458 return 0;
8461 /* Verify whether the biv BL appears to be eliminable,
8462 based on the insns in the loop that refer to it.
8464 If ELIMINATE_P is non-zero, actually do the elimination.
8466 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8467 determine whether invariant insns should be placed inside or at the
8468 start of the loop. */
8470 static int
8471 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8472 const struct loop *loop;
8473 struct iv_class *bl;
8474 int eliminate_p;
8475 int threshold, insn_count;
8477 struct loop_ivs *ivs = LOOP_IVS (loop);
8478 rtx reg = bl->biv->dest_reg;
8479 rtx p;
8481 /* Scan all insns in the loop, stopping if we find one that uses the
8482 biv in a way that we cannot eliminate. */
8484 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8486 enum rtx_code code = GET_CODE (p);
8487 basic_block where_bb = 0;
8488 rtx where_insn = threshold >= insn_count ? 0 : p;
8490 /* If this is a libcall that sets a giv, skip ahead to its end. */
8491 if (GET_RTX_CLASS (code) == 'i')
8493 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8495 if (note)
8497 rtx last = XEXP (note, 0);
8498 rtx set = single_set (last);
8500 if (set && GET_CODE (SET_DEST (set)) == REG)
8502 unsigned int regno = REGNO (SET_DEST (set));
8504 if (regno < ivs->n_regs
8505 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8506 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8507 p = last;
8511 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8512 && reg_mentioned_p (reg, PATTERN (p))
8513 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8514 eliminate_p, where_bb, where_insn))
8516 if (loop_dump_stream)
8517 fprintf (loop_dump_stream,
8518 "Cannot eliminate biv %d: biv used in insn %d.\n",
8519 bl->regno, INSN_UID (p));
8520 break;
8524 if (p == loop->end)
8526 if (loop_dump_stream)
8527 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8528 bl->regno, eliminate_p ? "was" : "can be");
8529 return 1;
8532 return 0;
8535 /* INSN and REFERENCE are instructions in the same insn chain.
8536 Return non-zero if INSN is first. */
8538 int
8539 loop_insn_first_p (insn, reference)
8540 rtx insn, reference;
8542 rtx p, q;
8544 for (p = insn, q = reference;;)
8546 /* Start with test for not first so that INSN == REFERENCE yields not
8547 first. */
8548 if (q == insn || ! p)
8549 return 0;
8550 if (p == reference || ! q)
8551 return 1;
8553 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8554 previous insn, hence the <= comparison below does not work if
8555 P is a note. */
8556 if (INSN_UID (p) < max_uid_for_loop
8557 && INSN_UID (q) < max_uid_for_loop
8558 && GET_CODE (p) != NOTE)
8559 return INSN_LUID (p) <= INSN_LUID (q);
8561 if (INSN_UID (p) >= max_uid_for_loop
8562 || GET_CODE (p) == NOTE)
8563 p = NEXT_INSN (p);
8564 if (INSN_UID (q) >= max_uid_for_loop)
8565 q = NEXT_INSN (q);
8569 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8570 the offset that we have to take into account due to auto-increment /
8571 giv derivation is zero. */
8572 static int
8573 biv_elimination_giv_has_0_offset (biv, giv, insn)
8574 struct induction *biv, *giv;
8575 rtx insn;
8577 /* If the giv V had the auto-inc address optimization applied
8578 to it, and INSN occurs between the giv insn and the biv
8579 insn, then we'd have to adjust the value used here.
8580 This is rare, so we don't bother to make this possible. */
8581 if (giv->auto_inc_opt
8582 && ((loop_insn_first_p (giv->insn, insn)
8583 && loop_insn_first_p (insn, biv->insn))
8584 || (loop_insn_first_p (biv->insn, insn)
8585 && loop_insn_first_p (insn, giv->insn))))
8586 return 0;
8588 return 1;
8591 /* If BL appears in X (part of the pattern of INSN), see if we can
8592 eliminate its use. If so, return 1. If not, return 0.
8594 If BIV does not appear in X, return 1.
8596 If ELIMINATE_P is non-zero, actually do the elimination.
8597 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8598 Depending on how many items have been moved out of the loop, it
8599 will either be before INSN (when WHERE_INSN is non-zero) or at the
8600 start of the loop (when WHERE_INSN is zero). */
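/* A sketch of an elimination this function can perform (registers
   and constants invented): suppose the biv i appears only in
   (compare i (const_int 10)) and a giv j == 4*i + base has been
   reduced.  The code below rewrites the compare to use j against a
   value holding 4*10 + base (computed once, outside the loop when
   WHERE_INSN is zero), after which i itself becomes dead.  */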
8602 static int
8603 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8604 const struct loop *loop;
8605 rtx x, insn;
8606 struct iv_class *bl;
8607 int eliminate_p;
8608 basic_block where_bb;
8609 rtx where_insn;
8611 enum rtx_code code = GET_CODE (x);
8612 rtx reg = bl->biv->dest_reg;
8613 enum machine_mode mode = GET_MODE (reg);
8614 struct induction *v;
8615 rtx arg, tem;
8616 #ifdef HAVE_cc0
8617 rtx new;
8618 #endif
8619 int arg_operand;
8620 const char *fmt;
8621 int i, j;
8623 switch (code)
8625 case REG:
8626 /* If we haven't already been able to do something with this BIV,
8627 we can't eliminate it. */
8628 if (x == reg)
8629 return 0;
8630 return 1;
8632 case SET:
8633 /* If this sets the BIV, it is not a problem. */
8634 if (SET_DEST (x) == reg)
8635 return 1;
8637 /* If this is an insn that defines a giv, it is also ok because
8638 it will go away when the giv is reduced. */
8639 for (v = bl->giv; v; v = v->next_iv)
8640 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8641 return 1;
8643 #ifdef HAVE_cc0
8644 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8646 /* Can replace with any giv that was reduced and
8647 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8648 Require a constant for MULT_VAL, so we know it's nonzero.
8649 ??? We disable this optimization to avoid potential
8650 overflows. */
8652 for (v = bl->giv; v; v = v->next_iv)
8653 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8654 && v->add_val == const0_rtx
8655 && ! v->ignore && ! v->maybe_dead && v->always_computable
8656 && v->mode == mode
8657 && 0)
8659 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8660 continue;
8662 if (! eliminate_p)
8663 return 1;
8665 /* If the giv has the opposite direction of change,
8666 then reverse the comparison. */
8667 if (INTVAL (v->mult_val) < 0)
8668 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8669 const0_rtx, v->new_reg);
8670 else
8671 new = v->new_reg;
8673 /* We can probably test that giv's reduced reg. */
8674 if (validate_change (insn, &SET_SRC (x), new, 0))
8675 return 1;
8678 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8679 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8680 Require a constant for MULT_VAL, so we know it's nonzero.
8681 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8682 overflow problem. */
8684 for (v = bl->giv; v; v = v->next_iv)
8685 if (GET_CODE (v->mult_val) == CONST_INT
8686 && v->mult_val != const0_rtx
8687 && ! v->ignore && ! v->maybe_dead && v->always_computable
8688 && v->mode == mode
8689 && (GET_CODE (v->add_val) == SYMBOL_REF
8690 || GET_CODE (v->add_val) == LABEL_REF
8691 || GET_CODE (v->add_val) == CONST
8692 || (GET_CODE (v->add_val) == REG
8693 && REG_POINTER (v->add_val))))
8695 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8696 continue;
8698 if (! eliminate_p)
8699 return 1;
8701 /* If the giv has the opposite direction of change,
8702 then reverse the comparison. */
8703 if (INTVAL (v->mult_val) < 0)
8704 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8705 v->new_reg);
8706 else
8707 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8708 copy_rtx (v->add_val));
8710 /* Replace biv with the giv's reduced register. */
8711 update_reg_last_use (v->add_val, insn);
8712 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8713 return 1;
8715 /* Insn doesn't support that constant or invariant. Copy it
8716 into a register (it will be a loop invariant). */
8717 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8719 loop_insn_emit_before (loop, 0, where_insn,
8720 gen_move_insn (tem,
8721 copy_rtx (v->add_val)));
8723 /* Substitute the new register for its invariant value in
8724 the compare expression. */
8725 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8726 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8727 return 1;
8730 #endif
8731 break;
8733 case COMPARE:
8734 case EQ: case NE:
8735 case GT: case GE: case GTU: case GEU:
8736 case LT: case LE: case LTU: case LEU:
8737 /* See if either argument is the biv. */
8738 if (XEXP (x, 0) == reg)
8739 arg = XEXP (x, 1), arg_operand = 1;
8740 else if (XEXP (x, 1) == reg)
8741 arg = XEXP (x, 0), arg_operand = 0;
8742 else
8743 break;
8745 if (CONSTANT_P (arg))
8747 /* First try to replace with any giv that has constant positive
8748 mult_val and constant add_val. We might be able to support
8749 negative mult_val, but it seems complex to do it in general. */
8751 for (v = bl->giv; v; v = v->next_iv)
8752 if (GET_CODE (v->mult_val) == CONST_INT
8753 && INTVAL (v->mult_val) > 0
8754 && (GET_CODE (v->add_val) == SYMBOL_REF
8755 || GET_CODE (v->add_val) == LABEL_REF
8756 || GET_CODE (v->add_val) == CONST
8757 || (GET_CODE (v->add_val) == REG
8758 && REG_POINTER (v->add_val)))
8759 && ! v->ignore && ! v->maybe_dead && v->always_computable
8760 && v->mode == mode)
8762 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8763 continue;
8765 if (! eliminate_p)
8766 return 1;
8768 /* Replace biv with the giv's reduced reg. */
8769 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8771 /* If all constants are actually constant integers and
8772 the derived constant can be directly placed in the COMPARE,
8773 do so. */
8774 if (GET_CODE (arg) == CONST_INT
8775 && GET_CODE (v->mult_val) == CONST_INT
8776 && GET_CODE (v->add_val) == CONST_INT)
8778 validate_change (insn, &XEXP (x, arg_operand),
8779 GEN_INT (INTVAL (arg)
8780 * INTVAL (v->mult_val)
8781 + INTVAL (v->add_val)), 1);
8783 else
8785 /* Otherwise, load it into a register. */
8786 tem = gen_reg_rtx (mode);
8787 loop_iv_add_mult_emit_before (loop, arg,
8788 v->mult_val, v->add_val,
8789 tem, where_bb, where_insn);
8790 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8792 if (apply_change_group ())
8793 return 1;
8796 /* Look for giv with positive constant mult_val and nonconst add_val.
8797 Insert insns to calculate new compare value.
8798 ??? Turn this off due to possible overflow. */
8800 for (v = bl->giv; v; v = v->next_iv)
8801 if (GET_CODE (v->mult_val) == CONST_INT
8802 && INTVAL (v->mult_val) > 0
8803 && ! v->ignore && ! v->maybe_dead && v->always_computable
8804 && v->mode == mode
8805 && 0)
8807 rtx tem;
8809 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8810 continue;
8812 if (! eliminate_p)
8813 return 1;
8815 tem = gen_reg_rtx (mode);
8817 /* Replace biv with giv's reduced register. */
8818 validate_change (insn, &XEXP (x, 1 - arg_operand),
8819 v->new_reg, 1);
8821 /* Compute value to compare against. */
8822 loop_iv_add_mult_emit_before (loop, arg,
8823 v->mult_val, v->add_val,
8824 tem, where_bb, where_insn);
8825 /* Use it in this insn. */
8826 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8827 if (apply_change_group ())
8828 return 1;
8831 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8833 if (loop_invariant_p (loop, arg) == 1)
8835 /* Look for giv with constant positive mult_val and nonconst
8836 add_val. Insert insns to compute new compare value.
8837 ??? Turn this off due to possible overflow. */
8839 for (v = bl->giv; v; v = v->next_iv)
8840 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8841 && ! v->ignore && ! v->maybe_dead && v->always_computable
8842 && v->mode == mode
8843 && 0)
8845 rtx tem;
8847 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8848 continue;
8850 if (! eliminate_p)
8851 return 1;
8853 tem = gen_reg_rtx (mode);
8855 /* Replace biv with giv's reduced register. */
8856 validate_change (insn, &XEXP (x, 1 - arg_operand),
8857 v->new_reg, 1);
8859 /* Compute value to compare against. */
8860 loop_iv_add_mult_emit_before (loop, arg,
8861 v->mult_val, v->add_val,
8862 tem, where_bb, where_insn);
8863 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8864 if (apply_change_group ())
8865 return 1;
8869 /* This code has problems. Basically, when deciding whether we
8870 will eliminate BL, you can't know whether a particular giv
8871 of ARG will be reduced. If it isn't going to be reduced,
8872 we can't eliminate BL. We can try forcing it to be reduced,
8873 but that can generate poor code.
8875 The problem is that the benefit of reducing TV, below, should
8876 be increased if BL can actually be eliminated, but this means
8877 we might have to do a topological sort of the order in which
8878 we try to process biv. It doesn't seem worthwhile to do
8879 this sort of thing now. */
8881 #if 0
8882 /* Otherwise the reg compared with had better be a biv. */
8883 if (GET_CODE (arg) != REG
8884 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8885 return 0;
8887 /* Look for a pair of givs, one for each biv,
8888 with identical coefficients. */
8889 for (v = bl->giv; v; v = v->next_iv)
8891 struct induction *tv;
8893 if (v->ignore || v->maybe_dead || v->mode != mode)
8894 continue;
8896 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8897 tv = tv->next_iv)
8898 if (! tv->ignore && ! tv->maybe_dead
8899 && rtx_equal_p (tv->mult_val, v->mult_val)
8900 && rtx_equal_p (tv->add_val, v->add_val)
8901 && tv->mode == mode)
8903 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8904 continue;
8906 if (! eliminate_p)
8907 return 1;
8909 /* Replace biv with its giv's reduced reg. */
8910 XEXP (x, 1 - arg_operand) = v->new_reg;
8911 /* Replace other operand with the other giv's
8912 reduced reg. */
8913 XEXP (x, arg_operand) = tv->new_reg;
8914 return 1;
8917 #endif
8920 /* If we get here, the biv can't be eliminated. */
8921 return 0;
8923 case MEM:
8924 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8925 biv is used in it, since it will be replaced. */
8926 for (v = bl->giv; v; v = v->next_iv)
8927 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8928 return 1;
8929 break;
8931 default:
8932 break;
8935 /* See if any subexpression fails elimination. */
8936 fmt = GET_RTX_FORMAT (code);
8937 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8939 switch (fmt[i])
8941 case 'e':
8942 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8943 eliminate_p, where_bb, where_insn))
8944 return 0;
8945 break;
8947 case 'E':
8948 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8949 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8950 eliminate_p, where_bb, where_insn))
8951 return 0;
8952 break;
8956 return 1;
8959 /* Return nonzero if the last use of REG
8960 is in an insn following INSN in the same basic block. */
8962 static int
8963 last_use_this_basic_block (reg, insn)
8964 rtx reg;
8965 rtx insn;
8967 rtx n;
8968 for (n = insn;
8969 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8970 n = NEXT_INSN (n))
8972 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8973 return 1;
8975 return 0;
8978 /* Called via `note_stores' to record the initial value of a biv. Here we
8979 just record the location of the set and process it later. */
8981 static void
8982 record_initial (dest, set, data)
8983 rtx dest;
8984 rtx set;
8985 void *data ATTRIBUTE_UNUSED;
8987 struct loop_ivs *ivs = (struct loop_ivs *) data;
8988 struct iv_class *bl;
8990 if (GET_CODE (dest) != REG
8991 || REGNO (dest) >= ivs->n_regs
8992 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
8993 return;
8995 bl = REG_IV_CLASS (ivs, REGNO (dest));
8997 /* If this is the first set found, record it. */
8998 if (bl->init_insn == 0)
9000 bl->init_insn = note_insn;
9001 bl->init_set = set;
9005 /* If any of the registers in X are "old" and currently have a last use earlier
9006 than INSN, update them to have a last use of INSN. Their actual last use
9007 will be the previous insn but it will not have a valid uid_luid so we can't
9008 use it. X must be a source expression only. */
9010 static void
9011 update_reg_last_use (x, insn)
9012 rtx x;
9013 rtx insn;
9015 /* Check for the case where INSN does not have a valid luid. In this case,
9016 there is no need to modify the regno_last_uid, as this can only happen
9017 when code is inserted after the loop_end to set a pseudo's final value,
9018 and hence this insn will never be the last use of x.
9019 ??? This comment is not correct. See for example loop_givs_reduce.
9020 This may insert an insn before another new insn. */
9021 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9022 && INSN_UID (insn) < max_uid_for_loop
9023 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9025 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9027 else
9029 int i, j;
9030 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9031 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9033 if (fmt[i] == 'e')
9034 update_reg_last_use (XEXP (x, i), insn);
9035 else if (fmt[i] == 'E')
9036 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9037 update_reg_last_use (XVECEXP (x, i, j), insn);
9042 /* Given an insn INSN and condition COND, return the condition in a
9043 canonical form to simplify testing by callers. Specifically:
9045 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9046 (2) Both operands will be machine operands; (cc0) will have been replaced.
9047 (3) If an operand is a constant, it will be the second operand.
9048 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9049 for GE, GEU, and LEU.
9051 If the condition cannot be understood, or is an inequality floating-point
9052 comparison which needs to be reversed, 0 will be returned.
9054 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
9056 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9057 insn used in locating the condition was found. If a replacement test
9058 of the condition is desired, it should be placed in front of that
9059 insn and we will be sure that the inputs are still valid.
9061 If WANT_REG is non-zero, we wish the condition to be relative to that
9062 register, if possible. Therefore, do not canonicalize the condition
9063 further. */
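/* Illustration of rule (4) above (register numbers invented):
   (LE (reg:SI 100) (const_int 4)) canonicalizes to
   (LT (reg:SI 100) (const_int 5)), and (GEU (reg:SI 100) (const_int 1))
   to (GTU (reg:SI 100) (const_int 0)), provided the adjusted constant
   still fits in the operand's mode; the switch near the end of this
   function performs exactly these overflow checks.  */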
9065 rtx
9066 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9067 rtx insn;
9068 rtx cond;
9069 int reverse;
9070 rtx *earliest;
9071 rtx want_reg;
9073 enum rtx_code code;
9074 rtx prev = insn;
9075 rtx set;
9076 rtx tem;
9077 rtx op0, op1;
9078 int reverse_code = 0;
9079 enum machine_mode mode;
9081 code = GET_CODE (cond);
9082 mode = GET_MODE (cond);
9083 op0 = XEXP (cond, 0);
9084 op1 = XEXP (cond, 1);
9086 if (reverse)
9087 code = reversed_comparison_code (cond, insn);
9088 if (code == UNKNOWN)
9089 return 0;
9091 if (earliest)
9092 *earliest = insn;
9094 /* If we are comparing a register with zero, see if the register is set
9095 in the previous insn to a COMPARE or a comparison operation. Perform
9096 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9097 in cse.c */
9099 while (GET_RTX_CLASS (code) == '<'
9100 && op1 == CONST0_RTX (GET_MODE (op0))
9101 && op0 != want_reg)
9103 /* Set non-zero when we find something of interest. */
9104 rtx x = 0;
9106 #ifdef HAVE_cc0
9107 /* If comparison with cc0, import actual comparison from compare
9108 insn. */
9109 if (op0 == cc0_rtx)
9111 if ((prev = prev_nonnote_insn (prev)) == 0
9112 || GET_CODE (prev) != INSN
9113 || (set = single_set (prev)) == 0
9114 || SET_DEST (set) != cc0_rtx)
9115 return 0;
9117 op0 = SET_SRC (set);
9118 op1 = CONST0_RTX (GET_MODE (op0));
9119 if (earliest)
9120 *earliest = prev;
9122 #endif
9124 /* If this is a COMPARE, pick up the two things being compared. */
9125 if (GET_CODE (op0) == COMPARE)
9127 op1 = XEXP (op0, 1);
9128 op0 = XEXP (op0, 0);
9129 continue;
9131 else if (GET_CODE (op0) != REG)
9132 break;
9134 /* Go back to the previous insn. Stop if it is not an INSN. We also
9135 stop if it isn't a single set or if it has a REG_INC note because
9136 we don't want to bother dealing with it. */
9138 if ((prev = prev_nonnote_insn (prev)) == 0
9139 || GET_CODE (prev) != INSN
9140 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9141 break;
9143 set = set_of (op0, prev);
9145 if (set
9146 && (GET_CODE (set) != SET
9147 || !rtx_equal_p (SET_DEST (set), op0)))
9148 break;
9150 /* If this is setting OP0, get what it sets it to if it looks
9151 relevant. */
9152 if (set)
9154 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9156 /* ??? We may not combine comparisons done in a CCmode with
9157 comparisons not done in a CCmode. This is to aid targets
9158 like Alpha that have an IEEE compliant EQ instruction, and
9159 a non-IEEE compliant BEQ instruction. The use of CCmode is
9160 actually artificial, simply to prevent the combination, but
9161 should not affect other platforms.
9163 However, we must allow VOIDmode comparisons to match either
9164 CCmode or non-CCmode comparison, because some ports have
9165 modeless comparisons inside branch patterns.
9167 ??? This mode check should perhaps look more like the mode check
9168 in simplify_comparison in combine. */
9170 if ((GET_CODE (SET_SRC (set)) == COMPARE
9171 || (((code == NE
9172 || (code == LT
9173 && GET_MODE_CLASS (inner_mode) == MODE_INT
9174 && (GET_MODE_BITSIZE (inner_mode)
9175 <= HOST_BITS_PER_WIDE_INT)
9176 && (STORE_FLAG_VALUE
9177 & ((HOST_WIDE_INT) 1
9178 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9179 #ifdef FLOAT_STORE_FLAG_VALUE
9180 || (code == LT
9181 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9182 && (REAL_VALUE_NEGATIVE
9183 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9184 #endif
9186 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9187 && (((GET_MODE_CLASS (mode) == MODE_CC)
9188 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9189 || mode == VOIDmode || inner_mode == VOIDmode))
9190 x = SET_SRC (set);
9191 else if (((code == EQ
9192 || (code == GE
9193 && (GET_MODE_BITSIZE (inner_mode)
9194 <= HOST_BITS_PER_WIDE_INT)
9195 && GET_MODE_CLASS (inner_mode) == MODE_INT
9196 && (STORE_FLAG_VALUE
9197 & ((HOST_WIDE_INT) 1
9198 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9199 #ifdef FLOAT_STORE_FLAG_VALUE
9200 || (code == GE
9201 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9202 && (REAL_VALUE_NEGATIVE
9203 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9204 #endif
9206 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9207 && (((GET_MODE_CLASS (mode) == MODE_CC)
9208 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9209 || mode == VOIDmode || inner_mode == VOIDmode))
9212 reverse_code = 1;
9213 x = SET_SRC (set);
9215 else
9216 break;
9219 else if (reg_set_p (op0, prev))
9220 /* If this sets OP0, but not directly, we have to give up. */
9221 break;
9223 if (x)
9225 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9226 code = GET_CODE (x);
9227 if (reverse_code)
9229 code = reversed_comparison_code (x, prev);
9230 if (code == UNKNOWN)
9231 return 0;
9232 reverse_code = 0;
9235 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9236 if (earliest)
9237 *earliest = prev;
9241 /* If constant is first, put it last. */
9242 if (CONSTANT_P (op0))
9243 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9245 /* If OP0 is the result of a comparison, we weren't able to find what
9246 was really being compared, so fail. */
9247 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9248 return 0;
9250 /* Canonicalize any ordered comparison with integers involving equality
9251 if we can do computations in the relevant mode and we do not
9252 overflow. */
9254 if (GET_CODE (op1) == CONST_INT
9255 && GET_MODE (op0) != VOIDmode
9256 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9258 HOST_WIDE_INT const_val = INTVAL (op1);
9259 unsigned HOST_WIDE_INT uconst_val = const_val;
9260 unsigned HOST_WIDE_INT max_val
9261 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9263 switch (code)
9265 case LE:
9266 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9267 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9268 break;
9270 /* When cross-compiling, const_val might be sign-extended from
9271 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9272 case GE:
9273 if ((HOST_WIDE_INT) (const_val & max_val)
9274 != (((HOST_WIDE_INT) 1
9275 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9276 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9277 break;
9279 case LEU:
9280 if (uconst_val < max_val)
9281 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9282 break;
9284 case GEU:
9285 if (uconst_val != 0)
9286 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9287 break;
9289 default:
9290 break;
9294 #ifdef HAVE_cc0
9295 /* Never return CC0; return zero instead. */
9296 if (op0 == cc0_rtx)
9297 return 0;
9298 #endif
9300 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9303 /* Given a jump insn JUMP, return the condition that will cause it to branch
9304 to its JUMP_LABEL. If the condition cannot be understood, or is an
9305 inequality floating-point comparison which needs to be reversed, 0 will
9306 be returned.
9308 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9309 insn used in locating the condition was found. If a replacement test
9310 of the condition is desired, it should be placed in front of that
9311 insn and we will be sure that the inputs are still valid. */
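/* For instance, given a conditional branch of the form (an
   illustrative sketch; the register and label numbers are made up):

     (set (pc) (if_then_else (lt (reg 60) (const_int 10))
                             (label_ref 23)
                             (pc)))

   this returns (lt (reg 60) (const_int 10)), after letting
   canonicalize_condition trace (reg 60) back through earlier sets
   in the same extended basic block.  */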
9313 rtx
9314 get_condition (jump, earliest)
9315 rtx jump;
9316 rtx *earliest;
9318 rtx cond;
9319 int reverse;
9320 rtx set;
9322 /* If this is not a standard conditional jump, we can't parse it. */
9323 if (GET_CODE (jump) != JUMP_INSN
9324 || ! any_condjump_p (jump))
9325 return 0;
9326 set = pc_set (jump);
9328 cond = XEXP (SET_SRC (set), 0);
9330 /* If this branches to JUMP_LABEL when the condition is false, reverse
9331 the condition. */
9332 reverse
9333 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9334 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9336 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9339 /* Similar to above routine, except that we also put an invariant last
9340 unless both operands are invariants. */
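/* E.g. if X branches on (lt (reg L) (reg I)), where (reg L) is the
   loop-invariant operand and (reg I) is not, the result is rewritten
   as (gt (reg I) (reg L)) so that the invariant comes last (an
   illustrative sketch; L and I are just labels here).  */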
9342 static rtx
9343 get_condition_for_loop (loop, x)
9344 const struct loop *loop;
9345 rtx x;
9347 rtx comparison = get_condition (x, (rtx*) 0);
9349 if (comparison == 0
9350 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9351 || loop_invariant_p (loop, XEXP (comparison, 1)))
9352 return comparison;
9354 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9355 XEXP (comparison, 1), XEXP (comparison, 0));
9358 /* Scan the function and determine whether it has indirect (computed) jumps.
9360 This is taken mostly from flow.c; similar code exists elsewhere
9361 in the compiler. It may be useful to put this into rtlanal.c. */
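/* A computed goto is the classic source of such jumps, e.g. (an
   illustrative sketch using the GNU labels-as-values extension):

     static void *tab[] = { &&a, &&b };
     goto *tab[i];

   When any such jump exists, the loop optimizer must be conservative,
   since control may reach a label in a way the loop notes do not
   describe.  */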
9362 static int
9363 indirect_jump_in_function_p (start)
9364 rtx start;
9366 rtx insn;
9368 for (insn = start; insn; insn = NEXT_INSN (insn))
9369 if (computed_jump_p (insn))
9370 return 1;
9372 return 0;
9375 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9376 documentation for LOOP_MEMS for the definition of `appropriate'.
9377 This function is called from prescan_loop via for_each_rtx. */
9379 static int
9380 insert_loop_mem (mem, data)
9381 rtx *mem;
9382 void *data ATTRIBUTE_UNUSED;
9384 struct loop_info *loop_info = data;
9385 int i;
9386 rtx m = *mem;
9388 if (m == NULL_RTX)
9389 return 0;
9391 switch (GET_CODE (m))
9393 case MEM:
9394 break;
9396 case CLOBBER:
9397 /* We're not interested in MEMs that are only clobbered. */
9398 return -1;
9400 case CONST_DOUBLE:
9401 /* We're not interested in the MEM associated with a
9402 CONST_DOUBLE, so there's no need to traverse into this. */
9403 return -1;
9405 case EXPR_LIST:
9406 /* We're not interested in any MEMs that only appear in notes. */
9407 return -1;
9409 default:
9410 /* This is not a MEM. */
9411 return 0;
9414 /* See if we've already seen this MEM. */
9415 for (i = 0; i < loop_info->mems_idx; ++i)
9416 if (rtx_equal_p (m, loop_info->mems[i].mem))
9418 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9419 /* The modes of the two memory accesses are different. If
9420 this happens, something tricky is going on, and we just
9421 don't optimize accesses to this MEM. */
9422 loop_info->mems[i].optimize = 0;
9424 return 0;
9427 /* Resize the array, if necessary. */
9428 if (loop_info->mems_idx == loop_info->mems_allocated)
9430 if (loop_info->mems_allocated != 0)
9431 loop_info->mems_allocated *= 2;
9432 else
9433 loop_info->mems_allocated = 32;
9435 loop_info->mems = (loop_mem_info *)
9436 xrealloc (loop_info->mems,
9437 loop_info->mems_allocated * sizeof (loop_mem_info));
9440 /* Actually insert the MEM. */
9441 loop_info->mems[loop_info->mems_idx].mem = m;
9442 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9443 because we can't put it in a register. We still store it in the
9444 table, though, so that if we see the same address later, but in a
9445 non-BLK mode, we'll not think we can optimize it at that point. */
9446 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9447 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9448 ++loop_info->mems_idx;
9450 return 0;
9454 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9456 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9457 register that is modified by an insn between FROM and TO. If the
9458 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9459 more, stop incrementing it, to avoid overflow.
9461 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9462 register I is used, if it is only used once. Otherwise, it is set
9463 to 0 (for no uses) or const0_rtx for more than one use. This
9464 parameter may be zero, in which case this processing is not done.
9466 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9467 optimize register I. */
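/* For instance, given a loop whose body contains (an illustrative
   sketch; the register numbers are made up):

     (set (reg 60) ...)    -- in one basic block
     (set (reg 60) ...)    -- in a later basic block
     ... one use of (reg 61) ...

   this records SET_IN_LOOP == 2 for register 60 (repeated sets within
   a single basic block are counted only once, via LAST_SET) and makes
   SINGLE_USAGE for register 61 point at its single using insn.  */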
9469 static void
9470 loop_regs_scan (loop, extra_size)
9471 const struct loop *loop;
9472 int extra_size;
9474 struct loop_regs *regs = LOOP_REGS (loop);
9475 int old_nregs;
9476 /* last_set[n] is nonzero iff reg n has been set in the current
9477 basic block. In that case, it is the insn that last set reg n. */
9478 rtx *last_set;
9479 rtx insn;
9480 int i;
9482 old_nregs = regs->num;
9483 regs->num = max_reg_num ();
9485 /* Grow the regs array if not allocated or too small. */
9486 if (regs->num >= regs->size)
9488 regs->size = regs->num + extra_size;
9490 regs->array = (struct loop_reg *)
9491 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9493 /* Zero the new elements. */
9494 memset (regs->array + old_nregs, 0,
9495 (regs->size - old_nregs) * sizeof (*regs->array));
9498 /* Clear previously scanned fields but do not clear n_times_set. */
9499 for (i = 0; i < old_nregs; i++)
9501 regs->array[i].set_in_loop = 0;
9502 regs->array[i].may_not_optimize = 0;
9503 regs->array[i].single_usage = NULL_RTX;
9506 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9508 /* Scan the loop, recording register usage. */
9509 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9510 insn = NEXT_INSN (insn))
9512 if (INSN_P (insn))
9514 /* Record registers that have exactly one use. */
9515 find_single_use_in_loop (regs, insn, PATTERN (insn));
9517 /* Include uses in REG_EQUAL notes. */
9518 if (REG_NOTES (insn))
9519 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9521 if (GET_CODE (PATTERN (insn)) == SET
9522 || GET_CODE (PATTERN (insn)) == CLOBBER)
9523 count_one_set (regs, insn, PATTERN (insn), last_set);
9524 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9526 int i;
9527 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9528 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9529 last_set);
9533 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9534 memset (last_set, 0, regs->num * sizeof (rtx));
9537 /* Invalidate all hard registers clobbered by calls. With one exception:
9538 a call-clobbered PIC register is still function-invariant for our
9539 purposes, since we can hoist any PIC calculations out of the loop.
9540 Thus the call to rtx_varies_p. */
9541 if (LOOP_INFO (loop)->has_call)
9542 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9543 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9544 && rtx_varies_p (gen_rtx_REG (Pmode, i), /*for_alias=*/1))
9546 regs->array[i].may_not_optimize = 1;
9547 regs->array[i].set_in_loop = 1;
9550 #ifdef AVOID_CCMODE_COPIES
9551 /* Don't try to move insns which set CC registers if we should not
9552 create CCmode register copies. */
9553 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9554 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9555 regs->array[i].may_not_optimize = 1;
9556 #endif
9558 /* Set regs->array[I].n_times_set for the new registers. */
9559 for (i = old_nregs; i < regs->num; i++)
9560 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9562 free (last_set);
9565 /* Returns the number of real INSNs in the LOOP. */
9567 static int
9568 count_insns_in_loop (loop)
9569 const struct loop *loop;
9571 int count = 0;
9572 rtx insn;
9574 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9575 insn = NEXT_INSN (insn))
9576 if (INSN_P (insn))
9577 ++count;
9579 return count;
9582 /* Move MEMs into registers for the duration of the loop. */
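/* For example, a loop that updates a loop-invariant location through
   a pointer (an illustrative C sketch; p, k and n are made-up names):

     while (n--)               reg = *p;
       *p += k;        =>      while (n--)
                                 reg += k;
                               *p = reg;

   The load into the shadow register is hoisted in front of the loop
   and, because the MEM is written, the store back is emitted on a new
   exit label after the loop.  */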
9584 static void
9585 load_mems (loop)
9586 const struct loop *loop;
9588 struct loop_info *loop_info = LOOP_INFO (loop);
9589 struct loop_regs *regs = LOOP_REGS (loop);
9590 int maybe_never = 0;
9591 int i;
9592 rtx p, prev_ebb_head;
9593 rtx label = NULL_RTX;
9594 rtx end_label;
9595 /* Nonzero if the next instruction may never be executed. */
9596 int next_maybe_never = 0;
9597 unsigned int last_max_reg = max_reg_num ();
9599 if (loop_info->mems_idx == 0)
9600 return;
9602 /* We cannot use next_label here because it skips over normal insns. */
9603 end_label = next_nonnote_insn (loop->end);
9604 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9605 end_label = NULL_RTX;
9607 /* Check to see if it's possible that some instructions in the loop are
9608 never executed. Also check if there is a goto out of the loop other
9609 than to just after the end of the loop. */
9610 for (p = next_insn_in_loop (loop, loop->scan_start);
9611 p != NULL_RTX;
9612 p = next_insn_in_loop (loop, p))
9614 if (GET_CODE (p) == CODE_LABEL)
9615 maybe_never = 1;
9616 else if (GET_CODE (p) == JUMP_INSN
9617 /* If we enter the loop in the middle, and scan
9618 around to the beginning, don't set maybe_never
9619 for that. This must be an unconditional jump,
9620 otherwise the code at the top of the loop might
9621 never be executed. Unconditional jumps are
9622 followed by a barrier and then the loop end. */
9623 && ! (GET_CODE (p) == JUMP_INSN
9624 && JUMP_LABEL (p) == loop->top
9625 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9626 && any_uncondjump_p (p)))
9628 /* If this is a jump outside of the loop but not right
9629 after the end of the loop, we would have to emit new fixup
9630 sequences for each such label. */
9631 if (/* If we can't tell where control might go when this
9632 JUMP_INSN is executed, we must be conservative. */
9633 !JUMP_LABEL (p)
9634 || (JUMP_LABEL (p) != end_label
9635 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9636 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9637 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9638 return;
9640 if (!any_condjump_p (p))
9641 /* Something complicated. */
9642 maybe_never = 1;
9643 else
9644 /* If there are any more instructions in the loop, they
9645 might not be reached. */
9646 next_maybe_never = 1;
9648 else if (next_maybe_never)
9649 maybe_never = 1;
9652 /* Find start of the extended basic block that enters the loop. */
9653 for (p = loop->start;
9654 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9655 p = PREV_INSN (p))
9657 prev_ebb_head = p;
9659 cselib_init ();
9661 /* Build table of mems that get set to constant values before the
9662 loop. */
9663 for (; p != loop->start; p = NEXT_INSN (p))
9664 cselib_process_insn (p);
9666 /* Actually move the MEMs. */
9667 for (i = 0; i < loop_info->mems_idx; ++i)
9669 regset_head load_copies;
9670 regset_head store_copies;
9671 int written = 0;
9672 rtx reg;
9673 rtx mem = loop_info->mems[i].mem;
9674 rtx mem_list_entry;
9676 if (MEM_VOLATILE_P (mem)
9677 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9678 /* There's no telling whether or not MEM is modified. */
9679 loop_info->mems[i].optimize = 0;
9681 /* Go through the MEMs written to in the loop to see if this
9682 one is aliased by one of them. */
9683 mem_list_entry = loop_info->store_mems;
9684 while (mem_list_entry)
9686 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9687 written = 1;
9688 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9689 mem, rtx_varies_p))
9691 /* MEM is indeed aliased by this store. */
9692 loop_info->mems[i].optimize = 0;
9693 break;
9695 mem_list_entry = XEXP (mem_list_entry, 1);
9698 if (flag_float_store && written
9699 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9700 loop_info->mems[i].optimize = 0;
9702 /* If this MEM is written to, we must be sure that there
9703 are no reads from another MEM that aliases this one. */
9704 if (loop_info->mems[i].optimize && written)
9706 int j;
9708 for (j = 0; j < loop_info->mems_idx; ++j)
9710 if (j == i)
9711 continue;
9712 else if (true_dependence (mem,
9713 VOIDmode,
9714 loop_info->mems[j].mem,
9715 rtx_varies_p))
9717 /* It's not safe to hoist loop_info->mems[i] out of
9718 the loop because writes to it might not be
9719 seen by reads from loop_info->mems[j]. */
9720 loop_info->mems[i].optimize = 0;
9721 break;
9726 if (maybe_never && may_trap_p (mem))
9727 /* We can't access the MEM outside the loop; it might
9728 cause a trap that wouldn't have happened otherwise. */
9729 loop_info->mems[i].optimize = 0;
9731 if (!loop_info->mems[i].optimize)
9732 /* We thought we were going to lift this MEM out of the
9733 loop, but later discovered that we could not. */
9734 continue;
9736 INIT_REG_SET (&load_copies);
9737 INIT_REG_SET (&store_copies);
9739 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9740 order to keep scan_loop from moving stores to this MEM
9741 out of the loop just because this REG is neither a
9742 user-variable nor used in the loop test. */
9743 reg = gen_reg_rtx (GET_MODE (mem));
9744 REG_USERVAR_P (reg) = 1;
9745 loop_info->mems[i].reg = reg;
9747 /* Now, replace all references to the MEM with the
9748 corresponding pseudos. */
9749 maybe_never = 0;
9750 for (p = next_insn_in_loop (loop, loop->scan_start);
9751 p != NULL_RTX;
9752 p = next_insn_in_loop (loop, p))
9754 if (INSN_P (p))
9756 rtx set;
9758 set = single_set (p);
9760 /* See if this copies the mem into a register that isn't
9761 modified afterwards. We'll try to do copy propagation
9762 a little further on. */
9763 if (set
9764 /* @@@ This test is _way_ too conservative. */
9765 && ! maybe_never
9766 && GET_CODE (SET_DEST (set)) == REG
9767 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9768 && REGNO (SET_DEST (set)) < last_max_reg
9769 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9770 && rtx_equal_p (SET_SRC (set), mem))
9771 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9773 /* See if this copies the mem from a register that isn't
9774 modified afterwards. We'll try to remove the
9775 redundant copy later on by doing a little register
9776 renaming and copy propagation. This will help
9777 to untangle things for the BIV detection code. */
9778 if (set
9779 && ! maybe_never
9780 && GET_CODE (SET_SRC (set)) == REG
9781 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9782 && REGNO (SET_SRC (set)) < last_max_reg
9783 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9784 && rtx_equal_p (SET_DEST (set), mem))
9785 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9787 /* If this is a call which uses / clobbers this memory
9788 location, we must not change the interface here. */
9789 if (GET_CODE (p) == CALL_INSN
9790 && reg_mentioned_p (loop_info->mems[i].mem,
9791 CALL_INSN_FUNCTION_USAGE (p)))
9793 cancel_changes (0);
9794 loop_info->mems[i].optimize = 0;
9795 break;
9797 else
9798 /* Replace the memory reference with the shadow register. */
9799 replace_loop_mems (p, loop_info->mems[i].mem,
9800 loop_info->mems[i].reg);
9803 if (GET_CODE (p) == CODE_LABEL
9804 || GET_CODE (p) == JUMP_INSN)
9805 maybe_never = 1;
9808 if (! loop_info->mems[i].optimize)
9809 ; /* We found we couldn't do the replacement, so do nothing. */
9810 else if (! apply_change_group ())
9811 /* We couldn't replace all occurrences of the MEM. */
9812 loop_info->mems[i].optimize = 0;
9813 else
9815 /* Load the memory immediately before LOOP->START, which is
9816 the NOTE_LOOP_BEG. */
9817 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9818 rtx set;
9819 rtx best = mem;
9820 int j;
9821 struct elt_loc_list *const_equiv = 0;
9823 if (e)
9825 struct elt_loc_list *equiv;
9826 struct elt_loc_list *best_equiv = 0;
9827 for (equiv = e->locs; equiv; equiv = equiv->next)
9829 if (CONSTANT_P (equiv->loc))
9830 const_equiv = equiv;
9831 else if (GET_CODE (equiv->loc) == REG
9832 /* Extending hard register lifetimes causes a crash
9833 on SRC targets. Doing so on non-SRC targets is
9834 probably not a good idea either, since we most
9835 likely have a pseudo-register equivalence as
9836 well. */
9837 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9838 best_equiv = equiv;
9840 /* Use the constant equivalence if that is cheap enough. */
9841 if (! best_equiv)
9842 best_equiv = const_equiv;
9843 else if (const_equiv
9844 && (rtx_cost (const_equiv->loc, SET)
9845 <= rtx_cost (best_equiv->loc, SET)))
9847 best_equiv = const_equiv;
9848 const_equiv = 0;
9851 /* If best_equiv is nonzero, we know that MEM is set to a
9852 constant or register before the loop. We will use this
9853 knowledge to initialize the shadow register with that
9854 constant or reg rather than by loading from MEM. */
9855 if (best_equiv)
9856 best = copy_rtx (best_equiv->loc);
9859 set = gen_move_insn (reg, best);
9860 set = loop_insn_hoist (loop, set);
9861 if (REG_P (best))
9863 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9864 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9866 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9867 break;
9871 if (const_equiv)
9872 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9874 if (written)
9876 if (label == NULL_RTX)
9878 label = gen_label_rtx ();
9879 emit_label_after (label, loop->end);
9882 /* Store the memory immediately after END, which is
9883 the NOTE_LOOP_END. */
9884 set = gen_move_insn (copy_rtx (mem), reg);
9885 loop_insn_emit_after (loop, 0, label, set);
9888 if (loop_dump_stream)
9890 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9891 REGNO (reg), (written ? "r/w" : "r/o"));
9892 print_rtl (loop_dump_stream, mem);
9893 fputc ('\n', loop_dump_stream);
9896 /* Attempt a bit of copy propagation. This helps untangle the
9897 data flow, and enables {basic,general}_induction_var to find
9898 more bivs/givs. */
9899 EXECUTE_IF_SET_IN_REG_SET
9900 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9902 try_copy_prop (loop, reg, j);
9904 CLEAR_REG_SET (&load_copies);
9906 EXECUTE_IF_SET_IN_REG_SET
9907 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9909 try_swap_copy_prop (loop, reg, j);
9911 CLEAR_REG_SET (&store_copies);
9915 if (label != NULL_RTX && end_label != NULL_RTX)
9917 /* Now, we need to replace all references to the previous exit
9918 label with the new one. */
9919 rtx_pair rr;
9920 rr.r1 = end_label;
9921 rr.r2 = label;
9923 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9925 for_each_rtx (&p, replace_label, &rr);
9927 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9928 field. This is not handled by for_each_rtx because it doesn't
9929 handle unprinted ('0') fields. We need to update JUMP_LABEL
9930 because the immediately following unroll pass will use it.
9931 replace_label would not work anyway, because it only handles
9932 LABEL_REFs. */
9933 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9934 JUMP_LABEL (p) = label;
9938 cselib_finish ();
9941 /* For communication between note_reg_stored and its caller. */
9942 struct note_reg_stored_arg
9944 int set_seen;
9945 rtx reg;
9948 /* Called via note_stores, record in ARG->SET_SEEN whether X, which is
9949 written, is equal to ARG->REG. */
9950 static void
9951 note_reg_stored (x, setter, arg)
9952 rtx x, setter ATTRIBUTE_UNUSED;
9953 void *arg;
9955 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9956 if (t->reg == x)
9957 t->set_seen = 1;
9960 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9961 There must be exactly one insn that sets this pseudo; it will be
9962 deleted if all replacements succeed and we can prove that the register
9963 is not used after the loop. */
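/* E.g. after load_mems has replaced a MEM with its shadow register
   (an illustrative sketch; the register numbers are made up):

     (set (reg 70) (reg 90))    -- the one initializing insn
     ... uses of (reg 70) ...

   try_copy_prop (loop, (reg 90), 70) rewrites the uses to refer to
   (reg 90) directly; if the initializing insn was the first use of
   register 70 and its last use was replaced, the insn is deleted.  */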
9965 static void
9966 try_copy_prop (loop, replacement, regno)
9967 const struct loop *loop;
9968 rtx replacement;
9969 unsigned int regno;
9971 /* This is the reg that we are copying from. */
9972 rtx reg_rtx = regno_reg_rtx[regno];
9973 rtx init_insn = 0;
9974 rtx insn;
9975 /* These help keep track of whether we replaced all uses of the reg. */
9976 int replaced_last = 0;
9977 int store_is_first = 0;
9979 for (insn = next_insn_in_loop (loop, loop->scan_start);
9980 insn != NULL_RTX;
9981 insn = next_insn_in_loop (loop, insn))
9983 rtx set;
9985 /* Only substitute within one extended basic block from the initializing
9986 insn. */
9987 if (GET_CODE (insn) == CODE_LABEL && init_insn)
9988 break;
9990 if (! INSN_P (insn))
9991 continue;
9993 /* Is this the initializing insn? */
9994 set = single_set (insn);
9995 if (set
9996 && GET_CODE (SET_DEST (set)) == REG
9997 && REGNO (SET_DEST (set)) == regno)
9999 if (init_insn)
10000 abort ();
10002 init_insn = insn;
10003 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10004 store_is_first = 1;
10007 /* Only substitute after seeing the initializing insn. */
10008 if (init_insn && insn != init_insn)
10010 struct note_reg_stored_arg arg;
10012 replace_loop_regs (insn, reg_rtx, replacement);
10013 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10014 replaced_last = 1;
10016 /* Stop replacing when REPLACEMENT is modified. */
10017 arg.reg = replacement;
10018 arg.set_seen = 0;
10019 note_stores (PATTERN (insn), note_reg_stored, &arg);
10020 if (arg.set_seen)
10022 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10024 /* It is possible that we've turned a previously valid REG_EQUAL note
10025 into an invalid one: we replaced REGNO with REPLACEMENT and, unlike
10026 REGNO, REPLACEMENT is modified here, so the note's meaning changes. */
10027 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10028 remove_note (insn, note);
10029 break;
10033 if (! init_insn)
10034 abort ();
10035 if (apply_change_group ())
10037 if (loop_dump_stream)
10038 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10039 if (store_is_first && replaced_last)
10041 rtx first;
10042 rtx retval_note;
10044 /* Assume we're just deleting INIT_INSN. */
10045 first = init_insn;
10046 /* Look for REG_RETVAL note. If we're deleting the end of
10047 the libcall sequence, the whole sequence can go. */
10048 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10049 /* If we found a REG_RETVAL note, find the first instruction
10050 in the sequence. */
10051 if (retval_note)
10052 first = XEXP (retval_note, 0);
10054 /* Delete the instructions. */
10055 loop_delete_insns (first, init_insn);
10057 if (loop_dump_stream)
10058 fprintf (loop_dump_stream, ".\n");
10062 /* Replace all the instructions from FIRST up to and including LAST
10063 with NOTE_INSN_DELETED notes. */
10065 static void
10066 loop_delete_insns (first, last)
10067 rtx first;
10068 rtx last;
10070 while (1)
10072 if (loop_dump_stream)
10073 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10074 INSN_UID (first));
10075 delete_insn (first);
10077 /* If this was the LAST instruction we're supposed to delete,
10078 we're done. */
10079 if (first == last)
10080 break;
10082 first = NEXT_INSN (first);
10086 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10087 loop LOOP if the order of the sets of these registers can be
10088 swapped. There must be exactly one insn within the loop that sets
10089 this pseudo followed immediately by a move insn that sets
10090 REPLACEMENT with REGNO. */
10091 static void
10092 try_swap_copy_prop (loop, replacement, regno)
10093 const struct loop *loop;
10094 rtx replacement;
10095 unsigned int regno;
10097 rtx insn;
10098 rtx set = NULL_RTX;
10099 unsigned int new_regno;
10101 new_regno = REGNO (replacement);
10103 for (insn = next_insn_in_loop (loop, loop->scan_start);
10104 insn != NULL_RTX;
10105 insn = next_insn_in_loop (loop, insn))
10107 /* Search for the insn that copies REGNO to NEW_REGNO. */
10108 if (INSN_P (insn)
10109 && (set = single_set (insn))
10110 && GET_CODE (SET_DEST (set)) == REG
10111 && REGNO (SET_DEST (set)) == new_regno
10112 && GET_CODE (SET_SRC (set)) == REG
10113 && REGNO (SET_SRC (set)) == regno)
10114 break;
10117 if (insn != NULL_RTX)
10119 rtx prev_insn;
10120 rtx prev_set;
10122 /* Some DEF-USE info would come in handy here to make this
10123 function more general. For now, just check the previous insn
10124 which is the most likely candidate for setting REGNO. */
10126 prev_insn = PREV_INSN (insn);
10128 if (INSN_P (prev_insn)
10129 && (prev_set = single_set (prev_insn))
10130 && GET_CODE (SET_DEST (prev_set)) == REG
10131 && REGNO (SET_DEST (prev_set)) == regno)
10133 /* We have:
10134 (set (reg regno) (expr))
10135 (set (reg new_regno) (reg regno))
10137 so try converting this to:
10138 (set (reg new_regno) (expr))
10139 (set (reg regno) (reg new_regno))
10141 The former construct is often generated when a global
10142 variable used for an induction variable is shadowed by a
10143 register (NEW_REGNO). The latter construct improves the
10144 chances of GIV replacement and BIV elimination. */
10146 validate_change (prev_insn, &SET_DEST (prev_set),
10147 replacement, 1);
10148 validate_change (insn, &SET_DEST (set),
10149 SET_SRC (set), 1);
10150 validate_change (insn, &SET_SRC (set),
10151 replacement, 1);
10153 if (apply_change_group ())
10155 if (loop_dump_stream)
10156 fprintf (loop_dump_stream,
10157 " Swapped set of reg %d at %d with reg %d at %d.\n",
10158 regno, INSN_UID (insn),
10159 new_regno, INSN_UID (prev_insn));
10161 /* Update first use of REGNO. */
10162 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10163 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10165 /* Now perform copy propagation to hopefully
10166 remove all uses of REGNO within the loop. */
10167 try_copy_prop (loop, replacement, regno);
10173 /* Replace MEM with its associated pseudo register. This function is
10174 called from load_mems via for_each_rtx. DATA is actually a pointer
10175 to a structure describing the instruction currently being scanned
10176 and the MEM we are currently replacing. */
10178 static int
10179 replace_loop_mem (mem, data)
10180 rtx *mem;
10181 void *data;
10183 loop_replace_args *args = (loop_replace_args *) data;
10184 rtx m = *mem;
10186 if (m == NULL_RTX)
10187 return 0;
10189 switch (GET_CODE (m))
10191 case MEM:
10192 break;
10194 case CONST_DOUBLE:
10195 /* We're not interested in the MEM associated with a
10196 CONST_DOUBLE, so there's no need to traverse into one. */
10197 return -1;
10199 default:
10200 /* This is not a MEM. */
10201 return 0;
10204 if (!rtx_equal_p (args->match, m))
10205 /* This is not the MEM we are currently replacing. */
10206 return 0;
10208 /* Actually replace the MEM. */
10209 validate_change (args->insn, mem, args->replacement, 1);
10211 return 0;
10214 static void
10215 replace_loop_mems (insn, mem, reg)
10216 rtx insn;
10217 rtx mem;
10218 rtx reg;
10220 loop_replace_args args;
10222 args.insn = insn;
10223 args.match = mem;
10224 args.replacement = reg;
10226 for_each_rtx (&insn, replace_loop_mem, &args);
10229 /* Replace one register with another. Called through for_each_rtx; PX points
10230 to the rtx being scanned. DATA is actually a pointer to
10231 a structure of arguments. */
10233 static int
10234 replace_loop_reg (px, data)
10235 rtx *px;
10236 void *data;
10238 rtx x = *px;
10239 loop_replace_args *args = (loop_replace_args *) data;
10241 if (x == NULL_RTX)
10242 return 0;
10244 if (x == args->match)
10245 validate_change (args->insn, px, args->replacement, 1);
10247 return 0;
10250 static void
10251 replace_loop_regs (insn, reg, replacement)
10252 rtx insn;
10253 rtx reg;
10254 rtx replacement;
10256 loop_replace_args args;
10258 args.insn = insn;
10259 args.match = reg;
10260 args.replacement = replacement;
10262 for_each_rtx (&insn, replace_loop_reg, &args);
10265 /* Replace occurrences of the old exit label for the loop with the new
10266 one. DATA is an rtx_pair containing the old and new labels,
10267 respectively. */
10269 static int
10270 replace_label (x, data)
10271 rtx *x;
10272 void *data;
10274 rtx l = *x;
10275 rtx old_label = ((rtx_pair *) data)->r1;
10276 rtx new_label = ((rtx_pair *) data)->r2;
10278 if (l == NULL_RTX)
10279 return 0;
10281 if (GET_CODE (l) != LABEL_REF)
10282 return 0;
10284 if (XEXP (l, 0) != old_label)
10285 return 0;
10287 XEXP (l, 0) = new_label;
10288 ++LABEL_NUSES (new_label);
10289 --LABEL_NUSES (old_label);
10291 return 0;
10294 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10295 (ignored in the interim). */
10297 static rtx
10298 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10299 const struct loop *loop ATTRIBUTE_UNUSED;
10300 basic_block where_bb ATTRIBUTE_UNUSED;
10301 rtx where_insn;
10302 rtx pattern;
10304 return emit_insn_after (pattern, where_insn);
10308 /* If WHERE_INSN is non-zero, emit insn for PATTERN before WHERE_INSN
10309 in basic block WHERE_BB (ignored in the interim) within the loop;
10310 otherwise hoist PATTERN into the loop pre-header. */
10312 rtx
10313 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10314 const struct loop *loop;
10315 basic_block where_bb ATTRIBUTE_UNUSED;
10316 rtx where_insn;
10317 rtx pattern;
10319 if (! where_insn)
10320 return loop_insn_hoist (loop, pattern);
10321 return emit_insn_before (pattern, where_insn);
10325 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10326 WHERE_BB (ignored in the interim) within the loop. */
10328 static rtx
10329 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10330 const struct loop *loop ATTRIBUTE_UNUSED;
10331 basic_block where_bb ATTRIBUTE_UNUSED;
10332 rtx where_insn;
10333 rtx pattern;
10335 return emit_call_insn_before (pattern, where_insn);
10339 /* Hoist insn for PATTERN into the loop pre-header. */
10341 rtx
10342 loop_insn_hoist (loop, pattern)
10343 const struct loop *loop;
10344 rtx pattern;
10346 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10350 /* Hoist call insn for PATTERN into the loop pre-header. */
10352 static rtx
10353 loop_call_insn_hoist (loop, pattern)
10354 const struct loop *loop;
10355 rtx pattern;
10357 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10361 /* Sink insn for PATTERN after the loop end. */
10363 rtx
10364 loop_insn_sink (loop, pattern)
10365 const struct loop *loop;
10366 rtx pattern;
10368 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10371 /* bl->final_value can be either general_operand or a PLUS of general_operand
10372 and a constant. Emit a sequence of instructions to load it into REG. */
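/* E.g. for a FINAL_VALUE of (plus (reg 60) (const_int 8)), the
   returned sequence computes (reg 60) + 8 into REG via force_operand
   (an illustrative sketch; the register number is made up).  */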
10373 static rtx
10374 gen_load_of_final_value (reg, final_value)
10375 rtx reg, final_value;
10377 rtx seq;
10378 start_sequence ();
10379 final_value = force_operand (final_value, reg);
10380 if (final_value != reg)
10381 emit_move_insn (reg, final_value);
10382 seq = gen_sequence ();
10383 end_sequence ();
10384 return seq;
10387 /* If the loop has multiple exits, emit insn for PATTERN before the
10388 loop to ensure that it will always be executed no matter how the
10389 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10390 since this is slightly more efficient. */
10392 static rtx
10393 loop_insn_sink_or_swim (loop, pattern)
10394 const struct loop *loop;
10395 rtx pattern;
10397 if (loop->exit_count)
10398 return loop_insn_hoist (loop, pattern);
10399 else
10400 return loop_insn_sink (loop, pattern);
10403 static void
10404 loop_ivs_dump (loop, file, verbose)
10405 const struct loop *loop;
10406 FILE *file;
10407 int verbose;
10409 struct iv_class *bl;
10410 int iv_num = 0;
10412 if (! loop || ! file)
10413 return;
10415 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10416 iv_num++;
10418 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10420 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10422 loop_iv_class_dump (bl, file, verbose);
10423 fputc ('\n', file);
10428 static void
10429 loop_iv_class_dump (bl, file, verbose)
10430 const struct iv_class *bl;
10431 FILE *file;
10432 int verbose ATTRIBUTE_UNUSED;
10434 struct induction *v;
10435 rtx incr;
10436 int i;
10438 if (! bl || ! file)
10439 return;
10441 fprintf (file, "IV class for reg %d, benefit %d\n",
10442 bl->regno, bl->total_benefit);
10444 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10445 if (bl->initial_value)
10447 fprintf (file, ", init val: ");
10448 print_simple_rtl (file, bl->initial_value);
10450 if (bl->initial_test)
10452 fprintf (file, ", init test: ");
10453 print_simple_rtl (file, bl->initial_test);
10455 fputc ('\n', file);
10457 if (bl->final_value)
10459 fprintf (file, " Final val: ");
10460 print_simple_rtl (file, bl->final_value);
10461 fputc ('\n', file);
10464 if ((incr = biv_total_increment (bl)))
10466 fprintf (file, " Total increment: ");
10467 print_simple_rtl (file, incr);
10468 fputc ('\n', file);
10471 /* List the increments. */
10472 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10474 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10475 print_simple_rtl (file, v->add_val);
10476 fputc ('\n', file);
10479 /* List the givs. */
10480 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10482 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10483 i, INSN_UID (v->insn), v->benefit);
10484 if (v->giv_type == DEST_ADDR)
10485 print_simple_rtl (file, v->mem);
10486 else
10487 print_simple_rtl (file, single_set (v->insn));
10488 fputc ('\n', file);
10493 static void
10494 loop_biv_dump (v, file, verbose)
10495 const struct induction *v;
10496 FILE *file;
10497 int verbose;
10499 if (! v || ! file)
10500 return;
10502 fprintf (file,
10503 "Biv %d: insn %d",
10504 REGNO (v->dest_reg), INSN_UID (v->insn));
10505 fprintf (file, " const ");
10506 print_simple_rtl (file, v->add_val);
10508 if (verbose && v->final_value)
10510 fputc ('\n', file);
10511 fprintf (file, " final ");
10512 print_simple_rtl (file, v->final_value);
10515 fputc ('\n', file);
10519 static void
10520 loop_giv_dump (v, file, verbose)
10521 const struct induction *v;
10522 FILE *file;
10523 int verbose;
10525 if (! v || ! file)
10526 return;
10528 if (v->giv_type == DEST_REG)
10529 fprintf (file, "Giv %d: insn %d",
10530 REGNO (v->dest_reg), INSN_UID (v->insn));
10531 else
10532 fprintf (file, "Dest address: insn %d",
10533 INSN_UID (v->insn));
10535 fprintf (file, " src reg %d benefit %d",
10536 REGNO (v->src_reg), v->benefit);
10537 fprintf (file, " lifetime %d",
10538 v->lifetime);
10540 if (v->replaceable)
10541 fprintf (file, " replaceable");
10543 if (v->no_const_addval)
10544 fprintf (file, " ncav");
10546 if (v->ext_dependent)
10548 switch (GET_CODE (v->ext_dependent))
10550 case SIGN_EXTEND:
10551 fprintf (file, " ext se");
10552 break;
10553 case ZERO_EXTEND:
10554 fprintf (file, " ext ze");
10555 break;
10556 case TRUNCATE:
10557 fprintf (file, " ext tr");
10558 break;
10559 default:
10560 abort ();
10564 fputc ('\n', file);
10565 fprintf (file, " mult ");
10566 print_simple_rtl (file, v->mult_val);
10568 fputc ('\n', file);
10569 fprintf (file, " add ");
10570 print_simple_rtl (file, v->add_val);
10572 if (verbose && v->final_value)
10574 fputc ('\n', file);
10575 fprintf (file, " final ");
10576 print_simple_rtl (file, v->final_value);
10579 fputc ('\n', file);
10583 void
10584 debug_ivs (loop)
10585 const struct loop *loop;
10587 loop_ivs_dump (loop, stderr, 1);
10591 void
10592 debug_iv_class (bl)
10593 const struct iv_class *bl;
10595 loop_iv_class_dump (bl, stderr, 1);
10599 void
10600 debug_biv (v)
10601 const struct induction *v;
10603 loop_biv_dump (v, stderr, 1);
10607 void
10608 debug_giv (v)
10609 const struct induction *v;
10611 loop_giv_dump (v, stderr, 1);
10615 #define LOOP_BLOCK_NUM_1(INSN) \
10616 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10618 /* The notes do not have an assigned block, so look at the next insn. */
10619 #define LOOP_BLOCK_NUM(INSN) \
10620 ((INSN) ? (GET_CODE (INSN) == NOTE \
10621 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10622 : LOOP_BLOCK_NUM_1 (INSN)) \
10623 : -1)
10625 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10627 static void
10628 loop_dump_aux (loop, file, verbose)
10629 const struct loop *loop;
10630 FILE *file;
10631 int verbose ATTRIBUTE_UNUSED;
10633 rtx label;
10635 if (! loop || ! file)
10636 return;
10638 /* Print diagnostics to compare our concept of a loop with
10639 what the loop notes say. */
10640 if (! PREV_INSN (loop->first->head)
10641 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10642 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10643 != NOTE_INSN_LOOP_BEG)
10644 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10645 INSN_UID (PREV_INSN (loop->first->head)));
10646 if (! NEXT_INSN (loop->last->end)
10647 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10648 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10649 != NOTE_INSN_LOOP_END)
10650 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10651 INSN_UID (NEXT_INSN (loop->last->end)));
10653 if (loop->start)
10655 fprintf (file,
10656 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10657 LOOP_BLOCK_NUM (loop->start),
10658 LOOP_INSN_UID (loop->start),
10659 LOOP_BLOCK_NUM (loop->cont),
10660 LOOP_INSN_UID (loop->cont),
10661 LOOP_BLOCK_NUM (loop->cont),
10662 LOOP_INSN_UID (loop->cont),
10663 LOOP_BLOCK_NUM (loop->vtop),
10664 LOOP_INSN_UID (loop->vtop),
10665 LOOP_BLOCK_NUM (loop->end),
10666 LOOP_INSN_UID (loop->end));
10667 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10668 LOOP_BLOCK_NUM (loop->top),
10669 LOOP_INSN_UID (loop->top),
10670 LOOP_BLOCK_NUM (loop->scan_start),
10671 LOOP_INSN_UID (loop->scan_start));
10672 fprintf (file, ";; exit_count %d", loop->exit_count);
10673 if (loop->exit_count)
10675 fputs (", labels:", file);
10676 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10678 fprintf (file, " %d ",
10679 LOOP_INSN_UID (XEXP (label, 0)));
10682 fputs ("\n", file);
10684 /* This can happen when a marked loop appears as two nested loops,
10685 say from while (a || b) {}. The inner loop won't match
10686 the loop markers but the outer one will. */
10687 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10688 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10692 /* Call this function from the debugger to dump LOOP. */
10694 void
10695 debug_loop (loop)
10696 const struct loop *loop;
10698 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10701 /* Call this function from the debugger to dump LOOPS. */
10703 void
10704 debug_loops (loops)
10705 const struct loops *loops;
10707 flow_loops_dump (loops, stderr, loop_dump_aux, 1);