/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */
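
/* An illustrative sketch (not part of the original sources): given a
   source loop such as

	for (i = 0; i < n; i++)
	  a[i] = x * y + i;

   the invariant product x * y is hoisted in front of the loop, i is
   recognized as a basic induction variable, and the address computation
   &a[i] becomes a general induction variable that is strength-reduced
   to a pointer bumped by sizeof (a[0]) each iteration, roughly:

	t = x * y;
	p = &a[0];
	for (i = 0; i < n; i++, p++)
	  *p = t + i;  */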
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "obstack.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
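
/* For example (an illustrative sketch, with made-up luids): a pseudo
   first set at luid 10 and last used at luid 25 has
   LOOP_REG_LIFETIME == 15; if luid 25 is greater than
   INSN_LUID (loop->end), LOOP_REG_GLOBAL_P is true and the register
   must be treated as live outside the loop.  */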

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};
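
/* For instance (a sketch with a hypothetical pseudo, not from the
   original sources): for a loop containing the invariant load

	(set (reg 61) (const_int 42))

   scan_loop below records a movable with INSN the load, SET_DEST
   (reg 61), SET_SRC the constant, CONSEC 0 and COND 0; if reg 61 is
   not live outside the loop, GLOBAL is 0 and the insn is a candidate
   for hoisting in move_movables.  */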

FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops PARAMS ((rtx, struct loops *));
static void mark_loop_jump PARAMS ((rtx, struct loop *));
static void prescan_loop PARAMS ((struct loop *));
static int reg_in_basic_block_p PARAMS ((rtx, rtx));
static int consec_sets_invariant_p PARAMS ((const struct loop *,
					    rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static void scan_loop PARAMS ((struct loop*, int));
#if 0
static void replace_call_address PARAMS ((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PARAMS ((rtx, int));
static int libcall_benefit PARAMS ((rtx));
static void ignore_some_movables PARAMS ((struct loop_movables *));
static void force_movables PARAMS ((struct loop_movables *));
static void combine_movables PARAMS ((struct loop_movables *,
				      struct loop_regs *));
static int num_unmoved_movables PARAMS ((const struct loop *));
static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
					 struct loop_regs *));
static void add_label_notes PARAMS ((rtx, rtx));
static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
				   int, int));
static void loop_movables_add PARAMS((struct loop_movables *,
				      struct movable *));
static void loop_movables_free PARAMS((struct loop_movables *));
static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
static void loop_bivs_find PARAMS((struct loop *));
static void loop_bivs_init_find PARAMS((struct loop *));
static void loop_bivs_check PARAMS((struct loop *));
static void loop_givs_find PARAMS((struct loop *));
static void loop_givs_check PARAMS((struct loop *));
static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
					 int, int));
static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
					   struct induction *, rtx));
static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
				     rtx *));
static void loop_ivs_free PARAMS((struct loop *));
static void strength_reduce PARAMS ((struct loop *, int));
static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
static void record_biv PARAMS ((struct loop *, struct induction *,
				rtx, rtx, rtx, rtx, rtx *,
				int, int));
static void check_final_value PARAMS ((const struct loop *,
				       struct induction *));
static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
static void record_giv PARAMS ((const struct loop *, struct induction *,
				rtx, rtx, rtx, rtx, rtx, rtx, int,
				enum g_types, int, int, rtx *));
static void update_giv_derive PARAMS ((const struct loop *, rtx));
static void check_ext_dependant_givs PARAMS ((struct iv_class *,
					      struct loop_info *));
static int basic_induction_var PARAMS ((const struct loop *, rtx,
					enum machine_mode, rtx, rtx,
					rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
					  rtx *, rtx *, rtx *, int, int *,
					  enum machine_mode));
static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
				    rtx, rtx, rtx *, rtx *, rtx *, rtx *));
static int check_dbra_loop PARAMS ((struct loop *, int));
static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
static int product_cheap_p PARAMS ((rtx, rtx));
static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
					int, int, int));
static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
					  struct iv_class *, int,
					  basic_block, rtx));
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
static void loop_regs_scan PARAMS ((const struct loop *, int));
static int count_insns_in_loop PARAMS ((const struct loop *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
static int replace_loop_reg PARAMS ((rtx *, void *));
static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
static void note_reg_stored PARAMS ((rtx, rtx, void *));
static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
					unsigned int));
static int replace_label PARAMS ((rtx *, void *));
static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
static void loop_regs_update PARAMS ((const struct loop *, rtx));
static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));

static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
					rtx, rtx));
static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
					      basic_block, rtx, rtx));
static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));

static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
static void loop_delete_insns PARAMS ((rtx, rtx));
void debug_ivs PARAMS ((const struct loop *));
void debug_iv_class PARAMS ((const struct iv_class *));
void debug_biv PARAMS ((const struct induction *));
void debug_giv PARAMS ((const struct induction *));
void debug_loop PARAMS ((const struct loop *));
void debug_loops PARAMS ((const struct loops *));

typedef struct rtx_pair
{
  rtx r1;
  rtx r2;
} rtx_pair;

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))
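
/* Usage sketch (illustrative): scan_loop below tests whether the target
   of a candidate loop-entry jump P actually lies inside the loop with

	INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end)

   The uid bound is checked first because insns created by loop itself
   have no valid luid entry.  */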

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p PARAMS ((rtx));

static int compute_luids PARAMS ((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
						     struct induction *,
						     rtx));

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop ()
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
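
/* So (an illustrative reading of the above): a giv that cannot simply
   replace its original register forfeits copy_cost, i.e.
   COSTS_N_INSNS (1), from its benefit, and giv benefits are measured
   against reg_address_cost, the cost of a plain register address.  */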

/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
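
/* For instance (illustrative): for the stream

	insn A, line-number note, insn B, insn C

   and PREV_LUID == 0, the luids assigned are 1, 1, 2, 3 -- the note
   shares the luid of the preceding insn, so luid distances are the
   same whether or not -g emitted the note.  */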

/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, flags)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int flags;
{
  register rtx insn;
  register int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
				       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	scan_loop (loop, flags);
    }

  /* If there were lexical blocks inside the loop, they have been
     replicated.  We will now have more than one NOTE_INSN_BLOCK_BEG
     and NOTE_INSN_BLOCK_END for each such block.  We must duplicate
     the BLOCKs as well.  */
  if (write_symbols != NO_DEBUG)
    reorder_blocks ();

  end_alias_analysis ();

  /* Clean up.  */
  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}

/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (loop, insn)
     const struct loop *loop;
     rtx insn;
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
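
/* Traversal sketch (illustrative): for a loop entered near the bottom,

	LOOP_BEG  jump L2  L1: body  L2: exit test  jump L1  LOOP_END

   LOOP->SCAN_START is L2 (the target of the entry jump) and LOOP->TOP
   is L1, so scanning starts at L2, wraps from LOOP_END back to L1, and
   stops on returning to SCAN_START; each insn is visited exactly once
   in execution order.  */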

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop, flags)
     struct loop *loop;
     int flags;
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  register int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */

  for (p = NEXT_INSN (loop_start);
       p != loop_end
       && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
       && (GET_CODE (p) != NOTE
	   || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
	       && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (any_uncondjump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
	{
	  loop->top = next_label (loop->scan_start);
	  loop->scan_start = JUMP_LABEL (p);
	}
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop->cont));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;

	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }

	  /* For parallels, add any possible uses to the dependencies, as
	     we can't move the insn without resolving them first.  */
	  if (GET_CODE (PATTERN (p)) == PARALLEL)
	    {
	      for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		{
		  rtx x = XVECEXP (PATTERN (p), 0, i);
		  if (GET_CODE (x) == USE)
		    dependencies
		      = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					   dependencies);
		}
	    }

	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute
	     the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  else if (/* The register is used in basic blocks other
		      than the one where it is set (meaning that
		      something after this point in the loop might
		      depend on its value before the set).  */
		   ! reg_in_basic_block_p (p, SET_DEST (set))
		   /* And the set is not guaranteed to be executed once
		      the loop starts, or the value before the set is
		      needed before the set occurs...

		      ??? Note we have quadratic behaviour here, mitigated
		      by the fact that the previous test will often fail for
		      large loops.  Rather than re-scanning the entire loop
		      each time for register usage, we should build tables
		      of the register usage and use them here instead.  */
		   && (maybe_never
		       || loop_reg_used_before_p (loop, set, p)))
	    /* It is unsafe to move the set.

	       This code used to consider it OK to move a set of a variable
	       which was not created by the user and not used in an exit
	       test.  That behavior is incorrect and was removed.  */
	    ;
	  else if ((tem = loop_invariant_p (loop, src))
		   && (dependencies == 0
		       || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
		   && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
		       || (tem1
			   = consec_sets_invariant_p
			   (loop, SET_DEST (set),
			    regs->array[REGNO (SET_DEST (set))].set_in_loop,
			    p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      register struct movable *m;
	      register int regno = REGNO (SET_DEST (set));

	      /* A potential lossage is where we have a case where two insns
		 can be combined as long as they are both in the loop, but
		 we move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the address
		 of a function being called.

		 Therefore, if this register is marked as being used exactly
		 once if we are in a loop with calls (a "large loop"), see if
		 we can replace the usage of this register with the source
		 of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

	      if (loop_info->has_call
		  && regs->array[regno].single_usage != 0
		  && regs->array[regno].single_usage != const0_rtx
		  && REGNO_FIRST_UID (regno) == INSN_UID (p)
		  && (REGNO_LAST_UID (regno)
		      == INSN_UID (regs->array[regno].single_usage))
		  && regs->array[regno].set_in_loop == 1
		  && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		  && (! SMALL_REGISTER_CLASSES
		      || (! (GET_CODE (SET_SRC (set)) == REG
			     && (REGNO (SET_SRC (set))
				 < FIRST_PSEUDO_REGISTER))))
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   regs->array[regno].single_usage)
		  && no_labels_between_p (p,
					  regs->array[regno].single_usage)
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   regs->array[regno].single_usage))
		{
		  /* Replace any usage in a REG_EQUAL note.  Must copy the
		     new source, so that we don't get rtx sharing between the
		     SET_SOURCE and REG_NOTES of insn p.  */
		  REG_NOTES (regs->array[regno].single_usage)
		    = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
				   SET_DEST (set), copy_rtx (SET_SRC (set)));

		  PUT_CODE (p, NOTE);
		  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
		  NOTE_SOURCE_FILE (p) = 0;
		  regs->array[regno].set_in_loop = 0;
		  continue;
		}

	      m = (struct movable *) xmalloc (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->move_insn_first = 0;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either loop_invariant_p
		 or consec_sets_invariant_p returned 2
		 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = LOOP_REG_GLOBAL_P (loop, regno);
	      m->match = 0;
	      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
	      m->savings = regs->array[regno].n_times_set;
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      regs->array[regno].set_in_loop = move_insn ? -2 : -1;
	      /* Add M to the end of the chain MOVABLES.  */
	      loop_movables_add (movables, m);

	      if (m->consec > 0)
		{
		  /* It is possible for the first instruction to have a
		     REG_EQUAL note but a non-invariant SET_SRC, so we must
		     remember the status of the first instruction in case
		     the last instruction doesn't have a REG_EQUAL note.  */
		  m->move_insn_first = m->move_insn;

		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);

		  /* We must now reset m->move_insn, m->is_equiv, and
		     possibly m->set_src to correspond to the effects of
		     all the insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }

		  m->is_equiv
		    = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      register int regno = REGNO (SET_DEST (set));
	      if (regs->array[regno].set_in_loop == 2)
		{
		  register struct movable *m;
		  m = (struct movable *) xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->move_insn_first = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || LOOP_REG_GLOBAL_P (loop, regno)
			       || (labels_in_range_p
				   (p, REGNO_FIRST_LUID (regno))));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = 1;
		  regs->array[regno].set_in_loop = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't
	 count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier, then the loop end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the
	     exit code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (loop, movables, threshold, insn_count);

  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not
     actually invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
       && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
	/* Ensure our label doesn't go away.  */
	LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
	  && --LABEL_NUSES (update_end) == 0)
	delete_insn (update_end);
    }

  /* The movable information is required for strength reduction.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}

/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this,
				output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}

/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}

/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of
     the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;

	default:
	  break;
	}
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic
     block.  */
  return 1;
}

/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a
				   library routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
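
/* So (an illustrative example): a libcall block whose body is two
   argument-setup insns followed by the CALL_INSN scores 10 + 2 == 12,
   since the CALL_INSN counts for 10 and each ordinary insn (other than
   USE or CLOBBER patterns) counts for 1.  */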

/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do
	insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}
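
/* E.g. (sketch): with COUNT == 2 and INSN the first insn of a libcall
   sequence, the REG_LIBCALL note sends us straight to the sequence's
   last insn, so the whole libcall counts as a single insn; the
   do-while then steps over any intervening NOTEs.  */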

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct loop_movables *movables;
{
  register struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}

/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (movables)
     struct loop_movables *movables;
{
  register struct movable *m, *m1;
  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m->forces = m1;
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}

/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, regs)
     struct loop_movables *movables;
     struct loop_regs *regs;
{
  register struct movable *m;
  char *matched_regs = (char *) xmalloc (regs->num);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables->head; m; m = m->next)
    if (m->match == 0 && regs->array[m->regno].n_times_set == 1
	&& !m->partial)
      {
	register struct movable *m1;
	int regno = m->regno;

	memset (matched_regs, 0, regs->num);
	matched_regs[regno] = 1;

	/* We want later insns to match the first one.  Don't make the first
	   one match any later ones.  So start this loop at m->next.  */
	for (m1 = m->next; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0
	      && regs->array[m1->regno].n_times_set == 1
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables, regs))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->done = 1;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables->head; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    register struct movable *m1;
	    int first = REGNO_FIRST_LUID (m->regno);
	    int last = REGNO_LAST_LUID (m->regno);

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
		continue;
	      }

	    /* Make sure they extend to the same mode.
	       (Almost always true.)  */
	    if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
	      continue;

	    /* We already have one: check for overlap with those
	       already combined together.  */
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1 == m0 || (m1->partial && m1->match == m0))
		if (! (REGNO_FIRST_LUID (m1->regno) > last
		       || REGNO_LAST_LUID (m1->regno) < first))
		  goto overlap;

	    /* No overlap: we can combine this with the others.  */
	    m0->lifetime += m->lifetime;
	    m0->savings += m->savings;
	    m->done = 1;
	    m->match = m0;

	  overlap:
	    ;
	  }
    }

  /* Clean up.  */
  free (matched_regs);
}
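
/* Sketch (illustrative, with hypothetical pseudos): if a loop contains
   two invariant loads

	(set (reg 61) (const_int 100))
	...
	(set (reg 72) (const_int 100))

   the movable for reg 72 gets its `match' field pointed at the movable
   for reg 61 and its savings folded in; move_movables then hoists only
   the first load and maps uses of reg 72 onto reg 61 through its
   reg_map.  */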

/* Returns the number of movable instructions in LOOP that were not
   moved outside the loop.  */

static int
num_unmoved_movables (loop)
     const struct loop *loop;
{
  int num = 0;
  struct movable *m;

  for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
    if (!m->done)
      ++num;

  return num;
}

/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct loop_movables *movables;
{
  unsigned int xn = REGNO (x);
  unsigned int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables->head; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables->head; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */

static int
rtx_equal_for_loop_p (x, y, movables, regs)
     rtx x, y;
     struct loop_movables *movables;
     struct loop_regs *regs;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register const char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
      && CONSTANT_P (y))
    {
      for (m = movables->head; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (x)
	    && rtx_equal_p (m->set_src, y))
	  return 1;
    }
  else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
	   && CONSTANT_P (x))
    {
      for (m = movables->head; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (y)
	    && rtx_equal_p (m->set_src, x))
	  return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
				      movables, regs) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
	      == 0)
	    return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }

  return 1;
}
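
/* E.g. (sketch with a hypothetical pseudo): if movable M loads
   (const_int 4) into reg 66 with move_insn set (so
   regs->array[66].set_in_loop == -2), then (reg 66) compares equal to
   (const_int 4) here, which lets combine_movables match two
   expressions differing only by that substitution.  */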

/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use the reference.  LABEL_NUSES for CODE_LABEL
   references is incremented once for each added note.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables
	 to avoid flow generating (slightly) worse code.

	 We no longer ignore such label references (see LABEL_REF handling
	 in mark_jump_label for additional information).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (reg_mentioned_p (XEXP (x, 0), insn))
	  {
	    REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
						  REG_NOTES (insn));
	    if (LABEL_P (XEXP (x, 0)))
	      LABEL_NUSES (XEXP (x, 0))++;
	  }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insns);
    }
}
1647 /* Scan MOVABLES, and move the insns that deserve to be moved.
1648 If two matching movables are combined, replace one reg with the
1649 other throughout. */
1651 static void
1652 move_movables (loop, movables, threshold, insn_count)
1653 struct loop *loop;
1654 struct loop_movables *movables;
1655 int threshold;
1656 int insn_count;
1658 struct loop_regs *regs = LOOP_REGS (loop);
1659 int nregs = regs->num;
1660 rtx new_start = 0;
1661 register struct movable *m;
1662 register rtx p;
1663 rtx loop_start = loop->start;
1664 rtx loop_end = loop->end;
1665 /* Map of pseudo-register replacements to handle combining
1666 when we move several insns that load the same value
1667 into different pseudo-registers. */
1668 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1669 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1671 for (m = movables->head; m; m = m->next)
1673 /* Describe this movable insn. */
1675 if (loop_dump_stream)
1677 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1678 INSN_UID (m->insn), m->regno, m->lifetime);
1679 if (m->consec > 0)
1680 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1681 if (m->cond)
1682 fprintf (loop_dump_stream, "cond ");
1683 if (m->force)
1684 fprintf (loop_dump_stream, "force ");
1685 if (m->global)
1686 fprintf (loop_dump_stream, "global ");
1687 if (m->done)
1688 fprintf (loop_dump_stream, "done ");
1689 if (m->move_insn)
1690 fprintf (loop_dump_stream, "move-insn ");
1691 if (m->match)
1692 fprintf (loop_dump_stream, "matches %d ",
1693 INSN_UID (m->match->insn));
1694 if (m->forces)
1695 fprintf (loop_dump_stream, "forces %d ",
1696 INSN_UID (m->forces->insn));
1699 /* Ignore the insn if it's already done (it matched something else).
1700 Otherwise, see if it is now safe to move. */
1702 if (!m->done
1703 && (! m->cond
1704 || (1 == loop_invariant_p (loop, m->set_src)
1705 && (m->dependencies == 0
1706 || 1 == loop_invariant_p (loop, m->dependencies))
1707 && (m->consec == 0
1708 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1709 m->consec + 1,
1710 m->insn))))
1711 && (! m->forces || m->forces->done))
1713 register int regno;
1714 register rtx p;
1715 int savings = m->savings;
1717 /* We have an insn that is safe to move.
1718 Compute its desirability. */
1720 p = m->insn;
1721 regno = m->regno;
1723 if (loop_dump_stream)
1724 fprintf (loop_dump_stream, "savings %d ", savings);
1726 if (regs->array[regno].moved_once && loop_dump_stream)
1727 fprintf (loop_dump_stream, "halved since already moved ");
1729 /* An insn MUST be moved if we already moved something else
1730 which is safe only if this one is moved too: that is,
1731 if already_moved[REGNO] is nonzero. */
1733 /* An insn is desirable to move if the new lifetime of the
1734 register is no more than THRESHOLD times the old lifetime.
1735 If it's not desirable, it means the loop is so big
1736 that moving won't speed things up much,
1737 and it is liable to make register usage worse. */
1739 /* It is also desirable to move if it can be moved at no
1740 extra cost because something else was already moved. */
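/* Editor's note: a worked instance of the test below, with purely
   illustrative numbers. If THRESHOLD is 3, SAVINGS is 2, and the
   register's LIFETIME is 10, the left-hand side is 3 * 2 * 10 = 60,
   so the insn is moved out of any loop of at most 60 insns. If the
   register was already moved out of another loop, the right-hand side
   doubles to insn_count * 2, so only loops of at most 30 insns still
   qualify -- hence the "halved since already moved" dump message
   above. */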
1742 if (already_moved[regno]
1743 || flag_move_all_movables
1744 || (threshold * savings * m->lifetime) >=
1745 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1746 || (m->forces && m->forces->done
1747 && regs->array[m->forces->regno].n_times_set == 1))
1749 int count;
1750 register struct movable *m1;
1751 rtx first = NULL_RTX;
1753 /* Now move the insns that set the reg. */
1755 if (m->partial && m->match)
1757 rtx newpat, i1;
1758 rtx r1, r2;
1759 /* Find the end of this chain of matching regs.
1760 Thus, we load each reg in the chain from that one reg.
1761 And that reg is loaded with 0 directly,
1762 since it has ->match == 0. */
1763 for (m1 = m; m1->match; m1 = m1->match);
1764 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1765 SET_DEST (PATTERN (m1->insn)));
1766 i1 = loop_insn_hoist (loop, newpat);
1768 /* Mark the moved, invariant reg as being allowed to
1769 share a hard reg with the other matching invariant. */
1770 REG_NOTES (i1) = REG_NOTES (m->insn);
1771 r1 = SET_DEST (PATTERN (m->insn));
1772 r2 = SET_DEST (PATTERN (m1->insn));
1773 regs_may_share
1774 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1775 gen_rtx_EXPR_LIST (VOIDmode, r2,
1776 regs_may_share));
1777 delete_insn (m->insn);
1779 if (new_start == 0)
1780 new_start = i1;
1782 if (loop_dump_stream)
1783 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1785 /* If we are to re-generate the item being moved with a
1786 new move insn, first delete what we have and then emit
1787 the move insn before the loop. */
1788 else if (m->move_insn)
1790 rtx i1, temp, seq;
1792 for (count = m->consec; count >= 0; count--)
1794 /* If this is the first insn of a library call sequence,
1795 skip to the end. */
1796 if (GET_CODE (p) != NOTE
1797 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1798 p = XEXP (temp, 0);
1800 /* If this is the last insn of a libcall sequence, then
1801 delete every insn in the sequence except the last.
1802 The last insn is handled in the normal manner. */
1803 if (GET_CODE (p) != NOTE
1804 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1806 temp = XEXP (temp, 0);
1807 while (temp != p)
1808 temp = delete_insn (temp);
1811 temp = p;
1812 p = delete_insn (p);
1814 /* simplify_giv_expr expects that it can walk the insns
1815 at m->insn forwards and see this old sequence we are
1816 tossing here. delete_insn does preserve the next
1817 pointers, but when we skip over a NOTE we must fix
1818 it up. Otherwise that code walks into the non-deleted
1819 insn stream. */
1820 while (p && GET_CODE (p) == NOTE)
1821 p = NEXT_INSN (temp) = NEXT_INSN (p);
1824 start_sequence ();
1825 emit_move_insn (m->set_dest, m->set_src);
1826 temp = get_insns ();
1827 seq = gen_sequence ();
1828 end_sequence ();
1830 add_label_notes (m->set_src, temp);
1832 i1 = loop_insn_hoist (loop, seq);
1833 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1834 REG_NOTES (i1)
1835 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1836 m->set_src, REG_NOTES (i1));
1838 if (loop_dump_stream)
1839 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1841 /* The more regs we move, the less we like moving them. */
1842 threshold -= 3;
1844 else
1846 for (count = m->consec; count >= 0; count--)
1848 rtx i1, temp;
1850 /* If first insn of libcall sequence, skip to end. */
1851 /* Do this at start of loop, since p is guaranteed to
1852 be an insn here. */
1853 if (GET_CODE (p) != NOTE
1854 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1855 p = XEXP (temp, 0);
1857 /* If last insn of libcall sequence, move all
1858 insns except the last before the loop. The last
1859 insn is handled in the normal manner. */
1860 if (GET_CODE (p) != NOTE
1861 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1863 rtx fn_address = 0;
1864 rtx fn_reg = 0;
1865 rtx fn_address_insn = 0;
1867 first = 0;
1868 for (temp = XEXP (temp, 0); temp != p;
1869 temp = NEXT_INSN (temp))
1871 rtx body;
1872 rtx n;
1873 rtx next;
1875 if (GET_CODE (temp) == NOTE)
1876 continue;
1878 body = PATTERN (temp);
1880 /* Find the next insn after TEMP,
1881 not counting USE or NOTE insns. */
1882 for (next = NEXT_INSN (temp); next != p;
1883 next = NEXT_INSN (next))
1884 if (! (GET_CODE (next) == INSN
1885 && GET_CODE (PATTERN (next)) == USE)
1886 && GET_CODE (next) != NOTE)
1887 break;
1889 /* If that is the call, this may be the insn
1890 that loads the function address.
1892 Extract the function address from the insn
1893 that loads it into a register.
1894 If this insn was cse'd, we get incorrect code.
1896 So emit a new move insn that copies the
1897 function address into the register that the
1898 call insn will use. flow.c will delete any
1899 redundant stores that we have created. */
1900 if (GET_CODE (next) == CALL_INSN
1901 && GET_CODE (body) == SET
1902 && GET_CODE (SET_DEST (body)) == REG
1903 && (n = find_reg_note (temp, REG_EQUAL,
1904 NULL_RTX)))
1906 fn_reg = SET_SRC (body);
1907 if (GET_CODE (fn_reg) != REG)
1908 fn_reg = SET_DEST (body);
1909 fn_address = XEXP (n, 0);
1910 fn_address_insn = temp;
1912 /* We have the call insn.
1913 If it uses the register we suspect it might,
1914 load it with the correct address directly. */
1915 if (GET_CODE (temp) == CALL_INSN
1916 && fn_address != 0
1917 && reg_referenced_p (fn_reg, body))
1918 loop_insn_emit_after (loop, 0, fn_address_insn,
1919 gen_move_insn
1920 (fn_reg, fn_address));
1922 if (GET_CODE (temp) == CALL_INSN)
1924 i1 = loop_call_insn_hoist (loop, body);
1925 /* Because the USAGE information potentially
1926 contains objects other than hard registers
1927 we need to copy it. */
1928 if (CALL_INSN_FUNCTION_USAGE (temp))
1929 CALL_INSN_FUNCTION_USAGE (i1)
1930 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1932 else
1933 i1 = loop_insn_hoist (loop, body);
1934 if (first == 0)
1935 first = i1;
1936 if (temp == fn_address_insn)
1937 fn_address_insn = i1;
1938 REG_NOTES (i1) = REG_NOTES (temp);
1939 delete_insn (temp);
1941 if (new_start == 0)
1942 new_start = first;
1944 if (m->savemode != VOIDmode)
1946 /* P sets REG to zero; but we should clear only
1947 the bits that are not covered by the mode
1948 m->savemode. */
1949 rtx reg = m->set_dest;
1950 rtx sequence;
1951 rtx tem;
1953 start_sequence ();
1954 tem = expand_simple_binop
1955 (GET_MODE (reg), AND, reg,
1956 GEN_INT ((((HOST_WIDE_INT) 1
1957 << GET_MODE_BITSIZE (m->savemode)))
1958 - 1),
1959 reg, 1, OPTAB_LIB_WIDEN);
1960 if (tem == 0)
1961 abort ();
1962 if (tem != reg)
1963 emit_move_insn (reg, tem);
1964 sequence = gen_sequence ();
1965 end_sequence ();
1966 i1 = loop_insn_hoist (loop, sequence);
1968 else if (GET_CODE (p) == CALL_INSN)
1970 i1 = loop_call_insn_hoist (loop, PATTERN (p));
1971 /* Because the USAGE information potentially
1972 contains objects other than hard registers
1973 we need to copy it. */
1974 if (CALL_INSN_FUNCTION_USAGE (p))
1975 CALL_INSN_FUNCTION_USAGE (i1)
1976 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1978 else if (count == m->consec && m->move_insn_first)
1980 rtx seq;
1981 /* The SET_SRC might not be invariant, so we must
1982 use the REG_EQUAL note. */
1983 start_sequence ();
1984 emit_move_insn (m->set_dest, m->set_src);
1985 temp = get_insns ();
1986 seq = gen_sequence ();
1987 end_sequence ();
1989 add_label_notes (m->set_src, temp);
1991 i1 = loop_insn_hoist (loop, seq);
1992 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1993 REG_NOTES (i1)
1994 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
1995 : REG_EQUAL),
1996 m->set_src, REG_NOTES (i1));
1998 else
1999 i1 = loop_insn_hoist (loop, PATTERN (p));
2001 if (REG_NOTES (i1) == 0)
2003 REG_NOTES (i1) = REG_NOTES (p);
2005 /* If there is a REG_EQUAL note present whose value
2006 is not loop invariant, then delete it, since it
2007 may cause problems with later optimization passes.
2008 It is possible for cse to create such notes
2009 like this as a result of record_jump_cond. */
2011 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2012 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2013 remove_note (i1, temp);
2016 if (new_start == 0)
2017 new_start = i1;
2019 if (loop_dump_stream)
2020 fprintf (loop_dump_stream, " moved to %d",
2021 INSN_UID (i1));
2023 /* If library call, now fix the REG_NOTES that contain
2024 insn pointers, namely REG_LIBCALL on FIRST
2025 and REG_RETVAL on I1. */
2026 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2028 XEXP (temp, 0) = first;
2029 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2030 XEXP (temp, 0) = i1;
2033 temp = p;
2034 delete_insn (p);
2035 p = NEXT_INSN (p);
2037 /* simplify_giv_expr expects that it can walk the insns
2038 at m->insn forwards and see this old sequence we are
2039 tossing here. delete_insn does preserve the next
2040 pointers, but when we skip over a NOTE we must fix
2041 it up. Otherwise that code walks into the non-deleted
2042 insn stream. */
2043 while (p && GET_CODE (p) == NOTE)
2044 p = NEXT_INSN (temp) = NEXT_INSN (p);
2047 /* The more regs we move, the less we like moving them. */
2048 threshold -= 3;
2051 /* Any other movable that loads the same register
2052 MUST be moved. */
2053 already_moved[regno] = 1;
2055 /* This reg has been moved out of one loop. */
2056 regs->array[regno].moved_once = 1;
2058 /* The reg set here is now invariant. */
2059 if (! m->partial)
2060 regs->array[regno].set_in_loop = 0;
2062 m->done = 1;
2064 /* Change the length-of-life info for the register
2065 to say it lives at least the full length of this loop.
2066 This will help guide optimizations in outer loops. */
2068 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2069 /* This is the old insn before all the moved insns.
2070 We can't use the moved insn because it is out of range
2071 in uid_luid. Only the old insns have luids. */
2072 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2073 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2074 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2076 /* Combine with this moved insn any other matching movables. */
2078 if (! m->partial)
2079 for (m1 = movables->head; m1; m1 = m1->next)
2080 if (m1->match == m)
2082 rtx temp;
2084 /* Schedule the reg loaded by M1
2085 for replacement so that it shares the reg of M.
2086 If the modes differ (only possible in restricted
2087 circumstances), make a SUBREG.
2089 Note this assumes that the target dependent files
2090 treat REG and SUBREG equally, including within
2091 GO_IF_LEGITIMATE_ADDRESS and in all the
2092 predicates since we never verify that replacing the
2093 original register with a SUBREG results in a
2094 recognizable insn. */
2095 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2096 reg_map[m1->regno] = m->set_dest;
2097 else
2098 reg_map[m1->regno]
2099 = gen_lowpart_common (GET_MODE (m1->set_dest),
2100 m->set_dest);
2102 /* Get rid of the matching insn
2103 and prevent further processing of it. */
2104 m1->done = 1;
2106 /* If library call, delete all insns except the last, which
2107 is deleted below. */
2108 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2109 NULL_RTX)))
2111 for (temp = XEXP (temp, 0); temp != m1->insn;
2112 temp = NEXT_INSN (temp))
2113 delete_insn (temp);
2115 delete_insn (m1->insn);
2117 /* Any other movable that loads the same register
2118 MUST be moved. */
2119 already_moved[m1->regno] = 1;
2121 /* The reg merged here is now invariant,
2122 if the reg it matches is invariant. */
2123 if (! m->partial)
2124 regs->array[m1->regno].set_in_loop = 0;
2127 else if (loop_dump_stream)
2128 fprintf (loop_dump_stream, "not desirable");
2130 else if (loop_dump_stream && !m->match)
2131 fprintf (loop_dump_stream, "not safe");
2133 if (loop_dump_stream)
2134 fprintf (loop_dump_stream, "\n");
2137 if (new_start == 0)
2138 new_start = loop_start;
2140 /* Go through all the instructions in the loop, making
2141 all the register substitutions scheduled in REG_MAP. */
2142 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2143 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2144 || GET_CODE (p) == CALL_INSN)
2146 replace_regs (PATTERN (p), reg_map, nregs, 0);
2147 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2148 INSN_CODE (p) = -1;
2151 /* Clean up. */
2152 free (reg_map);
2153 free (already_moved);
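/* Editor's note: an illustrative combining scenario for the code above
   (the register numbers are hypothetical). If one movable loads
   (const_int 5) into pseudo 101 and a matching movable loads the same
   value into pseudo 102, only the first is hoisted: reg_map[102] is set
   to pseudo 101, the matching insn is deleted, and the substitution
   pass at the end rewrites every use of 102 between NEW_START and
   LOOP_END to 101, resetting INSN_CODE so the modified insns are
   re-recognized. */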
2157 static void
2158 loop_movables_add (movables, m)
2159 struct loop_movables *movables;
2160 struct movable *m;
2162 if (movables->head == 0)
2163 movables->head = m;
2164 else
2165 movables->last->next = m;
2166 movables->last = m;
2170 static void
2171 loop_movables_free (movables)
2172 struct loop_movables *movables;
2174 struct movable *m;
2175 struct movable *m_next;
2177 for (m = movables->head; m; m = m_next)
2179 m_next = m->next;
2180 free (m);
2184 #if 0
2185 /* Scan X and replace the address of any MEM in it with ADDR.
2186 REG is the address that MEM should have before the replacement. */
2188 static void
2189 replace_call_address (x, reg, addr)
2190 rtx x, reg, addr;
2192 register enum rtx_code code;
2193 register int i;
2194 register const char *fmt;
2196 if (x == 0)
2197 return;
2198 code = GET_CODE (x);
2199 switch (code)
2201 case PC:
2202 case CC0:
2203 case CONST_INT:
2204 case CONST_DOUBLE:
2205 case CONST:
2206 case SYMBOL_REF:
2207 case LABEL_REF:
2208 case REG:
2209 return;
2211 case SET:
2212 /* Short cut for very common case. */
2213 replace_call_address (XEXP (x, 1), reg, addr);
2214 return;
2216 case CALL:
2217 /* Short cut for very common case. */
2218 replace_call_address (XEXP (x, 0), reg, addr);
2219 return;
2221 case MEM:
2222 /* If this MEM uses a reg other than the one we expected,
2223 something is wrong. */
2224 if (XEXP (x, 0) != reg)
2225 abort ();
2226 XEXP (x, 0) = addr;
2227 return;
2229 default:
2230 break;
2233 fmt = GET_RTX_FORMAT (code);
2234 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2236 if (fmt[i] == 'e')
2237 replace_call_address (XEXP (x, i), reg, addr);
2238 else if (fmt[i] == 'E')
2240 register int j;
2241 for (j = 0; j < XVECLEN (x, i); j++)
2242 replace_call_address (XVECEXP (x, i, j), reg, addr);
2246 #endif
2248 /* Return the number of memory refs to addresses that vary
2249 in the rtx X. */
2251 static int
2252 count_nonfixed_reads (loop, x)
2253 const struct loop *loop;
2254 rtx x;
2256 register enum rtx_code code;
2257 register int i;
2258 register const char *fmt;
2259 int value;
2261 if (x == 0)
2262 return 0;
2264 code = GET_CODE (x);
2265 switch (code)
2267 case PC:
2268 case CC0:
2269 case CONST_INT:
2270 case CONST_DOUBLE:
2271 case CONST:
2272 case SYMBOL_REF:
2273 case LABEL_REF:
2274 case REG:
2275 return 0;
2277 case MEM:
2278 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2279 + count_nonfixed_reads (loop, XEXP (x, 0)));
2281 default:
2282 break;
2285 value = 0;
2286 fmt = GET_RTX_FORMAT (code);
2287 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2289 if (fmt[i] == 'e')
2290 value += count_nonfixed_reads (loop, XEXP (x, i));
2291 if (fmt[i] == 'E')
2293 register int j;
2294 for (j = 0; j < XVECLEN (x, i); j++)
2295 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2298 return value;
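/* Editor's note: an illustrative count (the pseudo number is
   hypothetical). For X == (mem:SI (plus (reg 100) (const_int 4))) in a
   loop where reg 100 is not invariant, the MEM case contributes 1 and
   the recursive walk of the address finds no further MEMs, so the
   result is 1; a MEM nested inside the address would add its own
   test. */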
2301 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2302 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2303 `unknown_address_altered', `unknown_constant_address_altered', and
2304 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2305 list `store_mems' in LOOP. */
2307 static void
2308 prescan_loop (loop)
2309 struct loop *loop;
2311 register int level = 1;
2312 rtx insn;
2313 struct loop_info *loop_info = LOOP_INFO (loop);
2314 rtx start = loop->start;
2315 rtx end = loop->end;
2316 /* The label after END. Jumping here is just like falling off the
2317 end of the loop. We use next_nonnote_insn instead of next_label
2318 as a hedge against the (pathological) case where some actual insn
2319 might end up between the two. */
2320 rtx exit_target = next_nonnote_insn (end);
2322 loop_info->has_indirect_jump = indirect_jump_in_function;
2323 loop_info->pre_header_has_call = 0;
2324 loop_info->has_call = 0;
2325 loop_info->has_nonconst_call = 0;
2326 loop_info->has_volatile = 0;
2327 loop_info->has_tablejump = 0;
2328 loop_info->has_multiple_exit_targets = 0;
2329 loop->level = 1;
2331 loop_info->unknown_address_altered = 0;
2332 loop_info->unknown_constant_address_altered = 0;
2333 loop_info->store_mems = NULL_RTX;
2334 loop_info->first_loop_store_insn = NULL_RTX;
2335 loop_info->mems_idx = 0;
2336 loop_info->num_mem_sets = 0;
2339 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2340 insn = PREV_INSN (insn))
2342 if (GET_CODE (insn) == CALL_INSN)
2344 loop_info->pre_header_has_call = 1;
2345 break;
2349 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2350 insn = NEXT_INSN (insn))
2352 if (GET_CODE (insn) == NOTE)
2354 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2356 ++level;
2357 /* Count number of loops contained in this one. */
2358 loop->level++;
2360 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2362 --level;
2365 else if (GET_CODE (insn) == CALL_INSN)
2367 if (! CONST_OR_PURE_CALL_P (insn))
2369 loop_info->unknown_address_altered = 1;
2370 loop_info->has_nonconst_call = 1;
2372 loop_info->has_call = 1;
2374 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2376 rtx label1 = NULL_RTX;
2377 rtx label2 = NULL_RTX;
2379 if (volatile_refs_p (PATTERN (insn)))
2380 loop_info->has_volatile = 1;
2382 if (GET_CODE (insn) == JUMP_INSN
2383 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2384 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2385 loop_info->has_tablejump = 1;
2387 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2388 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2389 loop_info->first_loop_store_insn = insn;
2391 if (! loop_info->has_multiple_exit_targets
2392 && GET_CODE (insn) == JUMP_INSN
2393 && GET_CODE (PATTERN (insn)) == SET
2394 && SET_DEST (PATTERN (insn)) == pc_rtx)
2396 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2398 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2399 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2401 else
2403 label1 = SET_SRC (PATTERN (insn));
2406 do
2408 if (label1 && label1 != pc_rtx)
2410 if (GET_CODE (label1) != LABEL_REF)
2412 /* Something tricky. */
2413 loop_info->has_multiple_exit_targets = 1;
2414 break;
2416 else if (XEXP (label1, 0) != exit_target
2417 && LABEL_OUTSIDE_LOOP_P (label1))
2419 /* A jump outside the current loop. */
2420 loop_info->has_multiple_exit_targets = 1;
2421 break;
2425 label1 = label2;
2426 label2 = NULL_RTX;
2428 while (label1);
2431 else if (GET_CODE (insn) == RETURN)
2432 loop_info->has_multiple_exit_targets = 1;
2435 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2436 if (/* An exception thrown by a called function might land us
2437 anywhere. */
2438 ! loop_info->has_nonconst_call
2439 /* We don't want loads for MEMs moved to a location before the
2440 one at which their stack memory becomes allocated. (Note
2441 that this is not a problem for malloc, etc., since those
2442 require actual function calls.) */
2443 && ! current_function_calls_alloca
2444 /* There are ways to leave the loop other than falling off the
2445 end. */
2446 && ! loop_info->has_multiple_exit_targets)
2447 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2448 insn = NEXT_INSN (insn))
2449 for_each_rtx (&insn, insert_loop_mem, loop_info);
2451 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2452 that loop_invariant_p and load_mems can use true_dependence
2453 to determine what is really clobbered. */
2454 if (loop_info->unknown_address_altered)
2456 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2458 loop_info->store_mems
2459 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2461 if (loop_info->unknown_constant_address_altered)
2463 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2465 RTX_UNCHANGING_P (mem) = 1;
2466 loop_info->store_mems
2467 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
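/* Editor's note: the scratch (mem:BLK (const_int 0)) entries above are
   conservative markers rather than real references. A sketch of how
   they are consumed (see loop_invariant_p below): each load X is
   tested against every recorded store with

     true_dependence (XEXP (mem_list_entry, 0), VOIDmode, x, rtx_varies_p)

   and a BLKmode MEM is intended to conflict with any X, so once
   unknown_address_altered is set no MEM in the loop is treated as
   invariant; the RTX_UNCHANGING_P variant extends this to read-only
   locations. */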
2471 /* Scan the function looking for loops. Record the start and end of each loop.
2472 Also mark as invalid loops any loops that contain a setjmp or are branched
2473 to from outside the loop. */
2475 static void
2476 find_and_verify_loops (f, loops)
2477 rtx f;
2478 struct loops *loops;
2480 rtx insn;
2481 rtx label;
2482 int num_loops;
2483 struct loop *current_loop;
2484 struct loop *next_loop;
2485 struct loop *loop;
2487 num_loops = loops->num;
2489 compute_luids (f, NULL_RTX, 0);
2491 /* If there are jumps to undefined labels,
2492 treat them as jumps out of any/all loops.
2493 This also avoids writing past end of tables when there are no loops. */
2494 uid_loop[0] = NULL;
2496 /* Find boundaries of loops, mark which loops are contained within
2497 loops, and invalidate loops that have setjmp. */
2499 num_loops = 0;
2500 current_loop = NULL;
2501 for (insn = f; insn; insn = NEXT_INSN (insn))
2503 if (GET_CODE (insn) == NOTE)
2504 switch (NOTE_LINE_NUMBER (insn))
2506 case NOTE_INSN_LOOP_BEG:
2507 next_loop = loops->array + num_loops;
2508 next_loop->num = num_loops;
2509 num_loops++;
2510 next_loop->start = insn;
2511 next_loop->outer = current_loop;
2512 current_loop = next_loop;
2513 break;
2515 case NOTE_INSN_LOOP_CONT:
2516 current_loop->cont = insn;
2517 break;
2519 case NOTE_INSN_LOOP_VTOP:
2520 current_loop->vtop = insn;
2521 break;
2523 case NOTE_INSN_LOOP_END:
2524 if (! current_loop)
2525 abort ();
2527 current_loop->end = insn;
2528 current_loop = current_loop->outer;
2529 break;
2531 default:
2532 break;
2535 if (GET_CODE (insn) == CALL_INSN
2536 && find_reg_note (insn, REG_SETJMP, NULL))
2538 /* In this case, we must invalidate our current loop and any
2539 enclosing loop. */
2540 for (loop = current_loop; loop; loop = loop->outer)
2542 loop->invalid = 1;
2543 if (loop_dump_stream)
2544 fprintf (loop_dump_stream,
2545 "\nLoop at %d ignored due to setjmp.\n",
2546 INSN_UID (loop->start));
2550 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2551 enclosing loop, but this doesn't matter. */
2552 uid_loop[INSN_UID (insn)] = current_loop;
2555 /* Any loop containing a label used in an initializer must be invalidated,
2556 because it can be jumped into from anywhere. */
2558 for (label = forced_labels; label; label = XEXP (label, 1))
2560 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2561 loop; loop = loop->outer)
2562 loop->invalid = 1;
2565 /* Any loop containing a label used for an exception handler must be
2566 invalidated, because it can be jumped into from anywhere. */
2568 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2570 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2571 loop; loop = loop->outer)
2572 loop->invalid = 1;
2575 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2576 loop that it is not contained within, that loop is marked invalid.
2577 If any INSN or CALL_INSN uses a label's address, then the loop containing
2578 that label is marked invalid, because it could be jumped into from
2579 anywhere.
2581 Also look for blocks of code ending in an unconditional branch that
2582 exits the loop. If such a block is surrounded by a conditional
2583 branch around the block, move the block elsewhere (see below) and
2584 invert the jump to point to the code block. This may eliminate a
2585 label in our loop and will simplify processing by both us and a
2586 possible second cse pass. */
2588 for (insn = f; insn; insn = NEXT_INSN (insn))
2589 if (INSN_P (insn))
2591 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2593 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2595 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2596 if (note)
2598 for (loop = uid_loop[INSN_UID (XEXP (note, 0))];
2599 loop; loop = loop->outer)
2600 loop->invalid = 1;
2604 if (GET_CODE (insn) != JUMP_INSN)
2605 continue;
2607 mark_loop_jump (PATTERN (insn), this_loop);
2609 /* See if this is an unconditional branch outside the loop. */
2610 if (this_loop
2611 && (GET_CODE (PATTERN (insn)) == RETURN
2612 || (any_uncondjump_p (insn)
2613 && onlyjump_p (insn)
2614 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2615 != this_loop)))
2616 && get_max_uid () < max_uid_for_loop)
2618 rtx p;
2619 rtx our_next = next_real_insn (insn);
2620 rtx last_insn_to_move = NEXT_INSN (insn);
2621 struct loop *dest_loop;
2622 struct loop *outer_loop = NULL;
2624 /* Go backwards until we reach the start of the loop, a label,
2625 or a JUMP_INSN. */
2626 for (p = PREV_INSN (insn);
2627 GET_CODE (p) != CODE_LABEL
2628 && ! (GET_CODE (p) == NOTE
2629 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2630 && GET_CODE (p) != JUMP_INSN;
2631 p = PREV_INSN (p))
2634 /* Check for the case where we have a jump to an inner nested
2635 loop, and do not perform the optimization in that case. */
2637 if (JUMP_LABEL (insn))
2639 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2640 if (dest_loop)
2642 for (outer_loop = dest_loop; outer_loop;
2643 outer_loop = outer_loop->outer)
2644 if (outer_loop == this_loop)
2645 break;
2649 /* Make sure that the target of P is within the current loop. */
2651 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2652 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2653 outer_loop = this_loop;
2655 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2656 we have a block of code to try to move.
2658 We look backward and then forward from the target of INSN
2659 to find a BARRIER at the same loop depth as the target.
2660 If we find such a BARRIER, we make a new label for the start
2661 of the block, invert the jump in P and point it to that label,
2662 and move the block of code to the spot we found. */
2664 if (! outer_loop
2665 && GET_CODE (p) == JUMP_INSN
2666 && JUMP_LABEL (p) != 0
2667 /* Just ignore jumps to labels that were never emitted.
2668 These always indicate compilation errors. */
2669 && INSN_UID (JUMP_LABEL (p)) != 0
2670 && any_condjump_p (p) && onlyjump_p (p)
2671 && next_real_insn (JUMP_LABEL (p)) == our_next
2672 /* If it's not safe to move the sequence, then we
2673 mustn't try. */
2674 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2675 &last_insn_to_move))
2677 rtx target
2678 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2679 struct loop *target_loop = uid_loop[INSN_UID (target)];
2680 rtx loc, loc2;
2681 rtx tmp;
2683 /* Search for possible garbage past the conditional jumps
2684 and look for the last barrier. */
2685 for (tmp = last_insn_to_move;
2686 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2687 if (GET_CODE (tmp) == BARRIER)
2688 last_insn_to_move = tmp;
2690 for (loc = target; loc; loc = PREV_INSN (loc))
2691 if (GET_CODE (loc) == BARRIER
2692 /* Don't move things inside a tablejump. */
2693 && ((loc2 = next_nonnote_insn (loc)) == 0
2694 || GET_CODE (loc2) != CODE_LABEL
2695 || (loc2 = next_nonnote_insn (loc2)) == 0
2696 || GET_CODE (loc2) != JUMP_INSN
2697 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2698 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2699 && uid_loop[INSN_UID (loc)] == target_loop)
2700 break;
2702 if (loc == 0)
2703 for (loc = target; loc; loc = NEXT_INSN (loc))
2704 if (GET_CODE (loc) == BARRIER
2705 /* Don't move things inside a tablejump. */
2706 && ((loc2 = next_nonnote_insn (loc)) == 0
2707 || GET_CODE (loc2) != CODE_LABEL
2708 || (loc2 = next_nonnote_insn (loc2)) == 0
2709 || GET_CODE (loc2) != JUMP_INSN
2710 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2711 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2712 && uid_loop[INSN_UID (loc)] == target_loop)
2713 break;
2715 if (loc)
2717 rtx cond_label = JUMP_LABEL (p);
2718 rtx new_label = get_label_after (p);
2720 /* Ensure our label doesn't go away. */
2721 LABEL_NUSES (cond_label)++;
2723 /* Verify that uid_loop is large enough and that
2724 we can invert P. */
2725 if (invert_jump (p, new_label, 1))
2727 rtx q, r;
2729 /* If no suitable BARRIER was found, create a suitable
2730 one before TARGET. Since TARGET is a fall through
2731 path, we'll need to insert a jump around our block
2732 and add a BARRIER before TARGET.
2734 This creates an extra unconditional jump outside
2735 the loop. However, the benefits of removing rarely
2736 executed instructions from inside the loop usually
2737 outweigh the cost of the extra unconditional jump
2738 outside the loop. */
2739 if (loc == 0)
2741 rtx temp;
2743 temp = gen_jump (JUMP_LABEL (insn));
2744 temp = emit_jump_insn_before (temp, target);
2745 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2746 LABEL_NUSES (JUMP_LABEL (insn))++;
2747 loc = emit_barrier_before (target);
2750 /* Include the BARRIER after INSN and copy the
2751 block after LOC. */
2752 new_label = squeeze_notes (new_label,
2753 last_insn_to_move);
2754 reorder_insns (new_label, last_insn_to_move, loc);
2756 /* All those insns are now in TARGET_LOOP. */
2757 for (q = new_label;
2758 q != NEXT_INSN (last_insn_to_move);
2759 q = NEXT_INSN (q))
2760 uid_loop[INSN_UID (q)] = target_loop;
2762 /* The label jumped to by INSN is no longer a loop
2763 exit. Unless INSN does not have a label (e.g.,
2764 it is a RETURN insn), search loop->exit_labels
2765 to find its label_ref, and remove it. Also turn
2766 off LABEL_OUTSIDE_LOOP_P bit. */
2767 if (JUMP_LABEL (insn))
2769 for (q = 0, r = this_loop->exit_labels;
2770 r;
2771 q = r, r = LABEL_NEXTREF (r))
2772 if (XEXP (r, 0) == JUMP_LABEL (insn))
2774 LABEL_OUTSIDE_LOOP_P (r) = 0;
2775 if (q)
2776 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2777 else
2778 this_loop->exit_labels = LABEL_NEXTREF (r);
2779 break;
2782 for (loop = this_loop; loop && loop != target_loop;
2783 loop = loop->outer)
2784 loop->exit_count--;
2786 /* If we didn't find it, then something is
2787 wrong. */
2788 if (! r)
2789 abort ();
2792 /* P is now a jump outside the loop, so it must be put
2793 in loop->exit_labels, and marked as such.
2794 The easiest way to do this is to just call
2795 mark_loop_jump again for P. */
2796 mark_loop_jump (PATTERN (p), this_loop);
2798 /* If INSN now jumps to the insn after it,
2799 delete INSN. */
2800 if (JUMP_LABEL (insn) != 0
2801 && (next_real_insn (JUMP_LABEL (insn))
2802 == next_real_insn (insn)))
2803 delete_insn (insn);
2806 /* Continue the loop after where the conditional
2807 branch used to jump, since the only branch insn
2808 in the block (if it still remains) is an inter-loop
2809 branch and hence needs no processing. */
2810 insn = NEXT_INSN (cond_label);
2812 if (--LABEL_NUSES (cond_label) == 0)
2813 delete_insn (cond_label);
2815 /* This loop will be continued with NEXT_INSN (insn). */
2816 insn = PREV_INSN (insn);
2823 /* If any label in X jumps to a loop different from LOOP and any of the
2824 loops it is contained in, mark the target loop invalid.
2826 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2828 static void
2829 mark_loop_jump (x, loop)
2830 rtx x;
2831 struct loop *loop;
2833 struct loop *dest_loop;
2834 struct loop *outer_loop;
2835 int i;
2837 switch (GET_CODE (x))
2839 case PC:
2840 case USE:
2841 case CLOBBER:
2842 case REG:
2843 case MEM:
2844 case CONST_INT:
2845 case CONST_DOUBLE:
2846 case RETURN:
2847 return;
2849 case CONST:
2850 /* There could be a label reference in here. */
2851 mark_loop_jump (XEXP (x, 0), loop);
2852 return;
2854 case PLUS:
2855 case MINUS:
2856 case MULT:
2857 mark_loop_jump (XEXP (x, 0), loop);
2858 mark_loop_jump (XEXP (x, 1), loop);
2859 return;
2861 case LO_SUM:
2862 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2863 mark_loop_jump (XEXP (x, 1), loop);
2864 return;
2866 case SIGN_EXTEND:
2867 case ZERO_EXTEND:
2868 mark_loop_jump (XEXP (x, 0), loop);
2869 return;
2871 case LABEL_REF:
2872 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
2874 /* Link together all labels that branch outside the loop. This
2875 is used by final_[bg]iv_value and the loop unrolling code. Also
2876 mark this LABEL_REF so we know that this branch should predict
2877 false. */
2879 /* A check to make sure the label is not in an inner nested loop,
2880 since this does not count as a loop exit. */
2881 if (dest_loop)
2883 for (outer_loop = dest_loop; outer_loop;
2884 outer_loop = outer_loop->outer)
2885 if (outer_loop == loop)
2886 break;
2888 else
2889 outer_loop = NULL;
2891 if (loop && ! outer_loop)
2893 LABEL_OUTSIDE_LOOP_P (x) = 1;
2894 LABEL_NEXTREF (x) = loop->exit_labels;
2895 loop->exit_labels = x;
2897 for (outer_loop = loop;
2898 outer_loop && outer_loop != dest_loop;
2899 outer_loop = outer_loop->outer)
2900 outer_loop->exit_count++;
2903 /* If this is inside a loop, but not in the current loop or one enclosed
2904 by it, it invalidates at least one loop. */
2906 if (! dest_loop)
2907 return;
2909 /* We must invalidate every nested loop containing the target of this
2910 label, except those that also contain the jump insn. */
2912 for (; dest_loop; dest_loop = dest_loop->outer)
2914 /* Stop when we reach a loop that also contains the jump insn. */
2915 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2916 if (dest_loop == outer_loop)
2917 return;
2919 /* If we get here, we know we need to invalidate a loop. */
2920 if (loop_dump_stream && ! dest_loop->invalid)
2921 fprintf (loop_dump_stream,
2922 "\nLoop at %d ignored due to multiple entry points.\n",
2923 INSN_UID (dest_loop->start));
2925 dest_loop->invalid = 1;
2927 return;
2929 case SET:
2930 /* If this is not setting pc, ignore. */
2931 if (SET_DEST (x) == pc_rtx)
2932 mark_loop_jump (SET_SRC (x), loop);
2933 return;
2935 case IF_THEN_ELSE:
2936 mark_loop_jump (XEXP (x, 1), loop);
2937 mark_loop_jump (XEXP (x, 2), loop);
2938 return;
2940 case PARALLEL:
2941 case ADDR_VEC:
2942 for (i = 0; i < XVECLEN (x, 0); i++)
2943 mark_loop_jump (XVECEXP (x, 0, i), loop);
2944 return;
2946 case ADDR_DIFF_VEC:
2947 for (i = 0; i < XVECLEN (x, 1); i++)
2948 mark_loop_jump (XVECEXP (x, 1, i), loop);
2949 return;
2951 default:
2952 /* Strictly speaking this is not a jump into the loop, only a possible
2953 jump out of the loop. However, we have no way to link the destination
2954 of this jump onto the list of exit labels. To be safe we mark this
2955 loop and any containing loops as invalid. */
2956 if (loop)
2958 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2960 if (loop_dump_stream && ! outer_loop->invalid)
2961 fprintf (loop_dump_stream,
2962 "\nLoop at %d ignored due to unknown exit jump.\n",
2963 INSN_UID (outer_loop->start));
2964 outer_loop->invalid = 1;
2967 return;
2971 /* Return nonzero if there is a label in the range from
2972 insn INSN to and including the insn whose luid is END.
2973 INSN must have an assigned luid (i.e., it must not have
2974 been previously created by loop.c). */
2976 static int
2977 labels_in_range_p (insn, end)
2978 rtx insn;
2979 int end;
2981 while (insn && INSN_LUID (insn) <= end)
2983 if (GET_CODE (insn) == CODE_LABEL)
2984 return 1;
2985 insn = NEXT_INSN (insn);
2988 return 0;
2991 /* Record that a memory reference X is being set. */
2993 static void
2994 note_addr_stored (x, y, data)
2995 rtx x;
2996 rtx y ATTRIBUTE_UNUSED;
2997 void *data ATTRIBUTE_UNUSED;
2999 struct loop_info *loop_info = data;
3001 if (x == 0 || GET_CODE (x) != MEM)
3002 return;
3004 /* Count number of memory writes.
3005 This affects heuristics in strength_reduce. */
3006 loop_info->num_mem_sets++;
3008 /* BLKmode MEM means all memory is clobbered. */
3009 if (GET_MODE (x) == BLKmode)
3011 if (RTX_UNCHANGING_P (x))
3012 loop_info->unknown_constant_address_altered = 1;
3013 else
3014 loop_info->unknown_address_altered = 1;
3016 return;
3019 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3020 loop_info->store_mems);
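/* Editor's note: illustrative effects of the function above (the
   pseudo number is hypothetical). A store through (mem:SI (reg 100))
   bumps num_mem_sets and is prepended to store_mems; a store through a
   BLKmode MEM instead sets unknown_address_altered (or
   unknown_constant_address_altered when the MEM is RTX_UNCHANGING_P),
   which prescan_loop above turns into the catch-all scratch MEM. */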
3023 /* X is a value modified by an INSN that references a biv inside a loop
3024 exit test (i.e., X is somehow related to the value of the biv). If X
3025 is a pseudo that is used more than once, then the biv is (effectively)
3026 used more than once. DATA is a pointer to a loop_regs structure. */
3028 static void
3029 note_set_pseudo_multiple_uses (x, y, data)
3030 rtx x;
3031 rtx y ATTRIBUTE_UNUSED;
3032 void *data;
3034 struct loop_regs *regs = (struct loop_regs *) data;
3036 if (x == 0)
3037 return;
3039 while (GET_CODE (x) == STRICT_LOW_PART
3040 || GET_CODE (x) == SIGN_EXTRACT
3041 || GET_CODE (x) == ZERO_EXTRACT
3042 || GET_CODE (x) == SUBREG)
3043 x = XEXP (x, 0);
3045 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3046 return;
3048 /* If we do not have usage information, or if we know the register
3049 is used more than once, note that fact for check_dbra_loop. */
3050 if (REGNO (x) >= max_reg_before_loop
3051 || ! regs->array[REGNO (x)].single_usage
3052 || regs->array[REGNO (x)].single_usage == const0_rtx)
3053 regs->multiple_uses = 1;
3056 /* Return nonzero if the rtx X is invariant over the current loop.
3058 The value is 2 if we refer to something only conditionally invariant.
3060 A memory ref is invariant if it is not volatile and does not conflict
3061 with anything stored in `loop_info->store_mems'. */
3063 int
3064 loop_invariant_p (loop, x)
3065 const struct loop *loop;
3066 register rtx x;
3068 struct loop_info *loop_info = LOOP_INFO (loop);
3069 struct loop_regs *regs = LOOP_REGS (loop);
3070 register int i;
3071 register enum rtx_code code;
3072 register const char *fmt;
3073 int conditional = 0;
3074 rtx mem_list_entry;
3076 if (x == 0)
3077 return 1;
3078 code = GET_CODE (x);
3079 switch (code)
3081 case CONST_INT:
3082 case CONST_DOUBLE:
3083 case SYMBOL_REF:
3084 case CONST:
3085 return 1;
3087 case LABEL_REF:
3088 /* A LABEL_REF is normally invariant, however, if we are unrolling
3089 loops, and this label is inside the loop, then it isn't invariant.
3090 This is because each unrolled copy of the loop body will have
3091 a copy of this label. If this was invariant, then an insn loading
3092 the address of this label into a register might get moved outside
3093 the loop, and then each loop body would end up using the same label.
3095 We don't know the loop bounds here though, so just fail for all
3096 labels. */
3097 if (flag_unroll_loops)
3098 return 0;
3099 else
3100 return 1;
3102 case PC:
3103 case CC0:
3104 case UNSPEC_VOLATILE:
3105 return 0;
3107 case REG:
3108 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3109 since the reg might be set by initialization within the loop. */
3111 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3112 || x == arg_pointer_rtx)
3113 && ! current_function_has_nonlocal_goto)
3114 return 1;
3116 if (LOOP_INFO (loop)->has_call
3117 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3118 return 0;
3120 if (regs->array[REGNO (x)].set_in_loop < 0)
3121 return 2;
3123 return regs->array[REGNO (x)].set_in_loop == 0;
3125 case MEM:
3126 /* Volatile memory references must be rejected. Do this before
3127 checking for read-only items, so that volatile read-only items
3128 will be rejected also. */
3129 if (MEM_VOLATILE_P (x))
3130 return 0;
3132 /* See if there is any dependence between a store and this load. */
3133 mem_list_entry = loop_info->store_mems;
3134 while (mem_list_entry)
3136 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3137 x, rtx_varies_p))
3138 return 0;
3140 mem_list_entry = XEXP (mem_list_entry, 1);
3143 /* It's not invalidated by a store in memory
3144 but we must still verify the address is invariant. */
3145 break;
3147 case ASM_OPERANDS:
3148 /* Don't mess with insns declared volatile. */
3149 if (MEM_VOLATILE_P (x))
3150 return 0;
3151 break;
3153 default:
3154 break;
3157 fmt = GET_RTX_FORMAT (code);
3158 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3160 if (fmt[i] == 'e')
3162 int tem = loop_invariant_p (loop, XEXP (x, i));
3163 if (tem == 0)
3164 return 0;
3165 if (tem == 2)
3166 conditional = 1;
3168 else if (fmt[i] == 'E')
3170 register int j;
3171 for (j = 0; j < XVECLEN (x, i); j++)
3173 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3174 if (tem == 0)
3175 return 0;
3176 if (tem == 2)
3177 conditional = 1;
3183 return 1 + conditional;
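/* Editor's note: an illustrative reading of the tri-state result above
   (this fragment is not code from this file). Callers that must not
   hoist a merely conditionally invariant value compare against 1:

     if (loop_invariant_p (loop, src) == 1)
       ...safe to treat SRC as invariant on every path...

   while a return of 2 means the value is invariant only if the
   conditionally invariant registers it mentions really are; see the
   `1 == loop_invariant_p' test in move_movables above. */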
3186 /* Return nonzero if all the insns in the loop that set REG
3187 are INSN and the immediately following insns,
3188 and if each of those insns sets REG in an invariant way
3189 (not counting uses of REG in them).
3191 The value is 2 if some of these insns are only conditionally invariant.
3193 We assume that INSN itself is the first set of REG
3194 and that its source is invariant. */
3196 static int
3197 consec_sets_invariant_p (loop, reg, n_sets, insn)
3198 const struct loop *loop;
3199 int n_sets;
3200 rtx reg, insn;
3202 struct loop_regs *regs = LOOP_REGS (loop);
3203 rtx p = insn;
3204 unsigned int regno = REGNO (reg);
3205 rtx temp;
3206 /* Number of sets we have to insist on finding after INSN. */
3207 int count = n_sets - 1;
3208 int old = regs->array[regno].set_in_loop;
3209 int value = 0;
3210 int this;
3212 /* If N_SETS hit the limit, we can't rely on its value. */
3213 if (n_sets == 127)
3214 return 0;
3216 regs->array[regno].set_in_loop = 0;
3218 while (count > 0)
3220 register enum rtx_code code;
3221 rtx set;
3223 p = NEXT_INSN (p);
3224 code = GET_CODE (p);
3226 /* If library call, skip to end of it. */
3227 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3228 p = XEXP (temp, 0);
3230 this = 0;
3231 if (code == INSN
3232 && (set = single_set (p))
3233 && GET_CODE (SET_DEST (set)) == REG
3234 && REGNO (SET_DEST (set)) == regno)
3236 this = loop_invariant_p (loop, SET_SRC (set));
3237 if (this != 0)
3238 value |= this;
3239 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3241 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3242 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3243 notes are OK. */
3244 this = (CONSTANT_P (XEXP (temp, 0))
3245 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3246 && loop_invariant_p (loop, XEXP (temp, 0))));
3247 if (this != 0)
3248 value |= this;
3251 if (this != 0)
3252 count--;
3253 else if (code != NOTE)
3255 regs->array[regno].set_in_loop = old;
3256 return 0;
3260 regs->array[regno].set_in_loop = old;
3261 /* If loop_invariant_p ever returned 2, bit 1 of VALUE is set and we return 3; the callers in this file only test for the value 1. */
3262 return 1 + (value & 2);
3265 #if 0
3266 /* I don't think this condition is sufficient to allow INSN
3267 to be moved, so we no longer test it. */
3269 /* Return 1 if all insns in the basic block of INSN and following INSN
3270 that set REG are invariant according to TABLE. */
3272 static int
3273 all_sets_invariant_p (reg, insn, table)
3274 rtx reg, insn;
3275 short *table;
3277 register rtx p = insn;
3278 register int regno = REGNO (reg);
3280 while (1)
3282 register enum rtx_code code;
3283 p = NEXT_INSN (p);
3284 code = GET_CODE (p);
3285 if (code == CODE_LABEL || code == JUMP_INSN)
3286 return 1;
3287 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3288 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3289 && REGNO (SET_DEST (PATTERN (p))) == regno)
3291 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3292 return 0;
3296 #endif /* 0 */
3298 /* Look at all uses (not sets) of registers in X. For each, if it is
3299 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3300 a different insn, set USAGE[REGNO] to const0_rtx. */
3302 static void
3303 find_single_use_in_loop (regs, insn, x)
3304 struct loop_regs *regs;
3305 rtx insn;
3306 rtx x;
3308 enum rtx_code code = GET_CODE (x);
3309 const char *fmt = GET_RTX_FORMAT (code);
3310 int i, j;
3312 if (code == REG)
3313 regs->array[REGNO (x)].single_usage
3314 = (regs->array[REGNO (x)].single_usage != 0
3315 && regs->array[REGNO (x)].single_usage != insn)
3316 ? const0_rtx : insn;
3318 else if (code == SET)
3320 /* Don't count SET_DEST if it is a REG; otherwise count things
3321 in SET_DEST because if a register is partially modified, it won't
3322 show up as a potential movable so we don't care how USAGE is set
3323 for it. */
3324 if (GET_CODE (SET_DEST (x)) != REG)
3325 find_single_use_in_loop (regs, insn, SET_DEST (x));
3326 find_single_use_in_loop (regs, insn, SET_SRC (x));
3328 else
3329 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3331 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3332 find_single_use_in_loop (regs, insn, XEXP (x, i));
3333 else if (fmt[i] == 'E')
3334 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3335 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
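/* Editor's note: the single_usage encoding above, spelled out. For a
   given pseudo it is 0 while no use has been seen, the using insn
   after uses in exactly one insn, and the sentinel const0_rtx once
   uses occur in two different insns; note_set_pseudo_multiple_uses
   above tests for exactly this sentinel. */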
3339 /* Count and record any set in X which is contained in INSN. Update
3340 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3341 in X. */
3343 static void
3344 count_one_set (regs, insn, x, last_set)
3345 struct loop_regs *regs;
3346 rtx insn, x;
3347 rtx *last_set;
3349 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3350 /* Don't move a reg that has an explicit clobber.
3351 It's not worth the pain to try to do it correctly. */
3352 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3354 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3356 rtx dest = SET_DEST (x);
3357 while (GET_CODE (dest) == SUBREG
3358 || GET_CODE (dest) == ZERO_EXTRACT
3359 || GET_CODE (dest) == SIGN_EXTRACT
3360 || GET_CODE (dest) == STRICT_LOW_PART)
3361 dest = XEXP (dest, 0);
3362 if (GET_CODE (dest) == REG)
3364 register int regno = REGNO (dest);
3365 /* If this is the first setting of this reg
3366 in the current basic block, and it was set before,
3367 it must be set in two basic blocks, so it cannot
3368 be moved out of the loop. */
3369 if (regs->array[regno].set_in_loop > 0
3370 && last_set == 0)
3371 regs->array[regno].may_not_optimize = 1;
3372 /* If this is not the first setting in the current basic block,
3373 see if the reg was used between the previous one and this one.
3374 If so, neither one can be moved. */
3375 if (last_set[regno] != 0
3376 && reg_used_between_p (dest, last_set[regno], insn))
3377 regs->array[regno].may_not_optimize = 1;
3378 if (regs->array[regno].set_in_loop < 127)
3379 ++regs->array[regno].set_in_loop;
3380 last_set[regno] = insn;
3385 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3386 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3387 contained in insn INSN is used by any insn that precedes INSN in
3388 cyclic order starting from the loop entry point.
3390 We don't want to use INSN_LUID here because if we restrict INSN to those
3391 that have a valid INSN_LUID, it means we cannot move an invariant out
3392 from an inner loop past two loops. */
3394 static int
3395 loop_reg_used_before_p (loop, set, insn)
3396 const struct loop *loop;
3397 rtx set, insn;
3399 rtx reg = SET_DEST (set);
3400 rtx p;
3402 /* Scan forward checking for register usage. If we hit INSN, we
3403 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3404 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3406 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3407 return 1;
3409 if (p == loop->end)
3410 p = loop->start;
3413 return 0;
3416 /* A "basic induction variable" or biv is a pseudo reg that is set
3417 (within this loop) only by incrementing or decrementing it. */
3418 /* A "general induction variable" or giv is a pseudo reg whose
3419 value is a linear function of a biv. */
3421 /* Bivs are recognized by `basic_induction_var';
3422 Givs by `general_induction_var'. */
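/* Editor's note: an illustrative source fragment (not from this file).
   In

     for (i = 0; i < n; i++)
       q = 4 * i + 16;

   `i' is a biv, since its only set inside the loop increments it by a
   constant, and `q' is a giv of that biv: a linear function with
   mult_val == 4 and add_val == 16. */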
3424 /* Communication with routines called via `note_stores'. */
3426 static rtx note_insn;
3428 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3430 static rtx addr_placeholder;
3432 /* ??? Unfinished optimizations, and possible future optimizations,
3433 for the strength reduction code. */
3435 /* ??? The interaction of biv elimination, and recognition of 'constant'
3436 bivs, may cause problems. */
3438 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3439 performance problems.
3441 Perhaps don't eliminate things that can be combined with an addressing
3442 mode. Find all givs that have the same biv, mult_val, and add_val;
3443 then for each giv, check to see if its only use dies in a following
3444 memory address. If so, generate a new memory address and check to see
3445 if it is valid. If it is valid, then store the modified memory address,
3446 otherwise, mark the giv as not done so that it will get its own iv. */
3448 /* ??? Could try to optimize branches when it is known that a biv is always
3449 positive. */
3451 /* ??? When replacing a biv in a compare insn, we should replace it with the
3452 closest giv so that an optimized branch can still be recognized by the combiner,
3453 e.g. the VAX acb insn. */
3455 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3456 was rerun in loop_optimize whenever a register was added or moved.
3457 Also, some of the optimizations could be a little less conservative. */
3459 /* Scan the loop body and call FNCALL for each insn. In addition to the
3460 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
3461 callback.
3463 NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
3464 for every loop iteration except for the last one.
3466 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
3467 every loop iteration. */
3469 void
3470 for_each_insn_in_loop (loop, fncall)
3471 struct loop *loop;
3472 loop_insn_callback fncall;
3474 /* This is 1 if current insn is not executed at least once for every loop
3475 iteration. */
3476 int not_every_iteration = 0;
3477 int maybe_multiple = 0;
3478 int past_loop_latch = 0;
3479 int loop_depth = 0;
3480 rtx p;
3482 /* If loop_scan_start points to the loop exit test, we have to be wary of
3483 subversive use of gotos inside expression statements. */
3484 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
3485 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
3487 /* Scan through loop to find all possible bivs. */
3489 for (p = next_insn_in_loop (loop, loop->scan_start);
3490 p != NULL_RTX;
3491 p = next_insn_in_loop (loop, p))
3493 p = fncall (loop, p, not_every_iteration, maybe_multiple);
3495 /* Past CODE_LABEL, we get to insns that may be executed multiple
3496 times. The only way we can be sure that they can't is if every
3497 jump insn between here and the end of the loop either
3498 returns, exits the loop, is a jump to a location that is still
3499 behind the label, or is a jump to the loop start. */
3501 if (GET_CODE (p) == CODE_LABEL)
3503 rtx insn = p;
3505 maybe_multiple = 0;
3507 while (1)
3509 insn = NEXT_INSN (insn);
3510 if (insn == loop->scan_start)
3511 break;
3512 if (insn == loop->end)
3514 if (loop->top != 0)
3515 insn = loop->top;
3516 else
3517 break;
3518 if (insn == loop->scan_start)
3519 break;
3522 if (GET_CODE (insn) == JUMP_INSN
3523 && GET_CODE (PATTERN (insn)) != RETURN
3524 && (!any_condjump_p (insn)
3525 || (JUMP_LABEL (insn) != 0
3526 && JUMP_LABEL (insn) != loop->scan_start
3527 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
3529 maybe_multiple = 1;
3530 break;
3535 /* Past a jump, we get to insns for which we can't count
3536 on whether they will be executed during each iteration. */
3537 /* This code appears twice in strength_reduce. There is also similar
3538 code in scan_loop. */
3539 if (GET_CODE (p) == JUMP_INSN
3540 /* If we enter the loop in the middle, and scan around to the
3541 beginning, don't set not_every_iteration for that.
3542 This can be any kind of jump, since we want to know if insns
3543 will be executed if the loop is executed. */
3544 && !(JUMP_LABEL (p) == loop->top
3545 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
3546 && any_uncondjump_p (p))
3547 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
3549 rtx label = 0;
3551 /* If this is a jump outside the loop, then it also doesn't
3552 matter. Check to see if the target of this branch is on the
3553 loop->exit_labels list. */
3555 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
3556 if (XEXP (label, 0) == JUMP_LABEL (p))
3557 break;
3559 if (!label)
3560 not_every_iteration = 1;
3563 else if (GET_CODE (p) == NOTE)
3565 /* At the virtual top of a converted loop, insns are again known to
3566 be executed each iteration: logically, the loop begins here
3567 even though the exit code has been duplicated.
3569 Insns are also again known to be executed each iteration at
3570 the LOOP_CONT note. */
3571 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
3572 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
3573 && loop_depth == 0)
3574 not_every_iteration = 0;
3575 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3576 loop_depth++;
3577 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3578 loop_depth--;
3581 /* Note if we pass a loop latch. If we do, then we can not clear
3582 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
3583 a loop since a jump before the last CODE_LABEL may have started
3584 a new loop iteration.
3586 Note that LOOP_TOP is only set for rotated loops and we need
3587 this check for all loops, so compare against the CODE_LABEL
3588 which immediately follows LOOP_START. */
3589 if (GET_CODE (p) == JUMP_INSN
3590 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
3591 past_loop_latch = 1;
3593 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3594 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3595 or not an insn is known to be executed each iteration of the
3596 loop, whether or not any iterations are known to occur.
3598 Therefore, if we have just passed a label and have no more labels
3599 between here and the test insn of the loop, and we have not passed
3600 a jump to the top of the loop, then we know these insns will be
3601 executed each iteration. */
3603 if (not_every_iteration
3604 && !past_loop_latch
3605 && GET_CODE (p) == CODE_LABEL
3606 && no_labels_between_p (p, loop->end)
3607 && loop_insn_first_p (p, loop->cont))
3608 not_every_iteration = 0;
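/* Editor's note: a minimal sketch of a conforming loop_insn_callback,
   for illustration only; `example_candidate_count' and
   `example_count_simple_sets' are hypothetical names that are not part
   of loop.c. A callback receives each insn in the loop body and
   returns the insn at which the walk should resume (normally its
   argument, unchanged). */

static int example_candidate_count;

static rtx
example_count_simple_sets (loop, p, not_every_iteration, maybe_multiple)
     struct loop *loop ATTRIBUTE_UNUSED;
     rtx p;
     int not_every_iteration ATTRIBUTE_UNUSED;
     int maybe_multiple;
{
  /* Trust only insns that cannot execute more than once per iteration. */
  if (GET_CODE (p) == INSN && ! maybe_multiple && single_set (p) != 0)
    example_candidate_count++;
  return p;
}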
3612 static void
3613 loop_bivs_find (loop)
3614 struct loop *loop;
3616 struct loop_regs *regs = LOOP_REGS (loop);
3617 struct loop_ivs *ivs = LOOP_IVS (loop);
3618 /* Temporary list pointers for traversing ivs->list. */
3619 struct iv_class *bl, **backbl;
3621 ivs->list = 0;
3623 for_each_insn_in_loop (loop, check_insn_for_bivs);
3625 /* Scan ivs->list to remove all regs that proved not to be bivs.
3626 Make a sanity check against regs->n_times_set. */
3627 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
3629 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3630 /* Above happens if register modified by subreg, etc. */
3631 /* Make sure it is not recognized as a basic induction var: */
3632 || regs->array[bl->regno].n_times_set != bl->biv_count
3633 /* If never incremented, it is invariant that we decided not to
3634 move. So leave it alone. */
3635 || ! bl->incremented)
3637 if (loop_dump_stream)
3638 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
3639 bl->regno,
3640 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3641 ? "not induction variable"
3642 : (! bl->incremented ? "never incremented"
3643 : "count error")));
3645 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
3646 *backbl = bl->next;
3648 else
3650 backbl = &bl->next;
3652 if (loop_dump_stream)
3653 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
3659 /* Determine how BIVs are initialised by looking through the pre-header
3660 extended basic block. */
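/* As an illustrative sketch (hypothetical source, not from any
   particular testcase): given

	i = 0;
   loop_start:
	...
	i = i + 1;
	if (i < n) goto loop_start;

   scanning backwards from loop_start finds the set "i = 0" in the
   pre-header, so the biv for `i' records that insn as init_insn with
   an initial value of const0_rtx.  */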
3661 static void
3662 loop_bivs_init_find (loop)
3663 struct loop *loop;
3665 struct loop_ivs *ivs = LOOP_IVS (loop);
3666 /* Temporary list pointers for traversing ivs->list. */
3667 struct iv_class *bl;
3668 int call_seen;
3669 rtx p;
3671 /* Find initial value for each biv by searching backwards from loop_start,
3672 halting at first label. Also record any test condition. */
3674 call_seen = 0;
3675 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3677 rtx test;
3679 note_insn = p;
3681 if (GET_CODE (p) == CALL_INSN)
3682 call_seen = 1;
3684 if (INSN_P (p))
3685 note_stores (PATTERN (p), record_initial, ivs);
3687 /* Record any test of a biv that branches around the loop if no store
3688 between it and the start of the loop. We only care about tests with
3689 constants and registers, and of those only certain forms. */
3690 if (GET_CODE (p) == JUMP_INSN
3691 && JUMP_LABEL (p) != 0
3692 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
3693 && (test = get_condition_for_loop (loop, p)) != 0
3694 && GET_CODE (XEXP (test, 0)) == REG
3695 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3696 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
3697 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
3698 && bl->init_insn == 0)
3700 /* If an NE test, we have an initial value! */
3701 if (GET_CODE (test) == NE)
3703 bl->init_insn = p;
3704 bl->init_set = gen_rtx_SET (VOIDmode,
3705 XEXP (test, 0), XEXP (test, 1));
3707 else
3708 bl->initial_test = test;
3714 /* Look at each biv and see if we can say anything better about its
3715 initial value from any initializing insns set up above. (This is done
3716 in two passes to avoid missing SETs in a PARALLEL.) */
3717 static void
3718 loop_bivs_check (loop)
3719 struct loop *loop;
3721 struct loop_ivs *ivs = LOOP_IVS (loop);
3722 /* Temporary list pointers for traversing ivs->list. */
3723 struct iv_class *bl;
3724 struct iv_class **backbl;
3726 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
3728 rtx src;
3729 rtx note;
3731 if (! bl->init_insn)
3732 continue;
3734 /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3735 is a constant, use the value of that. */
3736 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3737 && CONSTANT_P (XEXP (note, 0)))
3738 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3739 && CONSTANT_P (XEXP (note, 0))))
3740 src = XEXP (note, 0);
3741 else
3742 src = SET_SRC (bl->init_set);
3744 if (loop_dump_stream)
3745 fprintf (loop_dump_stream,
3746 "Biv %d: initialized at insn %d: initial value ",
3747 bl->regno, INSN_UID (bl->init_insn));
3749 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3750 || GET_MODE (src) == VOIDmode)
3751 && valid_initial_value_p (src, bl->init_insn,
3752 LOOP_INFO (loop)->pre_header_has_call,
3753 loop->start))
3755 bl->initial_value = src;
3757 if (loop_dump_stream)
3759 print_simple_rtl (loop_dump_stream, src);
3760 fputc ('\n', loop_dump_stream);
3763 /* If we can't make it a giv,
3764 let the biv keep the initial value of "itself". */
3765 else if (loop_dump_stream)
3766 fprintf (loop_dump_stream, "is complex\n");
3771 /* Search the loop for general induction variables. */
3773 static void
3774 loop_givs_find (loop)
3775 struct loop* loop;
3777 for_each_insn_in_loop (loop, check_insn_for_givs);
3781 /* For each giv for which we still don't know whether or not it is
3782 replaceable, check to see if it is replaceable because its final value
3783 can be calculated. */
3785 static void
3786 loop_givs_check (loop)
3787 struct loop *loop;
3789 struct loop_ivs *ivs = LOOP_IVS (loop);
3790 struct iv_class *bl;
3792 for (bl = ivs->list; bl; bl = bl->next)
3794 struct induction *v;
3796 for (v = bl->giv; v; v = v->next_iv)
3797 if (! v->replaceable && ! v->not_replaceable)
3798 check_final_value (loop, v);
3803 /* Return non-zero if it is possible to eliminate the biv BL provided
3804 all givs are reduced. This is possible if either the reg is not
3805 used outside the loop, or we can compute what its final value will
3806 be. */
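/* Illustrative sketch: in

	for (i = 0; i < 100; i++)
	  a[i] = 0;

   once the address giv derived from `i' has been reduced, `i' itself
   survives only in the exit test.  Its final value (100) is
   computable, so the biv is a candidate for elimination and the exit
   test can be rewritten in terms of the reduced giv.  */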
3808 static int
3809 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
3810 struct loop *loop;
3811 struct iv_class *bl;
3812 int threshold;
3813 int insn_count;
3815 /* For architectures with a decrement_and_branch_until_zero insn,
3816 don't do this if we put a REG_NONNEG note on the endtest for this
3817 biv. */
3819 #ifdef HAVE_decrement_and_branch_until_zero
3820 if (bl->nonneg)
3822 if (loop_dump_stream)
3823 fprintf (loop_dump_stream,
3824 "Cannot eliminate nonneg biv %d.\n", bl->regno);
3825 return 0;
3827 #endif
3829 /* Check that biv is used outside loop or if it has a final value.
3830 Compare against bl->init_insn rather than loop->start. We aren't
3831 concerned with any uses of the biv between init_insn and
3832 loop->start since these won't be affected by the value of the biv
3833 elsewhere in the function, so long as init_insn doesn't use the
3834 biv itself. */
3836 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
3837 && bl->init_insn
3838 && INSN_UID (bl->init_insn) < max_uid_for_loop
3839 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
3840 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3841 || (bl->final_value = final_biv_value (loop, bl)))
3842 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
3844 if (loop_dump_stream)
3846 fprintf (loop_dump_stream,
3847 "Cannot eliminate biv %d.\n",
3848 bl->regno);
3849 fprintf (loop_dump_stream,
3850 "First use: insn %d, last use: insn %d.\n",
3851 REGNO_FIRST_UID (bl->regno),
3852 REGNO_LAST_UID (bl->regno));
3854 return 0;
3858 /* Reduce each giv of BL that we have decided to reduce. */
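/* Illustrative sketch of the transformation (hypothetical source
   code rather than RTL): a giv such as the index expression in

	for (i = 0; i < n; i++)
	  ... a[4*i + 10] ...

   is reduced by introducing a new register that starts at 10 before
   the loop and is bumped by 4 at each biv increment, removing the
   multiply from the loop body:

	t = 10;
	for (i = 0; i < n; i++, t += 4)
	  ... a[t] ...  */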
3860 static void
3861 loop_givs_reduce (loop, bl)
3862 struct loop *loop;
3863 struct iv_class *bl;
3865 struct induction *v;
3867 for (v = bl->giv; v; v = v->next_iv)
3869 struct induction *tv;
3870 if (! v->ignore && v->same == 0)
3872 int auto_inc_opt = 0;
3874 /* If the code for derived givs immediately below has already
3875 allocated a new_reg, we must keep it. */
3876 if (! v->new_reg)
3877 v->new_reg = gen_reg_rtx (v->mode);
3879 #ifdef AUTO_INC_DEC
3880 /* If the target has auto-increment addressing modes, and
3881 this is an address giv, then try to put the increment
3882 immediately after its use, so that flow can create an
3883 auto-increment addressing mode. */
3884 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
3885 && bl->biv->always_executed && ! bl->biv->maybe_multiple
3886 /* We don't handle reversed biv's because bl->biv->insn
3887 does not have a valid INSN_LUID. */
3888 && ! bl->reversed
3889 && v->always_executed && ! v->maybe_multiple
3890 && INSN_UID (v->insn) < max_uid_for_loop)
3892 /* If other giv's have been combined with this one, then
3893 this will work only if all uses of the other giv's occur
3894 before this giv's insn. This is difficult to check.
3896 We simplify this by looking for the common case where
3897 there is one DEST_REG giv, and this giv's insn is the
3898 last use of the dest_reg of that DEST_REG giv. If the
3899 increment occurs after the address giv, then we can
3900 perform the optimization. (Otherwise, the increment
3901 would have to go before other_giv, and we would not be
3902 able to combine it with the address giv to get an
3903 auto-inc address.) */
3904 if (v->combined_with)
3906 struct induction *other_giv = 0;
3908 for (tv = bl->giv; tv; tv = tv->next_iv)
3909 if (tv->same == v)
3911 if (other_giv)
3912 break;
3913 else
3914 other_giv = tv;
3916 if (! tv && other_giv
3917 && REGNO (other_giv->dest_reg) < max_reg_before_loop
3918 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
3919 == INSN_UID (v->insn))
3920 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
3921 auto_inc_opt = 1;
3923 /* Check for case where increment is before the address
3924 giv. Do this test in "loop order". */
3925 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
3926 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3927 || (INSN_LUID (bl->biv->insn)
3928 > INSN_LUID (loop->scan_start))))
3929 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3930 && (INSN_LUID (loop->scan_start)
3931 < INSN_LUID (bl->biv->insn))))
3932 auto_inc_opt = -1;
3933 else
3934 auto_inc_opt = 1;
3936 #ifdef HAVE_cc0
3938 rtx prev;
3940 /* We can't put an insn immediately after one setting
3941 cc0, or immediately before one using cc0. */
3942 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
3943 || (auto_inc_opt == -1
3944 && (prev = prev_nonnote_insn (v->insn)) != 0
3945 && INSN_P (prev)
3946 && sets_cc0_p (PATTERN (prev))))
3947 auto_inc_opt = 0;
3949 #endif
3951 if (auto_inc_opt)
3952 v->auto_inc_opt = 1;
3954 #endif
3956 /* For each place where the biv is incremented, add an insn
3957 to increment the new, reduced reg for the giv. */
3958 for (tv = bl->biv; tv; tv = tv->next_iv)
3960 rtx insert_before;
3962 if (! auto_inc_opt)
3963 insert_before = tv->insn;
3964 else if (auto_inc_opt == 1)
3965 insert_before = NEXT_INSN (v->insn);
3966 else
3967 insert_before = v->insn;
3969 if (tv->mult_val == const1_rtx)
3970 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3971 v->new_reg, v->new_reg,
3972 0, insert_before);
3973 else /* tv->mult_val == const0_rtx */
3974 /* A multiply is acceptable here
3975 since this is presumed to be seldom executed. */
3976 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3977 v->add_val, v->new_reg,
3978 0, insert_before);
3981 /* Add code at loop start to initialize giv's reduced reg. */
3983 loop_iv_add_mult_hoist (loop,
3984 extend_value_for_giv (v, bl->initial_value),
3985 v->mult_val, v->add_val, v->new_reg);
3991 /* Check for givs whose first use is their definition and whose
3992 last use is the definition of another giv. If so, it is likely
3993 dead and should not be used to derive another giv nor to
3994 eliminate a biv. */
3996 static void
3997 loop_givs_dead_check (loop, bl)
3998 struct loop *loop ATTRIBUTE_UNUSED;
3999 struct iv_class *bl;
4001 struct induction *v;
4003 for (v = bl->giv; v; v = v->next_iv)
4005 if (v->ignore
4006 || (v->same && v->same->ignore))
4007 continue;
4009 if (v->giv_type == DEST_REG
4010 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4012 struct induction *v1;
4014 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4015 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4016 v->maybe_dead = 1;
4022 static void
4023 loop_givs_rescan (loop, bl, reg_map)
4024 struct loop *loop;
4025 struct iv_class *bl;
4026 rtx *reg_map;
4028 struct induction *v;
4030 for (v = bl->giv; v; v = v->next_iv)
4032 if (v->same && v->same->ignore)
4033 v->ignore = 1;
4035 if (v->ignore)
4036 continue;
4038 /* Update expression if this was combined, in case other giv was
4039 replaced. */
4040 if (v->same)
4041 v->new_reg = replace_rtx (v->new_reg,
4042 v->same->dest_reg, v->same->new_reg);
4044 /* See if this register is known to be a pointer to something. If
4045 so, see if we can find the alignment. First see if there is a
4046 destination register that is a pointer. If so, this shares the
4047 alignment too. Next see if we can deduce anything from the
4048 computational information. If not, and this is a DEST_ADDR
4049 giv, at least we know that it's a pointer, though we don't know
4050 the alignment. */
4051 if (GET_CODE (v->new_reg) == REG
4052 && v->giv_type == DEST_REG
4053 && REG_POINTER (v->dest_reg))
4054 mark_reg_pointer (v->new_reg,
4055 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4056 else if (GET_CODE (v->new_reg) == REG
4057 && REG_POINTER (v->src_reg))
4059 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4061 if (align == 0
4062 || GET_CODE (v->add_val) != CONST_INT
4063 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4064 align = 0;
4066 mark_reg_pointer (v->new_reg, align);
4068 else if (GET_CODE (v->new_reg) == REG
4069 && GET_CODE (v->add_val) == REG
4070 && REG_POINTER (v->add_val))
4072 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4074 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4075 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4076 align = 0;
4078 mark_reg_pointer (v->new_reg, align);
4080 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4081 mark_reg_pointer (v->new_reg, 0);
4083 if (v->giv_type == DEST_ADDR)
4084 /* Store reduced reg as the address in the memref where we found
4085 this giv. */
4086 validate_change (v->insn, v->location, v->new_reg, 0);
4087 else if (v->replaceable)
4089 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4091 else
4093 /* Not replaceable; emit an insn to set the original giv reg from
4094 the reduced giv, same as above. */
4095 loop_insn_emit_after (loop, 0, v->insn,
4096 gen_move_insn (v->dest_reg, v->new_reg));
4099 /* When a loop is reversed, givs which depend on the reversed
4100 biv, and which are live outside the loop, must be set to their
4101 correct final value. This insn is only needed if the giv is
4102 not replaceable. The correct final value is the same as the
4103 value that the giv starts the reversed loop with. */
4104 if (bl->reversed && ! v->replaceable)
4105 loop_iv_add_mult_sink (loop,
4106 extend_value_for_giv (v, bl->initial_value),
4107 v->mult_val, v->add_val, v->dest_reg);
4108 else if (v->final_value)
4109 loop_insn_sink_or_swim (loop,
4110 gen_move_insn (v->dest_reg, v->final_value));
4112 if (loop_dump_stream)
4114 fprintf (loop_dump_stream, "giv at %d reduced to ",
4115 INSN_UID (v->insn));
4116 print_simple_rtl (loop_dump_stream, v->new_reg);
4117 fprintf (loop_dump_stream, "\n");
4123 static int
4124 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4125 struct loop *loop ATTRIBUTE_UNUSED;
4126 struct iv_class *bl;
4127 struct induction *v;
4128 rtx test_reg;
4130 int add_cost;
4131 int benefit;
4133 benefit = v->benefit;
4134 PUT_MODE (test_reg, v->mode);
4135 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4136 test_reg, test_reg);
4138 /* Reduce benefit if not replaceable, since we will insert a
4139 move-insn to replace the insn that calculates this giv. Don't do
4140 this unless the giv is a user variable, since it will often be
4141 marked non-replaceable because of the duplication of the exit
4142 code outside the loop. In such a case, the copies we insert are
4143 dead and will be deleted. So they don't have a cost. Similar
4144 situations exist. */
4145 /* ??? The new final_[bg]iv_value code does a much better job of
4146 finding replaceable giv's, and hence this code may no longer be
4147 necessary. */
4148 if (! v->replaceable && ! bl->eliminable
4149 && REG_USERVAR_P (v->dest_reg))
4150 benefit -= copy_cost;
4152 /* Decrease the benefit to count the add-insns that we will insert
4153 to increment the reduced reg for the giv. ??? This can
4154 overestimate the run-time cost of the additional insns, e.g. if
4155 there are multiple basic blocks that increment the biv, but only
4156 one of these blocks is executed during each iteration. There is
4157 no good way to detect cases like this with the current structure
4158 of the loop optimizer. This code is more accurate for
4159 determining code size than run-time benefits. */
4160 benefit -= add_cost * bl->biv_count;
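/* Worked example with illustrative numbers: if BENEFIT starts at 10,
   ADD_COST is 4 and the biv is incremented twice in the loop, the
   adjusted benefit is 10 - 4*2 == 2, so the giv is still (barely)
   worth reducing; a third biv increment would drive the benefit
   negative.  */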
4162 /* Decide whether to strength-reduce this giv or to leave the code
4163 unchanged (recompute it from the biv each time it is used). This
4164 decision can be made independently for each giv. */
4166 #ifdef AUTO_INC_DEC
4167 /* Attempt to guess whether autoincrement will handle some of the
4168 new add insns; if so, increase BENEFIT (undo the subtraction of
4169 add_cost that was done above). */
4170 if (v->giv_type == DEST_ADDR
4171 /* Increasing the benefit is risky, since this is only a guess.
4172 Avoid increasing register pressure in cases where there would
4173 be no other benefit from reducing this giv. */
4174 && benefit > 0
4175 && GET_CODE (v->mult_val) == CONST_INT)
4177 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4179 if (HAVE_POST_INCREMENT
4180 && INTVAL (v->mult_val) == size)
4181 benefit += add_cost * bl->biv_count;
4182 else if (HAVE_PRE_INCREMENT
4183 && INTVAL (v->mult_val) == size)
4184 benefit += add_cost * bl->biv_count;
4185 else if (HAVE_POST_DECREMENT
4186 && -INTVAL (v->mult_val) == size)
4187 benefit += add_cost * bl->biv_count;
4188 else if (HAVE_PRE_DECREMENT
4189 && -INTVAL (v->mult_val) == size)
4190 benefit += add_cost * bl->biv_count;
4192 #endif
4194 return benefit;
4198 /* Free IV structures for LOOP. */
4200 static void
4201 loop_ivs_free (loop)
4202 struct loop *loop;
4204 struct loop_ivs *ivs = LOOP_IVS (loop);
4205 struct iv_class *iv = ivs->list;
4207 free (ivs->regs);
4209 while (iv)
4211 struct iv_class *next = iv->next;
4212 struct induction *induction;
4213 struct induction *next_induction;
4215 for (induction = iv->biv; induction; induction = next_induction)
4217 next_induction = induction->next_iv;
4218 free (induction);
4220 for (induction = iv->giv; induction; induction = next_induction)
4222 next_induction = induction->next_iv;
4223 free (induction);
4226 free (iv);
4227 iv = next;
4232 /* Perform strength reduction and induction variable elimination.
4234 Pseudo registers created during this function will be beyond the
4235 last valid index in several tables including
4236 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4237 problem here, because the added registers cannot be givs outside of
4238 their loop, and hence will never be reconsidered. But scan_loop
4239 must check regnos to make sure they are in bounds. */
4241 static void
4242 strength_reduce (loop, flags)
4243 struct loop *loop;
4244 int flags;
4246 struct loop_info *loop_info = LOOP_INFO (loop);
4247 struct loop_regs *regs = LOOP_REGS (loop);
4248 struct loop_ivs *ivs = LOOP_IVS (loop);
4249 rtx p;
4250 /* Temporary list pointer for traversing ivs->list. */
4251 struct iv_class *bl;
4252 /* Ratio of extra register life span we can justify
4253 for saving an instruction. More if the loop doesn't call subroutines
4254 since in that case saving an insn makes more difference
4255 and more registers are available. */
4256 /* ??? could set this to last value of threshold in move_movables */
4257 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
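/* E.g. with 29 non-fixed registers (an illustrative figure), the
   threshold is 2 * (3 + 29) == 64 for a call-free loop but only 32
   when the loop contains calls, since calls make registers
   scarcer.  */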
4258 /* Map of pseudo-register replacements. */
4259 rtx *reg_map = NULL;
4260 int reg_map_size;
4261 int unrolled_insn_copies = 0;
4262 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
4263 int insn_count = count_insns_in_loop (loop);
4265 addr_placeholder = gen_reg_rtx (Pmode);
4267 ivs->n_regs = max_reg_before_loop;
4268 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
4270 /* Find all BIVs in loop. */
4271 loop_bivs_find (loop);
4273 /* Exit if there are no bivs. */
4274 if (! ivs->list)
4276 /* We can still unroll the loop anyway, but indicate that there is no
4277 strength reduction info available. */
4278 if (flags & LOOP_UNROLL)
4279 unroll_loop (loop, insn_count, 0);
4281 loop_ivs_free (loop);
4282 return;
4285 /* Determine how BIVs are initialised by looking through the pre-header
4286 extended basic block. */
4287 loop_bivs_init_find (loop);
4289 /* Look at each biv and see if we can say anything better about its
4290 initial value from any initializing insns set up above. */
4291 loop_bivs_check (loop);
4293 /* Search the loop for general induction variables. */
4294 loop_givs_find (loop);
4296 /* Try to calculate and save the number of loop iterations. This is
4297 set to zero if the actual number can not be calculated. This must
4298 be called after all giv's have been identified, since otherwise it may
4299 fail if the iteration variable is a giv. */
4300 loop_iterations (loop);
4302 /* Now for each giv for which we still don't know whether or not it is
4303 replaceable, check to see if it is replaceable because its final value
4304 can be calculated. This must be done after loop_iterations is called,
4305 so that final_giv_value will work correctly. */
4306 loop_givs_check (loop);
4308 /* Try to prove that the loop counter variable (if any) is always
4309 nonnegative; if so, record that fact with a REG_NONNEG note
4310 so that "decrement and branch until zero" insn can be used. */
4311 check_dbra_loop (loop, insn_count);
4313 /* Create reg_map to hold substitutions for replaceable giv regs.
4314 Some givs might have been made from biv increments, so look at
4315 ivs->reg_iv_type for a suitable size. */
4316 reg_map_size = ivs->n_regs;
4317 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
4319 /* Examine each iv class for feasibility of strength reduction/induction
4320 variable elimination. */
4322 for (bl = ivs->list; bl; bl = bl->next)
4324 struct induction *v;
4325 int benefit;
4327 /* Test whether it will be possible to eliminate this biv
4328 provided all givs are reduced. */
4329 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
4331 /* This will be true at the end, if all givs which depend on this
4332 biv have been strength reduced.
4333 We can't (currently) eliminate the biv unless this is so. */
4334 bl->all_reduced = 1;
4336 /* Check each extension dependent giv in this class to see if its
4337 root biv is safe from wrapping in the interior mode. */
4338 check_ext_dependant_givs (bl, loop_info);
4340 /* Combine all giv's for this iv_class. */
4341 combine_givs (regs, bl);
4343 for (v = bl->giv; v; v = v->next_iv)
4345 struct induction *tv;
4347 if (v->ignore || v->same)
4348 continue;
4350 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
4352 /* If an insn is not to be strength reduced, then set its ignore
4353 flag, and clear bl->all_reduced. */
4355 /* A giv that depends on a reversed biv must be reduced if it is
4356 used after the loop exit, otherwise, it would have the wrong
4357 value after the loop exit. To make it simple, just reduce all
4358 of such giv's whether or not we know they are used after the loop
4359 exit. */
4361 if (! flag_reduce_all_givs
4362 && v->lifetime * threshold * benefit < insn_count
4363 && ! bl->reversed)
4365 if (loop_dump_stream)
4366 fprintf (loop_dump_stream,
4367 "giv of insn %d not worth while, %d vs %d.\n",
4368 INSN_UID (v->insn),
4369 v->lifetime * threshold * benefit, insn_count);
4370 v->ignore = 1;
4371 bl->all_reduced = 0;
4373 else
4375 /* Check that we can increment the reduced giv without a
4376 multiply insn. If not, reject it. */
4378 for (tv = bl->biv; tv; tv = tv->next_iv)
4379 if (tv->mult_val == const1_rtx
4380 && ! product_cheap_p (tv->add_val, v->mult_val))
4382 if (loop_dump_stream)
4383 fprintf (loop_dump_stream,
4384 "giv of insn %d: would need a multiply.\n",
4385 INSN_UID (v->insn));
4386 v->ignore = 1;
4387 bl->all_reduced = 0;
4388 break;
4393 /* Check for givs whose first use is their definition and whose
4394 last use is the definition of another giv. If so, it is likely
4395 dead and should not be used to derive another giv nor to
4396 eliminate a biv. */
4397 loop_givs_dead_check (loop, bl);
4399 /* Reduce each giv that we decided to reduce. */
4400 loop_givs_reduce (loop, bl);
4402 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4403 as not reduced.
4405 For each giv register that can be reduced now: if replaceable,
4406 substitute reduced reg wherever the old giv occurs;
4407 else add new move insn "giv_reg = reduced_reg". */
4408 loop_givs_rescan (loop, bl, reg_map);
4410 /* All the givs based on the biv bl have been reduced if they
4411 merit it. */
4413 /* For each giv not marked as maybe dead that has been combined with a
4414 second giv, clear any "maybe dead" mark on that second giv.
4415 v->new_reg will either be or refer to the register of the giv it
4416 combined with.
4418 Doing this clearing avoids problems in biv elimination where
4419 a giv's new_reg is a complex value that can't be put in the
4420 insn but the giv combined with (with a reg as new_reg) is
4421 marked maybe_dead. Since the register will be used in either
4422 case, we'd prefer it be used from the simpler giv. */
4424 for (v = bl->giv; v; v = v->next_iv)
4425 if (! v->maybe_dead && v->same)
4426 v->same->maybe_dead = 0;
4428 /* Try to eliminate the biv, if it is a candidate.
4429 This won't work if ! bl->all_reduced,
4430 since the givs we planned to use might not have been reduced.
4432 We have to be careful that we didn't initially think we could
4433 eliminate this biv because of a giv that we now think may be
4434 dead and shouldn't be used as a biv replacement.
4436 Also, there is the possibility that we may have a giv that looks
4437 like it can be used to eliminate a biv, but the resulting insn
4438 isn't valid. This can happen, for example, on the 88k, where a
4439 JUMP_INSN can compare a register only with zero. Attempts to
4440 replace it with a compare with a constant will fail.
4442 Note that in cases where this call fails, we may have replaced some
4443 of the occurrences of the biv with a giv, but no harm was done in
4444 doing so in the rare cases where it can occur. */
4446 if (bl->all_reduced == 1 && bl->eliminable
4447 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
4449 /* ?? If we created a new test to bypass the loop entirely,
4450 or otherwise drop straight in, based on this test, then
4451 we might want to rewrite it also. This way some later
4452 pass has more hope of removing the initialization of this
4453 biv entirely. */
4455 /* If final_value != 0, then the biv may be used after loop end
4456 and we must emit an insn to set it just in case.
4458 Reversed bivs already have an insn after the loop setting their
4459 value, so we don't need another one. We can't calculate the
4460 proper final value for such a biv here anyways. */
4461 if (bl->final_value && ! bl->reversed)
4462 loop_insn_sink_or_swim (loop, gen_move_insn
4463 (bl->biv->dest_reg, bl->final_value));
4465 if (loop_dump_stream)
4466 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4467 bl->regno);
4471 /* Go through all the instructions in the loop, making all the
4472 register substitutions scheduled in REG_MAP. */
4474 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
4475 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4476 || GET_CODE (p) == CALL_INSN)
4478 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
4479 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
4480 INSN_CODE (p) = -1;
4483 if (loop_info->n_iterations > 0)
4485 /* When we completely unroll a loop we will likely not need the increment
4486 of the loop BIV and we will not need the conditional branch at the
4487 end of the loop. */
4488 unrolled_insn_copies = insn_count - 2;
4490 #ifdef HAVE_cc0
4491 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
4492 need the comparison before the conditional branch at the end of the
4493 loop. */
4494 unrolled_insn_copies -= 1;
4495 #endif
4497 /* We'll need one copy for each loop iteration. */
4498 unrolled_insn_copies *= loop_info->n_iterations;
4500 /* A little slop to account for the ability to remove initialization
4501 code, better CSE, and other secondary benefits of completely
4502 unrolling some loops. */
4503 unrolled_insn_copies -= 1;
4505 /* Clamp the value. */
4506 if (unrolled_insn_copies < 0)
4507 unrolled_insn_copies = 0;
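/* Worked example with illustrative numbers: a 10-insn loop known to
   iterate 3 times yields (10 - 2) * 3 - 1 == 23 copies (20 on a
   HAVE_cc0 machine), which exceeds the original 10 insns, so size
   alone would not justify unrolling it.  */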
4510 /* Unroll loops from within strength reduction so that we can use the
4511 induction variable information that strength_reduce has already
4512 collected. Always unroll loops that would be as small or smaller
4513 unrolled than when rolled. */
4514 if ((flags & LOOP_UNROLL)
4515 || (loop_info->n_iterations > 0
4516 && unrolled_insn_copies <= insn_count))
4517 unroll_loop (loop, insn_count, 1);
4519 #ifdef HAVE_doloop_end
4520 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
4521 doloop_optimize (loop);
4522 #endif /* HAVE_doloop_end */
4524 /* In case number of iterations is known, drop branch prediction note
4525 in the branch. Do that only in second loop pass, as loop unrolling
4526 may change the number of iterations performed. */
4527 if ((flags & LOOP_BCT)
4528 && loop_info->n_iterations / loop_info->unroll_number > 1)
4530 int n = loop_info->n_iterations / loop_info->unroll_number;
4531 predict_insn (PREV_INSN (loop->end),
4532 PRED_LOOP_ITERATIONS,
4533 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
4536 if (loop_dump_stream)
4537 fprintf (loop_dump_stream, "\n");
4539 loop_ivs_free (loop);
4540 if (reg_map)
4541 free (reg_map);
4544 /* Record all basic induction variables calculated in the insn. */
4545 static rtx
4546 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
4547 struct loop *loop;
4548 rtx p;
4549 int not_every_iteration;
4550 int maybe_multiple;
4552 struct loop_ivs *ivs = LOOP_IVS (loop);
4553 rtx set;
4554 rtx dest_reg;
4555 rtx inc_val;
4556 rtx mult_val;
4557 rtx *location;
4559 if (GET_CODE (p) == INSN
4560 && (set = single_set (p))
4561 && GET_CODE (SET_DEST (set)) == REG)
4563 dest_reg = SET_DEST (set);
4564 if (REGNO (dest_reg) < max_reg_before_loop
4565 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
4566 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
4568 if (basic_induction_var (loop, SET_SRC (set),
4569 GET_MODE (SET_SRC (set)),
4570 dest_reg, p, &inc_val, &mult_val,
4571 &location))
4573 /* It is a possible basic induction variable.
4574 Create and initialize an induction structure for it. */
4576 struct induction *v
4577 = (struct induction *) xmalloc (sizeof (struct induction));
4579 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
4580 not_every_iteration, maybe_multiple);
4581 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
4583 else if (REGNO (dest_reg) < ivs->n_regs)
4584 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
4587 return p;
4590 /* Record all givs calculated in the insn.
4591 A register is a giv if: it is only set once, it is a function of a
4592 biv and a constant (or invariant), and it is not a biv. */
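/* Illustrative sketch: in

	for (i = 0; i < n; i++)
	  j = 3*i + k;

   `i' is a biv while `j' is a giv with mult_val 3 and add_val `k'
   (assuming `k' is loop-invariant).  */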
4593 static rtx
4594 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
4595 struct loop *loop;
4596 rtx p;
4597 int not_every_iteration;
4598 int maybe_multiple;
4600 struct loop_regs *regs = LOOP_REGS (loop);
4602 rtx set;
4603 /* Look for a general induction variable in a register. */
4604 if (GET_CODE (p) == INSN
4605 && (set = single_set (p))
4606 && GET_CODE (SET_DEST (set)) == REG
4607 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
4609 rtx src_reg;
4610 rtx dest_reg;
4611 rtx add_val;
4612 rtx mult_val;
4613 rtx ext_val;
4614 int benefit;
4615 rtx regnote = 0;
4616 rtx last_consec_insn;
4618 dest_reg = SET_DEST (set);
4619 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
4620 return p;
4622 if (/* SET_SRC is a giv. */
4623 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
4624 &mult_val, &ext_val, 0, &benefit, VOIDmode)
4625 /* Equivalent expression is a giv. */
4626 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
4627 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
4628 &add_val, &mult_val, &ext_val, 0,
4629 &benefit, VOIDmode)))
4630 /* Don't try to handle any regs made by loop optimization.
4631 We have nothing on them in regno_first_uid, etc. */
4632 && REGNO (dest_reg) < max_reg_before_loop
4633 /* Don't recognize a BASIC_INDUCT_VAR here. */
4634 && dest_reg != src_reg
4635 /* This must be the only place where the register is set. */
4636 && (regs->array[REGNO (dest_reg)].n_times_set == 1
4637 /* or all sets must be consecutive and make a giv. */
4638 || (benefit = consec_sets_giv (loop, benefit, p,
4639 src_reg, dest_reg,
4640 &add_val, &mult_val, &ext_val,
4641 &last_consec_insn))))
4643 struct induction *v
4644 = (struct induction *) xmalloc (sizeof (struct induction));
4646 /* If this is a library call, increase benefit. */
4647 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
4648 benefit += libcall_benefit (p);
4650 /* Skip the consecutive insns, if there are any. */
4651 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
4652 p = last_consec_insn;
4654 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
4655 ext_val, benefit, DEST_REG, not_every_iteration,
4656 maybe_multiple, (rtx*)0);
4661 #ifndef DONT_REDUCE_ADDR
4662 /* Look for givs which are memory addresses. */
4663 /* This resulted in worse code on a VAX 8600. I wonder if it
4664 still does. */
4665 if (GET_CODE (p) == INSN)
4666 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
4667 maybe_multiple);
4668 #endif
4670 /* Update the status of whether giv can derive other givs. This can
4671 change when we pass a label or an insn that updates a biv. */
4672 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4673 || GET_CODE (p) == CODE_LABEL)
4674 update_giv_derive (loop, p);
4675 return p;
4678 /* Return 1 if X is a valid source for an initial value (or as value being
4679 compared against in an initial test).
4681 X must be either a register or constant and must not be clobbered between
4682 the current insn and the start of the loop.
4684 INSN is the insn containing X. */
4686 static int
4687 valid_initial_value_p (x, insn, call_seen, loop_start)
4688 rtx x;
4689 rtx insn;
4690 int call_seen;
4691 rtx loop_start;
4693 if (CONSTANT_P (x))
4694 return 1;
4696 /* Only consider pseudos we know about initialized in insns whose luids
4697 we know. */
4698 if (GET_CODE (x) != REG
4699 || REGNO (x) >= max_reg_before_loop)
4700 return 0;
4702 /* Don't use a call-clobbered register across a call which clobbers it. On
4703 some machines, don't use any hard registers at all. */
4704 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4705 && (SMALL_REGISTER_CLASSES
4706 || (call_used_regs[REGNO (x)] && call_seen)))
4707 return 0;
4709 /* Don't use registers that have been clobbered before the start of the
4710 loop. */
4711 if (reg_set_between_p (x, insn, loop_start))
4712 return 0;
4714 return 1;
4717 /* Scan X for memory refs and check each memory address
4718 as a possible giv. INSN is the insn whose pattern X comes from.
4719 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4720 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
4721 more than once in each loop iteration. */
4723 static void
4724 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
4725 const struct loop *loop;
4726 rtx x;
4727 rtx insn;
4728 int not_every_iteration, maybe_multiple;
4730 register int i, j;
4731 register enum rtx_code code;
4732 register const char *fmt;
4734 if (x == 0)
4735 return;
4737 code = GET_CODE (x);
4738 switch (code)
4740 case REG:
4741 case CONST_INT:
4742 case CONST:
4743 case CONST_DOUBLE:
4744 case SYMBOL_REF:
4745 case LABEL_REF:
4746 case PC:
4747 case CC0:
4748 case ADDR_VEC:
4749 case ADDR_DIFF_VEC:
4750 case USE:
4751 case CLOBBER:
4752 return;
4754 case MEM:
4756 rtx src_reg;
4757 rtx add_val;
4758 rtx mult_val;
4759 rtx ext_val;
4760 int benefit;
4762 /* This code used to disable creating GIVs with mult_val == 1 and
4763 add_val == 0. However, this leads to lost optimizations when
4764 it comes time to combine a set of related DEST_ADDR GIVs, since
4765 this one would not be seen. */
4767 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
4768 &mult_val, &ext_val, 1, &benefit,
4769 GET_MODE (x)))
4771 /* Found one; record it. */
4772 struct induction *v
4773 = (struct induction *) xmalloc (sizeof (struct induction));
4775 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
4776 add_val, ext_val, benefit, DEST_ADDR,
4777 not_every_iteration, maybe_multiple, &XEXP (x, 0));
4779 v->mem = x;
4782 return;
4784 default:
4785 break;
4788 /* Recursively scan the subexpressions for other mem refs. */
4790 fmt = GET_RTX_FORMAT (code);
4791 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4792 if (fmt[i] == 'e')
4793 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
4794 maybe_multiple);
4795 else if (fmt[i] == 'E')
4796 for (j = 0; j < XVECLEN (x, i); j++)
4797 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
4798 maybe_multiple);
4801 /* Fill in the data about one biv update.
4802 V is the `struct induction' in which we record the biv. (It is
4803 allocated by the caller, with alloca.)
4804 INSN is the insn that sets it.
4805 DEST_REG is the biv's reg.
4807 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4808 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4809 being set to INC_VAL.
4811 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4812 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4813 can be executed more than once per iteration. If MAYBE_MULTIPLE
4814 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4815 executed exactly once per iteration. */
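/* For example (illustrative): the update "i = i + 4" arrives here
   with MULT_VAL == const1_rtx and INC_VAL == (const_int 4), whereas
   an outright assignment "i = k" of an invariant arrives with
   MULT_VAL == const0_rtx and INC_VAL == `k'.  */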
4817 static void
4818 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
4819 not_every_iteration, maybe_multiple)
4820 struct loop *loop;
4821 struct induction *v;
4822 rtx insn;
4823 rtx dest_reg;
4824 rtx inc_val;
4825 rtx mult_val;
4826 rtx *location;
4827 int not_every_iteration;
4828 int maybe_multiple;
4830 struct loop_ivs *ivs = LOOP_IVS (loop);
4831 struct iv_class *bl;
4833 v->insn = insn;
4834 v->src_reg = dest_reg;
4835 v->dest_reg = dest_reg;
4836 v->mult_val = mult_val;
4837 v->add_val = inc_val;
4838 v->ext_dependant = NULL_RTX;
4839 v->location = location;
4840 v->mode = GET_MODE (dest_reg);
4841 v->always_computable = ! not_every_iteration;
4842 v->always_executed = ! not_every_iteration;
4843 v->maybe_multiple = maybe_multiple;
4845 /* Add this to the reg's iv_class, creating a class
4846 if this is the first incrementation of the reg. */
4848 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
4849 if (bl == 0)
4851 /* Create and initialize new iv_class. */
4853 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
4855 bl->regno = REGNO (dest_reg);
4856 bl->biv = 0;
4857 bl->giv = 0;
4858 bl->biv_count = 0;
4859 bl->giv_count = 0;
4861 /* Set initial value to the reg itself. */
4862 bl->initial_value = dest_reg;
4863 bl->final_value = 0;
4864 /* We haven't seen the initializing insn yet */
4865 bl->init_insn = 0;
4866 bl->init_set = 0;
4867 bl->initial_test = 0;
4868 bl->incremented = 0;
4869 bl->eliminable = 0;
4870 bl->nonneg = 0;
4871 bl->reversed = 0;
4872 bl->total_benefit = 0;
4874 /* Add this class to ivs->list. */
4875 bl->next = ivs->list;
4876 ivs->list = bl;
4878 /* Put it in the array of biv register classes. */
4879 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
4882 /* Update IV_CLASS entry for this biv. */
4883 v->next_iv = bl->biv;
4884 bl->biv = v;
4885 bl->biv_count++;
4886 if (mult_val == const1_rtx)
4887 bl->incremented = 1;
4889 if (loop_dump_stream)
4890 loop_biv_dump (v, loop_dump_stream, 0);
4893 /* Fill in the data about one giv.
4894 V is the `struct induction' in which we record the giv. (It is
4895 allocated by the caller, with alloca.)
4896 INSN is the insn that sets it.
4897 BENEFIT estimates the savings from deleting this insn.
4898 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4899 into a register or is used as a memory address.
4901 SRC_REG is the biv reg which the giv is computed from.
4902 DEST_REG is the giv's reg (if the giv is stored in a reg).
4903 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4904 LOCATION points to the place where this giv's value appears in INSN. */
4906 static void
4907 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
4908 benefit, type, not_every_iteration, maybe_multiple, location)
4909 const struct loop *loop;
4910 struct induction *v;
4911 rtx insn;
4912 rtx src_reg;
4913 rtx dest_reg;
4914 rtx mult_val, add_val, ext_val;
4915 int benefit;
4916 enum g_types type;
4917 int not_every_iteration, maybe_multiple;
4918 rtx *location;
4920 struct loop_ivs *ivs = LOOP_IVS (loop);
4921 struct induction *b;
4922 struct iv_class *bl;
4923 rtx set = single_set (insn);
4924 rtx temp;
4926 /* Attempt to prove constantness of the values. */
4927 temp = simplify_rtx (add_val);
4928 if (temp)
4929 add_val = temp;
4931 v->insn = insn;
4932 v->src_reg = src_reg;
4933 v->giv_type = type;
4934 v->dest_reg = dest_reg;
4935 v->mult_val = mult_val;
4936 v->add_val = add_val;
4937 v->ext_dependant = ext_val;
4938 v->benefit = benefit;
4939 v->location = location;
4940 v->cant_derive = 0;
4941 v->combined_with = 0;
4942 v->maybe_multiple = maybe_multiple;
4943 v->maybe_dead = 0;
4944 v->derive_adjustment = 0;
4945 v->same = 0;
4946 v->ignore = 0;
4947 v->new_reg = 0;
4948 v->final_value = 0;
4949 v->same_insn = 0;
4950 v->auto_inc_opt = 0;
4951 v->unrolled = 0;
4952 v->shared = 0;
4954 /* The v->always_computable field is used in update_giv_derive, to
4955 determine whether a giv can be used to derive another giv. For a
4956 DEST_REG giv, INSN computes a new value for the giv, so its value
4957 isn't computable if INSN isn't executed every iteration.
4958 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4959 it does not compute a new value. Hence the value is always computable
4960 regardless of whether INSN is executed each iteration. */
4962 if (type == DEST_ADDR)
4963 v->always_computable = 1;
4964 else
4965 v->always_computable = ! not_every_iteration;
4967 v->always_executed = ! not_every_iteration;
4969 if (type == DEST_ADDR)
4971 v->mode = GET_MODE (*location);
4972 v->lifetime = 1;
4974 else /* type == DEST_REG */
4976 v->mode = GET_MODE (SET_DEST (set));
4978 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
4980 /* If the lifetime is zero, it means that this register is
4981 really a dead store. So mark this as a giv that can be
4982 ignored. This will not prevent the biv from being eliminated. */
4983 if (v->lifetime == 0)
4984 v->ignore = 1;
4986 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
4987 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
4990 /* Add the giv to the class of givs computed from one biv. */
4992 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
4993 if (bl)
4995 v->next_iv = bl->giv;
4996 bl->giv = v;
4997 /* Don't count DEST_ADDR. This is supposed to count the number of
4998 insns that calculate givs. */
4999 if (type == DEST_REG)
5000 bl->giv_count++;
5001 bl->total_benefit += benefit;
5003 else
5004 /* Fatal error, biv missing for this giv? */
5005 abort ();
5007 if (type == DEST_ADDR)
5008 v->replaceable = 1;
5009 else
5011 /* The giv can be replaced outright by the reduced register only if all
5012 of the following conditions are true:
5013 - the insn that sets the giv is always executed on any iteration
5014 on which the giv is used at all
5015 (there are two ways to deduce this:
5016 either the insn is executed on every iteration,
5017 or all uses follow that insn in the same basic block),
5018 - the giv is not used outside the loop
5019 - no assignments to the biv occur during the giv's lifetime. */
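/* Illustrative counter-example for the last condition: if the biv is
   updated between the insn that sets the giv and the giv's last use,
   as in

	j = 4*i; ... use (j); i++; ... use (j);

   then substituting the reduced register (which is bumped at every
   biv update) for the second use would yield the wrong value, so
   such a giv is marked not replaceable.  */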
5021 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5022 /* Previous line always fails if INSN was moved by loop opt. */
5023 && REGNO_LAST_LUID (REGNO (dest_reg))
5024 < INSN_LUID (loop->end)
5025 && (! not_every_iteration
5026 || last_use_this_basic_block (dest_reg, insn)))
5028 /* Now check that there are no assignments to the biv within the
5029 giv's lifetime. This requires two separate checks. */
5031 /* Check each biv update, and fail if any are between the first
5032 and last use of the giv.
5034 If this loop contains an inner loop that was unrolled, then
5035 the insn modifying the biv may have been emitted by the loop
5036 unrolling code, and hence does not have a valid luid. Just
5037 mark the biv as not replaceable in this case. It is not very
5038 useful as a biv, because it is used in two different loops.
5039 It is very unlikely that we would be able to optimize the giv
5040 using this biv anyways. */
5042 v->replaceable = 1;
5043 for (b = bl->biv; b; b = b->next_iv)
5045 if (INSN_UID (b->insn) >= max_uid_for_loop
5046 || ((INSN_LUID (b->insn)
5047 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5048 && (INSN_LUID (b->insn)
5049 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5051 v->replaceable = 0;
5052 v->not_replaceable = 1;
5053 break;
5057 /* If there are any backwards branches that go from after the
5058 biv update to before it, then this giv is not replaceable. */
5059 if (v->replaceable)
5060 for (b = bl->biv; b; b = b->next_iv)
5061 if (back_branch_in_range_p (loop, b->insn))
5063 v->replaceable = 0;
5064 v->not_replaceable = 1;
5065 break;
5068 else
5070 /* May still be replaceable, we don't have enough info here to
5071 decide. */
5072 v->replaceable = 0;
5073 v->not_replaceable = 0;
5077 /* Record whether the add_val contains a const_int, for later use by
5078 combine_givs. */
5080 rtx tem = add_val;
5082 v->no_const_addval = 1;
5083 if (tem == const0_rtx)
5085 else if (CONSTANT_P (add_val))
5086 v->no_const_addval = 0;
5087 if (GET_CODE (tem) == PLUS)
5089 while (1)
5091 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5092 tem = XEXP (tem, 0);
5093 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5094 tem = XEXP (tem, 1);
5095 else
5096 break;
5098 if (CONSTANT_P (XEXP (tem, 1)))
5099 v->no_const_addval = 0;
5103 if (loop_dump_stream)
5104 loop_giv_dump (v, loop_dump_stream, 0);
5107 /* All this does is determine whether a giv can be made replaceable because
5108 its final value can be calculated. This code can not be part of record_giv
5109 above, because final_giv_value requires that the number of loop iterations
5110 be known, and that can not be accurately calculated until after all givs
5111 have been identified. */
5113 static void
5114 check_final_value (loop, v)
5115 const struct loop *loop;
5116 struct induction *v;
5118 struct loop_ivs *ivs = LOOP_IVS (loop);
5119 struct iv_class *bl;
5120 rtx final_value = 0;
5122 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5124 /* DEST_ADDR givs will never reach here, because they are always marked
5125 replaceable above in record_giv. */
5127 /* The giv can be replaced outright by the reduced register only if all
5128 of the following conditions are true:
5129 - the insn that sets the giv is always executed on any iteration
5130 on which the giv is used at all
5131 (there are two ways to deduce this:
5132 either the insn is executed on every iteration,
5133 or all uses follow that insn in the same basic block),
5134 - its final value can be calculated (this condition is different
5135 than the one above in record_giv)
5136 - it's not used before it's set
5137 - no assignments to the biv occur during the giv's lifetime. */
5139 #if 0
5140 /* This is only called now when replaceable is known to be false. */
5141 /* Clear replaceable, so that it won't confuse final_giv_value. */
5142 v->replaceable = 0;
5143 #endif
5145 if ((final_value = final_giv_value (loop, v))
5146 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5148 int biv_increment_seen = 0, before_giv_insn = 0;
5149 rtx p = v->insn;
5150 rtx last_giv_use;
5152 v->replaceable = 1;
5154 /* When trying to determine whether or not a biv increment occurs
5155 during the lifetime of the giv, we can ignore uses of the variable
5156 outside the loop because final_value is true. Hence we can not
5157 use regno_last_uid and regno_first_uid as above in record_giv. */
5159 /* Search the loop to determine whether any assignments to the
5160 biv occur during the giv's lifetime. Start with the insn
5161 that sets the giv, and search around the loop until we come
5162 back to that insn again.
5164 Also fail if there is a jump within the giv's lifetime that jumps
5165 to somewhere outside the lifetime but still within the loop. This
5166 catches spaghetti code where the execution order is not linear, and
5167 hence the above test fails. Here we assume that the giv lifetime
5168 does not extend from one iteration of the loop to the next, so as
5169 to make the test easier. Since the lifetime isn't known yet,
5170 this requires two loops. See also record_giv above. */
5172 last_giv_use = v->insn;
5174 while (1)
5176 p = NEXT_INSN (p);
5177 if (p == loop->end)
5179 before_giv_insn = 1;
5180 p = NEXT_INSN (loop->start);
5182 if (p == v->insn)
5183 break;
5185 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5186 || GET_CODE (p) == CALL_INSN)
5188 /* It is possible for the BIV increment to use the GIV if we
5189 have a cycle. Thus we must be sure to check each insn for
5190 both BIV and GIV uses, and we must check for BIV uses
5191 first. */
5193 if (! biv_increment_seen
5194 && reg_set_p (v->src_reg, PATTERN (p)))
5195 biv_increment_seen = 1;
5197 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5199 if (biv_increment_seen || before_giv_insn)
5201 v->replaceable = 0;
5202 v->not_replaceable = 1;
5203 break;
5205 last_giv_use = p;
5210 /* Now that the lifetime of the giv is known, check for branches
5211 from within the lifetime to outside the lifetime if it is still
5212 replaceable. */
5214 if (v->replaceable)
5216 p = v->insn;
5217 while (1)
5219 p = NEXT_INSN (p);
5220 if (p == loop->end)
5221 p = NEXT_INSN (loop->start);
5222 if (p == last_giv_use)
5223 break;
5225 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5226 && LABEL_NAME (JUMP_LABEL (p))
5227 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5228 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5229 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5230 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5232 v->replaceable = 0;
5233 v->not_replaceable = 1;
5235 if (loop_dump_stream)
5236 fprintf (loop_dump_stream,
5237 "Found branch outside giv lifetime.\n");
5239 break;
5244 /* If it is replaceable, then save the final value. */
5245 if (v->replaceable)
5246 v->final_value = final_value;
5249 if (loop_dump_stream && v->replaceable)
5250 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5251 INSN_UID (v->insn), REGNO (v->dest_reg));
5254 /* Update the status of whether a giv can derive other givs.
5256 We need to do something special if there is or may be an update to the biv
5257 between the time the giv is defined and the time it is used to derive
5258 another giv.
5260 In addition, a giv that is only conditionally set is not allowed to
5261 derive another giv once a label has been passed.
5263 The cases we look at are when a label or an update to a biv is passed. */
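/* Illustrative sketch of the adjustment: if giv G was recorded as
   B * M + A and the biv update "B = B + D" is passed afterwards,
   anything later derived from G must be compensated by D * M.
   update_giv_derive accumulates that product in derive_adjustment
   when simplify_giv_expr can form it, and otherwise sets
   cant_derive.  */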
5265 static void
5266 update_giv_derive (loop, p)
5267 const struct loop *loop;
5268 rtx p;
5270 struct loop_ivs *ivs = LOOP_IVS (loop);
5271 struct iv_class *bl;
5272 struct induction *biv, *giv;
5273 rtx tem;
5274 int dummy;
5276 /* Search all IV classes, then all bivs, and finally all givs.
5278 There are three cases we are concerned with. First we have the situation
5279 of a giv that is only updated conditionally. In that case, it may not
5280 derive any givs after a label is passed.
5282 The second case is when a biv update occurs, or may occur, after the
5283 definition of a giv. For certain biv updates (see below) that are
5284 known to occur between the giv definition and use, we can adjust the
5285 giv definition. For others, or when the biv update is conditional,
5286 we must prevent the giv from deriving any other givs. There are two
5287 sub-cases within this case.
5289 If this is a label, we are concerned with any biv update that is done
5290 conditionally, since it may be done after the giv is defined followed by
5291 a branch here (actually, we need to pass both a jump and a label, but
5292 this extra tracking doesn't seem worth it).
5294 If this is a jump, we are concerned about any biv update that may be
5295 executed multiple times. We are actually only concerned about
5296 backward jumps, but it is probably not worth performing the test
5297 on the jump again here.
5299 If this is a biv update, we must adjust the giv status to show that a
5300 subsequent biv update was performed. If this adjustment cannot be done,
5301 the giv cannot derive further givs. */
5303 for (bl = ivs->list; bl; bl = bl->next)
5304 for (biv = bl->biv; biv; biv = biv->next_iv)
5305 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5306 || biv->insn == p)
5308 for (giv = bl->giv; giv; giv = giv->next_iv)
5310 /* If cant_derive is already true, there is no point in
5311 checking all of these conditions again. */
5312 if (giv->cant_derive)
5313 continue;
5315 /* If this giv is conditionally set and we have passed a label,
5316 it cannot derive anything. */
5317 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5318 giv->cant_derive = 1;
5320 /* Skip givs that have mult_val == 0, since
5321 they are really invariants. Also skip those that are
5322 replaceable, since we know their lifetime doesn't contain
5323 any biv update. */
5324 else if (giv->mult_val == const0_rtx || giv->replaceable)
5325 continue;
5327 /* The only way we can allow this giv to derive another
5328 is if this is a biv increment and we can form the product
5329 of biv->add_val and giv->mult_val. In this case, we will
5330 be able to compute a compensation. */
5331 else if (biv->insn == p)
5333 rtx ext_val_dummy;
5335 tem = 0;
5336 if (biv->mult_val == const1_rtx)
5337 tem = simplify_giv_expr (loop,
5338 gen_rtx_MULT (giv->mode,
5339 biv->add_val,
5340 giv->mult_val),
5341 &ext_val_dummy, &dummy);
5343 if (tem && giv->derive_adjustment)
5344 tem = simplify_giv_expr
5345 (loop,
5346 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
5347 &ext_val_dummy, &dummy);
5349 if (tem)
5350 giv->derive_adjustment = tem;
5351 else
5352 giv->cant_derive = 1;
5354 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5355 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5356 giv->cant_derive = 1;
5361 /* Check whether an insn is an increment legitimate for a basic induction var.
5362 X is the source of insn P, or a part of it.
5363 MODE is the mode in which X should be interpreted.
5365 DEST_REG is the putative biv, also the destination of the insn.
5366 We accept patterns of these forms:
5367 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5368 REG = INVARIANT + REG
5370 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5371 store the additive term into *INC_VAL, and store the place where
5372 we found the additive term into *LOCATION.
5374 If X is an assignment of an invariant into DEST_REG, we set
5375 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5377 We also want to detect a BIV when it corresponds to a variable
5378 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5379 of the variable may be a PLUS that adds a SUBREG of that variable to
5380 an invariant and then sign- or zero-extends the result of the PLUS
5381 into the variable.
5383 Most GIVs in such cases will be in the promoted mode, since that is
5384 probably the natural computation mode (and almost certainly the mode
5385 used for addresses) on the machine. So we view the pseudo-reg containing
5386 the variable as the BIV, as if it were simply incremented.
5388 Note that treating the entire pseudo as a BIV will result in making
5389 simple increments to any GIVs based on it. However, if the variable
5390 overflows in its declared mode but not its promoted mode, the result will
5391 be incorrect. This is acceptable if the variable is signed, since
5392 overflows in such cases are undefined, but not if it is unsigned, since
5393 those overflows are defined. So we only check for SIGN_EXTEND and
5394 not ZERO_EXTEND.
5396 If we cannot find a biv, we return 0. */
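/* For example (illustrative RTL): given the insn

	(set (reg 60) (plus (reg 60) (const_int -1)))

   called on its SET_SRC we return 1 with *MULT_VAL == const1_rtx,
   *INC_VAL == (const_int -1), and *LOCATION pointing at the second
   operand of the PLUS.  */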
5398 static int
5399 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
5400 const struct loop *loop;
5401 register rtx x;
5402 enum machine_mode mode;
5403 rtx dest_reg;
5404 rtx p;
5405 rtx *inc_val;
5406 rtx *mult_val;
5407 rtx **location;
5409 register enum rtx_code code;
5410 rtx *argp, arg;
5411 rtx insn, set = 0;
5413 code = GET_CODE (x);
5414 *location = NULL;
5415 switch (code)
5417 case PLUS:
5418 if (rtx_equal_p (XEXP (x, 0), dest_reg)
5419 || (GET_CODE (XEXP (x, 0)) == SUBREG
5420 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5421 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5423 argp = &XEXP (x, 1);
5425 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
5426 || (GET_CODE (XEXP (x, 1)) == SUBREG
5427 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5428 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5430 argp = &XEXP (x, 0);
5432 else
5433 return 0;
5435 arg = *argp;
5436 if (loop_invariant_p (loop, arg) != 1)
5437 return 0;
5439 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5440 *mult_val = const1_rtx;
5441 *location = argp;
5442 return 1;
5444 case SUBREG:
5445 /* If this is a SUBREG for a promoted variable, check the inner
5446 value. */
5447 if (SUBREG_PROMOTED_VAR_P (x))
5448 return basic_induction_var (loop, SUBREG_REG (x),
5449 GET_MODE (SUBREG_REG (x)),
5450 dest_reg, p, inc_val, mult_val, location);
5451 return 0;
5453 case REG:
5454 /* If this register is assigned in a previous insn, look at its
5455 source, but don't go outside the loop or past a label. */
5457 /* If this sets a register to itself, we would repeat any previous
5458 biv increment if we applied this strategy blindly. */
5459 if (rtx_equal_p (dest_reg, x))
5460 return 0;
5462 insn = p;
5463 while (1)
5465 rtx dest;
5468 insn = PREV_INSN (insn);
5470 while (insn && GET_CODE (insn) == NOTE
5471 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5473 if (!insn)
5474 break;
5475 set = single_set (insn);
5476 if (set == 0)
5477 break;
5478 dest = SET_DEST (set);
5479 if (dest == x
5480 || (GET_CODE (dest) == SUBREG
5481 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
5482 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
5483 && SUBREG_REG (dest) == x))
5484 return basic_induction_var (loop, SET_SRC (set),
5485 (GET_MODE (SET_SRC (set)) == VOIDmode
5486 ? GET_MODE (x)
5487 : GET_MODE (SET_SRC (set))),
5488 dest_reg, insn,
5489 inc_val, mult_val, location);
5491 while (GET_CODE (dest) == SIGN_EXTRACT
5492 || GET_CODE (dest) == ZERO_EXTRACT
5493 || GET_CODE (dest) == SUBREG
5494 || GET_CODE (dest) == STRICT_LOW_PART)
5495 dest = XEXP (dest, 0);
5496 if (dest == x)
5497 break;
5499 /* Fall through. */
5501 /* Can accept constant setting of biv only when inside innermost loop.
5502 Otherwise, a biv of an inner loop may be incorrectly recognized
5503 as a biv of the outer loop,
5504 causing code to be moved INTO the inner loop. */
5505 case MEM:
5506 if (loop_invariant_p (loop, x) != 1)
5507 return 0;
5508 case CONST_INT:
5509 case SYMBOL_REF:
5510 case CONST:
5511 /* convert_modes aborts if we try to convert to or from CCmode, so just
5512 exclude that case. It is very unlikely that a condition code value
5513 would be a useful iterator anyway. */
5514 if (loop->level == 1
5515 && GET_MODE_CLASS (mode) != MODE_CC
5516 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5518 /* Possible bug here? Perhaps we don't know the mode of X. */
5519 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5520 *mult_val = const0_rtx;
5521 return 1;
5523 else
5524 return 0;
5526 case SIGN_EXTEND:
5527 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5528 dest_reg, p, inc_val, mult_val, location);
5530 case ASHIFTRT:
5531 /* Similar, since this can be a sign extension. */
5532 for (insn = PREV_INSN (p);
5533 (insn && GET_CODE (insn) == NOTE
5534 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5535 insn = PREV_INSN (insn))
5538 if (insn)
5539 set = single_set (insn);
5541 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
5542 && set && SET_DEST (set) == XEXP (x, 0)
5543 && GET_CODE (XEXP (x, 1)) == CONST_INT
5544 && INTVAL (XEXP (x, 1)) >= 0
5545 && GET_CODE (SET_SRC (set)) == ASHIFT
5546 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5547 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
5548 GET_MODE (XEXP (x, 0)),
5549 dest_reg, insn, inc_val, mult_val,
5550 location);
5551 return 0;
5553 default:
5554 return 0;
5558 /* A general induction variable (giv) is any quantity that is a linear
5559 function of a basic induction variable,
5560 i.e. giv = biv * mult_val + add_val.
5561 The coefficients can be any loop invariant quantity.
5562 A giv need not be computed directly from the biv;
5563 it can be computed by way of other givs. */
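/* For example (hypothetical): if `i' is a biv stepping by 1, the address
   computation `p = base + i * 4' is a giv with mult_val == 4 and
   add_val == base; a subsequent `q = p + 8' is again a giv, computed by
   way of the giv `p', with mult_val == 4 and add_val == base + 8.  */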
5565 /* Determine whether X computes a giv.
5566 If it does, return a nonzero value
5567 which is the benefit from eliminating the computation of X;
5568 set *SRC_REG to the register of the biv that it is computed from;
5569 set *ADD_VAL and *MULT_VAL to the coefficients,
5570 such that the value of X is biv * mult + add; */
5572 static int
5573 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
5574 is_addr, pbenefit, addr_mode)
5575 const struct loop *loop;
5576 rtx x;
5577 rtx *src_reg;
5578 rtx *add_val;
5579 rtx *mult_val;
5580 rtx *ext_val;
5581 int is_addr;
5582 int *pbenefit;
5583 enum machine_mode addr_mode;
5585 struct loop_ivs *ivs = LOOP_IVS (loop);
5586 rtx orig_x = x;
5588 /* If this is an invariant, forget it, it isn't a giv. */
5589 if (loop_invariant_p (loop, x) == 1)
5590 return 0;
5592 *pbenefit = 0;
5593 *ext_val = NULL_RTX;
5594 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
5595 if (x == 0)
5596 return 0;
5598 switch (GET_CODE (x))
5600 case USE:
5601 case CONST_INT:
5602 /* Since this is now an invariant and wasn't before, it must be a giv
5603 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5604 with. */
5605 *src_reg = ivs->list->biv->dest_reg;
5606 *mult_val = const0_rtx;
5607 *add_val = x;
5608 break;
5610 case REG:
5611 /* This is equivalent to a BIV. */
5612 *src_reg = x;
5613 *mult_val = const1_rtx;
5614 *add_val = const0_rtx;
5615 break;
5617 case PLUS:
5618 /* Either (plus (biv) (invar)) or
5619 (plus (mult (biv) (invar_1)) (invar_2)). */
5620 if (GET_CODE (XEXP (x, 0)) == MULT)
5622 *src_reg = XEXP (XEXP (x, 0), 0);
5623 *mult_val = XEXP (XEXP (x, 0), 1);
5625 else
5627 *src_reg = XEXP (x, 0);
5628 *mult_val = const1_rtx;
5630 *add_val = XEXP (x, 1);
5631 break;
5633 case MULT:
5634 /* ADD_VAL is zero. */
5635 *src_reg = XEXP (x, 0);
5636 *mult_val = XEXP (x, 1);
5637 *add_val = const0_rtx;
5638 break;
5640 default:
5641 abort ();
5644 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5645 one unless they are CONST_INT). */
5646 if (GET_CODE (*add_val) == USE)
5647 *add_val = XEXP (*add_val, 0);
5648 if (GET_CODE (*mult_val) == USE)
5649 *mult_val = XEXP (*mult_val, 0);
5651 if (is_addr)
5652 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
5653 else
5654 *pbenefit += rtx_cost (orig_x, SET);
5656 /* Always return true if this is a giv so it will be detected as such,
5657 even if the benefit is zero or negative. This allows elimination
5658 of bivs that might otherwise not be eliminated. */
5659 return 1;
5662 /* Given an expression, X, try to form it as a linear function of a biv.
5663 We will canonicalize it to be of the form
5664 (plus (mult (BIV) (invar_1))
5665 (invar_2))
5666 with possible degeneracies.
5668 The invariant expressions must each be of a form that can be used as a
5669 machine operand. We surround them with a USE rtx (a hack, but localized
5670 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5671 routine; it is the caller's responsibility to strip them.
5673 If no such canonicalization is possible (i.e., two biv's are used or an
5674 expression that is neither invariant nor a biv or giv), this routine
5675 returns 0.
5677 For a non-zero return, the result will have a code of CONST_INT, USE,
5678 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5680 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
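/* A schematic example (hypothetical RTL): given biv `i',

       (plus (mult (reg i) (const_int 4)) (symbol_ref a))

   is already in canonical form, while

       (plus (reg i) (mult (reg i) (const_int 3)))

   is folded through the MULT + MULT case below into

       (mult (reg i) (const_int 4)).  */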
5682 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
5683 static rtx sge_plus_constant PARAMS ((rtx, rtx));
5685 static rtx
5686 simplify_giv_expr (loop, x, ext_val, benefit)
5687 const struct loop *loop;
5688 rtx x;
5689 rtx *ext_val;
5690 int *benefit;
5692 struct loop_ivs *ivs = LOOP_IVS (loop);
5693 struct loop_regs *regs = LOOP_REGS (loop);
5694 enum machine_mode mode = GET_MODE (x);
5695 rtx arg0, arg1;
5696 rtx tem;
5698 /* If this is not an integer mode, or if we cannot do arithmetic in this
5699 mode, this can't be a giv. */
5700 if (mode != VOIDmode
5701 && (GET_MODE_CLASS (mode) != MODE_INT
5702 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5703 return NULL_RTX;
5705 switch (GET_CODE (x))
5707 case PLUS:
5708 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5709 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5710 if (arg0 == 0 || arg1 == 0)
5711 return NULL_RTX;
5713 /* Put constant last, CONST_INT last if both constant. */
5714 if ((GET_CODE (arg0) == USE
5715 || GET_CODE (arg0) == CONST_INT)
5716 && ! ((GET_CODE (arg0) == USE
5717 && GET_CODE (arg1) == USE)
5718 || GET_CODE (arg1) == CONST_INT))
5719 tem = arg0, arg0 = arg1, arg1 = tem;
5721 /* Handle addition of zero, then addition of an invariant. */
5722 if (arg1 == const0_rtx)
5723 return arg0;
5724 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5725 switch (GET_CODE (arg0))
5727 case CONST_INT:
5728 case USE:
5729 /* Adding two invariants must result in an invariant, so enclose
5730 addition operation inside a USE and return it. */
5731 if (GET_CODE (arg0) == USE)
5732 arg0 = XEXP (arg0, 0);
5733 if (GET_CODE (arg1) == USE)
5734 arg1 = XEXP (arg1, 0);
5736 if (GET_CODE (arg0) == CONST_INT)
5737 tem = arg0, arg0 = arg1, arg1 = tem;
5738 if (GET_CODE (arg1) == CONST_INT)
5739 tem = sge_plus_constant (arg0, arg1);
5740 else
5741 tem = sge_plus (mode, arg0, arg1);
5743 if (GET_CODE (tem) != CONST_INT)
5744 tem = gen_rtx_USE (mode, tem);
5745 return tem;
5747 case REG:
5748 case MULT:
5749 /* biv + invar or mult + invar. Return sum. */
5750 return gen_rtx_PLUS (mode, arg0, arg1);
5752 case PLUS:
5753 /* (a + invar_1) + invar_2. Associate. */
5754 return
5755 simplify_giv_expr (loop,
5756 gen_rtx_PLUS (mode,
5757 XEXP (arg0, 0),
5758 gen_rtx_PLUS (mode,
5759 XEXP (arg0, 1),
5760 arg1)),
5761 ext_val, benefit);
5763 default:
5764 abort ();
5767 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5768 MULT to reduce cases. */
5769 if (GET_CODE (arg0) == REG)
5770 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5771 if (GET_CODE (arg1) == REG)
5772 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5774 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5775 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5776 Recurse to associate the second PLUS. */
5777 if (GET_CODE (arg1) == MULT)
5778 tem = arg0, arg0 = arg1, arg1 = tem;
5780 if (GET_CODE (arg1) == PLUS)
5781 return
5782 simplify_giv_expr (loop,
5783 gen_rtx_PLUS (mode,
5784 gen_rtx_PLUS (mode, arg0,
5785 XEXP (arg1, 0)),
5786 XEXP (arg1, 1)),
5787 ext_val, benefit);
5789 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5790 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5791 return NULL_RTX;
5793 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
5794 return NULL_RTX;
5796 return simplify_giv_expr (loop,
5797 gen_rtx_MULT (mode,
5798 XEXP (arg0, 0),
5799 gen_rtx_PLUS (mode,
5800 XEXP (arg0, 1),
5801 XEXP (arg1, 1))),
5802 ext_val, benefit);
5804 case MINUS:
5805 /* Handle "a - b" as "a + b * (-1)". */
5806 return simplify_giv_expr (loop,
5807 gen_rtx_PLUS (mode,
5808 XEXP (x, 0),
5809 gen_rtx_MULT (mode,
5810 XEXP (x, 1),
5811 constm1_rtx)),
5812 ext_val, benefit);
5814 case MULT:
5815 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5816 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5817 if (arg0 == 0 || arg1 == 0)
5818 return NULL_RTX;
5820 /* Put constant last, CONST_INT last if both constant. */
5821 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5822 && GET_CODE (arg1) != CONST_INT)
5823 tem = arg0, arg0 = arg1, arg1 = tem;
5825 /* If second argument is not now constant, not giv. */
5826 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5827 return NULL_RTX;
5829 /* Handle multiply by 0 or 1. */
5830 if (arg1 == const0_rtx)
5831 return const0_rtx;
5833 else if (arg1 == const1_rtx)
5834 return arg0;
5836 switch (GET_CODE (arg0))
5838 case REG:
5839 /* biv * invar. Done. */
5840 return gen_rtx_MULT (mode, arg0, arg1);
5842 case CONST_INT:
5843 /* Product of two constants. */
5844 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5846 case USE:
5847 /* invar * invar is a giv, but attempt to simplify it somehow. */
5848 if (GET_CODE (arg1) != CONST_INT)
5849 return NULL_RTX;
5851 arg0 = XEXP (arg0, 0);
5852 if (GET_CODE (arg0) == MULT)
5854 /* (invar_0 * invar_1) * invar_2. Associate. */
5855 return simplify_giv_expr (loop,
5856 gen_rtx_MULT (mode,
5857 XEXP (arg0, 0),
5858 gen_rtx_MULT (mode,
5857 XEXP (arg0, 1),
5861 arg1)),
5862 ext_val, benefit);
5864 /* Propagate the MULT expressions to the innermost nodes. */
5865 else if (GET_CODE (arg0) == PLUS)
5867 /* (invar_0 + invar_1) * invar_2. Distribute. */
5868 return simplify_giv_expr (loop,
5869 gen_rtx_PLUS (mode,
5870 gen_rtx_MULT (mode,
5871 XEXP (arg0, 0),
5873 arg1),
5874 gen_rtx_MULT (mode,
5875 XEXP (arg0, 1),
5877 arg1)),
5878 ext_val, benefit);
5880 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
5882 case MULT:
5883 /* (a * invar_1) * invar_2. Associate. */
5884 return simplify_giv_expr (loop,
5885 gen_rtx_MULT (mode,
5886 XEXP (arg0, 0),
5887 gen_rtx_MULT (mode,
5888 XEXP (arg0, 1),
5889 arg1)),
5890 ext_val, benefit);
5892 case PLUS:
5893 /* (a + invar_1) * invar_2. Distribute. */
5894 return simplify_giv_expr (loop,
5895 gen_rtx_PLUS (mode,
5896 gen_rtx_MULT (mode,
5897 XEXP (arg0, 0),
5898 arg1),
5899 gen_rtx_MULT (mode,
5900 XEXP (arg0, 1),
5901 arg1)),
5902 ext_val, benefit);
5904 default:
5905 abort ();
5908 case ASHIFT:
5909 /* Shift by constant is multiply by power of two. */
5910 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5911 return 0;
5913 return
5914 simplify_giv_expr (loop,
5915 gen_rtx_MULT (mode,
5916 XEXP (x, 0),
5917 GEN_INT ((HOST_WIDE_INT) 1
5918 << INTVAL (XEXP (x, 1)))),
5919 ext_val, benefit);
5921 case NEG:
5922 /* "-a" is "a * (-1)" */
5923 return simplify_giv_expr (loop,
5924 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5925 ext_val, benefit);
5927 case NOT:
5928 /* "~a" is "-a - 1". Silly, but easy. */
5929 return simplify_giv_expr (loop,
5930 gen_rtx_MINUS (mode,
5931 gen_rtx_NEG (mode, XEXP (x, 0)),
5932 const1_rtx),
5933 ext_val, benefit);
5935 case USE:
5936 /* Already in proper form for invariant. */
5937 return x;
5939 case SIGN_EXTEND:
5940 case ZERO_EXTEND:
5941 case TRUNCATE:
5942 /* Conditionally recognize extensions of simple IVs. After we've
5943 computed loop traversal counts and verified the range of the
5944 source IV, we'll reevaluate this as a GIV. */
5945 if (*ext_val == NULL_RTX)
5947 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5948 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
5950 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
5951 return arg0;
5954 goto do_default;
5956 case REG:
5957 /* If this is a new register, we can't deal with it. */
5958 if (REGNO (x) >= max_reg_before_loop)
5959 return 0;
5961 /* Check for biv or giv. */
5962 switch (REG_IV_TYPE (ivs, REGNO (x)))
5964 case BASIC_INDUCT:
5965 return x;
5966 case GENERAL_INDUCT:
5968 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
5970 /* Form expression from giv and add benefit. Ensure this giv
5971 can derive another and subtract any needed adjustment if so. */
5973 /* Increasing the benefit here is risky. The only case in which it
5974 is arguably correct is if this is the only use of V. In other
5975 cases, this will artificially inflate the benefit of the current
5976 giv, and lead to suboptimal code. Thus, it is disabled, since
5977 potentially not reducing an only marginally beneficial giv is
5978 less harmful than reducing many givs that are not really
5979 beneficial. */
5981 rtx single_use = regs->array[REGNO (x)].single_usage;
5982 if (single_use && single_use != const0_rtx)
5983 *benefit += v->benefit;
5986 if (v->cant_derive)
5987 return 0;
5989 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
5990 v->src_reg, v->mult_val),
5991 v->add_val);
5993 if (v->derive_adjustment)
5994 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5995 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
5996 if (*ext_val)
5998 if (!v->ext_dependant)
5999 return arg0;
6001 else
6003 *ext_val = v->ext_dependant;
6004 return arg0;
6006 return 0;
6009 default:
6010 do_default:
6011 /* If it isn't an induction variable, and it is invariant, we
6012 may be able to simplify things further by looking through
6013 the bits we just moved outside the loop. */
6014 if (loop_invariant_p (loop, x) == 1)
6016 struct movable *m;
6017 struct loop_movables *movables = LOOP_MOVABLES (loop);
6019 for (m = movables->head; m; m = m->next)
6020 if (rtx_equal_p (x, m->set_dest))
6022 /* Ok, we found a match. Substitute and simplify. */
6024 /* If we match another movable, we must use that, as
6025 this one is going away. */
6026 if (m->match)
6027 return simplify_giv_expr (loop, m->match->set_dest,
6028 ext_val, benefit);
6030 /* If consec is non-zero, this is a member of a group of
6031 instructions that were moved together. We handle this
6032 case only to the point of seeking to the last insn and
6033 looking for a REG_EQUAL. Fail if we don't find one. */
6034 if (m->consec != 0)
6036 int i = m->consec;
6037 tem = m->insn;
6040 tem = NEXT_INSN (tem);
6042 while (--i > 0);
6044 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6045 if (tem)
6046 tem = XEXP (tem, 0);
6048 else
6050 tem = single_set (m->insn);
6051 if (tem)
6052 tem = SET_SRC (tem);
6055 if (tem)
6057 /* What we are most interested in is pointer
6058 arithmetic on invariants -- only take
6059 patterns we may be able to do something with. */
6060 if (GET_CODE (tem) == PLUS
6061 || GET_CODE (tem) == MULT
6062 || GET_CODE (tem) == ASHIFT
6063 || GET_CODE (tem) == CONST_INT
6064 || GET_CODE (tem) == SYMBOL_REF)
6066 tem = simplify_giv_expr (loop, tem, ext_val,
6067 benefit);
6068 if (tem)
6069 return tem;
6071 else if (GET_CODE (tem) == CONST
6072 && GET_CODE (XEXP (tem, 0)) == PLUS
6073 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6074 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6076 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6077 ext_val, benefit);
6078 if (tem)
6079 return tem;
6082 break;
6085 break;
6088 /* Fall through to general case. */
6089 default:
6090 /* If invariant, return as USE (unless CONST_INT).
6091 Otherwise, not giv. */
6092 if (GET_CODE (x) == USE)
6093 x = XEXP (x, 0);
6095 if (loop_invariant_p (loop, x) == 1)
6097 if (GET_CODE (x) == CONST_INT)
6098 return x;
6099 if (GET_CODE (x) == CONST
6100 && GET_CODE (XEXP (x, 0)) == PLUS
6101 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6102 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6103 x = XEXP (x, 0);
6104 return gen_rtx_USE (mode, x);
6106 else
6107 return 0;
6111 /* This routine folds invariants such that there is only ever one
6112 CONST_INT in the summation. It is only used by simplify_giv_expr. */
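/* For instance (hypothetical operands):

       sge_plus_constant ((plus (reg a) (const_int 4)), (const_int 3))

   yields (plus (reg a) (const_int 7)) rather than stacking a second
   CONST_INT onto the sum.  */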
6114 static rtx
6115 sge_plus_constant (x, c)
6116 rtx x, c;
6118 if (GET_CODE (x) == CONST_INT)
6119 return GEN_INT (INTVAL (x) + INTVAL (c));
6120 else if (GET_CODE (x) != PLUS)
6121 return gen_rtx_PLUS (GET_MODE (x), x, c);
6122 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6124 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6125 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6127 else if (GET_CODE (XEXP (x, 0)) == PLUS
6128 || GET_CODE (XEXP (x, 1)) != PLUS)
6130 return gen_rtx_PLUS (GET_MODE (x),
6131 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6133 else
6135 return gen_rtx_PLUS (GET_MODE (x),
6136 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6140 static rtx
6141 sge_plus (mode, x, y)
6142 enum machine_mode mode;
6143 rtx x, y;
6145 while (GET_CODE (y) == PLUS)
6147 rtx a = XEXP (y, 0);
6148 if (GET_CODE (a) == CONST_INT)
6149 x = sge_plus_constant (x, a);
6150 else
6151 x = gen_rtx_PLUS (mode, x, a);
6152 y = XEXP (y, 1);
6154 if (GET_CODE (y) == CONST_INT)
6155 x = sge_plus_constant (x, y);
6156 else
6157 x = gen_rtx_PLUS (mode, x, y);
6158 return x;
6161 /* Help detect a giv that is calculated by several consecutive insns;
6162 for example,
6163 giv = biv * M
6164 giv = giv + A
6165 The caller has already identified the first insn P as having a giv as dest;
6166 we check that all other insns that set the same register follow
6167 immediately after P, that they alter nothing else,
6168 and that the result of the last is still a giv.
6170 The value is 0 if the reg set in P is not really a giv.
6171 Otherwise, the value is the amount gained by eliminating
6172 all the consecutive insns that compute the value.
6174 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6175 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6177 The coefficients of the ultimate giv value are stored in
6178 *MULT_VAL and *ADD_VAL. */
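/* Concretely (hypothetical insns): for the pair

       giv = biv * 4
       giv = giv + 8

   the ultimate value is biv * 4 + 8, so *MULT_VAL ends up as 4,
   *ADD_VAL as 8, and the returned benefit covers both insns.  */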
6180 static int
6181 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6182 add_val, mult_val, ext_val, last_consec_insn)
6183 const struct loop *loop;
6184 int first_benefit;
6185 rtx p;
6186 rtx src_reg;
6187 rtx dest_reg;
6188 rtx *add_val;
6189 rtx *mult_val;
6190 rtx *ext_val;
6191 rtx *last_consec_insn;
6193 struct loop_ivs *ivs = LOOP_IVS (loop);
6194 struct loop_regs *regs = LOOP_REGS (loop);
6195 int count;
6196 enum rtx_code code;
6197 int benefit;
6198 rtx temp;
6199 rtx set;
6201 /* Indicate that this is a giv so that we can update the value produced in
6202 each insn of the multi-insn sequence.
6204 This induction structure will be used only by the call to
6205 general_induction_var below, so we can allocate it on our stack.
6206 If this is a giv, our caller will replace the induct var entry with
6207 a new induction structure. */
6208 struct induction *v;
6210 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6211 return 0;
6213 v = (struct induction *) alloca (sizeof (struct induction));
6214 v->src_reg = src_reg;
6215 v->mult_val = *mult_val;
6216 v->add_val = *add_val;
6217 v->benefit = first_benefit;
6218 v->cant_derive = 0;
6219 v->derive_adjustment = 0;
6220 v->ext_dependant = NULL_RTX;
6222 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6223 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6225 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6227 while (count > 0)
6229 p = NEXT_INSN (p);
6230 code = GET_CODE (p);
6232 /* If libcall, skip to end of call sequence. */
6233 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6234 p = XEXP (temp, 0);
6236 if (code == INSN
6237 && (set = single_set (p))
6238 && GET_CODE (SET_DEST (set)) == REG
6239 && SET_DEST (set) == dest_reg
6240 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6241 add_val, mult_val, ext_val, 0,
6242 &benefit, VOIDmode)
6243 /* Giv created by equivalent expression. */
6244 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6245 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6246 add_val, mult_val, ext_val, 0,
6247 &benefit, VOIDmode)))
6248 && src_reg == v->src_reg)
6250 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6251 benefit += libcall_benefit (p);
6253 count--;
6254 v->mult_val = *mult_val;
6255 v->add_val = *add_val;
6256 v->benefit += benefit;
6258 else if (code != NOTE)
6260 /* Allow insns that set something other than this giv to a
6261 constant. Such insns are needed on machines which cannot
6262 include long constants and should not disqualify a giv. */
6263 if (code == INSN
6264 && (set = single_set (p))
6265 && SET_DEST (set) != dest_reg
6266 && CONSTANT_P (SET_SRC (set)))
6267 continue;
6269 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6270 return 0;
6274 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6275 *last_consec_insn = p;
6276 return v->benefit;
6279 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6280 represented by G1. If no such expression can be found, or it is clear that
6281 it cannot possibly be a valid address, 0 is returned.
6283 To perform the computation, we note that
6284 G1 = x * v + a and
6285 G2 = y * v + b
6286 where `v' is the biv.
6288 So G2 = (y/x) * G1 + (b - a*y/x).
6290 Note that MULT = y/x.
6292 Update: A and B are now allowed to be additive expressions such that
6293 B contains all variables in A. That is, computing B-A will not require
6294 subtracting variables. */
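/* Worked example (hypothetical constants): with G1 = 2*v + 1 and
   G2 = 6*v + 9, MULT is 6/2 = 3 and G2 = 3*G1 + (9 - 1*3), so the
   result is (plus (mult G1 3) 6).  */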
6296 static rtx
6297 express_from_1 (a, b, mult)
6298 rtx a, b, mult;
6300 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6302 if (mult == const0_rtx)
6303 return b;
6305 /* If MULT is not 1, we cannot handle A with non-constants, since we
6306 would then be required to subtract multiples of the registers in A.
6307 This is theoretically possible, and may even apply to some Fortran
6308 constructs, but it is a lot of work and we do not attempt it here. */
6310 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6311 return NULL_RTX;
6313 /* In general these structures are sorted top to bottom (down the PLUS
6314 chain), but not left to right across the PLUS. If B is a higher
6315 order giv than A, we can strip one level and recurse. If A is higher
6316 order, we'll eventually bail out, but won't know that until the end.
6317 If they are the same, we'll strip one level around this loop. */
6319 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6321 rtx ra, rb, oa, ob, tmp;
6323 ra = XEXP (a, 0), oa = XEXP (a, 1);
6324 if (GET_CODE (ra) == PLUS)
6325 tmp = ra, ra = oa, oa = tmp;
6327 rb = XEXP (b, 0), ob = XEXP (b, 1);
6328 if (GET_CODE (rb) == PLUS)
6329 tmp = rb, rb = ob, ob = tmp;
6331 if (rtx_equal_p (ra, rb))
6332 /* We matched: remove one reg completely. */
6333 a = oa, b = ob;
6334 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6335 /* An alternate match. */
6336 a = oa, b = rb;
6337 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6338 /* An alternate match. */
6339 a = ra, b = ob;
6340 else
6342 /* Indicates an extra register in B. Strip one level from B and
6343 recurse, hoping B was the higher order expression. */
6344 ob = express_from_1 (a, ob, mult);
6345 if (ob == NULL_RTX)
6346 return NULL_RTX;
6347 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6351 /* Here we are at the last level of A, go through the cases hoping to
6352 get rid of everything but a constant. */
6354 if (GET_CODE (a) == PLUS)
6356 rtx ra, oa;
6358 ra = XEXP (a, 0), oa = XEXP (a, 1);
6359 if (rtx_equal_p (oa, b))
6360 oa = ra;
6361 else if (!rtx_equal_p (ra, b))
6362 return NULL_RTX;
6364 if (GET_CODE (oa) != CONST_INT)
6365 return NULL_RTX;
6367 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6369 else if (GET_CODE (a) == CONST_INT)
6371 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6373 else if (CONSTANT_P (a))
6375 return simplify_gen_binary (MINUS, GET_MODE (b) != VOIDmode ? GET_MODE (b) : GET_MODE (a), b, a);
6377 else if (GET_CODE (b) == PLUS)
6379 if (rtx_equal_p (a, XEXP (b, 0)))
6380 return XEXP (b, 1);
6381 else if (rtx_equal_p (a, XEXP (b, 1)))
6382 return XEXP (b, 0);
6383 else
6384 return NULL_RTX;
6386 else if (rtx_equal_p (a, b))
6387 return const0_rtx;
6389 return NULL_RTX;
6393 express_from (g1, g2)
6394 struct induction *g1, *g2;
6396 rtx mult, add;
6398 /* The value that G1 will be multiplied by must be a constant integer. Also,
6399 the only chance we have of getting a valid address is if y/x (see above
6400 for notation) is also an integer. */
6401 if (GET_CODE (g1->mult_val) == CONST_INT
6402 && GET_CODE (g2->mult_val) == CONST_INT)
6404 if (g1->mult_val == const0_rtx
6405 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6406 return NULL_RTX;
6407 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6409 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6410 mult = const1_rtx;
6411 else
6413 /* ??? Find out if one is a multiple of the other? */
6414 return NULL_RTX;
6417 add = express_from_1 (g1->add_val, g2->add_val, mult);
6418 if (add == NULL_RTX)
6420 /* Failed. If we've got a multiplication factor between G1 and G2,
6421 scale G1's addend and try again. */
6422 if (INTVAL (mult) > 1)
6424 rtx g1_add_val = g1->add_val;
6425 if (GET_CODE (g1_add_val) == MULT
6426 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
6428 HOST_WIDE_INT m;
6429 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
6430 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
6431 XEXP (g1_add_val, 0), GEN_INT (m));
6433 else
6435 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
6436 mult);
6439 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
6442 if (add == NULL_RTX)
6443 return NULL_RTX;
6445 /* Form simplified final result. */
6446 if (mult == const0_rtx)
6447 return add;
6448 else if (mult == const1_rtx)
6449 mult = g1->dest_reg;
6450 else
6451 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6453 if (add == const0_rtx)
6454 return mult;
6455 else
6457 if (GET_CODE (add) == PLUS
6458 && CONSTANT_P (XEXP (add, 1)))
6460 rtx tem = XEXP (add, 1);
6461 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
6462 add = tem;
6465 return gen_rtx_PLUS (g2->mode, mult, add);
6469 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6470 represented by G1. This indicates that G2 should be combined with G1 and
6471 that G2 can use (either directly or via an address expression) a register
6472 used to represent G1. */
6474 static rtx
6475 combine_givs_p (g1, g2)
6476 struct induction *g1, *g2;
6478 rtx comb, ret;
6480 /* With the introduction of ext dependant givs, we must take care with modes.
6481 G2 must not use a wider mode than G1. */
6482 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
6483 return NULL_RTX;
6485 ret = comb = express_from (g1, g2);
6486 if (comb == NULL_RTX)
6487 return NULL_RTX;
6488 if (g1->mode != g2->mode)
6489 ret = gen_lowpart (g2->mode, comb);
6491 /* If these givs are identical, they can be combined. We use the results
6492 of express_from because the addends are not in a canonical form, so
6493 rtx_equal_p is a weaker test. */
6494 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
6495 combination to be the other way round. */
6496 if (comb == g1->dest_reg
6497 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
6499 return ret;
6502 /* If G2 can be expressed as a function of G1 and that function is valid
6503 as an address and no more expensive than using a register for G2,
6504 the expression of G2 in terms of G1 can be used. */
6505 if (ret != NULL_RTX
6506 && g2->giv_type == DEST_ADDR
6507 && memory_address_p (GET_MODE (g2->mem), ret)
6508 /* ??? Loses, especially with -fforce-addr, where *g2->location
6509 will always be a register, and so anything more complicated
6510 gets discarded. */
6511 #if 0
6512 #ifdef ADDRESS_COST
6513 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6514 #else
6515 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6516 #endif
6517 #endif
6520 return ret;
6523 return NULL_RTX;
6526 /* Check each extension dependant giv in this class to see if its
6527 root biv is safe from wrapping in the interior mode, which would
6528 make the giv illegal. */
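/* A sketch of the hazard (hypothetical values): a QImode biv stepping
   by +2 from 100 for 100 iterations ends at 300, which wraps to 44 in
   QImode; a giv built from (zero_extend:SI (reg biv)) would then
   compute the wrong value, so it must be invalidated.  */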
6530 static void
6531 check_ext_dependant_givs (bl, loop_info)
6532 struct iv_class *bl;
6533 struct loop_info *loop_info;
6535 int ze_ok = 0, se_ok = 0, info_ok = 0;
6536 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
6537 HOST_WIDE_INT start_val;
6538 unsigned HOST_WIDE_INT u_end_val = 0;
6539 unsigned HOST_WIDE_INT u_start_val = 0;
6540 rtx incr = pc_rtx;
6541 struct induction *v;
6543 /* Make sure the iteration data is available. We must have
6544 constants in order to be certain of no overflow. */
6545 /* ??? An unknown iteration count with an increment of +-1
6546 combined with friendly exit tests against an invariant
6547 value is also amenable to optimization. Not implemented. */
6548 if (loop_info->n_iterations > 0
6549 && bl->initial_value
6550 && GET_CODE (bl->initial_value) == CONST_INT
6551 && (incr = biv_total_increment (bl))
6552 && GET_CODE (incr) == CONST_INT
6553 /* Make sure the host can represent the arithmetic. */
6554 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
6556 unsigned HOST_WIDE_INT abs_incr, total_incr;
6557 HOST_WIDE_INT s_end_val;
6558 int neg_incr;
6560 info_ok = 1;
6561 start_val = INTVAL (bl->initial_value);
6562 u_start_val = start_val;
6564 neg_incr = 0, abs_incr = INTVAL (incr);
6565 if (INTVAL (incr) < 0)
6566 neg_incr = 1, abs_incr = -abs_incr;
6567 total_incr = abs_incr * loop_info->n_iterations;
6569 /* Check for host arithmetic overflow. */
6570 if (total_incr / loop_info->n_iterations == abs_incr)
6572 unsigned HOST_WIDE_INT u_max;
6573 HOST_WIDE_INT s_max;
6575 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
6576 s_end_val = u_end_val;
6577 u_max = GET_MODE_MASK (biv_mode);
6578 s_max = u_max >> 1;
6580 /* Check zero extension of biv ok. */
6581 if (start_val >= 0
6582 /* Check for host arithmetic overflow. */
6583 && (neg_incr
6584 ? u_end_val < u_start_val
6585 : u_end_val > u_start_val)
6586 /* Check for target arithmetic overflow. */
6587 && (neg_incr
6588 ? 1 /* taken care of with host overflow */
6589 : u_end_val <= u_max))
6591 ze_ok = 1;
6594 /* Check sign extension of biv ok. */
6595 /* ??? While it is true that overflow with signed and pointer
6596 arithmetic is undefined, I fear too many programmers don't
6597 keep this fact in mind -- myself included on occasion.
6598 So we leave the signed overflow optimizations alone. */
6599 if (start_val >= -s_max - 1
6600 /* Check for host arithmetic overflow. */
6601 && (neg_incr
6602 ? s_end_val < start_val
6603 : s_end_val > start_val)
6604 /* Check for target arithmetic overflow. */
6605 && (neg_incr
6606 ? s_end_val >= -s_max - 1
6607 : s_end_val <= s_max))
6609 se_ok = 1;
6614 /* Invalidate givs that fail the tests. */
6615 for (v = bl->giv; v; v = v->next_iv)
6616 if (v->ext_dependant)
6618 enum rtx_code code = GET_CODE (v->ext_dependant);
6619 int ok = 0;
6621 switch (code)
6623 case SIGN_EXTEND:
6624 ok = se_ok;
6625 break;
6626 case ZERO_EXTEND:
6627 ok = ze_ok;
6628 break;
6630 case TRUNCATE:
6631 /* We don't know whether this value is being used as either
6632 signed or unsigned, so to safely truncate we must satisfy
6633 both. The initial check here verifies the BIV itself;
6634 once that is successful we may check its range wrt the
6635 derived GIV. */
6636 if (se_ok && ze_ok)
6638 enum machine_mode outer_mode = GET_MODE (v->ext_dependant);
6639 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
6641 /* We know from the above that both endpoints are nonnegative,
6642 and that there is no wrapping. Verify that both endpoints
6643 are within the (signed) range of the outer mode. */
6644 if (u_start_val <= max && u_end_val <= max)
6645 ok = 1;
6647 break;
6649 default:
6650 abort ();
6653 if (ok)
6655 if (loop_dump_stream)
6657 fprintf (loop_dump_stream,
6658 "Verified ext dependant giv at %d of reg %d\n",
6659 INSN_UID (v->insn), bl->regno);
6662 else
6664 if (loop_dump_stream)
6666 const char *why;
6668 if (info_ok)
6669 why = "biv iteration values overflowed";
6670 else
6672 if (incr == pc_rtx)
6673 incr = biv_total_increment (bl);
6674 if (incr == const1_rtx)
6675 why = "biv iteration info incomplete; incr by 1";
6676 else
6677 why = "biv iteration info incomplete";
6680 fprintf (loop_dump_stream,
6681 "Failed ext dependant giv at %d, %s\n",
6682 INSN_UID (v->insn), why);
6684 v->ignore = 1;
6685 bl->all_reduced = 0;
6690 /* Generate a version of VALUE in a mode appropriate for initializing V. */
6693 extend_value_for_giv (v, value)
6694 struct induction *v;
6695 rtx value;
6697 rtx ext_dep = v->ext_dependant;
6699 if (! ext_dep)
6700 return value;
6702 /* Recall that check_ext_dependant_givs verified that the known bounds
6703 of a biv did not overflow or wrap with respect to the extension for
6704 the giv. Therefore, constants need no additional adjustment. */
6705 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
6706 return value;
6708 /* Otherwise, we must adjust the value to compensate for the
6709 differing modes of the biv and the giv. */
6710 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
6713 struct combine_givs_stats
6715 int giv_number;
6716 int total_benefit;
6719 static int
6720 cmp_combine_givs_stats (xp, yp)
6721 const PTR xp;
6722 const PTR yp;
6724 const struct combine_givs_stats * const x =
6725 (const struct combine_givs_stats *) xp;
6726 const struct combine_givs_stats * const y =
6727 (const struct combine_givs_stats *) yp;
6728 int d;
6729 d = y->total_benefit - x->total_benefit;
6730 /* Stabilize the sort. */
6731 if (!d)
6732 d = x->giv_number - y->giv_number;
6733 return d;
6736 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6737 any other. If so, point SAME to the giv it is combined with and set NEW_REG to
6738 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6739 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
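/* For example (hypothetical givs): if G1 computes i * 4 and G2 computes
   i * 4 + 4, then G2 can be rewritten as G1->dest_reg + 4; G2->same is
   pointed at G1 and G2->new_reg holds that expression.  */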
6741 static void
6742 combine_givs (regs, bl)
6743 struct loop_regs *regs;
6744 struct iv_class *bl;
6746 /* Additional benefit to add for being combined multiple times. */
6747 const int extra_benefit = 3;
6749 struct induction *g1, *g2, **giv_array;
6750 int i, j, k, giv_count;
6751 struct combine_givs_stats *stats;
6752 rtx *can_combine;
6754 /* Count givs, because bl->giv_count is incorrect here. */
6755 giv_count = 0;
6756 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6757 if (!g1->ignore)
6758 giv_count++;
6760 giv_array
6761 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6762 i = 0;
6763 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6764 if (!g1->ignore)
6765 giv_array[i++] = g1;
6767 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
6768 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
6770 for (i = 0; i < giv_count; i++)
6772 int this_benefit;
6773 rtx single_use;
6775 g1 = giv_array[i];
6776 stats[i].giv_number = i;
6778 /* If a DEST_REG GIV is used only once, do not allow it to combine
6779 with anything, for in doing so we will gain nothing that cannot
6780 be had by simply letting the GIV with which we would have combined
6781 to be reduced on its own. The lossage shows up in particular with
6782 DEST_ADDR targets on hosts with reg+reg addressing, though it can
6783 be seen elsewhere as well. */
6784 if (g1->giv_type == DEST_REG
6785 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
6786 && single_use != const0_rtx)
6787 continue;
6789 this_benefit = g1->benefit;
6790 /* Add an additional weight for zero addends. */
6791 if (g1->no_const_addval)
6792 this_benefit += 1;
6794 for (j = 0; j < giv_count; j++)
6796 rtx this_combine;
6798 g2 = giv_array[j];
6799 if (g1 != g2
6800 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6802 can_combine[i * giv_count + j] = this_combine;
6803 this_benefit += g2->benefit + extra_benefit;
6806 stats[i].total_benefit = this_benefit;
6809 /* Iterate, combining until we can't. */
6810 restart:
6811 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
6813 if (loop_dump_stream)
6815 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6816 for (k = 0; k < giv_count; k++)
6818 g1 = giv_array[stats[k].giv_number];
6819 if (!g1->combined_with && !g1->same)
6820 fprintf (loop_dump_stream, " {%d, %d}",
6821 INSN_UID (giv_array[stats[k].giv_number]->insn),
6822 stats[k].total_benefit);
6824 putc ('\n', loop_dump_stream);
6827 for (k = 0; k < giv_count; k++)
6829 int g1_add_benefit = 0;
6831 i = stats[k].giv_number;
6832 g1 = giv_array[i];
6834 /* If it has already been combined, skip. */
6835 if (g1->combined_with || g1->same)
6836 continue;
6838 for (j = 0; j < giv_count; j++)
6840 g2 = giv_array[j];
6841 if (g1 != g2 && can_combine[i * giv_count + j]
6842 /* If it has already been combined, skip. */
6843 && ! g2->same && ! g2->combined_with)
6845 int l;
6847 g2->new_reg = can_combine[i * giv_count + j];
6848 g2->same = g1;
6849 g1->combined_with++;
6850 g1->lifetime += g2->lifetime;
6852 g1_add_benefit += g2->benefit;
6854 /* ??? The new final_[bg]iv_value code does a much better job
6855 of finding replaceable giv's, and hence this code may no
6856 longer be necessary. */
6857 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6858 g1_add_benefit -= copy_cost;
6860 /* To help optimize the next set of combinations, remove
6861 this giv from the benefits of other potential mates. */
6862 for (l = 0; l < giv_count; ++l)
6864 int m = stats[l].giv_number;
6865 if (can_combine[m * giv_count + j])
6866 stats[l].total_benefit -= g2->benefit + extra_benefit;
6869 if (loop_dump_stream)
6870 fprintf (loop_dump_stream,
6871 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
6872 INSN_UID (g2->insn), INSN_UID (g1->insn),
6873 g1->benefit, g1_add_benefit, g1->lifetime);
6877 /* To help optimize the next set of combinations, remove
6878 this giv from the benefits of other potential mates. */
6879 if (g1->combined_with)
6881 for (j = 0; j < giv_count; ++j)
6883 int m = stats[j].giv_number;
6884 if (can_combine[m * giv_count + i])
6885 stats[j].total_benefit -= g1->benefit + extra_benefit;
6888 g1->benefit += g1_add_benefit;
6890 /* We've finished with this giv, and everything it touched.
6891 Restart the combination so that proper weights for the
6892 rest of the givs are taken into account. */
6893 /* ??? Ideally we would compact the arrays at this point, so
6894 as to not cover old ground. But sanely compacting
6895 can_combine is tricky. */
6896 goto restart;
6900 /* Clean up. */
6901 free (stats);
6902 free (can_combine);
6905 /* Generate sequence for REG = B * M + A. */
6907 static rtx
6908 gen_add_mult (b, m, a, reg)
6909 rtx b; /* initial value of basic induction variable */
6910 rtx m; /* multiplicative constant */
6911 rtx a; /* additive constant */
6912 rtx reg; /* destination register */
6914 rtx seq;
6915 rtx result;
6917 start_sequence ();
6918 /* Use unsigned arithmetic. */
6919 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
6920 if (reg != result)
6921 emit_move_insn (reg, result);
6922 seq = gen_sequence ();
6923 end_sequence ();
6925 return seq;
6929 /* Update registers created in insn sequence SEQ. */
6931 static void
6932 loop_regs_update (loop, seq)
6933 const struct loop *loop ATTRIBUTE_UNUSED;
6934 rtx seq;
6936 /* Update register info for alias analysis. */
6938 if (GET_CODE (seq) == SEQUENCE)
6940 int i;
6941 for (i = 0; i < XVECLEN (seq, 0); ++i)
6943 rtx set = single_set (XVECEXP (seq, 0, i));
6944 if (set && GET_CODE (SET_DEST (set)) == REG)
6945 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6948 else
6950 rtx set = single_set (seq);
6951 if (set && GET_CODE (SET_DEST (set)) == REG)
6952 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6957 /* Emit insns before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
6959 void
6960 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
6961 const struct loop *loop;
6962 rtx b; /* initial value of basic induction variable */
6963 rtx m; /* multiplicative constant */
6964 rtx a; /* additive constant */
6965 rtx reg; /* destination register */
6966 basic_block before_bb;
6967 rtx before_insn;
6969 rtx seq;
6971 if (! before_insn)
6973 loop_iv_add_mult_hoist (loop, b, m, a, reg);
6974 return;
6977 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
6978 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
6980 /* Increase the lifetime of any invariants moved further in code. */
6981 update_reg_last_use (a, before_insn);
6982 update_reg_last_use (b, before_insn);
6983 update_reg_last_use (m, before_insn);
6985 loop_insn_emit_before (loop, before_bb, before_insn, seq);
6987 /* It is possible that the expansion created lots of new registers.
6988 Iterate over the sequence we just created and record them all. */
6989 loop_regs_update (loop, seq);
6993 /* Emit insns after the loop to set REG = B * M + A. */
6995 void
6996 loop_iv_add_mult_sink (loop, b, m, a, reg)
6997 const struct loop *loop;
6998 rtx b; /* initial value of basic induction variable */
6999 rtx m; /* multiplicative constant */
7000 rtx a; /* additive constant */
7001 rtx reg; /* destination register */
7003 rtx seq;
7005 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7006 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7008 /* Increase the lifetime of any invariants moved further in code.
7009 ??? Is this really necessary? */
7010 update_reg_last_use (a, loop->sink);
7011 update_reg_last_use (b, loop->sink);
7012 update_reg_last_use (m, loop->sink);
7014 loop_insn_sink (loop, seq);
7016 /* It is possible that the expansion created lots of new registers.
7017 Iterate over the sequence we just created and record them all. */
7018 loop_regs_update (loop, seq);
7022 /* Emit insns in loop pre-header to set REG = B * M + A. */
7024 void
7025 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7026 const struct loop *loop;
7027 rtx b; /* initial value of basic induction variable */
7028 rtx m; /* multiplicative constant */
7029 rtx a; /* additive constant */
7030 rtx reg; /* destination register */
7032 rtx seq;
7034 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7035 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7037 loop_insn_hoist (loop, seq);
7039 /* It is possible that the expansion created lots of new registers.
7040 Iterate over the sequence we just created and record them all. */
7041 loop_regs_update (loop, seq);
7046 /* Similar to gen_add_mult, but compute cost rather than generating
7047 sequence. */
7049 static int
7050 iv_add_mult_cost (b, m, a, reg)
7051 rtx b; /* initial value of basic induction variable */
7052 rtx m; /* multiplicative constant */
7053 rtx a; /* additive constant */
7054 rtx reg; /* destination register */
7056 int cost = 0;
7057 rtx last, result;
7059 start_sequence ();
7060 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7061 if (reg != result)
7062 emit_move_insn (reg, result);
7063 last = get_last_insn ();
7064 while (last)
7066 rtx t = single_set (last);
7067 if (t)
7068 cost += rtx_cost (SET_SRC (t), SET);
7069 last = PREV_INSN (last);
7071 end_sequence ();
7072 return cost;
7075 /* Test whether A * B can be computed without
7076 an actual multiply insn. Value is 1 if so. */
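/* E.g. (target dependent): a multiply by 5 usually expands to a short
   shift-add sequence such as (x << 2) + x, which counts as cheap here,
   whereas a constant that needs a real mult insn or a libcall does
   not.  */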
7078 static int
7079 product_cheap_p (a, b)
7080 rtx a;
7081 rtx b;
7083 int i;
7084 rtx tmp;
7085 int win = 1;
7087 /* If only one is constant, make it B. */
7088 if (GET_CODE (a) == CONST_INT)
7089 tmp = a, a = b, b = tmp;
7091 /* If first constant, both constant, so don't need multiply. */
7092 if (GET_CODE (a) == CONST_INT)
7093 return 1;
7095 /* If second not constant, neither is constant, so would need multiply. */
7096 if (GET_CODE (b) != CONST_INT)
7097 return 0;
7099 /* One operand is constant, so might not need multiply insn. Generate the
7100 code for the multiply and see if a call or multiply, or long sequence
7101 of insns is generated. */
7103 start_sequence ();
7104 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7105 tmp = gen_sequence ();
7106 end_sequence ();
7108 if (GET_CODE (tmp) == SEQUENCE)
7110 if (XVEC (tmp, 0) == 0)
7111 win = 1;
7112 else if (XVECLEN (tmp, 0) > 3)
7113 win = 0;
7114 else
7115 for (i = 0; i < XVECLEN (tmp, 0); i++)
7117 rtx insn = XVECEXP (tmp, 0, i);
7119 if (GET_CODE (insn) != INSN
7120 || (GET_CODE (PATTERN (insn)) == SET
7121 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7122 || (GET_CODE (PATTERN (insn)) == PARALLEL
7123 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7124 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7126 win = 0;
7127 break;
7131 else if (GET_CODE (tmp) == SET
7132 && GET_CODE (SET_SRC (tmp)) == MULT)
7133 win = 0;
7134 else if (GET_CODE (tmp) == PARALLEL
7135 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7136 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7137 win = 0;
7139 return win;
7142 /* Check to see if loop can be terminated by a "decrement and branch until
7143 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7144 Also try reversing an increment loop to a decrement loop
7145 to see if the optimization can be performed.
7146 Value is nonzero if optimization was performed. */
7148 /* This is useful even if the architecture doesn't have such an insn,
7149 because it might change a loop which increments from 0 to n to a loop
7150 which decrements from n to 0. A loop that decrements to zero is usually
7151 faster than one that increments from zero. */
7153 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7154 such as approx_final_value, biv_total_increment, loop_iterations, and
7155 final_[bg]iv_value. */
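/* Schematically (source level, and only valid when the biv is used for
   nothing but counting): the reversal turns

       for (i = 0; i < n; i++) body;

   into the equivalent of

       for (i = n; --i >= 0; ) body;

   so the exit test compares against zero rather than against N.  */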
7157 static int
7158 check_dbra_loop (loop, insn_count)
7159 struct loop *loop;
7160 int insn_count;
7162 struct loop_info *loop_info = LOOP_INFO (loop);
7163 struct loop_regs *regs = LOOP_REGS (loop);
7164 struct loop_ivs *ivs = LOOP_IVS (loop);
7165 struct iv_class *bl;
7166 rtx reg;
7167 rtx jump_label;
7168 rtx final_value;
7169 rtx start_value;
7170 rtx new_add_val;
7171 rtx comparison;
7172 rtx before_comparison;
7173 rtx p;
7174 rtx jump;
7175 rtx first_compare;
7176 int compare_and_branch;
7177 rtx loop_start = loop->start;
7178 rtx loop_end = loop->end;
7180 /* If last insn is a conditional branch, and the insn before tests a
7181 register value, try to optimize it. Otherwise, we can't do anything. */
7183 jump = PREV_INSN (loop_end);
7184 comparison = get_condition_for_loop (loop, jump);
7185 if (comparison == 0)
7186 return 0;
7187 if (!onlyjump_p (jump))
7188 return 0;
7190 /* Try to compute whether the compare/branch at the loop end is one or
7191 two instructions. */
7192 get_condition (jump, &first_compare);
7193 if (first_compare == jump)
7194 compare_and_branch = 1;
7195 else if (first_compare == prev_nonnote_insn (jump))
7196 compare_and_branch = 2;
7197 else
7198 return 0;
7201 /* If more than one condition is present to control the loop, then
7202 do not proceed, as this function does not know how to rewrite
7203 loop tests with more than one condition.
7205 Look backwards from the first insn in the last comparison
7206 sequence and see if we've got another comparison sequence. */
7208 rtx jump1;
7209 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7210 if (GET_CODE (jump1) == JUMP_INSN)
7211 return 0;
7214 /* Check all of the bivs to see if the compare uses one of them.
7215 Skip biv's set more than once because we can't guarantee that
7216 it will be zero on the last iteration. Also skip if the biv is
7217 used between its update and the test insn. */
7219 for (bl = ivs->list; bl; bl = bl->next)
7221 if (bl->biv_count == 1
7222 && ! bl->biv->maybe_multiple
7223 && bl->biv->dest_reg == XEXP (comparison, 0)
7224 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7225 first_compare))
7226 break;
7229 if (! bl)
7230 return 0;
7232 /* Look for the case where the basic induction variable is always
7233 nonnegative, and equals zero on the last iteration.
7234 In this case, add a reg_note REG_NONNEG, which allows the
7235 m68k DBRA instruction to be used. */
7237 if (((GET_CODE (comparison) == GT
7238 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7239 && INTVAL (XEXP (comparison, 1)) == -1)
7240 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7241 && GET_CODE (bl->biv->add_val) == CONST_INT
7242 && INTVAL (bl->biv->add_val) < 0)
7244 /* Initial value must be greater than 0,
7245 init_val % -dec_value == 0 to ensure that it equals zero on
7246 the last iteration */
7248 if (GET_CODE (bl->initial_value) == CONST_INT
7249 && INTVAL (bl->initial_value) > 0
7250 && (INTVAL (bl->initial_value)
7251 % (-INTVAL (bl->biv->add_val))) == 0)
7253 /* register always nonnegative, add REG_NOTE to branch */
7254 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7255 REG_NOTES (jump)
7256 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7257 REG_NOTES (jump));
7258 bl->nonneg = 1;
7260 return 1;
7263 /* If the decrement is 1 and the value was tested as >= 0 before
7264 the loop, then we can safely optimize. */
7265 for (p = loop_start; p; p = PREV_INSN (p))
7267 if (GET_CODE (p) == CODE_LABEL)
7268 break;
7269 if (GET_CODE (p) != JUMP_INSN)
7270 continue;
7272 before_comparison = get_condition_for_loop (loop, p);
7273 if (before_comparison
7274 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7275 && GET_CODE (before_comparison) == LT
7276 && XEXP (before_comparison, 1) == const0_rtx
7277 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7278 && INTVAL (bl->biv->add_val) == -1)
7280 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7281 REG_NOTES (jump)
7282 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7283 REG_NOTES (jump));
7284 bl->nonneg = 1;
7286 return 1;
7290 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7291 && INTVAL (bl->biv->add_val) > 0)
7293 /* Try to change inc to dec, so can apply above optimization. */
7294 /* Can do this if:
7295 all registers modified are induction variables or invariant,
7296 all memory references have non-overlapping addresses
7297 (obviously true if only one write)
7298 allow 2 insns for the compare/jump at the end of the loop. */
7299 /* Also, we must avoid any instructions which use both the reversed
7300 biv and another biv. Such instructions will fail if the loop is
7301 reversed. We meet this condition by requiring that either
7302 no_use_except_counting is true, or else that there is only
7303 one biv. */
7304 int num_nonfixed_reads = 0;
7305 /* 1 if the iteration var is used only to count iterations. */
7306 int no_use_except_counting = 0;
7307 /* 1 if the loop has no memory store, or it has a single memory store
7308 which is reversible. */
7309 int reversible_mem_store = 1;
7311 if (bl->giv_count == 0 && ! loop->exit_count)
7313 rtx bivreg = regno_reg_rtx[bl->regno];
7314 struct iv_class *blt;
7316 /* If there are no givs for this biv, and the only exit is the
7317 fall through at the end of the loop, then
7318 see if perhaps there are no uses except to count. */
7319 no_use_except_counting = 1;
7320 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7321 if (INSN_P (p))
7323 rtx set = single_set (p);
7325 if (set && GET_CODE (SET_DEST (set)) == REG
7326 && REGNO (SET_DEST (set)) == bl->regno)
7327 /* An insn that sets the biv is okay. */
7329 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
7330 || p == prev_nonnote_insn (loop_end))
7331 && reg_mentioned_p (bivreg, PATTERN (p)))
7333 /* If either of these insns uses the biv and sets a pseudo
7334 that has more than one usage, then the biv has uses
7335 other than counting since it's used to derive a value
7336 that is used more than one time. */
7337 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
7338 regs);
7339 if (regs->multiple_uses)
7341 no_use_except_counting = 0;
7342 break;
7345 else if (reg_mentioned_p (bivreg, PATTERN (p)))
7347 no_use_except_counting = 0;
7348 break;
7352 /* A biv has uses besides counting if it is used to set another biv. */
7353 for (blt = ivs->list; blt; blt = blt->next)
7354 if (blt->init_set && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
7356 no_use_except_counting = 0;
7357 break;
7361 if (no_use_except_counting)
7362 /* No need to worry about MEMs. */
7364 else if (loop_info->num_mem_sets <= 1)
7366 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7367 if (INSN_P (p))
7368 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
7370 /* If the loop has a single store, and the destination address is
7371 invariant, then we can't reverse the loop, because this address
7372 might then have the wrong value at loop exit.
7373 This would work if the source was invariant also, however, in that
7374 case, the insn should have been moved out of the loop. */
7376 if (loop_info->num_mem_sets == 1)
7378 struct induction *v;
7380 /* If we could prove that each of the memory locations
7381 written to was different, then we could reverse the
7382 store -- but we don't presently have any way of
7383 knowing that. */
7384 reversible_mem_store = 0;
7386 /* If the store depends on a register that is set after the
7387 store, it depends on the initial value, and is thus not
7388 reversible. */
7389 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
7391 if (v->giv_type == DEST_REG
7392 && reg_mentioned_p (v->dest_reg,
7393 PATTERN (loop_info->first_loop_store_insn))
7394 && loop_insn_first_p (loop_info->first_loop_store_insn,
7395 v->insn))
7396 reversible_mem_store = 0;
7400 else
7401 return 0;
7403 /* This code only acts for innermost loops. Also it simplifies
7404 the memory address check by only reversing loops with
7405 zero or one memory access.
7406 Two memory accesses could involve parts of the same array,
7407 and that can't be reversed.
7408 If the biv is used only for counting, then we don't need to worry
7409 about all these things. */
7411 if ((num_nonfixed_reads <= 1
7412 && ! loop_info->has_nonconst_call
7413 && ! loop_info->has_volatile
7414 && reversible_mem_store
7415 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
7416 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
7417 && (bl == ivs->list && bl->next == 0))
7418 || no_use_except_counting)
7420 rtx tem;
7422 /* Loop can be reversed. */
7423 if (loop_dump_stream)
7424 fprintf (loop_dump_stream, "Can reverse loop\n");
7426 /* Now check other conditions:
7428 The increment must be a constant, as must the initial value,
7429 and the comparison code must be LT.
7431 This test can probably be improved since +/- 1 in the constant
7432 can be obtained by changing LT to LE and vice versa; this is
7433 confusing. */
7435 if (comparison
7436 /* for constants, LE gets turned into LT */
7437 && (GET_CODE (comparison) == LT
7438 || (GET_CODE (comparison) == LE
7439 && no_use_except_counting)))
7441 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
7442 rtx initial_value, comparison_value;
7443 int nonneg = 0;
7444 enum rtx_code cmp_code;
7445 int comparison_const_width;
7446 unsigned HOST_WIDE_INT comparison_sign_mask;
7448 add_val = INTVAL (bl->biv->add_val);
7449 comparison_value = XEXP (comparison, 1);
7450 if (GET_MODE (comparison_value) == VOIDmode)
7451 comparison_const_width
7452 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
7453 else
7454 comparison_const_width
7455 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
7456 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
7457 comparison_const_width = HOST_BITS_PER_WIDE_INT;
7458 comparison_sign_mask
7459 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
7461 /* If the comparison value is not a loop invariant, then we
7462 cannot reverse this loop.
7464 ??? If the insns which initialize the comparison value as
7465 a whole compute an invariant result, then we could move
7466 them out of the loop and proceed with loop reversal. */
7467 if (! loop_invariant_p (loop, comparison_value))
7468 return 0;
7470 if (GET_CODE (comparison_value) == CONST_INT)
7471 comparison_val = INTVAL (comparison_value);
7472 initial_value = bl->initial_value;
7474 /* Normalize the initial value if it is an integer and
7475 has no other use except as a counter. This will allow
7476 a few more loops to be reversed. */
7477 if (no_use_except_counting
7478 && GET_CODE (comparison_value) == CONST_INT
7479 && GET_CODE (initial_value) == CONST_INT)
7481 comparison_val = comparison_val - INTVAL (bl->initial_value);
7482 /* The code below requires comparison_val to be a multiple
7483 of add_val in order to do the loop reversal, so
7484 round up comparison_val to a multiple of add_val.
7485 Since comparison_value is constant, we know that the
7486 current comparison code is LT. */
7487 comparison_val = comparison_val + add_val - 1;
7488 comparison_val
7489 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
7490 /* We postpone overflow checks for COMPARISON_VAL here;
7491 even if there is an overflow, we might still be able to
7492 reverse the loop, if converting the loop exit test to
7493 NE is possible. */
7494 initial_value = const0_rtx;
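/* Worked example of the normalization (illustrative values): with
   initial value 3, comparison value 10 and add_val 4, comparison_val
   becomes 10 - 3 = 7, rounded up to 8.  Counting 0, 4 against 8 (or,
   reversed, 8, 4 down to 0) gives the same two iterations as the
   original 3, 7 against 10. */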
7497 /* First check if we can do a vanilla loop reversal. */
7498 if (initial_value == const0_rtx
7499 /* If we have a decrement_and_branch_on_count,
7500 prefer the NE test, since this will allow that
7501 instruction to be generated. Note that we must
7502 use a vanilla loop reversal if the biv is used to
7503 calculate a giv or has a non-counting use. */
7504 #if ! defined (HAVE_decrement_and_branch_until_zero) \
7505 && defined (HAVE_decrement_and_branch_on_count)
7506 && (! (add_val == 1 && loop->vtop
7507 && (bl->biv_count == 0
7508 || no_use_except_counting)))
7509 #endif
7510 && GET_CODE (comparison_value) == CONST_INT
7511 /* Now do postponed overflow checks on COMPARISON_VAL. */
7512 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
7513 & comparison_sign_mask))
7515 /* Register will always be nonnegative, with value
7516 0 on last iteration */
7517 add_adjust = add_val;
7518 nonneg = 1;
7519 cmp_code = GE;
7521 else if (add_val == 1 && loop->vtop
7522 && (bl->biv_count == 0
7523 || no_use_except_counting))
7525 add_adjust = 0;
7526 cmp_code = NE;
7528 else
7529 return 0;
7531 if (GET_CODE (comparison) == LE)
7532 add_adjust -= add_val;
7534 /* If the initial value is not zero, or if the comparison
7535 value is not an exact multiple of the increment, then we
7536 cannot reverse this loop. */
7537 if (initial_value == const0_rtx
7538 && GET_CODE (comparison_value) == CONST_INT)
7540 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
7541 return 0;
7543 else
7545 if (! no_use_except_counting || add_val != 1)
7546 return 0;
7549 final_value = comparison_value;
7551 /* Reset these in case we normalized the initial value
7552 and comparison value above. */
7553 if (GET_CODE (comparison_value) == CONST_INT
7554 && GET_CODE (initial_value) == CONST_INT)
7556 comparison_value = GEN_INT (comparison_val);
7557 final_value
7558 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
7560 bl->initial_value = initial_value;
7562 /* Save some info needed to produce the new insns. */
7563 reg = bl->biv->dest_reg;
7564 jump_label = condjump_label (PREV_INSN (loop_end));
7565 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
7567 /* Set start_value; if this is not a CONST_INT, we need
7568 to generate a SUB.
7569 Initialize biv to start_value before loop start.
7570 The old initializing insn will be deleted as a
7571 dead store by flow.c. */
7572 if (initial_value == const0_rtx
7573 && GET_CODE (comparison_value) == CONST_INT)
7575 start_value = GEN_INT (comparison_val - add_adjust);
7576 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
7578 else if (GET_CODE (initial_value) == CONST_INT)
7580 enum machine_mode mode = GET_MODE (reg);
7581 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
7582 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
7584 if (add_insn == 0)
7585 return 0;
7587 start_value
7588 = gen_rtx_PLUS (mode, comparison_value, offset);
7589 loop_insn_hoist (loop, add_insn);
7590 if (GET_CODE (comparison) == LE)
7591 final_value = gen_rtx_PLUS (mode, comparison_value,
7592 GEN_INT (add_val));
7594 else if (! add_adjust)
7596 enum machine_mode mode = GET_MODE (reg);
7597 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
7598 initial_value);
7600 if (sub_insn == 0)
7601 return 0;
7602 start_value
7603 = gen_rtx_MINUS (mode, comparison_value, initial_value);
7604 loop_insn_hoist (loop, sub_insn);
7606 else
7607 /* We could handle the other cases too, but it'll be
7608 better to have a testcase first. */
7609 return 0;
7611 /* We may not have a single insn which can increment a reg, so
7612 create a sequence to hold all the insns from expand_inc. */
7613 start_sequence ();
7614 expand_inc (reg, new_add_val);
7615 tem = gen_sequence ();
7616 end_sequence ();
7618 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
7619 delete_insn (bl->biv->insn);
7621 /* Update biv info to reflect its new status. */
7622 bl->biv->insn = p;
7623 bl->initial_value = start_value;
7624 bl->biv->add_val = new_add_val;
7626 /* Update loop info. */
7627 loop_info->initial_value = reg;
7628 loop_info->initial_equiv_value = reg;
7629 loop_info->final_value = const0_rtx;
7630 loop_info->final_equiv_value = const0_rtx;
7631 loop_info->comparison_value = const0_rtx;
7632 loop_info->comparison_code = cmp_code;
7633 loop_info->increment = new_add_val;
7635 /* Inc LABEL_NUSES so that delete_insn will
7636 not delete the label. */
7637 LABEL_NUSES (XEXP (jump_label, 0))++;
7639 /* Emit an insn after the end of the loop to set the biv's
7640 proper exit value if it is used anywhere outside the loop. */
7641 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
7642 || ! bl->init_insn
7643 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
7644 loop_insn_sink (loop, gen_move_insn (reg, final_value));
7646 /* Delete compare/branch at end of loop. */
7647 delete_insn (PREV_INSN (loop_end));
7648 if (compare_and_branch == 2)
7649 delete_insn (first_compare);
7651 /* Add new compare/branch insn at end of loop. */
7652 start_sequence ();
7653 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
7654 GET_MODE (reg), 0, 0,
7655 XEXP (jump_label, 0));
7656 tem = gen_sequence ();
7657 end_sequence ();
7658 emit_jump_insn_before (tem, loop_end);
7660 for (tem = PREV_INSN (loop_end);
7661 tem && GET_CODE (tem) != JUMP_INSN;
7662 tem = PREV_INSN (tem))
7665 if (tem)
7666 JUMP_LABEL (tem) = XEXP (jump_label, 0);
7668 if (nonneg)
7670 if (tem)
7672 /* Increment of LABEL_NUSES done above. */
7673 /* Register is now always nonnegative,
7674 so add REG_NONNEG note to the branch. */
7675 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
7676 REG_NOTES (tem));
7678 bl->nonneg = 1;
7681 /* No insn may reference both the reversed and another biv or it
7682 will fail (see comment near the top of the loop reversal
7683 code).
7684 Earlier on, we have verified that the biv has no use except
7685 counting, or it is the only biv in this loop.
7686 However, the code that computes no_use_except_counting does
7687 not verify reg notes. It's possible to have an insn that
7688 references another biv, and has a REG_EQUAL note with an
7689 expression based on the reversed biv. To avoid this case,
7690 remove all REG_EQUAL notes based on the reversed biv
7691 here. */
7692 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7693 if (INSN_P (p))
7695 rtx *pnote;
7696 rtx set = single_set (p);
7697 /* If this is a set of a GIV based on the reversed biv, any
7698 REG_EQUAL notes should still be correct. */
7699 if (! set
7700 || GET_CODE (SET_DEST (set)) != REG
7701 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
7702 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
7703 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
7704 for (pnote = &REG_NOTES (p); *pnote;)
7706 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
7707 && reg_mentioned_p (regno_reg_rtx[bl->regno],
7708 XEXP (*pnote, 0)))
7709 *pnote = XEXP (*pnote, 1);
7710 else
7711 pnote = &XEXP (*pnote, 1);
7715 /* Mark that this biv has been reversed. Each giv which depends
7716 on this biv, and which is also live past the end of the loop
7717 will have to be fixed up. */
7719 bl->reversed = 1;
7721 if (loop_dump_stream)
7723 fprintf (loop_dump_stream, "Reversed loop");
7724 if (bl->nonneg)
7725 fprintf (loop_dump_stream, " and added reg_nonneg\n");
7726 else
7727 fprintf (loop_dump_stream, "\n");
7730 return 1;
7735 return 0;
7738 /* Verify whether the biv BL appears to be eliminable,
7739 based on the insns in the loop that refer to it.
7741 If ELIMINATE_P is non-zero, actually do the elimination.
7743 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
7744 determine whether invariant insns should be placed inside or at the
7745 start of the loop. */
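/* Illustrative example, not taken from the sources: in

       for (i = 0, p = a; i < n; i++, p++)
         *p = 0;

   the exit test I < N can be replaced by the giv-based test
   P < A + N, after which the biv I has no remaining uses and its
   increment can be deleted. */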
7747 static int
7748 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
7749 const struct loop *loop;
7750 struct iv_class *bl;
7751 int eliminate_p;
7752 int threshold, insn_count;
7754 struct loop_ivs *ivs = LOOP_IVS (loop);
7755 rtx reg = bl->biv->dest_reg;
7756 rtx p;
7758 /* Scan all insns in the loop, stopping if we find one that uses the
7759 biv in a way that we cannot eliminate. */
7761 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
7763 enum rtx_code code = GET_CODE (p);
7764 basic_block where_bb = 0;
7765 rtx where_insn = threshold >= insn_count ? 0 : p;
7767 /* If this is a libcall that sets a giv, skip ahead to its end. */
7768 if (GET_RTX_CLASS (code) == 'i')
7770 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
7772 if (note)
7774 rtx last = XEXP (note, 0);
7775 rtx set = single_set (last);
7777 if (set && GET_CODE (SET_DEST (set)) == REG)
7779 unsigned int regno = REGNO (SET_DEST (set));
7781 if (regno < ivs->n_regs
7782 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
7783 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
7784 p = last;
7788 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
7789 && reg_mentioned_p (reg, PATTERN (p))
7790 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
7791 eliminate_p, where_bb, where_insn))
7793 if (loop_dump_stream)
7794 fprintf (loop_dump_stream,
7795 "Cannot eliminate biv %d: biv used in insn %d.\n",
7796 bl->regno, INSN_UID (p));
7797 break;
7801 if (p == loop->end)
7803 if (loop_dump_stream)
7804 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
7805 bl->regno, eliminate_p ? "was" : "can be");
7806 return 1;
7809 return 0;
7812 /* INSN and REFERENCE are instructions in the same insn chain.
7813 Return non-zero if INSN is first. */
7815 int
7816 loop_insn_first_p (insn, reference)
7817 rtx insn, reference;
7819 rtx p, q;
7821 for (p = insn, q = reference;;)
7823 /* Start with test for not first so that INSN == REFERENCE yields not
7824 first. */
7825 if (q == insn || ! p)
7826 return 0;
7827 if (p == reference || ! q)
7828 return 1;
7830 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
7831 previous insn, hence the <= comparison below does not work if
7832 P is a note. */
7833 if (INSN_UID (p) < max_uid_for_loop
7834 && INSN_UID (q) < max_uid_for_loop
7835 && GET_CODE (p) != NOTE)
7836 return INSN_LUID (p) <= INSN_LUID (q);
7838 if (INSN_UID (p) >= max_uid_for_loop
7839 || GET_CODE (p) == NOTE)
7840 p = NEXT_INSN (p);
7841 if (INSN_UID (q) >= max_uid_for_loop)
7842 q = NEXT_INSN (q);
7846 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
7847 the offset that we have to take into account due to auto-increment /
7848 giv derivation is zero. */
7849 static int
7850 biv_elimination_giv_has_0_offset (biv, giv, insn)
7851 struct induction *biv, *giv;
7852 rtx insn;
7854 /* If the giv V had the auto-inc address optimization applied
7855 to it, and INSN occurs between the giv insn and the biv
7856 insn, then we'd have to adjust the value used here.
7857 This is rare, so we don't bother to make this possible. */
7858 if (giv->auto_inc_opt
7859 && ((loop_insn_first_p (giv->insn, insn)
7860 && loop_insn_first_p (insn, biv->insn))
7861 || (loop_insn_first_p (biv->insn, insn)
7862 && loop_insn_first_p (insn, giv->insn))))
7863 return 0;
7865 return 1;
7868 /* If BL appears in X (part of the pattern of INSN), see if we can
7869 eliminate its use. If so, return 1. If not, return 0.
7871 If BIV does not appear in X, return 1.
7873 If ELIMINATE_P is non-zero, actually do the elimination.
7874 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
7875 Depending on how many items have been moved out of the loop, it
7876 will either be before INSN (when WHERE_INSN is non-zero) or at the
7877 start of the loop (when WHERE_INSN is zero). */
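/* Illustrative example: if X contains (lt (reg biv) (const_int 100))
   and a giv with mult_val 4 and add_val BASE has been reduced to
   (reg giv), the test can be rewritten as a comparison of (reg giv)
   against BASE + 400, eliminating this use of the biv.  The cases
   below implement exactly this kind of rewrite. */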
7879 static int
7880 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
7881 const struct loop *loop;
7882 rtx x, insn;
7883 struct iv_class *bl;
7884 int eliminate_p;
7885 basic_block where_bb;
7886 rtx where_insn;
7888 enum rtx_code code = GET_CODE (x);
7889 rtx reg = bl->biv->dest_reg;
7890 enum machine_mode mode = GET_MODE (reg);
7891 struct induction *v;
7892 rtx arg, tem;
7893 #ifdef HAVE_cc0
7894 rtx new;
7895 #endif
7896 int arg_operand;
7897 const char *fmt;
7898 int i, j;
7900 switch (code)
7902 case REG:
7903 /* If we haven't already been able to do something with this BIV,
7904 we can't eliminate it. */
7905 if (x == reg)
7906 return 0;
7907 return 1;
7909 case SET:
7910 /* If this sets the BIV, it is not a problem. */
7911 if (SET_DEST (x) == reg)
7912 return 1;
7914 /* If this is an insn that defines a giv, it is also ok because
7915 it will go away when the giv is reduced. */
7916 for (v = bl->giv; v; v = v->next_iv)
7917 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
7918 return 1;
7920 #ifdef HAVE_cc0
7921 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
7923 /* Can replace with any giv that was reduced and
7924 that has (MULT_VAL != 0) and (ADD_VAL == 0).
7925 Require a constant for MULT_VAL, so we know it's nonzero.
7926 ??? We disable this optimization to avoid potential
7927 overflows. */
7929 for (v = bl->giv; v; v = v->next_iv)
7930 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
7931 && v->add_val == const0_rtx
7932 && ! v->ignore && ! v->maybe_dead && v->always_computable
7933 && v->mode == mode
7934 && 0)
7936 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7937 continue;
7939 if (! eliminate_p)
7940 return 1;
7942 /* If the giv has the opposite direction of change,
7943 then reverse the comparison. */
7944 if (INTVAL (v->mult_val) < 0)
7945 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
7946 const0_rtx, v->new_reg);
7947 else
7948 new = v->new_reg;
7950 /* We can probably test that giv's reduced reg. */
7951 if (validate_change (insn, &SET_SRC (x), new, 0))
7952 return 1;
7955 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7956 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7957 Require a constant for MULT_VAL, so we know it's nonzero.
7958 ??? Do this only if ADD_VAL is a pointer to avoid a potential
7959 overflow problem. */
7961 for (v = bl->giv; v; v = v->next_iv)
7962 if (GET_CODE (v->mult_val) == CONST_INT
7963 && v->mult_val != const0_rtx
7964 && ! v->ignore && ! v->maybe_dead && v->always_computable
7965 && v->mode == mode
7966 && (GET_CODE (v->add_val) == SYMBOL_REF
7967 || GET_CODE (v->add_val) == LABEL_REF
7968 || GET_CODE (v->add_val) == CONST
7969 || (GET_CODE (v->add_val) == REG
7970 && REG_POINTER (v->add_val))))
7972 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7973 continue;
7975 if (! eliminate_p)
7976 return 1;
7978 /* If the giv has the opposite direction of change,
7979 then reverse the comparison. */
7980 if (INTVAL (v->mult_val) < 0)
7981 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7982 v->new_reg);
7983 else
7984 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7985 copy_rtx (v->add_val));
7987 /* Replace biv with the giv's reduced register. */
7988 update_reg_last_use (v->add_val, insn);
7989 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7990 return 1;
7992 /* Insn doesn't support that constant or invariant. Copy it
7993 into a register (it will be a loop invariant). */
7994 tem = gen_reg_rtx (GET_MODE (v->new_reg));
7996 loop_insn_emit_before (loop, 0, where_insn,
7997 gen_move_insn (tem,
7998 copy_rtx (v->add_val)));
8000 /* Substitute the new register for its invariant value in
8001 the compare expression. */
8002 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8003 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8004 return 1;
8007 #endif
8008 break;
8010 case COMPARE:
8011 case EQ: case NE:
8012 case GT: case GE: case GTU: case GEU:
8013 case LT: case LE: case LTU: case LEU:
8014 /* See if either argument is the biv. */
8015 if (XEXP (x, 0) == reg)
8016 arg = XEXP (x, 1), arg_operand = 1;
8017 else if (XEXP (x, 1) == reg)
8018 arg = XEXP (x, 0), arg_operand = 0;
8019 else
8020 break;
8022 if (CONSTANT_P (arg))
8024 /* First try to replace with any giv that has constant positive
8025 mult_val and constant add_val. We might be able to support
8026 negative mult_val, but it seems complex to do it in general. */
8028 for (v = bl->giv; v; v = v->next_iv)
8029 if (GET_CODE (v->mult_val) == CONST_INT
8030 && INTVAL (v->mult_val) > 0
8031 && (GET_CODE (v->add_val) == SYMBOL_REF
8032 || GET_CODE (v->add_val) == LABEL_REF
8033 || GET_CODE (v->add_val) == CONST
8034 || (GET_CODE (v->add_val) == REG
8035 && REG_POINTER (v->add_val)))
8036 && ! v->ignore && ! v->maybe_dead && v->always_computable
8037 && v->mode == mode)
8039 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8040 continue;
8042 if (! eliminate_p)
8043 return 1;
8045 /* Replace biv with the giv's reduced reg. */
8046 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8048 /* If all constants are actually constant integers and
8049 the derived constant can be directly placed in the COMPARE,
8050 do so. */
8051 if (GET_CODE (arg) == CONST_INT
8052 && GET_CODE (v->mult_val) == CONST_INT
8053 && GET_CODE (v->add_val) == CONST_INT)
8055 validate_change (insn, &XEXP (x, arg_operand),
8056 GEN_INT (INTVAL (arg)
8057 * INTVAL (v->mult_val)
8058 + INTVAL (v->add_val)), 1);
8060 else
8062 /* Otherwise, load it into a register. */
8063 tem = gen_reg_rtx (mode);
8064 loop_iv_add_mult_emit_before (loop, arg,
8065 v->mult_val, v->add_val,
8066 tem, where_bb, where_insn);
8067 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8069 if (apply_change_group ())
8070 return 1;
8073 /* Look for giv with positive constant mult_val and nonconst add_val.
8074 Insert insns to calculate new compare value.
8075 ??? Turn this off due to possible overflow. */
8077 for (v = bl->giv; v; v = v->next_iv)
8078 if (GET_CODE (v->mult_val) == CONST_INT
8079 && INTVAL (v->mult_val) > 0
8080 && ! v->ignore && ! v->maybe_dead && v->always_computable
8081 && v->mode == mode
8082 && 0)
8084 rtx tem;
8086 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8087 continue;
8089 if (! eliminate_p)
8090 return 1;
8092 tem = gen_reg_rtx (mode);
8094 /* Replace biv with giv's reduced register. */
8095 validate_change (insn, &XEXP (x, 1 - arg_operand),
8096 v->new_reg, 1);
8098 /* Compute value to compare against. */
8099 loop_iv_add_mult_emit_before (loop, arg,
8100 v->mult_val, v->add_val,
8101 tem, where_bb, where_insn);
8102 /* Use it in this insn. */
8103 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8104 if (apply_change_group ())
8105 return 1;
8108 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8110 if (loop_invariant_p (loop, arg) == 1)
8112 /* Look for giv with constant positive mult_val and nonconst
8113 add_val. Insert insns to compute new compare value.
8114 ??? Turn this off due to possible overflow. */
8116 for (v = bl->giv; v; v = v->next_iv)
8117 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8118 && ! v->ignore && ! v->maybe_dead && v->always_computable
8119 && v->mode == mode
8120 && 0)
8122 rtx tem;
8124 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8125 continue;
8127 if (! eliminate_p)
8128 return 1;
8130 tem = gen_reg_rtx (mode);
8132 /* Replace biv with giv's reduced register. */
8133 validate_change (insn, &XEXP (x, 1 - arg_operand),
8134 v->new_reg, 1);
8136 /* Compute value to compare against. */
8137 loop_iv_add_mult_emit_before (loop, arg,
8138 v->mult_val, v->add_val,
8139 tem, where_bb, where_insn);
8140 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8141 if (apply_change_group ())
8142 return 1;
8146 /* This code has problems. Basically, you can't know, when
8147 seeing if we will eliminate BL, whether a particular giv
8148 of ARG will be reduced. If it isn't going to be reduced,
8149 we can't eliminate BL. We can try forcing it to be reduced,
8150 but that can generate poor code.
8152 The problem is that the benefit of reducing TV, below, should
8153 be increased if BL can actually be eliminated, but this means
8154 we might have to do a topological sort of the order in which
8155 we try to process bivs. It doesn't seem worthwhile to do
8156 this sort of thing now. */
8158 #if 0
8159 /* Otherwise the reg compared with had better be a biv. */
8160 if (GET_CODE (arg) != REG
8161 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8162 return 0;
8164 /* Look for a pair of givs, one for each biv,
8165 with identical coefficients. */
8166 for (v = bl->giv; v; v = v->next_iv)
8168 struct induction *tv;
8170 if (v->ignore || v->maybe_dead || v->mode != mode)
8171 continue;
8173 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8174 tv = tv->next_iv)
8175 if (! tv->ignore && ! tv->maybe_dead
8176 && rtx_equal_p (tv->mult_val, v->mult_val)
8177 && rtx_equal_p (tv->add_val, v->add_val)
8178 && tv->mode == mode)
8180 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8181 continue;
8183 if (! eliminate_p)
8184 return 1;
8186 /* Replace biv with its giv's reduced reg. */
8187 XEXP (x, 1 - arg_operand) = v->new_reg;
8188 /* Replace other operand with the other giv's
8189 reduced reg. */
8190 XEXP (x, arg_operand) = tv->new_reg;
8191 return 1;
8194 #endif
8197 /* If we get here, the biv can't be eliminated. */
8198 return 0;
8200 case MEM:
8201 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8202 biv is used in it, since it will be replaced. */
8203 for (v = bl->giv; v; v = v->next_iv)
8204 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8205 return 1;
8206 break;
8208 default:
8209 break;
8212 /* See if any subexpression fails elimination. */
8213 fmt = GET_RTX_FORMAT (code);
8214 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8216 switch (fmt[i])
8218 case 'e':
8219 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8220 eliminate_p, where_bb, where_insn))
8221 return 0;
8222 break;
8224 case 'E':
8225 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8226 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8227 eliminate_p, where_bb, where_insn))
8228 return 0;
8229 break;
8233 return 1;
8236 /* Return nonzero if the last use of REG
8237 is in an insn following INSN in the same basic block. */
8239 static int
8240 last_use_this_basic_block (reg, insn)
8241 rtx reg;
8242 rtx insn;
8244 rtx n;
8245 for (n = insn;
8246 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8247 n = NEXT_INSN (n))
8249 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8250 return 1;
8252 return 0;
8255 /* Called via `note_stores' to record the initial value of a biv. Here we
8256 just record the location of the set and process it later. */
8258 static void
8259 record_initial (dest, set, data)
8260 rtx dest;
8261 rtx set;
8262 void *data ATTRIBUTE_UNUSED;
8264 struct loop_ivs *ivs = (struct loop_ivs *) data;
8265 struct iv_class *bl;
8267 if (GET_CODE (dest) != REG
8268 || REGNO (dest) >= ivs->n_regs
8269 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
8270 return;
8272 bl = REG_IV_CLASS (ivs, REGNO (dest));
8274 /* If this is the first set found, record it. */
8275 if (bl->init_insn == 0)
8277 bl->init_insn = note_insn;
8278 bl->init_set = set;
8282 /* If any of the registers in X are "old" and currently have a last use earlier
8283 than INSN, update them to have a last use of INSN. Their actual last use
8284 will be the previous insn but it will not have a valid uid_luid so we can't
8285 use it. X must be a source expression only. */
8287 static void
8288 update_reg_last_use (x, insn)
8289 rtx x;
8290 rtx insn;
8292 /* Check for the case where INSN does not have a valid luid. In this case,
8293 there is no need to modify the regno_last_uid, as this can only happen
8294 when code is inserted after the loop_end to set a pseudo's final value,
8295 and hence this insn will never be the last use of x.
8296 ??? This comment is not correct. See for example loop_givs_reduce.
8297 This may insert an insn before another new insn. */
8298 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8299 && INSN_UID (insn) < max_uid_for_loop
8300 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
8302 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8304 else
8306 register int i, j;
8307 register const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
8308 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8310 if (fmt[i] == 'e')
8311 update_reg_last_use (XEXP (x, i), insn);
8312 else if (fmt[i] == 'E')
8313 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8314 update_reg_last_use (XVECEXP (x, i, j), insn);
8319 /* Given an insn INSN and condition COND, return the condition in a
8320 canonical form to simplify testing by callers. Specifically:
8322 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
8323 (2) Both operands will be machine operands; (cc0) will have been replaced.
8324 (3) If an operand is a constant, it will be the second operand.
8325 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
8326 for GE, GEU, and LEU.
8328 If the condition cannot be understood, or is an inequality floating-point
8329 comparison which needs to be reversed, 0 will be returned.
8331 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
8333 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8334 insn used in locating the condition was found. If a replacement test
8335 of the condition is desired, it should be placed in front of that
8336 insn and we will be sure that the inputs are still valid.
8338 If WANT_REG is non-zero, we wish the condition to be relative to that
8339 register, if possible. Therefore, do not canonicalize the condition
8340 further. */
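/* Examples of the canonical form (illustrative): (LE x (const_int 4))
   becomes (LT x (const_int 5)); (GEU x (const_int 1)) becomes
   (GTU x (const_int 0)); (EQ (const_int 0) x) becomes
   (EQ x (const_int 0)). */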
8342 rtx
8343 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
8344 rtx insn;
8345 rtx cond;
8346 int reverse;
8347 rtx *earliest;
8348 rtx want_reg;
8350 enum rtx_code code;
8351 rtx prev = insn;
8352 rtx set;
8353 rtx tem;
8354 rtx op0, op1;
8355 int reverse_code = 0;
8356 enum machine_mode mode;
8358 code = GET_CODE (cond);
8359 mode = GET_MODE (cond);
8360 op0 = XEXP (cond, 0);
8361 op1 = XEXP (cond, 1);
8363 if (reverse)
8364 code = reversed_comparison_code (cond, insn);
8365 if (code == UNKNOWN)
8366 return 0;
8368 if (earliest)
8369 *earliest = insn;
8371 /* If we are comparing a register with zero, see if the register is set
8372 in the previous insn to a COMPARE or a comparison operation. Perform
8373 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
8374 in cse.c */
8376 while (GET_RTX_CLASS (code) == '<'
8377 && op1 == CONST0_RTX (GET_MODE (op0))
8378 && op0 != want_reg)
8380 /* Set non-zero when we find something of interest. */
8381 rtx x = 0;
8383 #ifdef HAVE_cc0
8384 /* If comparison with cc0, import actual comparison from compare
8385 insn. */
8386 if (op0 == cc0_rtx)
8388 if ((prev = prev_nonnote_insn (prev)) == 0
8389 || GET_CODE (prev) != INSN
8390 || (set = single_set (prev)) == 0
8391 || SET_DEST (set) != cc0_rtx)
8392 return 0;
8394 op0 = SET_SRC (set);
8395 op1 = CONST0_RTX (GET_MODE (op0));
8396 if (earliest)
8397 *earliest = prev;
8399 #endif
8401 /* If this is a COMPARE, pick up the two things being compared. */
8402 if (GET_CODE (op0) == COMPARE)
8404 op1 = XEXP (op0, 1);
8405 op0 = XEXP (op0, 0);
8406 continue;
8408 else if (GET_CODE (op0) != REG)
8409 break;
8411 /* Go back to the previous insn. Stop if it is not an INSN. We also
8412 stop if it isn't a single set or if it has a REG_INC note because
8413 we don't want to bother dealing with it. */
8415 if ((prev = prev_nonnote_insn (prev)) == 0
8416 || GET_CODE (prev) != INSN
8417 || FIND_REG_INC_NOTE (prev, 0))
8418 break;
8420 set = set_of (op0, prev);
8422 if (set
8423 && (GET_CODE (set) != SET
8424 || !rtx_equal_p (SET_DEST (set), op0)))
8425 break;
8427 /* If this is setting OP0, get what it sets it to if it looks
8428 relevant. */
8429 if (set)
8431 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
8433 /* ??? We may not combine comparisons done in a CCmode with
8434 comparisons not done in a CCmode. This is to aid targets
8435 like Alpha that have an IEEE compliant EQ instruction, and
8436 a non-IEEE compliant BEQ instruction. The use of CCmode is
8437 actually artificial, simply to prevent the combination, but
8438 should not affect other platforms.
8440 However, we must allow VOIDmode comparisons to match either
8441 CCmode or non-CCmode comparison, because some ports have
8442 modeless comparisons inside branch patterns.
8444 ??? This mode check should perhaps look more like the mode check
8445 in simplify_comparison in combine. */
8447 if ((GET_CODE (SET_SRC (set)) == COMPARE
8448 || (((code == NE
8449 || (code == LT
8450 && GET_MODE_CLASS (inner_mode) == MODE_INT
8451 && (GET_MODE_BITSIZE (inner_mode)
8452 <= HOST_BITS_PER_WIDE_INT)
8453 && (STORE_FLAG_VALUE
8454 & ((HOST_WIDE_INT) 1
8455 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8456 #ifdef FLOAT_STORE_FLAG_VALUE
8457 || (code == LT
8458 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8459 && (REAL_VALUE_NEGATIVE
8460 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8461 #endif
8463 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
8464 && (((GET_MODE_CLASS (mode) == MODE_CC)
8465 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8466 || mode == VOIDmode || inner_mode == VOIDmode))
8467 x = SET_SRC (set);
8468 else if (((code == EQ
8469 || (code == GE
8470 && (GET_MODE_BITSIZE (inner_mode)
8471 <= HOST_BITS_PER_WIDE_INT)
8472 && GET_MODE_CLASS (inner_mode) == MODE_INT
8473 && (STORE_FLAG_VALUE
8474 & ((HOST_WIDE_INT) 1
8475 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8476 #ifdef FLOAT_STORE_FLAG_VALUE
8477 || (code == GE
8478 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8479 && (REAL_VALUE_NEGATIVE
8480 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8481 #endif
8483 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
8484 && (((GET_MODE_CLASS (mode) == MODE_CC)
8485 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8486 || mode == VOIDmode || inner_mode == VOIDmode))
8489 reverse_code = 1;
8490 x = SET_SRC (set);
8492 else
8493 break;
8496 else if (reg_set_p (op0, prev))
8497 /* If this sets OP0, but not directly, we have to give up. */
8498 break;
8500 if (x)
8502 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
8503 code = GET_CODE (x);
8504 if (reverse_code)
8506 code = reversed_comparison_code (x, prev);
8507 if (code == UNKNOWN)
8508 return 0;
8509 reverse_code = 0;
8512 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
8513 if (earliest)
8514 *earliest = prev;
8518 /* If constant is first, put it last. */
8519 if (CONSTANT_P (op0))
8520 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
8522 /* If OP0 is the result of a comparison, we weren't able to find what
8523 was really being compared, so fail. */
8524 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
8525 return 0;
8527 /* Canonicalize any ordered comparison with integers involving equality
8528 if we can do computations in the relevant mode and we do not
8529 overflow. */
8531 if (GET_CODE (op1) == CONST_INT
8532 && GET_MODE (op0) != VOIDmode
8533 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
8535 HOST_WIDE_INT const_val = INTVAL (op1);
8536 unsigned HOST_WIDE_INT uconst_val = const_val;
8537 unsigned HOST_WIDE_INT max_val
8538 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
8540 switch (code)
8542 case LE:
8543 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
8544 code = LT, op1 = GEN_INT (const_val + 1);
8545 break;
8547 /* When cross-compiling, const_val might be sign-extended from
8548 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
8549 case GE:
8550 if ((HOST_WIDE_INT) (const_val & max_val)
8551 != (((HOST_WIDE_INT) 1
8552 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
8553 code = GT, op1 = GEN_INT (const_val - 1);
8554 break;
8556 case LEU:
8557 if (uconst_val < max_val)
8558 code = LTU, op1 = GEN_INT (uconst_val + 1);
8559 break;
8561 case GEU:
8562 if (uconst_val != 0)
8563 code = GTU, op1 = GEN_INT (uconst_val - 1);
8564 break;
8566 default:
8567 break;
8571 #ifdef HAVE_cc0
8572 /* Never return CC0; return zero instead. */
8573 if (op0 == cc0_rtx)
8574 return 0;
8575 #endif
8577 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
8580 /* Given a jump insn JUMP, return the condition that will cause it to branch
8581 to its JUMP_LABEL. If the condition cannot be understood, or is an
8582 inequality floating-point comparison which needs to be reversed, 0 will
8583 be returned.
8585 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8586 insn used in locating the condition was found. If a replacement test
8587 of the condition is desired, it should be placed in front of that
8588 insn and we will be sure that the inputs are still valid. */
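/* For example (illustrative RTL): for a jump whose pattern is

       (set (pc) (if_then_else (ne (reg 60) (const_int 0))
                               (label_ref 23)
                               (pc)))

   the condition returned is (ne (reg 60) (const_int 0)), canonicalized;
   if the label_ref and (pc) arms are interchanged, the condition is
   reversed first. */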
8590 rtx
8591 get_condition (jump, earliest)
8592 rtx jump;
8593 rtx *earliest;
8595 rtx cond;
8596 int reverse;
8597 rtx set;
8599 /* If this is not a standard conditional jump, we can't parse it. */
8600 if (GET_CODE (jump) != JUMP_INSN
8601 || ! any_condjump_p (jump))
8602 return 0;
8603 set = pc_set (jump);
8605 cond = XEXP (SET_SRC (set), 0);
8607 /* If this branches to JUMP_LABEL when the condition is false, reverse
8608 the condition. */
8609 reverse
8610 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
8611 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
8613 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
8616 /* Similar to above routine, except that we also put an invariant last
8617 unless both operands are invariants. */
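/* E.g. (illustrative): if X compares an invariant register against a
   biv as (gt (reg invariant) (reg biv)), the swapped form
   (lt (reg biv) (reg invariant)) is returned, so callers always see
   the variant operand first. */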
8619 rtx
8620 get_condition_for_loop (loop, x)
8621 const struct loop *loop;
8622 rtx x;
8624 rtx comparison = get_condition (x, (rtx*)0);
8626 if (comparison == 0
8627 || ! loop_invariant_p (loop, XEXP (comparison, 0))
8628 || loop_invariant_p (loop, XEXP (comparison, 1)))
8629 return comparison;
8631 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
8632 XEXP (comparison, 1), XEXP (comparison, 0));
8635 /* Scan the function and determine whether it has indirect (computed) jumps.
8637 This is taken mostly from flow.c; similar code exists elsewhere
8638 in the compiler. It may be useful to put this into rtlanal.c. */
8639 static int
8640 indirect_jump_in_function_p (start)
8641 rtx start;
8643 rtx insn;
8645 for (insn = start; insn; insn = NEXT_INSN (insn))
8646 if (computed_jump_p (insn))
8647 return 1;
8649 return 0;
8652 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
8653 documentation for LOOP_MEMS for the definition of `appropriate'.
8654 This function is called from prescan_loop via for_each_rtx. */
8656 static int
8657 insert_loop_mem (mem, data)
8658 rtx *mem;
8659 void *data ATTRIBUTE_UNUSED;
8661 struct loop_info *loop_info = data;
8662 int i;
8663 rtx m = *mem;
8665 if (m == NULL_RTX)
8666 return 0;
8668 switch (GET_CODE (m))
8670 case MEM:
8671 break;
8673 case CLOBBER:
8674 /* We're not interested in MEMs that are only clobbered. */
8675 return -1;
8677 case CONST_DOUBLE:
8678 /* We're not interested in the MEM associated with a
8679 CONST_DOUBLE, so there's no need to traverse into this. */
8680 return -1;
8682 case EXPR_LIST:
8683 /* We're not interested in any MEMs that only appear in notes. */
8684 return -1;
8686 default:
8687 /* This is not a MEM. */
8688 return 0;
8691 /* See if we've already seen this MEM. */
8692 for (i = 0; i < loop_info->mems_idx; ++i)
8693 if (rtx_equal_p (m, loop_info->mems[i].mem))
8695 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
8696 /* The modes of the two memory accesses are different. If
8697 this happens, something tricky is going on, and we just
8698 don't optimize accesses to this MEM. */
8699 loop_info->mems[i].optimize = 0;
8701 return 0;
8704 /* Resize the array, if necessary. */
8705 if (loop_info->mems_idx == loop_info->mems_allocated)
8707 if (loop_info->mems_allocated != 0)
8708 loop_info->mems_allocated *= 2;
8709 else
8710 loop_info->mems_allocated = 32;
8712 loop_info->mems = (loop_mem_info *)
8713 xrealloc (loop_info->mems,
8714 loop_info->mems_allocated * sizeof (loop_mem_info));
8717 /* Actually insert the MEM. */
8718 loop_info->mems[loop_info->mems_idx].mem = m;
8719 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
8720 because we can't put it in a register. We still store it in the
8721 table, though, so that if we see the same address later, but in a
8722 non-BLK mode, we'll not think we can optimize it at that point. */
8723 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
8724 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
8725 ++loop_info->mems_idx;
8727 return 0;
8731 /* Allocate REGS->ARRAY or reallocate it if it is too small.
8733 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
8734 register that is modified by an insn between FROM and TO. If the
8735 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
8736 more, stop incrementing it, to avoid overflow.
8738 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
8739 register I is used, if it is only used once. Otherwise, it is set
8740 to 0 (for no uses) or const0_rtx for more than one use. This
8741 parameter may be zero, in which case this processing is not done.
8743 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
8744 optimize register I. */
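/* For instance (illustrative): after scanning, a pseudo that is set
   once and used once has set_in_loop == 1 and single_usage pointing
   at the using insn; one that is used more than once has
   single_usage == const0_rtx. */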
8746 static void
8747 loop_regs_scan (loop, extra_size)
8748 const struct loop *loop;
8749 int extra_size;
8751 struct loop_regs *regs = LOOP_REGS (loop);
8752 int old_nregs;
8753 /* last_set[n] is nonzero iff reg n has been set in the current
8754 basic block. In that case, it is the insn that last set reg n. */
8755 rtx *last_set;
8756 rtx insn;
8757 int i;
8759 old_nregs = regs->num;
8760 regs->num = max_reg_num ();
8762 /* Grow the regs array if not allocated or too small. */
8763 if (regs->num >= regs->size)
8765 regs->size = regs->num + extra_size;
8767 regs->array = (struct loop_reg *)
8768 xrealloc (regs->array, regs->size * sizeof (*regs->array));
8770 /* Zero the new elements. */
8771 memset (regs->array + old_nregs, 0,
8772 (regs->size - old_nregs) * sizeof (*regs->array));
8775 /* Clear previously scanned fields but do not clear n_times_set. */
8776 for (i = 0; i < old_nregs; i++)
8778 regs->array[i].set_in_loop = 0;
8779 regs->array[i].may_not_optimize = 0;
8780 regs->array[i].single_usage = NULL_RTX;
8783 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
8785 /* Scan the loop, recording register usage. */
8786 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8787 insn = NEXT_INSN (insn))
8789 if (INSN_P (insn))
8791 /* Record registers that have exactly one use. */
8792 find_single_use_in_loop (regs, insn, PATTERN (insn));
8794 /* Include uses in REG_EQUAL notes. */
8795 if (REG_NOTES (insn))
8796 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
8798 if (GET_CODE (PATTERN (insn)) == SET
8799 || GET_CODE (PATTERN (insn)) == CLOBBER)
8800 count_one_set (regs, insn, PATTERN (insn), last_set);
8801 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8803 register int i;
8804 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8805 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
8806 last_set);
8810 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
8811 memset (last_set, 0, regs->num * sizeof (rtx));
8814 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8816 regs->array[i].may_not_optimize = 1;
8817 regs->array[i].set_in_loop = 1;
8820 #ifdef AVOID_CCMODE_COPIES
8821 /* Don't try to move insns which set CC registers if we should not
8822 create CCmode register copies. */
8823 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
8824 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
8825 regs->array[i].may_not_optimize = 1;
8826 #endif
8828 /* Set regs->array[I].n_times_set for the new registers. */
8829 for (i = old_nregs; i < regs->num; i++)
8830 regs->array[i].n_times_set = regs->array[i].set_in_loop;
8832 free (last_set);
8835 /* Returns the number of real INSNs in the LOOP. */
8837 static int
8838 count_insns_in_loop (loop)
8839 const struct loop *loop;
8841 int count = 0;
8842 rtx insn;
8844 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8845 insn = NEXT_INSN (insn))
8846 if (INSN_P (insn))
8847 ++count;
8849 return count;
8852 /* Move MEMs into registers for the duration of the loop. */
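/* Illustrative sketch of the transformation (not actual output):

       for (i = 0; i < n; i++)
         x += g;

   where G is a loop-invariant memory location, becomes roughly

       r = g;
       for (i = 0; i < n; i++)
         x += r;

   with a fresh pseudo R as the shadow register, plus a store of R
   back to G after the loop when G is also written inside it. */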
8854 static void
8855 load_mems (loop)
8856 const struct loop *loop;
8858 struct loop_info *loop_info = LOOP_INFO (loop);
8859 struct loop_regs *regs = LOOP_REGS (loop);
8860 int maybe_never = 0;
8861 int i;
8862 rtx p, prev_ebb_head;
8863 rtx label = NULL_RTX;
8864 rtx end_label;
8865 /* Nonzero if the next instruction may never be executed. */
8866 int next_maybe_never = 0;
8867 unsigned int last_max_reg = max_reg_num ();
8869 if (loop_info->mems_idx == 0)
8870 return;
8872 /* We cannot use next_label here because it skips over normal insns. */
8873 end_label = next_nonnote_insn (loop->end);
8874 if (end_label && GET_CODE (end_label) != CODE_LABEL)
8875 end_label = NULL_RTX;
8877 /* Check to see if it's possible that some instructions in the loop are
8878 never executed. Also check if there is a goto out of the loop other
8879 than right after the end of the loop. */
8880 for (p = next_insn_in_loop (loop, loop->scan_start);
8881 p != NULL_RTX;
8882 p = next_insn_in_loop (loop, p))
8884 if (GET_CODE (p) == CODE_LABEL)
8885 maybe_never = 1;
8886 else if (GET_CODE (p) == JUMP_INSN
8887 /* If we enter the loop in the middle, and scan
8888 around to the beginning, don't set maybe_never
8889 for that. This must be an unconditional jump,
8890 otherwise the code at the top of the loop might
8891 never be executed. Unconditional jumps are
8892 followed by a barrier and then the loop end. */
8893 && ! (GET_CODE (p) == JUMP_INSN
8894 && JUMP_LABEL (p) == loop->top
8895 && NEXT_INSN (NEXT_INSN (p)) == loop->end
8896 && any_uncondjump_p (p)))
8898 /* If this is a jump outside of the loop but not right
8899 after the end of the loop, we would have to emit new fixup
8900 sequences for each such label. */
8901 if (/* If we can't tell where control might go when this
8902 JUMP_INSN is executed, we must be conservative. */
8903 !JUMP_LABEL (p)
8904 || (JUMP_LABEL (p) != end_label
8905 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
8906 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
8907 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
8908 return;
8910 if (!any_condjump_p (p))
8911 /* Something complicated. */
8912 maybe_never = 1;
8913 else
8914 /* If there are any more instructions in the loop, they
8915 might not be reached. */
8916 next_maybe_never = 1;
8918 else if (next_maybe_never)
8919 maybe_never = 1;
8922 /* Find start of the extended basic block that enters the loop. */
8923 for (p = loop->start;
8924 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
8925 p = PREV_INSN (p))
8927 prev_ebb_head = p;
8929 cselib_init ();
8931 /* Build table of mems that get set to constant values before the
8932 loop. */
8933 for (; p != loop->start; p = NEXT_INSN (p))
8934 cselib_process_insn (p);
8936 /* Actually move the MEMs. */
8937 for (i = 0; i < loop_info->mems_idx; ++i)
8939 regset_head load_copies;
8940 regset_head store_copies;
8941 int written = 0;
8942 rtx reg;
8943 rtx mem = loop_info->mems[i].mem;
8944 rtx mem_list_entry;
8946 if (MEM_VOLATILE_P (mem)
8947 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
8948 /* There's no telling whether or not MEM is modified. */
8949 loop_info->mems[i].optimize = 0;
8951 /* Go through the MEMs written to in the loop to see if this
8952 one is aliased by one of them. */
8953 mem_list_entry = loop_info->store_mems;
8954 while (mem_list_entry)
8956 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
8957 written = 1;
8958 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
8959 mem, rtx_varies_p))
8961 /* MEM is indeed aliased by this store. */
8962 loop_info->mems[i].optimize = 0;
8963 break;
8965 mem_list_entry = XEXP (mem_list_entry, 1);
8968 if (flag_float_store && written
8969 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
8970 loop_info->mems[i].optimize = 0;
8972 /* If this MEM is written to, we must be sure that there
8973 are no reads from another MEM that aliases this one. */
8974 if (loop_info->mems[i].optimize && written)
8976 int j;
8978 for (j = 0; j < loop_info->mems_idx; ++j)
8980 if (j == i)
8981 continue;
8982 else if (true_dependence (mem,
8983 VOIDmode,
8984 loop_info->mems[j].mem,
8985 rtx_varies_p))
8987 /* It's not safe to hoist loop_info->mems[i] out of
8988 the loop because writes to it might not be
8989 seen by reads from loop_info->mems[j]. */
8990 loop_info->mems[i].optimize = 0;
8991 break;
8996 if (maybe_never && may_trap_p (mem))
8997 /* We can't access the MEM outside the loop; it might
8998 cause a trap that wouldn't have happened otherwise. */
8999 loop_info->mems[i].optimize = 0;
9001 if (!loop_info->mems[i].optimize)
9002 /* We thought we were going to lift this MEM out of the
9003 loop, but later discovered that we could not. */
9004 continue;
9006 INIT_REG_SET (&load_copies);
9007 INIT_REG_SET (&store_copies);
9009 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9010 order to keep scan_loop from moving stores to this MEM
9011 out of the loop just because this REG is neither a
9012 user-variable nor used in the loop test. */
9013 reg = gen_reg_rtx (GET_MODE (mem));
9014 REG_USERVAR_P (reg) = 1;
9015 loop_info->mems[i].reg = reg;
9017 /* Now, replace all references to the MEM with the
9018 corresponding pseudos. */
9019 maybe_never = 0;
9020 for (p = next_insn_in_loop (loop, loop->scan_start);
9021 p != NULL_RTX;
9022 p = next_insn_in_loop (loop, p))
9024 if (INSN_P (p))
9026 rtx set;
9028 set = single_set (p);
9030 /* See if this copies the mem into a register that isn't
9031 modified afterwards. We'll try to do copy propagation
9032 a little further on. */
9033 if (set
9034 /* @@@ This test is _way_ too conservative. */
9035 && ! maybe_never
9036 && GET_CODE (SET_DEST (set)) == REG
9037 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9038 && REGNO (SET_DEST (set)) < last_max_reg
9039 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9040 && rtx_equal_p (SET_SRC (set), mem))
9041 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9043 /* See if this copies the mem from a register that isn't
9044 modified afterwards. We'll try to remove the
9045 redundant copy later on by doing a little register
9046 renaming and copy propagation. This will help
9047 to untangle things for the BIV detection code. */
9048 if (set
9049 && ! maybe_never
9050 && GET_CODE (SET_SRC (set)) == REG
9051 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9052 && REGNO (SET_SRC (set)) < last_max_reg
9053 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9054 && rtx_equal_p (SET_DEST (set), mem))
9055 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9057 /* Replace the memory reference with the shadow register. */
9058 replace_loop_mems (p, loop_info->mems[i].mem,
9059 loop_info->mems[i].reg);
9062 if (GET_CODE (p) == CODE_LABEL
9063 || GET_CODE (p) == JUMP_INSN)
9064 maybe_never = 1;
9067 if (! apply_change_group ())
9068 /* We couldn't replace all occurrences of the MEM. */
9069 loop_info->mems[i].optimize = 0;
9070 else
9072 /* Load the memory immediately before LOOP->START, which is
9073 the NOTE_LOOP_BEG. */
9074 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9075 rtx set;
9076 rtx best = mem;
9077 int j;
9078 struct elt_loc_list *const_equiv = 0;
9080 if (e)
9082 struct elt_loc_list *equiv;
9083 struct elt_loc_list *best_equiv = 0;
9084 for (equiv = e->locs; equiv; equiv = equiv->next)
9086 if (CONSTANT_P (equiv->loc))
9087 const_equiv = equiv;
9088 else if (GET_CODE (equiv->loc) == REG
9089 /* Extending hard register lifetimes causes a crash
9090 on SRC targets. Doing so on non-SRC targets is
9091 probably also not a good idea, since we most
9092 probably have a pseudo-register equivalence as
9093 well. */
9094 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9095 best_equiv = equiv;
9097 /* Use the constant equivalence if that is cheap enough. */
9098 if (! best_equiv)
9099 best_equiv = const_equiv;
9100 else if (const_equiv
9101 && (rtx_cost (const_equiv->loc, SET)
9102 <= rtx_cost (best_equiv->loc, SET)))
9104 best_equiv = const_equiv;
9105 const_equiv = 0;
9108 /* If best_equiv is nonzero, we know that MEM is set to a
9109 constant or register before the loop. We will use this
9110 knowledge to initialize the shadow register with that
9111 constant or reg rather than by loading from MEM. */
9112 if (best_equiv)
9113 best = copy_rtx (best_equiv->loc);
9116 set = gen_move_insn (reg, best);
9117 set = loop_insn_hoist (loop, set);
9118 if (REG_P (best))
9120 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9121 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9123 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9124 break;
9128 if (const_equiv)
9129 REG_NOTES (set) = gen_rtx_EXPR_LIST (REG_EQUAL,
9130 copy_rtx (const_equiv->loc),
9131 REG_NOTES (set));
9133 if (written)
9135 if (label == NULL_RTX)
9137 label = gen_label_rtx ();
9138 emit_label_after (label, loop->end);
9141 /* Store the memory immediately after END, which is
9142 the NOTE_LOOP_END. */
9143 set = gen_move_insn (copy_rtx (mem), reg);
9144 loop_insn_emit_after (loop, 0, label, set);
9147 if (loop_dump_stream)
9149 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9150 REGNO (reg), (written ? "r/w" : "r/o"));
9151 print_rtl (loop_dump_stream, mem);
9152 fputc ('\n', loop_dump_stream);
9155 /* Attempt a bit of copy propagation. This helps untangle the
9156 data flow, and enables {basic,general}_induction_var to find
9157 more bivs/givs. */
9158 EXECUTE_IF_SET_IN_REG_SET
9159 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9161 try_copy_prop (loop, reg, j);
9163 CLEAR_REG_SET (&load_copies);
9165 EXECUTE_IF_SET_IN_REG_SET
9166 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9168 try_swap_copy_prop (loop, reg, j);
9170 CLEAR_REG_SET (&store_copies);
9174 if (label != NULL_RTX && end_label != NULL_RTX)
9176 /* Now, we need to replace all references to the previous exit
9177 label with the new one. */
9178 rtx_pair rr;
9179 rr.r1 = end_label;
9180 rr.r2 = label;
9182 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9184 for_each_rtx (&p, replace_label, &rr);
9186 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9187 field. This is not handled by for_each_rtx because it doesn't
9188 handle unprinted ('0') fields. We need to update JUMP_LABEL
9189 because the immediately following unroll pass will use it.
9190 replace_label would not work anyway, because that only handles
9191 LABEL_REFs. */
9192 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9193 JUMP_LABEL (p) = label;
9197 cselib_finish ();
9200 /* For communication between note_reg_stored and its caller. */
9201 struct note_reg_stored_arg
9203 int set_seen;
9204 rtx reg;
9207 /* Called via note_stores, record in SET_SEEN whether X, which is written,
9208 is equal to ARG. */
9209 static void
9210 note_reg_stored (x, setter, arg)
9211 rtx x, setter ATTRIBUTE_UNUSED;
9212 void *arg;
9214 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9215 if (t->reg == x)
9216 t->set_seen = 1;
9219 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9220 There must be exactly one insn that sets this pseudo; it will be
9221 deleted if all replacements succeed and we can prove that the register
9222 is not used after the loop. */
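/* E.g. (illustrative): given inside the loop

       (set (reg 70) (reg 65))      <- the single initializing insn
       ... later uses of (reg 70) ...

   with REGNO == 70 and REPLACEMENT == (reg 65), every later use of
   (reg 70) in the extended basic block is rewritten as (reg 65), and
   the initializing insn is deleted when all replacements succeed and
   (reg 70) is not used after the loop. */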
9224 static void
9225 try_copy_prop (loop, replacement, regno)
9226 const struct loop *loop;
9227 rtx replacement;
9228 unsigned int regno;
9230 /* This is the reg that we are copying from. */
9231 rtx reg_rtx = regno_reg_rtx[regno];
9232 rtx init_insn = 0;
9233 rtx insn;
9234 /* These help keep track of whether we replaced all uses of the reg. */
9235 int replaced_last = 0;
9236 int store_is_first = 0;
9238 for (insn = next_insn_in_loop (loop, loop->scan_start);
9239 insn != NULL_RTX;
9240 insn = next_insn_in_loop (loop, insn))
9242 rtx set;
9244 /* Only substitute within one extended basic block from the initializing
9245 insn. */
9246 if (GET_CODE (insn) == CODE_LABEL && init_insn)
9247 break;
9249 if (! INSN_P (insn))
9250 continue;
9252 /* Is this the initializing insn? */
9253 set = single_set (insn);
9254 if (set
9255 && GET_CODE (SET_DEST (set)) == REG
9256 && REGNO (SET_DEST (set)) == regno)
9258 if (init_insn)
9259 abort ();
9261 init_insn = insn;
9262 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
9263 store_is_first = 1;
9266 /* Only substitute after seeing the initializing insn. */
9267 if (init_insn && insn != init_insn)
9269 struct note_reg_stored_arg arg;
9271 replace_loop_regs (insn, reg_rtx, replacement);
9272 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
9273 replaced_last = 1;
9275 /* Stop replacing when REPLACEMENT is modified. */
9276 arg.reg = replacement;
9277 arg.set_seen = 0;
9278 note_stores (PATTERN (insn), note_reg_stored, &arg);
9279 if (arg.set_seen)
9281 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
9283 /* It is possible that we've turned a previously valid REG_EQUAL note
9284 invalid: we changed REGNO to REPLACEMENT and, unlike REGNO,
9285 REPLACEMENT is modified here, so the note's meaning changes. */
9286 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
9287 remove_note (insn, note);
9288 break;
9292 if (! init_insn)
9293 abort ();
9294 if (apply_change_group ())
9296 if (loop_dump_stream)
9297 fprintf (loop_dump_stream, " Replaced reg %d", regno);
9298 if (store_is_first && replaced_last)
9300 rtx first;
9301 rtx retval_note;
9303 /* Assume we're just deleting INIT_INSN. */
9304 first = init_insn;
9305 /* Look for REG_RETVAL note. If we're deleting the end of
9306 the libcall sequence, the whole sequence can go. */
9307 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
9308 /* If we found a REG_RETVAL note, find the first instruction
9309 in the sequence. */
9310 if (retval_note)
9311 first = XEXP (retval_note, 0);
9313 /* Delete the instructions. */
9314 loop_delete_insns (first, init_insn);
9316 if (loop_dump_stream)
9317 fprintf (loop_dump_stream, ".\n");
/* Replace all the instructions from FIRST up to and including LAST
   with NOTE_INSN_DELETED notes.  */

static void
loop_delete_insns (first, last)
     rtx first;
     rtx last;
{
  while (1)
    {
      PUT_CODE (first, NOTE);
      NOTE_LINE_NUMBER (first) = NOTE_INSN_DELETED;
      if (loop_dump_stream)
	fprintf (loop_dump_stream, ", deleting init_insn (%d)",
		 INSN_UID (first));

      /* If this was the LAST instruction we're supposed to delete,
	 we're done.  */
      if (first == last)
	break;

      first = NEXT_INSN (first);
    }
}

/* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
   loop LOOP if the order of the sets of these registers can be
   swapped.  There must be exactly one insn within the loop that sets
   this pseudo, followed immediately by a move insn that copies REGNO
   into REPLACEMENT.  */
static void
try_swap_copy_prop (loop, replacement, regno)
     const struct loop *loop;
     rtx replacement;
     unsigned int regno;
{
  rtx insn;
  rtx set = NULL_RTX;
  unsigned int new_regno;

  new_regno = REGNO (replacement);

  for (insn = next_insn_in_loop (loop, loop->scan_start);
       insn != NULL_RTX;
       insn = next_insn_in_loop (loop, insn))
    {
      /* Search for the insn that copies REGNO to NEW_REGNO.  */
      if (INSN_P (insn)
	  && (set = single_set (insn))
	  && GET_CODE (SET_DEST (set)) == REG
	  && REGNO (SET_DEST (set)) == new_regno
	  && GET_CODE (SET_SRC (set)) == REG
	  && REGNO (SET_SRC (set)) == regno)
	break;
    }

  if (insn != NULL_RTX)
    {
      rtx prev_insn;
      rtx prev_set;

      /* Some DEF-USE info would come in handy here to make this
	 function more general.  For now, just check the previous insn
	 which is the most likely candidate for setting REGNO.  */

      prev_insn = PREV_INSN (insn);

      /* Check PREV_INSN, not INSN; INSN has already passed INSN_P in
	 the search loop above, but PREV_INSN may be a note.  */
      if (INSN_P (prev_insn)
	  && (prev_set = single_set (prev_insn))
	  && GET_CODE (SET_DEST (prev_set)) == REG
	  && REGNO (SET_DEST (prev_set)) == regno)
	{
	  /* We have:
	     (set (reg regno) (expr))
	     (set (reg new_regno) (reg regno))

	     so try converting this to:
	     (set (reg new_regno) (expr))
	     (set (reg regno) (reg new_regno))

	     The former construct is often generated when a global
	     variable used for an induction variable is shadowed by a
	     register (NEW_REGNO).  The latter construct improves the
	     chances of GIV replacement and BIV elimination.  */

	  validate_change (prev_insn, &SET_DEST (prev_set),
			   replacement, 1);
	  validate_change (insn, &SET_DEST (set),
			   SET_SRC (set), 1);
	  validate_change (insn, &SET_SRC (set),
			   replacement, 1);

	  if (apply_change_group ())
	    {
	      if (loop_dump_stream)
		fprintf (loop_dump_stream,
			 "  Swapped set of reg %d at %d with reg %d at %d.\n",
			 regno, INSN_UID (insn),
			 new_regno, INSN_UID (prev_insn));

	      /* Update first use of REGNO.  */
	      if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
		REGNO_FIRST_UID (regno) = INSN_UID (insn);

	      /* Now perform copy propagation to hopefully
		 remove all uses of REGNO within the loop.  */
	      try_copy_prop (loop, replacement, regno);
	    }
	}
    }
}

/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually a pointer
   to a structure describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  loop_replace_args *args = (loop_replace_args *) data;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  if (! rtx_equal_p (args->match, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  /* Actually replace the MEM.  */
  validate_change (args->insn, mem, args->replacement, 1);

  return 0;
}

/* Queue replacement of every occurrence of the memory reference MEM
   in INSN with the pseudo register REG.  */

static void
replace_loop_mems (insn, mem, reg)
     rtx insn;
     rtx mem;
     rtx reg;
{
  loop_replace_args args;

  args.insn = insn;
  args.match = mem;
  args.replacement = reg;

  for_each_rtx (&insn, replace_loop_mem, &args);
}

/* Replace one register with another.  Called through for_each_rtx; PX points
   to the rtx being scanned.  DATA is actually a pointer to
   a structure of arguments.  */

static int
replace_loop_reg (px, data)
     rtx *px;
     void *data;
{
  rtx x = *px;
  loop_replace_args *args = (loop_replace_args *) data;

  if (x == NULL_RTX)
    return 0;

  if (x == args->match)
    validate_change (args->insn, px, args->replacement, 1);

  return 0;
}

/* Queue replacement of every occurrence of the register REG in INSN
   with REPLACEMENT.  */

static void
replace_loop_regs (insn, reg, replacement)
     rtx insn;
     rtx reg;
     rtx replacement;
{
  loop_replace_args args;

  args.insn = insn;
  args.match = reg;
  args.replacement = replacement;

  for_each_rtx (&insn, replace_loop_reg, &args);
}
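
/* Note that replace_loop_reg and replace_loop_mem pass a nonzero
   IN_GROUP argument to validate_change, so the replacements are only
   queued; nothing is actually modified until a caller such as
   try_copy_prop invokes apply_change_group ().  */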

/* Replace occurrences of the old exit label for the loop with the new
   one.  DATA is an rtx_pair containing the old and new labels,
   respectively.  */
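
/* A typical invocation looks like the following sketch (the variable
   names are illustrative; load_mems uses this callback in essentially
   this way when retargeting the loop exit label):

	rtx_pair rr;

	rr.r1 = old_label;
	rr.r2 = new_label;
	for_each_rtx (&insn, replace_label, &rr);
*/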

static int
replace_label (x, data)
     rtx *x;
     void *data;
{
  rtx l = *x;
  rtx old_label = ((rtx_pair *) data)->r1;
  rtx new_label = ((rtx_pair *) data)->r2;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) != LABEL_REF)
    return 0;

  if (XEXP (l, 0) != old_label)
    return 0;

  XEXP (l, 0) = new_label;
  ++LABEL_NUSES (new_label);
  --LABEL_NUSES (old_label);

  return 0;
}

/* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
   (ignored in the interim).  */

static rtx
loop_insn_emit_after (loop, where_bb, where_insn, pattern)
     const struct loop *loop ATTRIBUTE_UNUSED;
     basic_block where_bb ATTRIBUTE_UNUSED;
     rtx where_insn;
     rtx pattern;
{
  return emit_insn_after (pattern, where_insn);
}

/* If WHERE_INSN is non-zero, emit insn for PATTERN before WHERE_INSN
   in basic block WHERE_BB (ignored in the interim) within the loop;
   otherwise hoist PATTERN into the loop pre-header.  */

rtx
loop_insn_emit_before (loop, where_bb, where_insn, pattern)
     const struct loop *loop;
     basic_block where_bb ATTRIBUTE_UNUSED;
     rtx where_insn;
     rtx pattern;
{
  if (! where_insn)
    return loop_insn_hoist (loop, pattern);
  return emit_insn_before (pattern, where_insn);
}

/* Emit call insn for PATTERN before WHERE_INSN in basic block
   WHERE_BB (ignored in the interim) within the loop.  */

static rtx
loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
     const struct loop *loop ATTRIBUTE_UNUSED;
     basic_block where_bb ATTRIBUTE_UNUSED;
     rtx where_insn;
     rtx pattern;
{
  return emit_call_insn_before (pattern, where_insn);
}

/* Hoist insn for PATTERN into the loop pre-header.  */

rtx
loop_insn_hoist (loop, pattern)
     const struct loop *loop;
     rtx pattern;
{
  return loop_insn_emit_before (loop, 0, loop->start, pattern);
}

/* Hoist call insn for PATTERN into the loop pre-header.  */

static rtx
loop_call_insn_hoist (loop, pattern)
     const struct loop *loop;
     rtx pattern;
{
  return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
}

/* Sink insn for PATTERN after the loop end.  */

rtx
loop_insn_sink (loop, pattern)
     const struct loop *loop;
     rtx pattern;
{
  return loop_insn_emit_before (loop, 0, loop->sink, pattern);
}

/* If the loop has multiple exits, emit insn for PATTERN before the
   loop to ensure that it will always be executed no matter how the
   loop exits.  Otherwise, emit the insn for PATTERN after the loop,
   since this is slightly more efficient.  */
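
/* Usage sketch (hypothetical; FINAL_REG and FINAL_VALUE are made-up
   names): emitting a move whose effect must be visible on every path
   leaving the loop:

	loop_insn_sink_or_swim (loop,
				gen_move_insn (final_reg, final_value));
*/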

static rtx
loop_insn_sink_or_swim (loop, pattern)
     const struct loop *loop;
     rtx pattern;
{
  if (loop->exit_count)
    return loop_insn_hoist (loop, pattern);
  else
    return loop_insn_sink (loop, pattern);
}

/* Dump the IV classes recorded for LOOP to FILE.  VERBOSE is passed
   on to loop_iv_class_dump.  */

static void
loop_ivs_dump (loop, file, verbose)
     const struct loop *loop;
     FILE *file;
     int verbose;
{
  struct iv_class *bl;
  int iv_num = 0;

  if (! loop || ! file)
    return;

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    iv_num++;

  fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);

  for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
    {
      loop_iv_class_dump (bl, file, verbose);
      fputc ('\n', file);
    }
}

/* Dump the IV class BL to FILE: its initial and final values, its
   increments, and its givs.  */

static void
loop_iv_class_dump (bl, file, verbose)
     const struct iv_class *bl;
     FILE *file;
     int verbose ATTRIBUTE_UNUSED;
{
  struct induction *v;
  rtx incr;
  int i;

  if (! bl || ! file)
    return;

  fprintf (file, "IV class for reg %d, benefit %d\n",
	   bl->regno, bl->total_benefit);

  fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
  if (bl->initial_value)
    {
      fprintf (file, ", init val: ");
      print_simple_rtl (file, bl->initial_value);
    }
  if (bl->initial_test)
    {
      fprintf (file, ", init test: ");
      print_simple_rtl (file, bl->initial_test);
    }
  fputc ('\n', file);

  if (bl->final_value)
    {
      fprintf (file, " Final val: ");
      print_simple_rtl (file, bl->final_value);
      fputc ('\n', file);
    }

  if ((incr = biv_total_increment (bl)))
    {
      fprintf (file, " Total increment: ");
      print_simple_rtl (file, incr);
      fputc ('\n', file);
    }

  /* List the increments.  */
  for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
      print_simple_rtl (file, v->add_val);
      fputc ('\n', file);
    }

  /* List the givs.  */
  for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
    {
      fprintf (file, " Giv%d: insn %d, benefit %d, ",
	       i, INSN_UID (v->insn), v->benefit);
      if (v->giv_type == DEST_ADDR)
	print_simple_rtl (file, v->mem);
      else
	print_simple_rtl (file, single_set (v->insn));
      fputc ('\n', file);
    }
}
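
/* Illustrative (not verbatim) output from loop_iv_class_dump; the
   numbers are made up:

	IV class for reg 64, benefit 6
	 Init insn 23, init val: (const_int 0)
	 Total increment: (const_int 4)
	 Inc0: insn 42, incr: (const_int 4)
	 Giv0: insn 45, benefit 6, (mem:SI (reg 64))
*/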

/* Dump the biv V to FILE.  */

static void
loop_biv_dump (v, file, verbose)
     const struct induction *v;
     FILE *file;
     int verbose;
{
  if (! v || ! file)
    return;

  fprintf (file,
	   "Biv %d: insn %d",
	   REGNO (v->dest_reg), INSN_UID (v->insn));
  fprintf (file, " const ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}

/* Dump the giv V to FILE.  */

static void
loop_giv_dump (v, file, verbose)
     const struct induction *v;
     FILE *file;
     int verbose;
{
  if (! v || ! file)
    return;

  if (v->giv_type == DEST_REG)
    fprintf (file, "Giv %d: insn %d",
	     REGNO (v->dest_reg), INSN_UID (v->insn));
  else
    fprintf (file, "Dest address: insn %d",
	     INSN_UID (v->insn));

  fprintf (file, " src reg %d benefit %d",
	   REGNO (v->src_reg), v->benefit);
  fprintf (file, " lifetime %d",
	   v->lifetime);

  if (v->replaceable)
    fprintf (file, " replaceable");

  if (v->no_const_addval)
    fprintf (file, " ncav");

  if (v->ext_dependant)
    {
      switch (GET_CODE (v->ext_dependant))
	{
	case SIGN_EXTEND:
	  fprintf (file, " ext se");
	  break;
	case ZERO_EXTEND:
	  fprintf (file, " ext ze");
	  break;
	case TRUNCATE:
	  fprintf (file, " ext tr");
	  break;
	default:
	  abort ();
	}
    }

  fputc ('\n', file);
  fprintf (file, " mult ");
  print_simple_rtl (file, v->mult_val);

  fputc ('\n', file);
  fprintf (file, " add ");
  print_simple_rtl (file, v->add_val);

  if (verbose && v->final_value)
    {
      fputc ('\n', file);
      fprintf (file, " final ");
      print_simple_rtl (file, v->final_value);
    }

  fputc ('\n', file);
}

/* Call this function from the debugger to dump the IV classes of LOOP.  */

void
debug_ivs (loop)
     const struct loop *loop;
{
  loop_ivs_dump (loop, stderr, 1);
}

/* Call this function from the debugger to dump IV class BL.  */

void
debug_iv_class (bl)
     const struct iv_class *bl;
{
  loop_iv_class_dump (bl, stderr, 1);
}

/* Call this function from the debugger to dump biv V.  */

void
debug_biv (v)
     const struct induction *v;
{
  loop_biv_dump (v, stderr, 1);
}

/* Call this function from the debugger to dump giv V.  */

void
debug_giv (v)
     const struct induction *v;
{
  loop_giv_dump (v, stderr, 1);
}

#define LOOP_BLOCK_NUM_1(INSN) \
((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)

/* The notes do not have an assigned block, so look at the next insn.  */
#define LOOP_BLOCK_NUM(INSN) \
((INSN) ? (GET_CODE (INSN) == NOTE \
	   ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
	   : LOOP_BLOCK_NUM_1 (INSN)) \
 : -1)

#define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)

static void
loop_dump_aux (loop, file, verbose)
     const struct loop *loop;
     FILE *file;
     int verbose ATTRIBUTE_UNUSED;
{
  rtx label;

  if (! loop || ! file)
    return;

  /* Print diagnostics to compare our concept of a loop with
     what the loop notes say.  */
  if (! PREV_INSN (loop->first->head)
      || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
      || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
	 != NOTE_INSN_LOOP_BEG)
    fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
	     INSN_UID (PREV_INSN (loop->first->head)));
  if (! NEXT_INSN (loop->last->end)
      || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
      || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
	 != NOTE_INSN_LOOP_END)
    fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
	     INSN_UID (NEXT_INSN (loop->last->end)));

  if (loop->start)
    {
      /* The original format string printed LOOP->cont twice, once
	 under the stale label "cont dom"; print it only once.  */
      fprintf (file,
	       ";; start %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
	       LOOP_BLOCK_NUM (loop->start),
	       LOOP_INSN_UID (loop->start),
	       LOOP_BLOCK_NUM (loop->cont),
	       LOOP_INSN_UID (loop->cont),
	       LOOP_BLOCK_NUM (loop->vtop),
	       LOOP_INSN_UID (loop->vtop),
	       LOOP_BLOCK_NUM (loop->end),
	       LOOP_INSN_UID (loop->end));
      fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
	       LOOP_BLOCK_NUM (loop->top),
	       LOOP_INSN_UID (loop->top),
	       LOOP_BLOCK_NUM (loop->scan_start),
	       LOOP_INSN_UID (loop->scan_start));
      fprintf (file, ";; exit_count %d", loop->exit_count);
      if (loop->exit_count)
	{
	  fputs (", labels:", file);
	  for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
	    fprintf (file, " %d ",
		     LOOP_INSN_UID (XEXP (label, 0)));
	}
      fputs ("\n", file);

      /* This can happen when a marked loop appears as two nested loops,
	 say from while (a || b) {}.  The inner loop won't match
	 the loop markers but the outer one will.  */
      if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
	fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
    }
}

/* Call this function from the debugger to dump LOOP.  */

void
debug_loop (loop)
     const struct loop *loop;
{
  flow_loop_dump (loop, stderr, loop_dump_aux, 1);
}

/* Call this function from the debugger to dump LOOPS.  */

void
debug_loops (loops)
     const struct loops *loops;
{
  flow_loops_dump (loops, stderr, loop_dump_aux, 1);
}