/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value, and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
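
/* As an illustrative sketch of these transformations (example code,
   not taken from this file or from any test case): given

       for (i = 0; i < n; i++)
         a[i] = x * y;

   moving invariants hoists the computation of x * y in front of the
   loop, and strength reduction replaces the per-iteration address
   computation a + i * sizeof (*a) with a pointer stepped by a
   constant amount:

       t = x * y;
       for (p = a; p < a + n; p++)
         *p = t;

   Likewise, the zero-extension case turns a widening assignment such
   as w = (unsigned char) *s executed inside the loop into a single
   clearing of W before the loop plus a narrow low-part copy inside
   it.  */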

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "obstack.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;
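
/* For example (illustrative uids): if the insn stream contains insns
   with uids 7, 3 and 9, in that order, they receive luids 1, 2 and 3,
   so comparing luids respects stream order even when comparing uids
   does not.  */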

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this one is.  */
  struct movable *next;
};

FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops PARAMS ((rtx, struct loops *));
static void mark_loop_jump PARAMS ((rtx, struct loop *));
static void prescan_loop PARAMS ((struct loop *));
static int reg_in_basic_block_p PARAMS ((rtx, rtx));
static int consec_sets_invariant_p PARAMS ((const struct loop *,
					    rtx, int, rtx));
static int labels_in_range_p PARAMS ((rtx, int));
static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
static void note_addr_stored PARAMS ((rtx, rtx, void *));
static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
static void scan_loop PARAMS ((struct loop*, int));
#if 0
static void replace_call_address PARAMS ((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PARAMS ((rtx, int));
static int libcall_benefit PARAMS ((rtx));
static void ignore_some_movables PARAMS ((struct loop_movables *));
static void force_movables PARAMS ((struct loop_movables *));
static void combine_movables PARAMS ((struct loop_movables *,
				      struct loop_regs *));
static int num_unmoved_movables PARAMS ((const struct loop *));
static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
					 struct loop_regs *));
static void add_label_notes PARAMS ((rtx, rtx));
static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
				   int, int));
static void loop_movables_add PARAMS((struct loop_movables *,
				      struct movable *));
static void loop_movables_free PARAMS((struct loop_movables *));
static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
static void loop_bivs_find PARAMS((struct loop *));
static void loop_bivs_init_find PARAMS((struct loop *));
static void loop_bivs_check PARAMS((struct loop *));
static void loop_givs_find PARAMS((struct loop *));
static void loop_givs_check PARAMS((struct loop *));
static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
					 int, int));
static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
					   struct induction *, rtx));
static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
				     rtx *));
static void loop_ivs_free PARAMS((struct loop *));
static void strength_reduce PARAMS ((struct loop *, int));
static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
static void record_biv PARAMS ((struct loop *, struct induction *,
				rtx, rtx, rtx, rtx, rtx *,
				int, int));
static void check_final_value PARAMS ((const struct loop *,
				       struct induction *));
static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
static void record_giv PARAMS ((const struct loop *, struct induction *,
				rtx, rtx, rtx, rtx, rtx, rtx, int,
				enum g_types, int, int, rtx *));
static void update_giv_derive PARAMS ((const struct loop *, rtx));
static void check_ext_dependant_givs PARAMS ((struct iv_class *,
					      struct loop_info *));
static int basic_induction_var PARAMS ((const struct loop *, rtx,
					enum machine_mode, rtx, rtx,
					rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
					  rtx *, rtx *, rtx *, int, int *,
					  enum machine_mode));
static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
				    rtx, rtx, rtx *, rtx *, rtx *, rtx *));
static int check_dbra_loop PARAMS ((struct loop *, int));
static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
static int product_cheap_p PARAMS ((rtx, rtx));
static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
					int, int, int));
static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
					  struct iv_class *, int,
					  basic_block, rtx));
static int last_use_this_basic_block PARAMS ((rtx, rtx));
static void record_initial PARAMS ((rtx, rtx, void *));
static void update_reg_last_use PARAMS ((rtx, rtx));
static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
static void loop_regs_scan PARAMS ((const struct loop *, int));
static int count_insns_in_loop PARAMS ((const struct loop *));
static void load_mems PARAMS ((const struct loop *));
static int insert_loop_mem PARAMS ((rtx *, void *));
static int replace_loop_mem PARAMS ((rtx *, void *));
static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
static int replace_loop_reg PARAMS ((rtx *, void *));
static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
static void note_reg_stored PARAMS ((rtx, rtx, void *));
static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
					unsigned int));
static int replace_label PARAMS ((rtx *, void *));
static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
static void loop_regs_update PARAMS ((const struct loop *, rtx));
static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));

static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
					rtx, rtx));
static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
					      basic_block, rtx, rtx));
static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));

static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
static void loop_delete_insns PARAMS ((rtx, rtx));
void debug_ivs PARAMS ((const struct loop *));
void debug_iv_class PARAMS ((const struct iv_class *));
void debug_biv PARAMS ((const struct induction *));
void debug_giv PARAMS ((const struct induction *));
void debug_loop PARAMS ((const struct loop *));
void debug_loops PARAMS ((const struct loops *));

typedef struct rtx_pair
{
  rtx r1;
  rtx r2;
} rtx_pair;

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p PARAMS ((rtx));

static int compute_luids PARAMS ((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
						     struct induction *,
						     rtx));

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop ()
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}

/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}

/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */
void
loop_optimize (f, dumpfile, flags)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int flags;
{
  register rtx insn;
  register int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
				       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	scan_loop (loop, flags);
    }

  /* If there were lexical blocks inside the loop, they have been
     replicated.  We will now have more than one NOTE_INSN_BLOCK_BEG
     and NOTE_INSN_BLOCK_END for each such block.  We must duplicate
     the BLOCKs as well.  */
  if (write_symbols != NO_DEBUG)
    reorder_blocks ();

  end_alias_analysis ();

  /* Clean up.  */
  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}

/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */
static rtx
next_insn_in_loop (loop, insn)
     const struct loop *loop;
     rtx insn;
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
static void
scan_loop (loop, flags)
     struct loop *loop;
     int flags;
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  register int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
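
  /* An illustrative sketch (assumed shape, not lifted from a particular
     target or dump) of the kind of layout being detected:

	    NOTE_INSN_LOOP_BEG
	    jump L2			; entry jumps down to the exit test
	L1: body of the loop
	L2: exit test
	    conditional jump L1
	    NOTE_INSN_LOOP_END

     For such a loop, the code below leaves LOOP->SCAN_START at L2 and
     LOOP->TOP at L1.  */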

  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN).  */
      if (any_uncondjump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
	{
	  loop->top = next_label (loop->scan_start);
	  loop->scan_start = JUMP_LABEL (p);
	}
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop->cont));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
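
  /* For instance (illustrative code only):

	 while (...)
	   if (d != 0)
	     x = n / d;

     the division sits after a conditional jump and may execute zero
     times, so hoisting it in front of the loop could introduce a trap
     (division by zero) that the original program never executed.
     MAYBE_NEVER prevents moving such insns unless they cannot trap.  */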

  for (p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;

	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }

	  /* For parallels, add any possible uses to the dependencies, as
	     we can't move the insn without resolving them first.  */
	  if (GET_CODE (PATTERN (p)) == PARALLEL)
	    {
	      for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		{
		  rtx x = XVECEXP (PATTERN (p), 0, i);
		  if (GET_CODE (x) == USE)
		    dependencies
		      = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					   dependencies);
		}
	    }

	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute
	     the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  else if (/* The register is used in basic blocks other
		      than the one where it is set (meaning that
		      something after this point in the loop might
		      depend on its value before the set).  */
		   ! reg_in_basic_block_p (p, SET_DEST (set))
		   /* And the set is not guaranteed to be executed once
		      the loop starts, or the value before the set is
		      needed before the set occurs...

		      ??? Note we have quadratic behaviour here, mitigated
		      by the fact that the previous test will often fail for
		      large loops.  Rather than re-scanning the entire loop
		      each time for register usage, we should build tables
		      of the register usage and use them here instead.  */
		   && (maybe_never
		       || loop_reg_used_before_p (loop, set, p)))
	    /* It is unsafe to move the set.

	       This code used to consider it OK to move a set of a variable
	       which was not created by the user and not used in an exit
	       test.  That behavior is incorrect and was removed.  */
	    ;
	  else if ((tem = loop_invariant_p (loop, src))
		   && (dependencies == 0
		       || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
		   && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
		       || (tem1
			   = consec_sets_invariant_p
			     (loop, SET_DEST (set),
			      regs->array[REGNO (SET_DEST (set))].set_in_loop,
			      p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      register struct movable *m;
	      register int regno = REGNO (SET_DEST (set));

	      /* A potential lossage is where we have a case where two insns
		 can be combined as long as they are both in the loop, but
		 we move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the address
		 of a function being called.

		 Therefore, if this register is marked as being used exactly
		 once if we are in a loop with calls (a "large loop"), see if
		 we can replace the usage of this register with the source
		 of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

	      if (loop_info->has_call
		  && regs->array[regno].single_usage != 0
		  && regs->array[regno].single_usage != const0_rtx
		  && REGNO_FIRST_UID (regno) == INSN_UID (p)
		  && (REGNO_LAST_UID (regno)
		      == INSN_UID (regs->array[regno].single_usage))
		  && regs->array[regno].set_in_loop == 1
		  && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		  && (! SMALL_REGISTER_CLASSES
		      || (! (GET_CODE (SET_SRC (set)) == REG
			     && REGNO (SET_SRC (set))
				< FIRST_PSEUDO_REGISTER)))
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   regs->array[regno].single_usage)
		  && no_labels_between_p (p, regs->array[regno].single_usage)
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   regs->array[regno].single_usage))
		{
		  /* Replace any usage in a REG_EQUAL note.  Must copy the
		     new source, so that we don't get rtx sharing between the
		     SET_SOURCE and REG_NOTES of insn p.  */
		  REG_NOTES (regs->array[regno].single_usage)
		    = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
				   SET_DEST (set), copy_rtx (SET_SRC (set)));

		  PUT_CODE (p, NOTE);
		  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
		  NOTE_SOURCE_FILE (p) = 0;
		  regs->array[regno].set_in_loop = 0;
		  continue;
		}

	      m = (struct movable *) xmalloc (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->move_insn_first = 0;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either loop_invariant_p
		 or consec_sets_invariant_p returned 2
		 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = LOOP_REG_GLOBAL_P (loop, regno);
	      m->match = 0;
	      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
	      m->savings = regs->array[regno].n_times_set;
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      regs->array[regno].set_in_loop = move_insn ? -2 : -1;
	      /* Add M to the end of the chain MOVABLES.  */
	      loop_movables_add (movables, m);

	      if (m->consec > 0)
		{
		  /* It is possible for the first instruction to have a
		     REG_EQUAL note but a non-invariant SET_SRC, so we must
		     remember the status of the first instruction in case
		     the last instruction doesn't have a REG_EQUAL note.  */
		  m->move_insn_first = m->move_insn;

		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);

		  /* We must now reset m->move_insn, m->is_equiv, and
		     possibly m->set_src to correspond to the effects of
		     all the insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;
		    }
		  m->is_equiv
		    = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      register int regno = REGNO (SET_DEST (set));
	      if (regs->array[regno].set_in_loop == 2)
		{
		  register struct movable *m;
		  m = (struct movable *) xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->move_insn_first = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || LOOP_REG_GLOBAL_P (loop, regno)
			       || (labels_in_range_p
				   (p, REGNO_FIRST_LUID (regno))));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = 1;
		  regs->array[regno].set_in_loop = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (loop, movables, threshold, insn_count);

  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not actually
     invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
	 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
	/* Ensure our label doesn't go away.  */
	LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
	  && --LABEL_NUSES (update_end) == 0)
	delete_insn (update_end);
    }

  /* The movable information is required for strength reduction.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}

/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}

/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}

/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;

	default:
	  break;
	}
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}

/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a
				   library routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}

/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do
	insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct loop_movables *movables;
{
  register struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}

/* For each movable insn, see if the reg that it loads
   leads when it dies right into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */
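
/* An invented example: if movable M1 loads reg 70, reg 70 dies in a
   second, conditionally movable insn M that computes reg 71 from
   reg 70, and M1 sets exactly what M consumes, then M->forces is set
   to M1, recording that M1 must be moved whenever M is.  */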

static void
force_movables (movables)
     struct loop_movables *movables;
{
  register struct movable *m, *m1;

  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
	int regno = m1->regno;
	for (m = m1->next; m; m = m->next)
	  /* ??? Could this be a bug?  What if CSE caused the
	     register of M1 to be used after this insn?
	     Since CSE does not update regno_last_uid,
	     this insn M->insn might not be where it dies.
	     But very likely this doesn't matter; what matters is
	     that M's reg is computed from M1's reg.  */
	  if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
	      && !m->done)
	    break;
	if (m != 0 && m->set_src == m1->set_dest
	    /* If m->consec, m->set_src isn't valid.  */
	    && m->consec == 0)
	  m = 0;

	/* Increase the priority of moving the first insn
	   since it permits the second to be moved as well.  */
	if (m != 0)
	  {
	    m->forces = m1;
	    m1->lifetime += m->lifetime;
	    m1->savings += m->savings;
	  }
      }
}

/* Find invariant expressions that are equal and can be combined into
   one register.  */

static void
combine_movables (movables, regs)
     struct loop_movables *movables;
     struct loop_regs *regs;
{
  register struct movable *m;
  char *matched_regs = (char *) xmalloc (regs->num);
  enum machine_mode mode;

  /* Regs that are set more than once are not allowed to match
     or be matched.  I'm no longer sure why not.  */
  /* Perhaps testing m->consec_sets would be more appropriate here?  */

  for (m = movables->head; m; m = m->next)
    if (m->match == 0 && regs->array[m->regno].n_times_set == 1
	&& !m->partial)
      {
	register struct movable *m1;
	int regno = m->regno;

	memset (matched_regs, 0, regs->num);
	matched_regs[regno] = 1;

	/* We want later insns to match the first one.  Don't make the first
	   one match any later ones.  So start this loop at m->next.  */
	for (m1 = m->next; m1; m1 = m1->next)
	  if (m != m1 && m1->match == 0
	      && regs->array[m1->regno].n_times_set == 1
	      /* A reg used outside the loop mustn't be eliminated.  */
	      && !m1->global
	      /* A reg used for zero-extending mustn't be eliminated.  */
	      && !m1->partial
	      && (matched_regs[m1->regno]
		  ||
		  (
		   /* Can combine regs with different modes loaded from the
		      same constant only if the modes are the same or
		      if both are integer modes with M wider or the same
		      width as M1.  The check for integer is redundant, but
		      safe, since the only case of differing destination
		      modes with equal sources is when both sources are
		      VOIDmode, i.e., CONST_INT.  */
		   (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
		    || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
			&& GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
			&& (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
			    >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
		   /* See if the source of M1 says it matches M.  */
		   && ((GET_CODE (m1->set_src) == REG
			&& matched_regs[REGNO (m1->set_src)])
		       || rtx_equal_for_loop_p (m->set_src, m1->set_src,
						movables, regs))))
	      && ((m->dependencies == m1->dependencies)
		  || rtx_equal_p (m->dependencies, m1->dependencies)))
	    {
	      m->lifetime += m1->lifetime;
	      m->savings += m1->savings;
	      m1->done = 1;
	      m1->match = m;
	      matched_regs[m1->regno] = 1;
	    }
      }

  /* Now combine the regs used for zero-extension.
     This can be done for those not marked `global'
     provided their lives don't overlap.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      register struct movable *m0 = 0;

      /* Combine all the registers for extension from mode MODE.
	 Don't combine any that are used outside this loop.  */
      for (m = movables->head; m; m = m->next)
	if (m->partial && ! m->global
	    && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
	  {
	    register struct movable *m1;
	    int first = REGNO_FIRST_LUID (m->regno);
	    int last = REGNO_LAST_LUID (m->regno);

	    if (m0 == 0)
	      {
		/* First one: don't check for overlap, just record it.  */
		m0 = m;
		continue;
	      }

	    /* Make sure they extend to the same mode.
	       (Almost always true.)  */
	    if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
	      continue;

	    /* We already have one: check for overlap with those
	       already combined together.  */
	    for (m1 = movables->head; m1 != m; m1 = m1->next)
	      if (m1 == m0 || (m1->partial && m1->match == m0))
		if (! (REGNO_FIRST_LUID (m1->regno) > last
		       || REGNO_LAST_LUID (m1->regno) < first))
		  goto overlap;

	    /* No overlap: we can combine this with the others.  */
	    m0->lifetime += m->lifetime;
	    m0->savings += m->savings;
	    m->done = 1;
	    m->match = m0;

	  overlap:
	    ;
	  }
    }

  /* Clean up.  */
  free (matched_regs);
}

/* Returns the number of movable instructions in LOOP that were not
   moved outside the loop.  */

static int
num_unmoved_movables (loop)
     const struct loop *loop;
{
  int num = 0;
  struct movable *m;

  for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
    if (!m->done)
      ++num;

  return num;
}

/* Return 1 if regs X and Y will become the same if moved.  */

static int
regs_match_p (x, y, movables)
     rtx x, y;
     struct loop_movables *movables;
{
  unsigned int xn = REGNO (x);
  unsigned int yn = REGNO (y);
  struct movable *mx, *my;

  for (mx = movables->head; mx; mx = mx->next)
    if (mx->regno == xn)
      break;

  for (my = movables->head; my; my = my->next)
    if (my->regno == yn)
      break;

  return (mx && my
	  && ((mx->match == my->match && mx->match != 0)
	      || mx->match == my
	      || mx == my->match));
}

/* Return 1 if X and Y are identical-looking rtx's.
   This is the Lisp function EQUAL for rtx arguments.

   If two registers are matching movables or a movable register and an
   equivalent constant, consider them equal.  */
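
/* An illustrative case: if pseudo 65 is a movable loaded from the
   constant 5 and marked to be re-emitted with emit_move_insn (its
   set_in_loop entry is -2), then (reg 65) compares equal to
   (const_int 5) here; likewise two registers compare equal when
   regs_match_p says they will become the same once moved.  */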

static int
rtx_equal_for_loop_p (x, y, movables, regs)
     rtx x, y;
     struct loop_movables *movables;
     struct loop_regs *regs;
{
  register int i;
  register int j;
  register struct movable *m;
  register enum rtx_code code;
  register const char *fmt;

  if (x == y)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  code = GET_CODE (x);

  /* If we have a register and a constant, they may sometimes be
     equal.  */
  if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
      && CONSTANT_P (y))
    {
      for (m = movables->head; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (x)
	    && rtx_equal_p (m->set_src, y))
	  return 1;
    }
  else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
	   && CONSTANT_P (x))
    {
      for (m = movables->head; m; m = m->next)
	if (m->move_insn && m->regno == REGNO (y)
	    && rtx_equal_p (m->set_src, x))
	  return 1;
    }

  /* Otherwise, rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* These three types of rtx's can be compared nonrecursively.  */
  if (code == REG)
    return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));

  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
				      movables, regs) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
	      == 0)
	    return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}

/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
   insns in INSNS which use the reference.  LABEL_NUSES for CODE_LABEL
   references is incremented once for each added note.  */

static void
add_label_notes (x, insns)
     rtx x;
     rtx insns;
{
  enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;
  rtx insn;

  if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
    {
      /* This code used to ignore labels that referred to dispatch tables to
	 avoid flow generating (slightly) worse code.

	 We no longer ignore such label references (see LABEL_REF handling in
	 mark_jump_label for additional information).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (reg_mentioned_p (XEXP (x, 0), insn))
	  {
	    REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
						  REG_NOTES (insn));
	    if (LABEL_P (XEXP (x, 0)))
	      LABEL_NUSES (XEXP (x, 0))++;
	  }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_label_notes (XEXP (x, i), insns);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_label_notes (XVECEXP (x, i, j), insns);
    }
}

/* Scan MOVABLES, and move the insns that deserve to be moved.
   If two matching movables are combined, replace one reg with the
   other throughout.  */
1651 static void
1652 move_movables (loop, movables, threshold, insn_count)
1653 struct loop *loop;
1654 struct loop_movables *movables;
1655 int threshold;
1656 int insn_count;
1658 struct loop_regs *regs = LOOP_REGS (loop);
1659 int nregs = regs->num;
1660 rtx new_start = 0;
1661 register struct movable *m;
1662 register rtx p;
1663 rtx loop_start = loop->start;
1664 rtx loop_end = loop->end;
1665 /* Map of pseudo-register replacements to handle combining
1666 when we move several insns that load the same value
1667 into different pseudo-registers. */
1668 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1669 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1671 for (m = movables->head; m; m = m->next)
1673 /* Describe this movable insn. */
1675 if (loop_dump_stream)
1677 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1678 INSN_UID (m->insn), m->regno, m->lifetime);
1679 if (m->consec > 0)
1680 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1681 if (m->cond)
1682 fprintf (loop_dump_stream, "cond ");
1683 if (m->force)
1684 fprintf (loop_dump_stream, "force ");
1685 if (m->global)
1686 fprintf (loop_dump_stream, "global ");
1687 if (m->done)
1688 fprintf (loop_dump_stream, "done ");
1689 if (m->move_insn)
1690 fprintf (loop_dump_stream, "move-insn ");
1691 if (m->match)
1692 fprintf (loop_dump_stream, "matches %d ",
1693 INSN_UID (m->match->insn));
1694 if (m->forces)
1695 fprintf (loop_dump_stream, "forces %d ",
1696 INSN_UID (m->forces->insn));
1699 /* Ignore the insn if it's already done (it matched something else).
1700 Otherwise, see if it is now safe to move. */
1702 if (!m->done
1703 && (! m->cond
1704 || (1 == loop_invariant_p (loop, m->set_src)
1705 && (m->dependencies == 0
1706 || 1 == loop_invariant_p (loop, m->dependencies))
1707 && (m->consec == 0
1708 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1709 m->consec + 1,
1710 m->insn))))
1711 && (! m->forces || m->forces->done))
1713 register int regno;
1714 register rtx p;
1715 int savings = m->savings;
1717 /* We have an insn that is safe to move.
1718 Compute its desirability. */
1720 p = m->insn;
1721 regno = m->regno;
1723 if (loop_dump_stream)
1724 fprintf (loop_dump_stream, "savings %d ", savings);
1726 if (regs->array[regno].moved_once && loop_dump_stream)
1727 fprintf (loop_dump_stream, "halved since already moved ");
1729 /* An insn MUST be moved if we already moved something else
1730 which is safe only if this one is moved too: that is,
1731 if already_moved[REGNO] is nonzero. */
1733 /* An insn is desirable to move if the new lifetime of the
1734 register is no more than THRESHOLD times the old lifetime.
1735 If it's not desirable, it means the loop is so big
1736 that moving won't speed things up much,
1737 and it is liable to make register usage worse. */
1739 /* It is also desirable to move if it can be moved at no
1740 extra cost because something else was already moved. */
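/* A worked example, with illustrative numbers only: with THRESHOLD 3,
   an insn whose register saves 1 insn of work and lives for 10 insns
   scores 3 * 1 * 10 = 30, so it is hoisted from any loop of at most
   30 insns. The right-hand side doubles to INSN_COUNT * 2 once the
   register has already been moved out of another loop, halving the
   effective score (the "halved since already moved" case above). */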
1742 if (already_moved[regno]
1743 || flag_move_all_movables
1744 || (threshold * savings * m->lifetime) >=
1745 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1746 || (m->forces && m->forces->done
1747 && regs->array[m->forces->regno].n_times_set == 1))
1749 int count;
1750 register struct movable *m1;
1751 rtx first = NULL_RTX;
1753 /* Now move the insns that set the reg. */
1755 if (m->partial && m->match)
1757 rtx newpat, i1;
1758 rtx r1, r2;
1759 /* Find the end of this chain of matching regs.
1760 Thus, we load each reg in the chain from that one reg.
1761 And that reg is loaded with 0 directly,
1762 since it has ->match == 0. */
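/* For instance, with hypothetical register numbers: if the match
   chain is r5 -> r8 -> r11, then r5 and r8 are each rewritten below
   as plain copies of r11, and only r11, the end of the chain, keeps
   the zeroing insn ahead of the loop. */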
1763 for (m1 = m; m1->match; m1 = m1->match);
1764 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1765 SET_DEST (PATTERN (m1->insn)));
1766 i1 = loop_insn_hoist (loop, newpat);
1768 /* Mark the moved, invariant reg as being allowed to
1769 share a hard reg with the other matching invariant. */
1770 REG_NOTES (i1) = REG_NOTES (m->insn);
1771 r1 = SET_DEST (PATTERN (m->insn));
1772 r2 = SET_DEST (PATTERN (m1->insn));
1773 regs_may_share
1774 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1775 gen_rtx_EXPR_LIST (VOIDmode, r2,
1776 regs_may_share));
1777 delete_insn (m->insn);
1779 if (new_start == 0)
1780 new_start = i1;
1782 if (loop_dump_stream)
1783 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1785 /* If we are to re-generate the item being moved with a
1786 new move insn, first delete what we have and then emit
1787 the move insn before the loop. */
1788 else if (m->move_insn)
1790 rtx i1, temp, seq;
1792 for (count = m->consec; count >= 0; count--)
1794 /* If this is the first insn of a library call sequence,
1795 skip to the end. */
1796 if (GET_CODE (p) != NOTE
1797 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1798 p = XEXP (temp, 0);
1800 /* If this is the last insn of a libcall sequence, then
1801 delete every insn in the sequence except the last.
1802 The last insn is handled in the normal manner. */
1803 if (GET_CODE (p) != NOTE
1804 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1806 temp = XEXP (temp, 0);
1807 while (temp != p)
1808 temp = delete_insn (temp);
1811 temp = p;
1812 p = delete_insn (p);
1814 /* simplify_giv_expr expects that it can walk the insns
1815 at m->insn forwards and see this old sequence we are
1816 tossing here. delete_insn does preserve the next
1817 pointers, but when we skip over a NOTE we must fix
1818 it up. Otherwise that code walks into the non-deleted
1819 insn stream. */
1820 while (p && GET_CODE (p) == NOTE)
1821 p = NEXT_INSN (temp) = NEXT_INSN (p);
1824 start_sequence ();
1825 emit_move_insn (m->set_dest, m->set_src);
1826 temp = get_insns ();
1827 seq = gen_sequence ();
1828 end_sequence ();
1830 add_label_notes (m->set_src, temp);
1832 i1 = loop_insn_hoist (loop, seq);
1833 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1834 REG_NOTES (i1)
1835 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1836 m->set_src, REG_NOTES (i1));
1838 if (loop_dump_stream)
1839 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1841 /* The more regs we move, the less we like moving them. */
1842 threshold -= 3;
1844 else
1846 for (count = m->consec; count >= 0; count--)
1848 rtx i1, temp;
1850 /* If first insn of libcall sequence, skip to end. */
1851 /* Do this at start of loop, since p is guaranteed to
1852 be an insn here. */
1853 if (GET_CODE (p) != NOTE
1854 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1855 p = XEXP (temp, 0);
1857 /* If last insn of libcall sequence, move all
1858 insns except the last before the loop. The last
1859 insn is handled in the normal manner. */
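/* A libcall sequence is delimited by a pair of cross-linked notes:
   the first insn carries a REG_LIBCALL note pointing at the last
   insn, and the last carries a REG_RETVAL note pointing back at the
   first, so the whole sequence can be found from either end. */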
1860 if (GET_CODE (p) != NOTE
1861 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1863 rtx fn_address = 0;
1864 rtx fn_reg = 0;
1865 rtx fn_address_insn = 0;
1867 first = 0;
1868 for (temp = XEXP (temp, 0); temp != p;
1869 temp = NEXT_INSN (temp))
1871 rtx body;
1872 rtx n;
1873 rtx next;
1875 if (GET_CODE (temp) == NOTE)
1876 continue;
1878 body = PATTERN (temp);
1880 /* Find the next insn after TEMP,
1881 not counting USE or NOTE insns. */
1882 for (next = NEXT_INSN (temp); next != p;
1883 next = NEXT_INSN (next))
1884 if (! (GET_CODE (next) == INSN
1885 && GET_CODE (PATTERN (next)) == USE)
1886 && GET_CODE (next) != NOTE)
1887 break;
1889 /* If that is the call, this may be the insn
1890 that loads the function address.
1892 Extract the function address from the insn
1893 that loads it into a register.
1894 If this insn was cse'd, we get incorrect code.
1896 So emit a new move insn that copies the
1897 function address into the register that the
1898 call insn will use. flow.c will delete any
1899 redundant stores that we have created. */
1900 if (GET_CODE (next) == CALL_INSN
1901 && GET_CODE (body) == SET
1902 && GET_CODE (SET_DEST (body)) == REG
1903 && (n = find_reg_note (temp, REG_EQUAL,
1904 NULL_RTX)))
1906 fn_reg = SET_SRC (body);
1907 if (GET_CODE (fn_reg) != REG)
1908 fn_reg = SET_DEST (body);
1909 fn_address = XEXP (n, 0);
1910 fn_address_insn = temp;
1912 /* We have the call insn.
1913 If it uses the register we suspect it might,
1914 load it with the correct address directly. */
1915 if (GET_CODE (temp) == CALL_INSN
1916 && fn_address != 0
1917 && reg_referenced_p (fn_reg, body))
1918 loop_insn_emit_after (loop, 0, fn_address_insn,
1919 gen_move_insn
1920 (fn_reg, fn_address));
1922 if (GET_CODE (temp) == CALL_INSN)
1924 i1 = loop_call_insn_hoist (loop, body);
1925 /* Because the USAGE information potentially
1926 contains objects other than hard registers
1927 we need to copy it. */
1928 if (CALL_INSN_FUNCTION_USAGE (temp))
1929 CALL_INSN_FUNCTION_USAGE (i1)
1930 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
1932 else
1933 i1 = loop_insn_hoist (loop, body);
1934 if (first == 0)
1935 first = i1;
1936 if (temp == fn_address_insn)
1937 fn_address_insn = i1;
1938 REG_NOTES (i1) = REG_NOTES (temp);
1939 delete_insn (temp);
1941 if (new_start == 0)
1942 new_start = first;
1944 if (m->savemode != VOIDmode)
1946 /* P sets REG to zero; but we should clear only
1947 the bits that are not covered by the mode
1948 m->savemode. */
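/* E.g., illustratively: if m->savemode is QImode, the mask computed
   below is (1 << 8) - 1 = 0xff, so the AND clears every bit above
   bit 7 while leaving the low byte, which the loop body rewrites on
   each iteration, untouched. */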
1949 rtx reg = m->set_dest;
1950 rtx sequence;
1951 rtx tem;
1953 start_sequence ();
1954 tem = expand_binop
1955 (GET_MODE (reg), and_optab, reg,
1956 GEN_INT ((((HOST_WIDE_INT) 1
1957 << GET_MODE_BITSIZE (m->savemode)))
1958 - 1),
1959 reg, 1, OPTAB_LIB_WIDEN);
1960 if (tem == 0)
1961 abort ();
1962 if (tem != reg)
1963 emit_move_insn (reg, tem);
1964 sequence = gen_sequence ();
1965 end_sequence ();
1966 i1 = loop_insn_hoist (loop, sequence);
1968 else if (GET_CODE (p) == CALL_INSN)
1970 i1 = loop_call_insn_hoist (loop, PATTERN (p));
1971 /* Because the USAGE information potentially
1972 contains objects other than hard registers
1973 we need to copy it. */
1974 if (CALL_INSN_FUNCTION_USAGE (p))
1975 CALL_INSN_FUNCTION_USAGE (i1)
1976 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
1978 else if (count == m->consec && m->move_insn_first)
1980 rtx seq;
1981 /* The SET_SRC might not be invariant, so we must
1982 use the REG_EQUAL note. */
1983 start_sequence ();
1984 emit_move_insn (m->set_dest, m->set_src);
1985 temp = get_insns ();
1986 seq = gen_sequence ();
1987 end_sequence ();
1989 add_label_notes (m->set_src, temp);
1991 i1 = loop_insn_hoist (loop, seq);
1992 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1993 REG_NOTES (i1)
1994 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
1995 : REG_EQUAL),
1996 m->set_src, REG_NOTES (i1));
1998 else
1999 i1 = loop_insn_hoist (loop, PATTERN (p));
2001 if (REG_NOTES (i1) == 0)
2003 REG_NOTES (i1) = REG_NOTES (p);
2005 /* If there is a REG_EQUAL note present whose value
2006 is not loop invariant, then delete it, since it
2007 may cause problems with later optimization passes.
2008 It is possible for cse to create such notes
2009 like this as a result of record_jump_cond. */
2011 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2012 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2013 remove_note (i1, temp);
2016 if (new_start == 0)
2017 new_start = i1;
2019 if (loop_dump_stream)
2020 fprintf (loop_dump_stream, " moved to %d",
2021 INSN_UID (i1));
2023 /* If library call, now fix the REG_NOTES that contain
2024 insn pointers, namely REG_LIBCALL on FIRST
2025 and REG_RETVAL on I1. */
2026 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2028 XEXP (temp, 0) = first;
2029 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2030 XEXP (temp, 0) = i1;
2033 temp = p;
2034 delete_insn (p);
2035 p = NEXT_INSN (p);
2037 /* simplify_giv_expr expects that it can walk the insns
2038 at m->insn forwards and see this old sequence we are
2039 tossing here. delete_insn does preserve the next
2040 pointers, but when we skip over a NOTE we must fix
2041 it up. Otherwise that code walks into the non-deleted
2042 insn stream. */
2043 while (p && GET_CODE (p) == NOTE)
2044 p = NEXT_INSN (temp) = NEXT_INSN (p);
2047 /* The more regs we move, the less we like moving them. */
2048 threshold -= 3;
2051 /* Any other movable that loads the same register
2052 MUST be moved. */
2053 already_moved[regno] = 1;
2055 /* This reg has been moved out of one loop. */
2056 regs->array[regno].moved_once = 1;
2058 /* The reg set here is now invariant. */
2059 if (! m->partial)
2060 regs->array[regno].set_in_loop = 0;
2062 m->done = 1;
2064 /* Change the length-of-life info for the register
2065 to say it lives at least the full length of this loop.
2066 This will help guide optimizations in outer loops. */
2068 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2069 /* This is the old insn before all the moved insns.
2070 We can't use the moved insn because it is out of range
2071 in uid_luid. Only the old insns have luids. */
2072 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2073 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2074 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2076 /* Combine with this moved insn any other matching movables. */
2078 if (! m->partial)
2079 for (m1 = movables->head; m1; m1 = m1->next)
2080 if (m1->match == m)
2082 rtx temp;
2084 /* Schedule the reg loaded by M1
2085 for replacement so that it shares the reg of M.
2086 If the modes differ (only possible in restricted
2087 circumstances), make a SUBREG.
2089 Note this assumes that the target dependent files
2090 treat REG and SUBREG equally, including within
2091 GO_IF_LEGITIMATE_ADDRESS and in all the
2092 predicates since we never verify that replacing the
2093 original register with a SUBREG results in a
2094 recognizable insn. */
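/* A sketch of the mismatched-mode case, with illustrative modes:
   combining an HImode M1->SET_DEST with an SImode M->SET_DEST makes
   gen_lowpart_common return something like (subreg:HI (reg:SI n) 0);
   the word number inside the SUBREG depends on the target's
   endianness. */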
2095 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2096 reg_map[m1->regno] = m->set_dest;
2097 else
2098 reg_map[m1->regno]
2099 = gen_lowpart_common (GET_MODE (m1->set_dest),
2100 m->set_dest);
2102 /* Get rid of the matching insn
2103 and prevent further processing of it. */
2104 m1->done = 1;
2106 /* If this is a library call, delete all insns except the last, which
2107 is deleted below. */
2108 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2109 NULL_RTX)))
2111 for (temp = XEXP (temp, 0); temp != m1->insn;
2112 temp = NEXT_INSN (temp))
2113 delete_insn (temp);
2115 delete_insn (m1->insn);
2117 /* Any other movable that loads the same register
2118 MUST be moved. */
2119 already_moved[m1->regno] = 1;
2121 /* The reg merged here is now invariant,
2122 if the reg it matches is invariant. */
2123 if (! m->partial)
2124 regs->array[m1->regno].set_in_loop = 0;
2127 else if (loop_dump_stream)
2128 fprintf (loop_dump_stream, "not desirable");
2130 else if (loop_dump_stream && !m->match)
2131 fprintf (loop_dump_stream, "not safe");
2133 if (loop_dump_stream)
2134 fprintf (loop_dump_stream, "\n");
2137 if (new_start == 0)
2138 new_start = loop_start;
2140 /* Go through all the instructions in the loop, making
2141 all the register substitutions scheduled in REG_MAP. */
2142 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2143 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2144 || GET_CODE (p) == CALL_INSN)
2146 replace_regs (PATTERN (p), reg_map, nregs, 0);
2147 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2148 INSN_CODE (p) = -1;
2151 /* Clean up. */
2152 free (reg_map);
2153 free (already_moved);
2157 static void
2158 loop_movables_add (movables, m)
2159 struct loop_movables *movables;
2160 struct movable *m;
2162 if (movables->head == 0)
2163 movables->head = m;
2164 else
2165 movables->last->next = m;
2166 movables->last = m;
2170 static void
2171 loop_movables_free (movables)
2172 struct loop_movables *movables;
2174 struct movable *m;
2175 struct movable *m_next;
2177 for (m = movables->head; m; m = m_next)
2179 m_next = m->next;
2180 free (m);
2184 #if 0
2185 /* Scan X and replace the address of any MEM in it with ADDR.
2186 REG is the address that MEM should have before the replacement. */
2188 static void
2189 replace_call_address (x, reg, addr)
2190 rtx x, reg, addr;
2192 register enum rtx_code code;
2193 register int i;
2194 register const char *fmt;
2196 if (x == 0)
2197 return;
2198 code = GET_CODE (x);
2199 switch (code)
2201 case PC:
2202 case CC0:
2203 case CONST_INT:
2204 case CONST_DOUBLE:
2205 case CONST:
2206 case SYMBOL_REF:
2207 case LABEL_REF:
2208 case REG:
2209 return;
2211 case SET:
2212 /* Short cut for very common case. */
2213 replace_call_address (XEXP (x, 1), reg, addr);
2214 return;
2216 case CALL:
2217 /* Short cut for very common case. */
2218 replace_call_address (XEXP (x, 0), reg, addr);
2219 return;
2221 case MEM:
2222 /* If this MEM uses a reg other than the one we expected,
2223 something is wrong. */
2224 if (XEXP (x, 0) != reg)
2225 abort ();
2226 XEXP (x, 0) = addr;
2227 return;
2229 default:
2230 break;
2233 fmt = GET_RTX_FORMAT (code);
2234 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2236 if (fmt[i] == 'e')
2237 replace_call_address (XEXP (x, i), reg, addr);
2238 else if (fmt[i] == 'E')
2240 register int j;
2241 for (j = 0; j < XVECLEN (x, i); j++)
2242 replace_call_address (XVECEXP (x, i, j), reg, addr);
2246 #endif
2248 /* Return the number of memory refs to addresses that vary
2249 in the rtx X. */
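/* For example, illustratively: a load *p, where the pseudo p is
   modified inside the loop, counts as one nonfixed read, while **p
   counts as two, since both the outer load and the address load have
   varying addresses. */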
2251 static int
2252 count_nonfixed_reads (loop, x)
2253 const struct loop *loop;
2254 rtx x;
2256 register enum rtx_code code;
2257 register int i;
2258 register const char *fmt;
2259 int value;
2261 if (x == 0)
2262 return 0;
2264 code = GET_CODE (x);
2265 switch (code)
2267 case PC:
2268 case CC0:
2269 case CONST_INT:
2270 case CONST_DOUBLE:
2271 case CONST:
2272 case SYMBOL_REF:
2273 case LABEL_REF:
2274 case REG:
2275 return 0;
2277 case MEM:
2278 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2279 + count_nonfixed_reads (loop, XEXP (x, 0)));
2281 default:
2282 break;
2285 value = 0;
2286 fmt = GET_RTX_FORMAT (code);
2287 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2289 if (fmt[i] == 'e')
2290 value += count_nonfixed_reads (loop, XEXP (x, i));
2291 if (fmt[i] == 'E')
2293 register int j;
2294 for (j = 0; j < XVECLEN (x, i); j++)
2295 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2298 return value;
2301 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2302 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2303 `unknown_address_altered', `unknown_constant_address_altered', and
2304 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2305 list `store_mems' in LOOP. */
2307 static void
2308 prescan_loop (loop)
2309 struct loop *loop;
2311 register int level = 1;
2312 rtx insn;
2313 struct loop_info *loop_info = LOOP_INFO (loop);
2314 rtx start = loop->start;
2315 rtx end = loop->end;
2316 /* The label after END. Jumping here is just like falling off the
2317 end of the loop. We use next_nonnote_insn instead of next_label
2318 as a hedge against the (pathological) case where some actual insn
2319 might end up between the two. */
2320 rtx exit_target = next_nonnote_insn (end);
2322 loop_info->has_indirect_jump = indirect_jump_in_function;
2323 loop_info->pre_header_has_call = 0;
2324 loop_info->has_call = 0;
2325 loop_info->has_nonconst_call = 0;
2326 loop_info->has_volatile = 0;
2327 loop_info->has_tablejump = 0;
2328 loop_info->has_multiple_exit_targets = 0;
2329 loop->level = 1;
2331 loop_info->unknown_address_altered = 0;
2332 loop_info->unknown_constant_address_altered = 0;
2333 loop_info->store_mems = NULL_RTX;
2334 loop_info->first_loop_store_insn = NULL_RTX;
2335 loop_info->mems_idx = 0;
2336 loop_info->num_mem_sets = 0;
2339 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2340 insn = PREV_INSN (insn))
2342 if (GET_CODE (insn) == CALL_INSN)
2344 loop_info->pre_header_has_call = 1;
2345 break;
2349 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2350 insn = NEXT_INSN (insn))
2352 if (GET_CODE (insn) == NOTE)
2354 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2356 ++level;
2357 /* Count number of loops contained in this one. */
2358 loop->level++;
2360 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2362 --level;
2365 else if (GET_CODE (insn) == CALL_INSN)
2367 if (! CONST_CALL_P (insn))
2369 loop_info->unknown_address_altered = 1;
2370 loop_info->has_nonconst_call = 1;
2372 loop_info->has_call = 1;
2374 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2376 rtx label1 = NULL_RTX;
2377 rtx label2 = NULL_RTX;
2379 if (volatile_refs_p (PATTERN (insn)))
2380 loop_info->has_volatile = 1;
2382 if (GET_CODE (insn) == JUMP_INSN
2383 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2384 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2385 loop_info->has_tablejump = 1;
2387 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2388 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2389 loop_info->first_loop_store_insn = insn;
2391 if (! loop_info->has_multiple_exit_targets
2392 && GET_CODE (insn) == JUMP_INSN
2393 && GET_CODE (PATTERN (insn)) == SET
2394 && SET_DEST (PATTERN (insn)) == pc_rtx)
2396 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2398 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2399 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2401 else
2403 label1 = SET_SRC (PATTERN (insn));
2408 if (label1 && label1 != pc_rtx)
2410 if (GET_CODE (label1) != LABEL_REF)
2412 /* Something tricky. */
2413 loop_info->has_multiple_exit_targets = 1;
2414 break;
2416 else if (XEXP (label1, 0) != exit_target
2417 && LABEL_OUTSIDE_LOOP_P (label1))
2419 /* A jump outside the current loop. */
2420 loop_info->has_multiple_exit_targets = 1;
2421 break;
2425 label1 = label2;
2426 label2 = NULL_RTX;
2428 while (label1);
2431 else if (GET_CODE (insn) == RETURN)
2432 loop_info->has_multiple_exit_targets = 1;
2435 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2436 if (/* An exception thrown by a called function might land us
2437 anywhere. */
2438 ! loop_info->has_nonconst_call
2439 /* We don't want loads for MEMs moved to a location before the
2440 one at which their stack memory becomes allocated. (Note
2441 that this is not a problem for malloc, etc., since those
2442 require actual function calls.) */
2443 && ! current_function_calls_alloca
2444 /* There are ways to leave the loop other than falling off the
2445 end. */
2446 && ! loop_info->has_multiple_exit_targets)
2447 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2448 insn = NEXT_INSN (insn))
2449 for_each_rtx (&insn, insert_loop_mem, loop_info);
2451 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2452 that loop_invariant_p and load_mems can use true_dependence
2453 to determine what is really clobbered. */
2454 if (loop_info->unknown_address_altered)
2456 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2458 loop_info->store_mems
2459 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2461 if (loop_info->unknown_constant_address_altered)
2463 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2465 RTX_UNCHANGING_P (mem) = 1;
2466 loop_info->store_mems
2467 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2471 /* Scan the function looking for loops. Record the start and end of each loop.
2472 Also mark as invalid loops any loops that contain a setjmp or are branched
2473 to from outside the loop. */
2475 static void
2476 find_and_verify_loops (f, loops)
2477 rtx f;
2478 struct loops *loops;
2480 rtx insn;
2481 rtx label;
2482 int num_loops;
2483 struct loop *current_loop;
2484 struct loop *next_loop;
2485 struct loop *loop;
2487 num_loops = loops->num;
2489 compute_luids (f, NULL_RTX, 0);
2491 /* If there are jumps to undefined labels,
2492 treat them as jumps out of any/all loops.
2493 This also avoids writing past end of tables when there are no loops. */
2494 uid_loop[0] = NULL;
2496 /* Find boundaries of loops, mark which loops are contained within
2497 loops, and invalidate loops that have setjmp. */
2499 num_loops = 0;
2500 current_loop = NULL;
2501 for (insn = f; insn; insn = NEXT_INSN (insn))
2503 if (GET_CODE (insn) == NOTE)
2504 switch (NOTE_LINE_NUMBER (insn))
2506 case NOTE_INSN_LOOP_BEG:
2507 next_loop = loops->array + num_loops;
2508 next_loop->num = num_loops;
2509 num_loops++;
2510 next_loop->start = insn;
2511 next_loop->outer = current_loop;
2512 current_loop = next_loop;
2513 break;
2515 case NOTE_INSN_SETJMP:
2516 /* In this case, we must invalidate our current loop and any
2517 enclosing loop. */
2518 for (loop = current_loop; loop; loop = loop->outer)
2520 loop->invalid = 1;
2521 if (loop_dump_stream)
2522 fprintf (loop_dump_stream,
2523 "\nLoop at %d ignored due to setjmp.\n",
2524 INSN_UID (loop->start));
2526 break;
2528 case NOTE_INSN_LOOP_CONT:
2529 current_loop->cont = insn;
2530 break;
2532 case NOTE_INSN_LOOP_VTOP:
2533 current_loop->vtop = insn;
2534 break;
2536 case NOTE_INSN_LOOP_END:
2537 if (! current_loop)
2538 abort ();
2540 current_loop->end = insn;
2541 current_loop = current_loop->outer;
2542 break;
2544 default:
2545 break;
2548 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2549 enclosing loop, but this doesn't matter. */
2550 uid_loop[INSN_UID (insn)] = current_loop;
2553 /* Any loop containing a label used in an initializer must be invalidated,
2554 because it can be jumped into from anywhere. */
2556 for (label = forced_labels; label; label = XEXP (label, 1))
2558 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2559 loop; loop = loop->outer)
2560 loop->invalid = 1;
2563 /* Any loop containing a label used for an exception handler must be
2564 invalidated, because it can be jumped into from anywhere. */
2566 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2568 for (loop = uid_loop[INSN_UID (XEXP (label, 0))];
2569 loop; loop = loop->outer)
2570 loop->invalid = 1;
2573 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2574 loop that it is not contained within, that loop is marked invalid.
2575 If any INSN or CALL_INSN uses a label's address, then the loop containing
2576 that label is marked invalid, because it could be jumped into from
2577 anywhere.
2579 Also look for blocks of code ending in an unconditional branch that
2580 exits the loop. If such a block is surrounded by a conditional
2581 branch around the block, move the block elsewhere (see below) and
2582 invert the jump to point to the code block. This may eliminate a
2583 label in our loop and will simplify processing by both us and a
2584 possible second cse pass. */
2586 for (insn = f; insn; insn = NEXT_INSN (insn))
2587 if (INSN_P (insn))
2589 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2591 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2593 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2594 if (note)
2596 for (loop = uid_loop[INSN_UID (XEXP (note, 0))];
2597 loop; loop = loop->outer)
2598 loop->invalid = 1;
2602 if (GET_CODE (insn) != JUMP_INSN)
2603 continue;
2605 mark_loop_jump (PATTERN (insn), this_loop);
2607 /* See if this is an unconditional branch outside the loop. */
2608 if (this_loop
2609 && (GET_CODE (PATTERN (insn)) == RETURN
2610 || (any_uncondjump_p (insn)
2611 && onlyjump_p (insn)
2612 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2613 != this_loop)))
2614 && get_max_uid () < max_uid_for_loop)
2616 rtx p;
2617 rtx our_next = next_real_insn (insn);
2618 rtx last_insn_to_move = NEXT_INSN (insn);
2619 struct loop *dest_loop;
2620 struct loop *outer_loop = NULL;
2622 /* Go backwards until we reach the start of the loop, a label,
2623 or a JUMP_INSN. */
2624 for (p = PREV_INSN (insn);
2625 GET_CODE (p) != CODE_LABEL
2626 && ! (GET_CODE (p) == NOTE
2627 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2628 && GET_CODE (p) != JUMP_INSN;
2629 p = PREV_INSN (p))
2632 /* Check for the case where we have a jump to an inner nested
2633 loop, and do not perform the optimization in that case. */
2635 if (JUMP_LABEL (insn))
2637 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2638 if (dest_loop)
2640 for (outer_loop = dest_loop; outer_loop;
2641 outer_loop = outer_loop->outer)
2642 if (outer_loop == this_loop)
2643 break;
2647 /* Make sure that the target of P is within the current loop. */
2649 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2650 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2651 outer_loop = this_loop;
2653 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2654 we have a block of code to try to move.
2656 We look backward and then forward from the target of INSN
2657 to find a BARRIER at the same loop depth as the target.
2658 If we find such a BARRIER, we make a new label for the start
2659 of the block, invert the jump in P and point it to that label,
2660 and move the block of code to the spot we found. */
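/* An illustrative before/after, with hypothetical labels. Before:

   p: if (cond) goto L1; <block>; insn: goto OUT; barrier; L1: ...

   After inverting P and moving the block, the loop body contains only

   p: if (!cond) goto NEW; L1: ...

   while past a BARRIER outside the loop we have

   NEW: <block>; goto OUT;

   so the rarely executed block no longer occupies the loop body. */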
2662 if (! outer_loop
2663 && GET_CODE (p) == JUMP_INSN
2664 && JUMP_LABEL (p) != 0
2665 /* Just ignore jumps to labels that were never emitted.
2666 These always indicate compilation errors. */
2667 && INSN_UID (JUMP_LABEL (p)) != 0
2668 && any_condjump_p (p) && onlyjump_p (p)
2669 && next_real_insn (JUMP_LABEL (p)) == our_next
2670 /* If it's not safe to move the sequence, then we
2671 mustn't try. */
2672 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2673 &last_insn_to_move))
2675 rtx target
2676 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2677 struct loop *target_loop = uid_loop[INSN_UID (target)];
2678 rtx loc, loc2;
2679 rtx tmp;
2681 /* Search for possible garbage past the conditional jumps
2682 and look for the last barrier. */
2683 for (tmp = last_insn_to_move;
2684 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2685 if (GET_CODE (tmp) == BARRIER)
2686 last_insn_to_move = tmp;
2688 for (loc = target; loc; loc = PREV_INSN (loc))
2689 if (GET_CODE (loc) == BARRIER
2690 /* Don't move things inside a tablejump. */
2691 && ((loc2 = next_nonnote_insn (loc)) == 0
2692 || GET_CODE (loc2) != CODE_LABEL
2693 || (loc2 = next_nonnote_insn (loc2)) == 0
2694 || GET_CODE (loc2) != JUMP_INSN
2695 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2696 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2697 && uid_loop[INSN_UID (loc)] == target_loop)
2698 break;
2700 if (loc == 0)
2701 for (loc = target; loc; loc = NEXT_INSN (loc))
2702 if (GET_CODE (loc) == BARRIER
2703 /* Don't move things inside a tablejump. */
2704 && ((loc2 = next_nonnote_insn (loc)) == 0
2705 || GET_CODE (loc2) != CODE_LABEL
2706 || (loc2 = next_nonnote_insn (loc2)) == 0
2707 || GET_CODE (loc2) != JUMP_INSN
2708 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2709 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2710 && uid_loop[INSN_UID (loc)] == target_loop)
2711 break;
2713 if (loc)
2715 rtx cond_label = JUMP_LABEL (p);
2716 rtx new_label = get_label_after (p);
2718 /* Ensure our label doesn't go away. */
2719 LABEL_NUSES (cond_label)++;
2721 /* Verify that uid_loop is large enough and that
2722 we can invert P. */
2723 if (invert_jump (p, new_label, 1))
2725 rtx q, r;
2727 /* If no suitable BARRIER was found, create a suitable
2728 one before TARGET. Since TARGET is a fall through
2729 path, we'll need to insert a jump around our block
2730 and add a BARRIER before TARGET.
2732 This creates an extra unconditional jump outside
2733 the loop. However, the benefits of removing rarely
2734 executed instructions from inside the loop usually
2735 outweigh the cost of the extra unconditional jump
2736 outside the loop. */
2737 if (loc == 0)
2739 rtx temp;
2741 temp = gen_jump (JUMP_LABEL (insn));
2742 temp = emit_jump_insn_before (temp, target);
2743 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2744 LABEL_NUSES (JUMP_LABEL (insn))++;
2745 loc = emit_barrier_before (target);
2748 /* Include the BARRIER after INSN and copy the
2749 block after LOC. */
2750 new_label = squeeze_notes (new_label,
2751 last_insn_to_move);
2752 reorder_insns (new_label, last_insn_to_move, loc);
2754 /* All those insns are now in TARGET_LOOP. */
2755 for (q = new_label;
2756 q != NEXT_INSN (last_insn_to_move);
2757 q = NEXT_INSN (q))
2758 uid_loop[INSN_UID (q)] = target_loop;
2760 /* The label jumped to by INSN is no longer a loop
2761 exit. If INSN has a label (i.e., it is not a
2762 RETURN insn), search loop->exit_labels
2763 to find its label_ref, and remove it. Also turn
2764 off the LABEL_OUTSIDE_LOOP_P bit. */
2765 if (JUMP_LABEL (insn))
2767 for (q = 0, r = this_loop->exit_labels;
2769 q = r, r = LABEL_NEXTREF (r))
2770 if (XEXP (r, 0) == JUMP_LABEL (insn))
2772 LABEL_OUTSIDE_LOOP_P (r) = 0;
2773 if (q)
2774 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2775 else
2776 this_loop->exit_labels = LABEL_NEXTREF (r);
2777 break;
2780 for (loop = this_loop; loop && loop != target_loop;
2781 loop = loop->outer)
2782 loop->exit_count--;
2784 /* If we didn't find it, then something is
2785 wrong. */
2786 if (! r)
2787 abort ();
2790 /* P is now a jump outside the loop, so it must be put
2791 in loop->exit_labels, and marked as such.
2792 The easiest way to do this is to just call
2793 mark_loop_jump again for P. */
2794 mark_loop_jump (PATTERN (p), this_loop);
2796 /* If INSN now jumps to the insn after it,
2797 delete INSN. */
2798 if (JUMP_LABEL (insn) != 0
2799 && (next_real_insn (JUMP_LABEL (insn))
2800 == next_real_insn (insn)))
2801 delete_insn (insn);
2804 /* Continue the loop after where the conditional
2805 branch used to jump, since the only branch insn
2806 in the block (if it still remains) is an inter-loop
2807 branch and hence needs no processing. */
2808 insn = NEXT_INSN (cond_label);
2810 if (--LABEL_NUSES (cond_label) == 0)
2811 delete_insn (cond_label);
2813 /* This loop will be continued with NEXT_INSN (insn). */
2814 insn = PREV_INSN (insn);
2821 /* If any label in X jumps to a loop different from LOOP and any of the
2822 loops it is contained in, mark the target loop invalid.
2824 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2826 static void
2827 mark_loop_jump (x, loop)
2828 rtx x;
2829 struct loop *loop;
2831 struct loop *dest_loop;
2832 struct loop *outer_loop;
2833 int i;
2835 switch (GET_CODE (x))
2837 case PC:
2838 case USE:
2839 case CLOBBER:
2840 case REG:
2841 case MEM:
2842 case CONST_INT:
2843 case CONST_DOUBLE:
2844 case RETURN:
2845 return;
2847 case CONST:
2848 /* There could be a label reference in here. */
2849 mark_loop_jump (XEXP (x, 0), loop);
2850 return;
2852 case PLUS:
2853 case MINUS:
2854 case MULT:
2855 mark_loop_jump (XEXP (x, 0), loop);
2856 mark_loop_jump (XEXP (x, 1), loop);
2857 return;
2859 case LO_SUM:
2860 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2861 mark_loop_jump (XEXP (x, 1), loop);
2862 return;
2864 case SIGN_EXTEND:
2865 case ZERO_EXTEND:
2866 mark_loop_jump (XEXP (x, 0), loop);
2867 return;
2869 case LABEL_REF:
2870 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
2872 /* Link together all labels that branch outside the loop. This
2873 is used by final_[bg]iv_value and the loop unrolling code. Also
2874 mark this LABEL_REF so we know that this branch should predict
2875 false. */
2877 /* A check to make sure the label is not in an inner nested loop,
2878 since this does not count as a loop exit. */
2879 if (dest_loop)
2881 for (outer_loop = dest_loop; outer_loop;
2882 outer_loop = outer_loop->outer)
2883 if (outer_loop == loop)
2884 break;
2886 else
2887 outer_loop = NULL;
2889 if (loop && ! outer_loop)
2891 LABEL_OUTSIDE_LOOP_P (x) = 1;
2892 LABEL_NEXTREF (x) = loop->exit_labels;
2893 loop->exit_labels = x;
2895 for (outer_loop = loop;
2896 outer_loop && outer_loop != dest_loop;
2897 outer_loop = outer_loop->outer)
2898 outer_loop->exit_count++;
2901 /* If this is inside a loop, but not in the current loop or one enclosed
2902 by it, it invalidates at least one loop. */
2904 if (! dest_loop)
2905 return;
2907 /* We must invalidate every nested loop containing the target of this
2908 label, except those that also contain the jump insn. */
2910 for (; dest_loop; dest_loop = dest_loop->outer)
2912 /* Stop when we reach a loop that also contains the jump insn. */
2913 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2914 if (dest_loop == outer_loop)
2915 return;
2917 /* If we get here, we know we need to invalidate a loop. */
2918 if (loop_dump_stream && ! dest_loop->invalid)
2919 fprintf (loop_dump_stream,
2920 "\nLoop at %d ignored due to multiple entry points.\n",
2921 INSN_UID (dest_loop->start));
2923 dest_loop->invalid = 1;
2925 return;
2927 case SET:
2928 /* If this is not setting pc, ignore. */
2929 if (SET_DEST (x) == pc_rtx)
2930 mark_loop_jump (SET_SRC (x), loop);
2931 return;
2933 case IF_THEN_ELSE:
2934 mark_loop_jump (XEXP (x, 1), loop);
2935 mark_loop_jump (XEXP (x, 2), loop);
2936 return;
2938 case PARALLEL:
2939 case ADDR_VEC:
2940 for (i = 0; i < XVECLEN (x, 0); i++)
2941 mark_loop_jump (XVECEXP (x, 0, i), loop);
2942 return;
2944 case ADDR_DIFF_VEC:
2945 for (i = 0; i < XVECLEN (x, 1); i++)
2946 mark_loop_jump (XVECEXP (x, 1, i), loop);
2947 return;
2949 default:
2950 /* Strictly speaking this is not a jump into the loop, only a possible
2951 jump out of the loop. However, we have no way to link the destination
2952 of this jump onto the list of exit labels. To be safe we mark this
2953 loop and any containing loops as invalid. */
2954 if (loop)
2956 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
2958 if (loop_dump_stream && ! outer_loop->invalid)
2959 fprintf (loop_dump_stream,
2960 "\nLoop at %d ignored due to unknown exit jump.\n",
2961 INSN_UID (outer_loop->start));
2962 outer_loop->invalid = 1;
2965 return;
2969 /* Return nonzero if there is a label in the range from
2970 insn INSN to and including the insn whose luid is END.
2971 INSN must have an assigned luid (i.e., it must not have
2972 been previously created by loop.c). */
2974 static int
2975 labels_in_range_p (insn, end)
2976 rtx insn;
2977 int end;
2979 while (insn && INSN_LUID (insn) <= end)
2981 if (GET_CODE (insn) == CODE_LABEL)
2982 return 1;
2983 insn = NEXT_INSN (insn);
2986 return 0;
2989 /* Record that a memory reference X is being set. */
2991 static void
2992 note_addr_stored (x, y, data)
2993 rtx x;
2994 rtx y ATTRIBUTE_UNUSED;
2995 void *data ATTRIBUTE_UNUSED;
2997 struct loop_info *loop_info = data;
2999 if (x == 0 || GET_CODE (x) != MEM)
3000 return;
3002 /* Count number of memory writes.
3003 This affects heuristics in strength_reduce. */
3004 loop_info->num_mem_sets++;
3006 /* BLKmode MEM means all memory is clobbered. */
3007 if (GET_MODE (x) == BLKmode)
3009 if (RTX_UNCHANGING_P (x))
3010 loop_info->unknown_constant_address_altered = 1;
3011 else
3012 loop_info->unknown_address_altered = 1;
3014 return;
3017 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3018 loop_info->store_mems);
3021 /* X is a value modified by an INSN that references a biv inside a loop
3022 exit test (i.e., X is somehow related to the value of the biv). If X
3023 is a pseudo that is used more than once, then the biv is (effectively)
3024 used more than once. DATA is a pointer to a loop_regs structure. */
3026 static void
3027 note_set_pseudo_multiple_uses (x, y, data)
3028 rtx x;
3029 rtx y ATTRIBUTE_UNUSED;
3030 void *data;
3032 struct loop_regs *regs = (struct loop_regs *) data;
3034 if (x == 0)
3035 return;
3037 while (GET_CODE (x) == STRICT_LOW_PART
3038 || GET_CODE (x) == SIGN_EXTRACT
3039 || GET_CODE (x) == ZERO_EXTRACT
3040 || GET_CODE (x) == SUBREG)
3041 x = XEXP (x, 0);
3043 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3044 return;
3046 /* If we do not have usage information, or if we know the register
3047 is used more than once, note that fact for check_dbra_loop. */
3048 if (REGNO (x) >= max_reg_before_loop
3049 || ! regs->array[REGNO (x)].single_usage
3050 || regs->array[REGNO (x)].single_usage == const0_rtx)
3051 regs->multiple_uses = 1;
3054 /* Return nonzero if the rtx X is invariant over the current loop.
3056 The value is 2 if we refer to something only conditionally invariant.
3058 A memory ref is invariant if it is not volatile and does not conflict
3059 with anything stored in `loop_info->store_mems'. */
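/* Callers that require unconditional invariance test the result
   with == 1 rather than just nonzero (see move_movables above).
   The value 2 originates from a reg whose set_in_loop count is
   negative, the provisional marker for a reg whose sets are only
   conditionally known to be invariant. */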
3062 loop_invariant_p (loop, x)
3063 const struct loop *loop;
3064 register rtx x;
3066 struct loop_info *loop_info = LOOP_INFO (loop);
3067 struct loop_regs *regs = LOOP_REGS (loop);
3068 register int i;
3069 register enum rtx_code code;
3070 register const char *fmt;
3071 int conditional = 0;
3072 rtx mem_list_entry;
3074 if (x == 0)
3075 return 1;
3076 code = GET_CODE (x);
3077 switch (code)
3079 case CONST_INT:
3080 case CONST_DOUBLE:
3081 case SYMBOL_REF:
3082 case CONST:
3083 return 1;
3085 case LABEL_REF:
3086 /* A LABEL_REF is normally invariant, however, if we are unrolling
3087 loops, and this label is inside the loop, then it isn't invariant.
3088 This is because each unrolled copy of the loop body will have
3089 a copy of this label. If this was invariant, then an insn loading
3090 the address of this label into a register might get moved outside
3091 the loop, and then each loop body would end up using the same label.
3093 We don't know the loop bounds here though, so just fail for all
3094 labels. */
3095 if (flag_unroll_loops)
3096 return 0;
3097 else
3098 return 1;
3100 case PC:
3101 case CC0:
3102 case UNSPEC_VOLATILE:
3103 return 0;
3105 case REG:
3106 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3107 since the reg might be set by initialization within the loop. */
3109 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3110 || x == arg_pointer_rtx)
3111 && ! current_function_has_nonlocal_goto)
3112 return 1;
3114 if (LOOP_INFO (loop)->has_call
3115 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3116 return 0;
3118 if (regs->array[REGNO (x)].set_in_loop < 0)
3119 return 2;
3121 return regs->array[REGNO (x)].set_in_loop == 0;
3123 case MEM:
3124 /* Volatile memory references must be rejected. Do this before
3125 checking for read-only items, so that volatile read-only items
3126 will be rejected also. */
3127 if (MEM_VOLATILE_P (x))
3128 return 0;
3130 /* See if there is any dependence between a store and this load. */
3131 mem_list_entry = loop_info->store_mems;
3132 while (mem_list_entry)
3134 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3135 x, rtx_varies_p))
3136 return 0;
3138 mem_list_entry = XEXP (mem_list_entry, 1);
3141 /* It's not invalidated by a store in memory
3142 but we must still verify the address is invariant. */
3143 break;
3145 case ASM_OPERANDS:
3146 /* Don't mess with insns declared volatile. */
3147 if (MEM_VOLATILE_P (x))
3148 return 0;
3149 break;
3151 default:
3152 break;
3155 fmt = GET_RTX_FORMAT (code);
3156 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3158 if (fmt[i] == 'e')
3160 int tem = loop_invariant_p (loop, XEXP (x, i));
3161 if (tem == 0)
3162 return 0;
3163 if (tem == 2)
3164 conditional = 1;
3166 else if (fmt[i] == 'E')
3168 register int j;
3169 for (j = 0; j < XVECLEN (x, i); j++)
3171 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3172 if (tem == 0)
3173 return 0;
3174 if (tem == 2)
3175 conditional = 1;
3181 return 1 + conditional;
3184 /* Return nonzero if all the insns in the loop that set REG
3185 are INSN and the immediately following insns,
3186 and if each of those insns sets REG in an invariant way
3187 (not counting uses of REG in them).
3189 The value is 2 if some of these insns are only conditionally invariant.
3191 We assume that INSN itself is the first set of REG
3192 and that its source is invariant. */
3194 static int
3195 consec_sets_invariant_p (loop, reg, n_sets, insn)
3196 const struct loop *loop;
3197 int n_sets;
3198 rtx reg, insn;
3200 struct loop_regs *regs = LOOP_REGS (loop);
3201 rtx p = insn;
3202 unsigned int regno = REGNO (reg);
3203 rtx temp;
3204 /* Number of sets we have to insist on finding after INSN. */
3205 int count = n_sets - 1;
3206 int old = regs->array[regno].set_in_loop;
3207 int value = 0;
3208 int this;
3210 /* If N_SETS hit the limit, we can't rely on its value. */
3211 if (n_sets == 127)
3212 return 0;
3214 regs->array[regno].set_in_loop = 0;
3216 while (count > 0)
3218 register enum rtx_code code;
3219 rtx set;
3221 p = NEXT_INSN (p);
3222 code = GET_CODE (p);
3224 /* If library call, skip to end of it. */
3225 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3226 p = XEXP (temp, 0);
3228 this = 0;
3229 if (code == INSN
3230 && (set = single_set (p))
3231 && GET_CODE (SET_DEST (set)) == REG
3232 && REGNO (SET_DEST (set)) == regno)
3234 this = loop_invariant_p (loop, SET_SRC (set));
3235 if (this != 0)
3236 value |= this;
3237 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3239 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3240 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3241 notes are OK. */
3242 this = (CONSTANT_P (XEXP (temp, 0))
3243 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3244 && loop_invariant_p (loop, XEXP (temp, 0))));
3245 if (this != 0)
3246 value |= this;
3249 if (this != 0)
3250 count--;
3251 else if (code != NOTE)
3253 regs->array[regno].set_in_loop = old;
3254 return 0;
3258 regs->array[regno].set_in_loop = old;
3259 /* If loop_invariant_p ever returned 2, we return 2. */
3260 return 1 + (value & 2);
3263 #if 0
3264 /* I don't think this condition is sufficient to allow INSN
3265 to be moved, so we no longer test it. */
3267 /* Return 1 if all insns in the basic block of INSN and following INSN
3268 that set REG are invariant according to TABLE. */
3270 static int
3271 all_sets_invariant_p (reg, insn, table)
3272 rtx reg, insn;
3273 short *table;
3275 register rtx p = insn;
3276 register int regno = REGNO (reg);
3278 while (1)
3280 register enum rtx_code code;
3281 p = NEXT_INSN (p);
3282 code = GET_CODE (p);
3283 if (code == CODE_LABEL || code == JUMP_INSN)
3284 return 1;
3285 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3286 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3287 && REGNO (SET_DEST (PATTERN (p))) == regno)
3289 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3290 return 0;
3294 #endif /* 0 */
3296 /* Look at all uses (not sets) of registers in X. For each, if it is
3297 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3298 a different insn, set USAGE[REGNO] to const0_rtx. */
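/* regs->array[R].single_usage thus moves through three states: 0 (no
   use of R seen yet), the unique insn using R, and const0_rtx (R is
   used by more than one insn); it never moves backwards. */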
3300 static void
3301 find_single_use_in_loop (regs, insn, x)
3302 struct loop_regs *regs;
3303 rtx insn;
3304 rtx x;
3306 enum rtx_code code = GET_CODE (x);
3307 const char *fmt = GET_RTX_FORMAT (code);
3308 int i, j;
3310 if (code == REG)
3311 regs->array[REGNO (x)].single_usage
3312 = (regs->array[REGNO (x)].single_usage != 0
3313 && regs->array[REGNO (x)].single_usage != insn)
3314 ? const0_rtx : insn;
3316 else if (code == SET)
3318 /* Don't count SET_DEST if it is a REG; otherwise count things
3319 in SET_DEST because if a register is partially modified, it won't
3320 show up as a potential movable so we don't care how USAGE is set
3321 for it. */
3322 if (GET_CODE (SET_DEST (x)) != REG)
3323 find_single_use_in_loop (regs, insn, SET_DEST (x));
3324 find_single_use_in_loop (regs, insn, SET_SRC (x));
3326 else
3327 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3329 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3330 find_single_use_in_loop (regs, insn, XEXP (x, i));
3331 else if (fmt[i] == 'E')
3332 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3333 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3337 /* Count and record any set in X which is contained in INSN. Update
3338 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3339 in X. */
3341 static void
3342 count_one_set (regs, insn, x, last_set)
3343 struct loop_regs *regs;
3344 rtx insn, x;
3345 rtx *last_set;
3347 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3348 /* Don't move a reg that has an explicit clobber.
3349 It's not worth the pain to try to do it correctly. */
3350 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3352 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3354 rtx dest = SET_DEST (x);
3355 while (GET_CODE (dest) == SUBREG
3356 || GET_CODE (dest) == ZERO_EXTRACT
3357 || GET_CODE (dest) == SIGN_EXTRACT
3358 || GET_CODE (dest) == STRICT_LOW_PART)
3359 dest = XEXP (dest, 0);
3360 if (GET_CODE (dest) == REG)
3362 register int regno = REGNO (dest);
3363 /* If this is the first setting of this reg
3364 in the current basic block, and it was set before,
3365 it must be set in two basic blocks, so it cannot
3366 be moved out of the loop. */
3367 if (regs->array[regno].set_in_loop > 0
3368 && last_set[regno] == 0)
3369 regs->array[regno].may_not_optimize = 1;
3370 /* If this is not the first setting in the current basic block,
3371 see if the reg was used between the previous setting and this one.
3372 If so, neither one can be moved. */
3373 if (last_set[regno] != 0
3374 && reg_used_between_p (dest, last_set[regno], insn))
3375 regs->array[regno].may_not_optimize = 1;
3376 if (regs->array[regno].set_in_loop < 127)
3377 ++regs->array[regno].set_in_loop;
3378 last_set[regno] = insn;
3383 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3384 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3385 contained in insn INSN is used by any insn that precedes INSN in
3386 cyclic order starting from the loop entry point.
3388 We don't want to use INSN_LUID here because if we restrict INSN to those
3389 that have a valid INSN_LUID, it means we cannot move an invariant out
3390 from an inner loop past two loops. */
3392 static int
3393 loop_reg_used_before_p (loop, set, insn)
3394 const struct loop *loop;
3395 rtx set, insn;
3397 rtx reg = SET_DEST (set);
3398 rtx p;
3400 /* Scan forward checking for register usage. If we hit INSN, we
3401 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3402 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3404 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3405 return 1;
3407 if (p == loop->end)
3408 p = loop->start;
3411 return 0;
3414 /* A "basic induction variable" or biv is a pseudo reg that is set
3415 (within this loop) only by incrementing or decrementing it. */
3416 /* A "general induction variable" or giv is a pseudo reg whose
3417 value is a linear function of a biv. */
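/* An illustrative source-level example: in

   for (i = 0; i < n; i++) a[i] = 0;

   the counter i is a biv, and the address computation
   a + i * sizeof (*a) is a giv, being a linear function of i. */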
3419 /* Bivs are recognized by `basic_induction_var';
3420 Givs by `general_induction_var'. */
3422 /* Communication with routines called via `note_stores'. */
3424 static rtx note_insn;
3426 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3428 static rtx addr_placeholder;
3430 /* ??? Unfinished optimizations, and possible future optimizations,
3431 for the strength reduction code. */
3433 /* ??? The interaction of biv elimination, and recognition of 'constant'
3434 bivs, may cause problems. */
3436 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3437 performance problems.
3439 Perhaps don't eliminate things that can be combined with an addressing
3440 mode. Find all givs that have the same biv, mult_val, and add_val;
3441 then for each giv, check to see if its only use dies in a following
3442 memory address. If so, generate a new memory address and check to see
3443 if it is valid. If it is valid, then store the modified memory address,
3444 otherwise, mark the giv as not done so that it will get its own iv. */
3446 /* ??? Could try to optimize branches when it is known that a biv is always
3447 positive. */
3449 /* ??? When replacing a biv in a compare insn, we should replace it with the closest
3450 giv so that an optimized branch can still be recognized by the combiner,
3451 e.g. the VAX acb insn. */
3453 /* ??? Many of the checks involving uid_luid could be simplified if regscan
3454 was rerun in loop_optimize whenever a register was added or moved.
3455 Also, some of the optimizations could be a little less conservative. */
3457 /* Scan the loop body and call FNCALL for each insn. In addition to the
3458 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
3459 callback.
3461 NOT_EVERY_ITERATION is 1 if the current insn is not executed at least once
3462 for every loop iteration except for the last one.
3464 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
3465 every loop iteration. */
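/* Note that FNCALL returns the insn from which scanning should
   resume, so a callback may step over a whole block of insns, such
   as a libcall sequence, in a single call. */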
3467 void
3468 for_each_insn_in_loop (loop, fncall)
3469 struct loop *loop;
3470 loop_insn_callback fncall;
3472 /* This is 1 if current insn is not executed at least once for every loop
3473 iteration. */
3474 int not_every_iteration = 0;
3475 int maybe_multiple = 0;
3476 int past_loop_latch = 0;
3477 int loop_depth = 0;
3478 rtx p;
3480 /* If loop_scan_start points to the loop exit test, we have to be wary of
3481 subversive use of gotos inside expression statements. */
3482 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
3483 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
3485 /* Scan through the insns in the loop, invoking the callback for each. */
3487 for (p = next_insn_in_loop (loop, loop->scan_start);
3488 p != NULL_RTX;
3489 p = next_insn_in_loop (loop, p))
3491 p = fncall (loop, p, not_every_iteration, maybe_multiple);
3493 /* Past CODE_LABEL, we get to insns that may be executed multiple
3494 times. The only way we can be sure that they can't is if every
3495 jump insn between here and the end of the loop either
3496 returns, exits the loop, is a jump to a location that is still
3497 behind the label, or is a jump to the loop start. */
3499 if (GET_CODE (p) == CODE_LABEL)
3501 rtx insn = p;
3503 maybe_multiple = 0;
3505 while (1)
3507 insn = NEXT_INSN (insn);
3508 if (insn == loop->scan_start)
3509 break;
3510 if (insn == loop->end)
3512 if (loop->top != 0)
3513 insn = loop->top;
3514 else
3515 break;
3516 if (insn == loop->scan_start)
3517 break;
3520 if (GET_CODE (insn) == JUMP_INSN
3521 && GET_CODE (PATTERN (insn)) != RETURN
3522 && (!any_condjump_p (insn)
3523 || (JUMP_LABEL (insn) != 0
3524 && JUMP_LABEL (insn) != loop->scan_start
3525 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
3527 maybe_multiple = 1;
3528 break;
3533 /* Past a jump, we get to insns for which we can't count
3534 on whether they will be executed during each iteration. */
3535 /* This code appears twice in strength_reduce. There is also similar
3536 code in scan_loop. */
3537 if (GET_CODE (p) == JUMP_INSN
3538 /* If we enter the loop in the middle, and scan around to the
3539 beginning, don't set not_every_iteration for that.
3540 This can be any kind of jump, since we want to know if insns
3541 will be executed if the loop is executed. */
3542 && !(JUMP_LABEL (p) == loop->top
3543 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
3544 && any_uncondjump_p (p))
3545 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
3547 rtx label = 0;
3549 /* If this is a jump outside the loop, then it also doesn't
3550 matter. Check to see if the target of this branch is on the
3551 loop->exit_labels list. */
3553 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
3554 if (XEXP (label, 0) == JUMP_LABEL (p))
3555 break;
3557 if (!label)
3558 not_every_iteration = 1;
3561 else if (GET_CODE (p) == NOTE)
3563 /* At the virtual top of a converted loop, insns are again known to
3564 be executed each iteration: logically, the loop begins here
3565 even though the exit code has been duplicated.
3567 Insns are also again known to be executed each iteration at
3568 the LOOP_CONT note. */
3569 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
3570 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
3571 && loop_depth == 0)
3572 not_every_iteration = 0;
3573 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3574 loop_depth++;
3575 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3576 loop_depth--;
3579 /* Note if we pass a loop latch. If we do, then we cannot clear
3580 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
3581 a loop since a jump before the last CODE_LABEL may have started
3582 a new loop iteration.
3584 Note that LOOP_TOP is only set for rotated loops and we need
3585 this check for all loops, so compare against the CODE_LABEL
3586 which immediately follows LOOP_START. */
3587 if (GET_CODE (p) == JUMP_INSN
3588 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
3589 past_loop_latch = 1;
3591 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3592 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3593 or not an insn is known to be executed each iteration of the
3594 loop, whether or not any iterations are known to occur.
3596 Therefore, if we have just passed a label and have no more labels
3597 between here and the test insn of the loop, and we have not passed
3598 a jump to the top of the loop, then we know these insns will be
3599 executed each iteration. */
3601 if (not_every_iteration
3602 && !past_loop_latch
3603 && GET_CODE (p) == CODE_LABEL
3604 && no_labels_between_p (p, loop->end)
3605 && loop_insn_first_p (p, loop->cont))
3606 not_every_iteration = 0;
3610 static void
3611 loop_bivs_find (loop)
3612 struct loop *loop;
3614 struct loop_regs *regs = LOOP_REGS (loop);
3615 struct loop_ivs *ivs = LOOP_IVS (loop);
3616 /* Temporary list pointers for traversing ivs->list. */
3617 struct iv_class *bl, **backbl;
3619 ivs->list = 0;
3621 for_each_insn_in_loop (loop, check_insn_for_bivs);
3623 /* Scan ivs->list to remove all regs that proved not to be bivs.
3624 Make a sanity check against regs->n_times_set. */
3625 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
3627 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3628 /* Above happens if register modified by subreg, etc. */
3629 /* Make sure it is not recognized as a basic induction var: */
3630 || regs->array[bl->regno].n_times_set != bl->biv_count
3631 /* If never incremented, it is an invariant that we decided not to
3632 move, so leave it alone. */
3633 || ! bl->incremented)
3635 if (loop_dump_stream)
3636 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
3637 bl->regno,
3638 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
3639 ? "not induction variable"
3640 : (! bl->incremented ? "never incremented"
3641 : "count error")));
3643 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
3644 *backbl = bl->next;
3646 else
3648 backbl = &bl->next;
3650 if (loop_dump_stream)
3651 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
3657 /* Determine how BIVS are initialised by looking through the pre-header
3658 extended basic block. */
3659 static void
3660 loop_bivs_init_find (loop)
3661 struct loop *loop;
3663 struct loop_ivs *ivs = LOOP_IVS (loop);
3664 /* Temporary list pointers for traversing ivs->list. */
3665 struct iv_class *bl;
3666 int call_seen;
3667 rtx p;
3669 /* Find initial value for each biv by searching backwards from loop_start,
3670 halting at first label. Also record any test condition. */
3672 call_seen = 0;
3673 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3675 rtx test;
3677 note_insn = p;
3679 if (GET_CODE (p) == CALL_INSN)
3680 call_seen = 1;
3682 if (INSN_P (p))
3683 note_stores (PATTERN (p), record_initial, ivs);
3685 /* Record any test of a biv that branches around the loop if no store
3686 between it and the start of loop. We only care about tests with
3687 constants and registers and only certain of those. */
3688 if (GET_CODE (p) == JUMP_INSN
3689 && JUMP_LABEL (p) != 0
3690 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
3691 && (test = get_condition_for_loop (loop, p)) != 0
3692 && GET_CODE (XEXP (test, 0)) == REG
3693 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3694 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
3695 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
3696 && bl->init_insn == 0)
3698 /* If an NE test, we have an initial value! */
3699 if (GET_CODE (test) == NE)
3701 bl->init_insn = p;
3702 bl->init_set = gen_rtx_SET (VOIDmode,
3703 XEXP (test, 0), XEXP (test, 1));
3705 else
3706 bl->initial_test = test;
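/* Illustrative example (schematic; assuming the recorded test is the
   condition under which the jump around the loop is taken): with a
   pre-header ending in

     if (i != 0) goto past_loop;     <- branches around the loop

   the loop is entered only when `i == 0', so if `i' is a biv with no
   other init insn, the NE test gives it the known initial value 0
   (recorded in init_set).  An EQ guard such as
   `if (i == 0) goto past_loop' only yields an initial_test.  */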
3712 /* Look at each biv and see if we can say anything better about its
3713 initial value from any initializing insns set up above. (This is done
3714 in two passes to avoid missing SETs in a PARALLEL.) */
3715 static void
3716 loop_bivs_check (loop)
3717 struct loop *loop;
3719 struct loop_ivs *ivs = LOOP_IVS (loop);
3720 /* Temporary list pointers for traversing ivs->list. */
3721 struct iv_class *bl;
3722 struct iv_class **backbl;
3724 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
3726 rtx src;
3727 rtx note;
3729 if (! bl->init_insn)
3730 continue;
3732 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3733 is a constant, use the value of that. */
3734 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3735 && CONSTANT_P (XEXP (note, 0)))
3736 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3737 && CONSTANT_P (XEXP (note, 0))))
3738 src = XEXP (note, 0);
3739 else
3740 src = SET_SRC (bl->init_set);
3742 if (loop_dump_stream)
3743 fprintf (loop_dump_stream,
3744 "Biv %d: initialized at insn %d: initial value ",
3745 bl->regno, INSN_UID (bl->init_insn));
3747 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3748 || GET_MODE (src) == VOIDmode)
3749 && valid_initial_value_p (src, bl->init_insn,
3750 LOOP_INFO (loop)->pre_header_has_call,
3751 loop->start))
3753 bl->initial_value = src;
3755 if (loop_dump_stream)
3757 print_simple_rtl (loop_dump_stream, src);
3758 fputc ('\n', loop_dump_stream);
3761 /* If we can't make it a giv,
3762 let the biv keep the initial value of "itself". */
3763 else if (loop_dump_stream)
3764 fprintf (loop_dump_stream, "is complex\n");
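/* Illustrative example (made-up register numbers): if the biv's init
   insn is

     (set (reg 100) (reg 99))    <- with note (REG_EQUAL (const_int 16))

   the constant from the note is preferred over SET_SRC above, so the
   biv gets the known initial value 16 rather than the opaque register
   copy.  */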
3769 /* Search the loop for general induction variables. */
3771 static void
3772 loop_givs_find (loop)
3773 struct loop* loop;
3775 for_each_insn_in_loop (loop, check_insn_for_givs);
3779 /* For each giv for which we still don't know whether or not it is
3780 replaceable, check to see if it is replaceable because its final value
3781 can be calculated. */
3783 static void
3784 loop_givs_check (loop)
3785 struct loop *loop;
3787 struct loop_ivs *ivs = LOOP_IVS (loop);
3788 struct iv_class *bl;
3790 for (bl = ivs->list; bl; bl = bl->next)
3792 struct induction *v;
3794 for (v = bl->giv; v; v = v->next_iv)
3795 if (! v->replaceable && ! v->not_replaceable)
3796 check_final_value (loop, v);
3801 /* Return non-zero if it is possible to eliminate the biv BL provided
3802 all givs are reduced. This is possible if either the reg is not
3803 used outside the loop, or we can compute what its final value will
3804 be. */
3806 static int
3807 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
3808 struct loop *loop;
3809 struct iv_class *bl;
3810 int threshold;
3811 int insn_count;
3813 /* For architectures with a decrement_and_branch_until_zero insn,
3814 don't do this if we put a REG_NONNEG note on the endtest for this
3815 biv. */
3817 #ifdef HAVE_decrement_and_branch_until_zero
3818 if (bl->nonneg)
3820 if (loop_dump_stream)
3821 fprintf (loop_dump_stream,
3822 "Cannot eliminate nonneg biv %d.\n", bl->regno);
3823 return 0;
3825 #endif
3827 /* Check whether the biv is used outside the loop, or has a final value.
3828 Compare against bl->init_insn rather than loop->start. We aren't
3829 concerned with any uses of the biv between init_insn and
3830 loop->start since these won't be affected by the value of the biv
3831 elsewhere in the function, so long as init_insn doesn't use the
3832 biv itself. */
3834 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
3835 && bl->init_insn
3836 && INSN_UID (bl->init_insn) < max_uid_for_loop
3837 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
3838 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
3839 || (bl->final_value = final_biv_value (loop, bl)))
3840 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
3842 if (loop_dump_stream)
3844 fprintf (loop_dump_stream,
3845 "Cannot eliminate biv %d.\n",
3846 bl->regno);
3847 fprintf (loop_dump_stream,
3848 "First use: insn %d, last use: insn %d.\n",
3849 REGNO_FIRST_UID (bl->regno),
3850 REGNO_LAST_UID (bl->regno));
3852 return 0;
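/* Illustrative example (schematic, assuming 4-byte ints): in

     for (i = 0; i < 100; i++)
       a[i] = 0;

   once the address giv `a + i*4' has been reduced to its own pseudo
   `p', the endtest is the only remaining use of `i'.  Since `i' is
   dead after the loop (and its final value, 100, is computable
   anyway), maybe_eliminate_biv can rewrite the endtest as
   `p < a + 400' and the biv disappears entirely.  */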
3856 /* Reduce each giv of BL that we have decided to reduce. */
3858 static void
3859 loop_givs_reduce (loop, bl)
3860 struct loop *loop;
3861 struct iv_class *bl;
3863 struct induction *v;
3865 for (v = bl->giv; v; v = v->next_iv)
3867 struct induction *tv;
3868 if (! v->ignore && v->same == 0)
3870 int auto_inc_opt = 0;
3872 /* If the code for derived givs immediately below has already
3873 allocated a new_reg, we must keep it. */
3874 if (! v->new_reg)
3875 v->new_reg = gen_reg_rtx (v->mode);
3877 #ifdef AUTO_INC_DEC
3878 /* If the target has auto-increment addressing modes, and
3879 this is an address giv, then try to put the increment
3880 immediately after its use, so that flow can create an
3881 auto-increment addressing mode. */
3882 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
3883 && bl->biv->always_executed && ! bl->biv->maybe_multiple
3884 /* We don't handle reversed biv's because bl->biv->insn
3885 does not have a valid INSN_LUID. */
3886 && ! bl->reversed
3887 && v->always_executed && ! v->maybe_multiple
3888 && INSN_UID (v->insn) < max_uid_for_loop)
3890 /* If other giv's have been combined with this one, then
3891 this will work only if all uses of the other giv's occur
3892 before this giv's insn. This is difficult to check.
3894 We simplify this by looking for the common case where
3895 there is one DEST_REG giv, and this giv's insn is the
3896 last use of the dest_reg of that DEST_REG giv. If the
3897 increment occurs after the address giv, then we can
3898 perform the optimization. (Otherwise, the increment
3899 would have to go before other_giv, and we would not be
3900 able to combine it with the address giv to get an
3901 auto-inc address.) */
3902 if (v->combined_with)
3904 struct induction *other_giv = 0;
3906 for (tv = bl->giv; tv; tv = tv->next_iv)
3907 if (tv->same == v)
3909 if (other_giv)
3910 break;
3911 else
3912 other_giv = tv;
3914 if (! tv && other_giv
3915 && REGNO (other_giv->dest_reg) < max_reg_before_loop
3916 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
3917 == INSN_UID (v->insn))
3918 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
3919 auto_inc_opt = 1;
3921 /* Check for case where increment is before the address
3922 giv. Do this test in "loop order". */
3923 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
3924 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3925 || (INSN_LUID (bl->biv->insn)
3926 > INSN_LUID (loop->scan_start))))
3927 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
3928 && (INSN_LUID (loop->scan_start)
3929 < INSN_LUID (bl->biv->insn))))
3930 auto_inc_opt = -1;
3931 else
3932 auto_inc_opt = 1;
3934 #ifdef HAVE_cc0
3936 rtx prev;
3938 /* We can't put an insn immediately after one setting
3939 cc0, or immediately before one using cc0. */
3940 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
3941 || (auto_inc_opt == -1
3942 && (prev = prev_nonnote_insn (v->insn)) != 0
3943 && INSN_P (prev)
3944 && sets_cc0_p (PATTERN (prev))))
3945 auto_inc_opt = 0;
3947 #endif
3949 if (auto_inc_opt)
3950 v->auto_inc_opt = 1;
3952 #endif
3954 /* For each place where the biv is incremented, add an insn
3955 to increment the new, reduced reg for the giv. */
3956 for (tv = bl->biv; tv; tv = tv->next_iv)
3958 rtx insert_before;
3960 if (! auto_inc_opt)
3961 insert_before = tv->insn;
3962 else if (auto_inc_opt == 1)
3963 insert_before = NEXT_INSN (v->insn);
3964 else
3965 insert_before = v->insn;
3967 if (tv->mult_val == const1_rtx)
3968 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3969 v->new_reg, v->new_reg,
3970 0, insert_before);
3971 else /* tv->mult_val == const0_rtx */
3972 /* A multiply is acceptable here
3973 since this is presumed to be seldom executed. */
3974 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
3975 v->add_val, v->new_reg,
3976 0, insert_before);
3979 /* Add code at loop start to initialize giv's reduced reg. */
3981 loop_iv_add_mult_hoist (loop,
3982 extend_value_for_giv (v, bl->initial_value),
3983 v->mult_val, v->add_val, v->new_reg);
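/* Schematic view of the transformation above (assuming 4-byte ints;
   `G' names the reduced pseudo): for a DEST_ADDR giv `a + i*4' with
   the single biv increment `i = i + 1',

     before:                         after:
       for (i = i0; ...; i++)          G = a + i0*4;
         ... MEM (a + i*4) ...         for (i = i0; ...; i++)
                                         { ... MEM (G) ...; G = G + 4; }

   With auto_inc_opt the add `G = G + 4' is instead emitted right
   after the MEM use, so flow can later turn the pair into a
   post-increment address on targets that have one.  */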
3989 /* Check for givs whose first use is their definition and whose
3990 last use is the definition of another giv. If so, it is likely
3991 dead and should not be used to derive another giv nor to
3992 eliminate a biv. */
3994 static void
3995 loop_givs_dead_check (loop, bl)
3996 struct loop *loop ATTRIBUTE_UNUSED;
3997 struct iv_class *bl;
3999 struct induction *v;
4001 for (v = bl->giv; v; v = v->next_iv)
4003 if (v->ignore
4004 || (v->same && v->same->ignore))
4005 continue;
4007 if (v->giv_type == DEST_REG
4008 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4010 struct induction *v1;
4012 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4013 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4014 v->maybe_dead = 1;
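/* Illustrative example: with biv `i',

     j = i * 4;      <- first use of `j' is this definition
     k = j + 8;      <- last use of `j' defines another giv

   once `k' is reduced `j' is probably dead, so it is marked
   maybe_dead above and will not be used to derive other givs or to
   eliminate the biv.  */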
4020 static void
4021 loop_givs_rescan (loop, bl, reg_map)
4022 struct loop *loop;
4023 struct iv_class *bl;
4024 rtx *reg_map;
4026 struct induction *v;
4028 for (v = bl->giv; v; v = v->next_iv)
4030 if (v->same && v->same->ignore)
4031 v->ignore = 1;
4033 if (v->ignore)
4034 continue;
4036 /* Update expression if this was combined, in case other giv was
4037 replaced. */
4038 if (v->same)
4039 v->new_reg = replace_rtx (v->new_reg,
4040 v->same->dest_reg, v->same->new_reg);
4042 /* See if this register is known to be a pointer to something. If
4043 so, see if we can find the alignment. First see if there is a
4044 destination register that is a pointer. If so, this shares the
4045 alignment too. Next see if we can deduce anything from the
4046 computational information. If not, and this is a DEST_ADDR
4047 giv, at least we know that it's a pointer, though we don't know
4048 the alignment. */
4049 if (GET_CODE (v->new_reg) == REG
4050 && v->giv_type == DEST_REG
4051 && REG_POINTER (v->dest_reg))
4052 mark_reg_pointer (v->new_reg,
4053 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4054 else if (GET_CODE (v->new_reg) == REG
4055 && REG_POINTER (v->src_reg))
4057 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4059 if (align == 0
4060 || GET_CODE (v->add_val) != CONST_INT
4061 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4062 align = 0;
4064 mark_reg_pointer (v->new_reg, align);
4066 else if (GET_CODE (v->new_reg) == REG
4067 && GET_CODE (v->add_val) == REG
4068 && REG_POINTER (v->add_val))
4070 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4072 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4073 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4074 align = 0;
4076 mark_reg_pointer (v->new_reg, align);
4078 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4079 mark_reg_pointer (v->new_reg, 0);
4081 if (v->giv_type == DEST_ADDR)
4082 /* Store reduced reg as the address in the memref where we found
4083 this giv. */
4084 validate_change (v->insn, v->location, v->new_reg, 0);
4085 else if (v->replaceable)
4087 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4089 else
4091 /* Not replaceable; emit an insn to set the original giv reg from
4092 the reduced giv, same as above. */
4093 loop_insn_emit_after (loop, 0, v->insn,
4094 gen_move_insn (v->dest_reg, v->new_reg));
4097 /* When a loop is reversed, givs which depend on the reversed
4098 biv, and which are live outside the loop, must be set to their
4099 correct final value. This insn is only needed if the giv is
4100 not replaceable. The correct final value is the same as the
4101 value that the giv starts the reversed loop with. */
4102 if (bl->reversed && ! v->replaceable)
4103 loop_iv_add_mult_sink (loop,
4104 extend_value_for_giv (v, bl->initial_value),
4105 v->mult_val, v->add_val, v->dest_reg);
4106 else if (v->final_value)
4107 loop_insn_sink_or_swim (loop,
4108 gen_move_insn (v->dest_reg, v->final_value));
4110 if (loop_dump_stream)
4112 fprintf (loop_dump_stream, "giv at %d reduced to ",
4113 INSN_UID (v->insn));
4114 print_simple_rtl (loop_dump_stream, v->new_reg);
4115 fprintf (loop_dump_stream, "\n");
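/* Worked example of the alignment deduction above (assuming
   BITS_PER_UNIT == 8): if v->src_reg is a pointer with a
   REGNO_POINTER_ALIGN of 32 bits and v->add_val is (const_int 8),
   then 8 % (32/8) == 0 and the reduced register inherits the 32-bit
   alignment; with (const_int 2) instead, 2 % 4 != 0 and the recorded
   alignment is 0 (unknown).  */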
4121 static int
4122 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4123 struct loop *loop ATTRIBUTE_UNUSED;
4124 struct iv_class *bl;
4125 struct induction *v;
4126 rtx test_reg;
4128 int add_cost;
4129 int benefit;
4131 benefit = v->benefit;
4132 PUT_MODE (test_reg, v->mode);
4133 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4134 test_reg, test_reg);
4136 /* Reduce benefit if not replaceable, since we will insert a
4137 move-insn to replace the insn that calculates this giv. Don't do
4138 this unless the giv is a user variable, since it will often be
4139 marked non-replaceable because of the duplication of the exit
4140 code outside the loop. In such a case, the copies we insert are
4141 dead and will be deleted. So they don't have a cost. Similar
4142 situations exist. */
4143 /* ??? The new final_[bg]iv_value code does a much better job of
4144 finding replaceable giv's, and hence this code may no longer be
4145 necessary. */
4146 if (! v->replaceable && ! bl->eliminable
4147 && REG_USERVAR_P (v->dest_reg))
4148 benefit -= copy_cost;
4150 /* Decrease the benefit to count the add-insns that we will insert
4151 to increment the reduced reg for the giv. ??? This can
4152 overestimate the run-time cost of the additional insns, e.g. if
4153 there are multiple basic blocks that increment the biv, but only
4154 one of these blocks is executed during each iteration. There is
4155 no good way to detect cases like this with the current structure
4156 of the loop optimizer. This code is more accurate for
4157 determining code size than run-time benefits. */
4158 benefit -= add_cost * bl->biv_count;
4160 /* Decide whether to strength-reduce this giv or to leave the code
4161 unchanged (recompute it from the biv each time it is used). This
4162 decision can be made independently for each giv. */
4164 #ifdef AUTO_INC_DEC
4165 /* Attempt to guess whether autoincrement will handle some of the
4166 new add insns; if so, increase BENEFIT (undo the subtraction of
4167 add_cost that was done above). */
4168 if (v->giv_type == DEST_ADDR
4169 /* Increasing the benefit is risky, since this is only a guess.
4170 Avoid increasing register pressure in cases where there would
4171 be no other benefit from reducing this giv. */
4172 && benefit > 0
4173 && GET_CODE (v->mult_val) == CONST_INT)
4175 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4177 if (HAVE_POST_INCREMENT
4178 && INTVAL (v->mult_val) == size)
4179 benefit += add_cost * bl->biv_count;
4180 else if (HAVE_PRE_INCREMENT
4181 && INTVAL (v->mult_val) == size)
4182 benefit += add_cost * bl->biv_count;
4183 else if (HAVE_POST_DECREMENT
4184 && -INTVAL (v->mult_val) == size)
4185 benefit += add_cost * bl->biv_count;
4186 else if (HAVE_PRE_DECREMENT
4187 && -INTVAL (v->mult_val) == size)
4188 benefit += add_cost * bl->biv_count;
4190 #endif
4192 return benefit;
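/* Worked example of the accounting above (made-up costs): with
   v->benefit == 8, copy_cost == 4, add_cost == 1 and
   bl->biv_count == 2, a non-replaceable user-variable giv is left
   with 8 - 4 - 1*2 == 2.  If it is also a DEST_ADDR giv whose
   mult_val equals the access size on a HAVE_POST_INCREMENT target,
   the 1*2 is added back, giving 4.  */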
4196 /* Free IV structures for LOOP. */
4198 static void
4199 loop_ivs_free (loop)
4200 struct loop *loop;
4202 struct loop_ivs *ivs = LOOP_IVS (loop);
4203 struct iv_class *iv = ivs->list;
4205 free (ivs->regs);
4207 while (iv)
4209 struct iv_class *next = iv->next;
4210 struct induction *induction;
4211 struct induction *next_induction;
4213 for (induction = iv->biv; induction; induction = next_induction)
4215 next_induction = induction->next_iv;
4216 free (induction);
4218 for (induction = iv->giv; induction; induction = next_induction)
4220 next_induction = induction->next_iv;
4221 free (induction);
4224 free (iv);
4225 iv = next;
4230 /* Perform strength reduction and induction variable elimination.
4232 Pseudo registers created during this function will be beyond the
4233 last valid index in several tables including
4234 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4235 problem here, because the added registers cannot be givs outside of
4236 their loop, and hence will never be reconsidered. But scan_loop
4237 must check regnos to make sure they are in bounds. */
4239 static void
4240 strength_reduce (loop, flags)
4241 struct loop *loop;
4242 int flags;
4244 struct loop_info *loop_info = LOOP_INFO (loop);
4245 struct loop_regs *regs = LOOP_REGS (loop);
4246 struct loop_ivs *ivs = LOOP_IVS (loop);
4247 rtx p;
4248 /* Temporary list pointer for traversing ivs->list. */
4249 struct iv_class *bl;
4250 /* Ratio of extra register life span we can justify
4251 for saving an instruction. More if loop doesn't call subroutines
4252 since in that case saving an insn makes more difference
4253 and more registers are available. */
4254 /* ??? could set this to last value of threshold in move_movables */
4255 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
4256 /* Map of pseudo-register replacements. */
4257 rtx *reg_map = NULL;
4258 int reg_map_size;
4259 int unrolled_insn_copies = 0;
4260 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
4261 int insn_count = count_insns_in_loop (loop);
4263 addr_placeholder = gen_reg_rtx (Pmode);
4265 ivs->n_regs = max_reg_before_loop;
4266 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
4268 /* Find all BIVs in loop. */
4269 loop_bivs_find (loop);
4271 /* Exit if there are no bivs. */
4272 if (! ivs->list)
4274 /* Can still unroll the loop anyway, but indicate that there is no
4275 strength reduction info available. */
4276 if (flags & LOOP_UNROLL)
4277 unroll_loop (loop, insn_count, 0);
4279 loop_ivs_free (loop);
4280 return;
4283 /* Determine how BIVS are initialised by looking through the pre-header
4284 extended basic block. */
4285 loop_bivs_init_find (loop);
4287 /* Look at each biv and see if we can say anything better about its
4288 initial value from any initializing insns set up above. */
4289 loop_bivs_check (loop);
4291 /* Search the loop for general induction variables. */
4292 loop_givs_find (loop);
4294 /* Try to calculate and save the number of loop iterations. This is
4295 set to zero if the actual number cannot be calculated. This must
4296 be called after all giv's have been identified, since otherwise it may
4297 fail if the iteration variable is a giv. */
4298 loop_iterations (loop);
4300 /* Now for each giv for which we still don't know whether or not it is
4301 replaceable, check to see if it is replaceable because its final value
4302 can be calculated. This must be done after loop_iterations is called,
4303 so that final_giv_value will work correctly. */
4304 loop_givs_check (loop);
4306 /* Try to prove that the loop counter variable (if any) is always
4307 nonnegative; if so, record that fact with a REG_NONNEG note
4308 so that "decrement and branch until zero" insn can be used. */
4309 check_dbra_loop (loop, insn_count);
4311 /* Create reg_map to hold substitutions for replaceable giv regs.
4312 Some givs might have been made from biv increments, so look at
4313 ivs->reg_iv_type for a suitable size. */
4314 reg_map_size = ivs->n_regs;
4315 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
4317 /* Examine each iv class for feasibility of strength reduction/induction
4318 variable elimination. */
4320 for (bl = ivs->list; bl; bl = bl->next)
4322 struct induction *v;
4323 int benefit;
4325 /* Test whether it will be possible to eliminate this biv
4326 provided all givs are reduced. */
4327 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
4329 /* This will be true at the end, if all givs which depend on this
4330 biv have been strength reduced.
4331 We can't (currently) eliminate the biv unless this is so. */
4332 bl->all_reduced = 1;
4334 /* Check each extension dependent giv in this class to see if its
4335 root biv is safe from wrapping in the interior mode. */
4336 check_ext_dependant_givs (bl, loop_info);
4338 /* Combine all giv's for this iv_class. */
4339 combine_givs (regs, bl);
4341 for (v = bl->giv; v; v = v->next_iv)
4343 struct induction *tv;
4345 if (v->ignore || v->same)
4346 continue;
4348 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
4350 /* If an insn is not to be strength reduced, then set its ignore
4351 flag, and clear bl->all_reduced. */
4353 /* A giv that depends on a reversed biv must be reduced if it is
4354 used after the loop exit, otherwise, it would have the wrong
4355 value after the loop exit. To make it simple, just reduce all
4356 of such giv's whether or not we know they are used after the loop
4357 exit. */
4359 if (! flag_reduce_all_givs
4360 && v->lifetime * threshold * benefit < insn_count
4361 && ! bl->reversed)
4363 if (loop_dump_stream)
4364 fprintf (loop_dump_stream,
4365 "giv of insn %d not worth while, %d vs %d.\n",
4366 INSN_UID (v->insn),
4367 v->lifetime * threshold * benefit, insn_count);
4368 v->ignore = 1;
4369 bl->all_reduced = 0;
4371 else
4373 /* Check that we can increment the reduced giv without a
4374 multiply insn. If not, reject it. */
4376 for (tv = bl->biv; tv; tv = tv->next_iv)
4377 if (tv->mult_val == const1_rtx
4378 && ! product_cheap_p (tv->add_val, v->mult_val))
4380 if (loop_dump_stream)
4381 fprintf (loop_dump_stream,
4382 "giv of insn %d: would need a multiply.\n",
4383 INSN_UID (v->insn));
4384 v->ignore = 1;
4385 bl->all_reduced = 0;
4386 break;
4391 /* Check for givs whose first use is their definition and whose
4392 last use is the definition of another giv. If so, it is likely
4393 dead and should not be used to derive another giv nor to
4394 eliminate a biv. */
4395 loop_givs_dead_check (loop, bl);
4397 /* Reduce each giv that we decided to reduce. */
4398 loop_givs_reduce (loop, bl);
4400 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4401 as not reduced.
4403 For each giv register that can be reduced now: if replaceable,
4404 substitute reduced reg wherever the old giv occurs;
4405 else add new move insn "giv_reg = reduced_reg". */
4406 loop_givs_rescan (loop, bl, reg_map);
4408 /* All the givs based on the biv bl have been reduced if they
4409 merit it. */
4411 /* For each giv not marked as maybe dead that has been combined with a
4412 second giv, clear any "maybe dead" mark on that second giv.
4413 v->new_reg will either be or refer to the register of the giv it
4414 combined with.
4416 Doing this clearing avoids problems in biv elimination where
4417 a giv's new_reg is a complex value that can't be put in the
4418 insn but the giv combined with (with a reg as new_reg) is
4419 marked maybe_dead. Since the register will be used in either
4420 case, we'd prefer it be used from the simpler giv. */
4422 for (v = bl->giv; v; v = v->next_iv)
4423 if (! v->maybe_dead && v->same)
4424 v->same->maybe_dead = 0;
4426 /* Try to eliminate the biv, if it is a candidate.
4427 This won't work if ! bl->all_reduced,
4428 since the givs we planned to use might not have been reduced.
4430 We have to be careful that we didn't initially think we could
4431 eliminate this biv because of a giv that we now think may be
4432 dead and shouldn't be used as a biv replacement.
4434 Also, there is the possibility that we may have a giv that looks
4435 like it can be used to eliminate a biv, but the resulting insn
4436 isn't valid. This can happen, for example, on the 88k, where a
4437 JUMP_INSN can compare a register only with zero. Attempts to
4438 replace it with a compare with a constant will fail.
4440 Note that in cases where this call fails, we may have replaced some
4441 of the occurrences of the biv with a giv, but no harm was done in
4442 doing so in the rare cases where it can occur. */
4444 if (bl->all_reduced == 1 && bl->eliminable
4445 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
4447 /* ??? If we created a new test to bypass the loop entirely,
4448 or otherwise drop straight in, based on this test, then
4449 we might want to rewrite it also. This way some later
4450 pass has more hope of removing the initialization of this
4451 biv entirely. */
4453 /* If final_value != 0, then the biv may be used after loop end
4454 and we must emit an insn to set it just in case.
4456 Reversed bivs already have an insn after the loop setting their
4457 value, so we don't need another one. We can't calculate the
4458 proper final value for such a biv here anyway. */
4459 if (bl->final_value && ! bl->reversed)
4460 loop_insn_sink_or_swim (loop, gen_move_insn
4461 (bl->biv->dest_reg, bl->final_value));
4463 if (loop_dump_stream)
4464 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4465 bl->regno);
4469 /* Go through all the instructions in the loop, making all the
4470 register substitutions scheduled in REG_MAP. */
4472 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
4473 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4474 || GET_CODE (p) == CALL_INSN)
4476 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
4477 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
4478 INSN_CODE (p) = -1;
4481 if (loop_info->n_iterations > 0)
4483 /* When we completely unroll a loop we will likely not need the increment
4484 of the loop BIV and we will not need the conditional branch at the
4485 end of the loop. */
4486 unrolled_insn_copies = insn_count - 2;
4488 #ifdef HAVE_cc0
4489 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
4490 need the comparison before the conditional branch at the end of the
4491 loop. */
4492 unrolled_insn_copies -= 1;
4493 #endif
4495 /* We'll need one copy for each loop iteration. */
4496 unrolled_insn_copies *= loop_info->n_iterations;
4498 /* A little slop to account for the ability to remove initialization
4499 code, better CSE, and other secondary benefits of completely
4500 unrolling some loops. */
4501 unrolled_insn_copies -= 1;
4503 /* Clamp the value. */
4504 if (unrolled_insn_copies < 0)
4505 unrolled_insn_copies = 0;
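/* Worked example (made-up numbers): insn_count == 8 and
   n_iterations == 3 give unrolled_insn_copies
   == (8 - 2) * 3 - 1 == 17 (on a HAVE_cc0 machine,
   (8 - 3) * 3 - 1 == 14).  Since 17 > 8, such a loop would not be
   unrolled below unless LOOP_UNROLL was requested.  */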
4508 /* Unroll loops from within strength reduction so that we can use the
4509 induction variable information that strength_reduce has already
4510 collected. Always unroll loops that would be as small or smaller
4511 unrolled than when rolled. */
4512 if ((flags & LOOP_UNROLL)
4513 || (loop_info->n_iterations > 0
4514 && unrolled_insn_copies <= insn_count))
4515 unroll_loop (loop, insn_count, 1);
4517 #ifdef HAVE_doloop_end
4518 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
4519 doloop_optimize (loop);
4520 #endif /* HAVE_doloop_end */
4522 /* In case number of iterations is known, drop branch prediction note
4523 in the branch. Do that only in second loop pass, as loop unrolling
4524 may change the number of iterations performed. */
4525 if ((flags & LOOP_BCT)
4526 && loop_info->n_iterations / loop_info->unroll_number > 1)
4528 int n = loop_info->n_iterations / loop_info->unroll_number;
4529 predict_insn (PREV_INSN (loop->end),
4530 PRED_LOOP_ITERATIONS,
4531 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
4534 if (loop_dump_stream)
4535 fprintf (loop_dump_stream, "\n");
4537 loop_ivs_free (loop);
4538 if (reg_map)
4539 free (reg_map);
4542 /* Record all basic induction variables calculated in the insn. */
4543 static rtx
4544 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
4545 struct loop *loop;
4546 rtx p;
4547 int not_every_iteration;
4548 int maybe_multiple;
4550 struct loop_ivs *ivs = LOOP_IVS (loop);
4551 rtx set;
4552 rtx dest_reg;
4553 rtx inc_val;
4554 rtx mult_val;
4555 rtx *location;
4557 if (GET_CODE (p) == INSN
4558 && (set = single_set (p))
4559 && GET_CODE (SET_DEST (set)) == REG)
4561 dest_reg = SET_DEST (set);
4562 if (REGNO (dest_reg) < max_reg_before_loop
4563 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
4564 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
4566 if (basic_induction_var (loop, SET_SRC (set),
4567 GET_MODE (SET_SRC (set)),
4568 dest_reg, p, &inc_val, &mult_val,
4569 &location))
4571 /* It is a possible basic induction variable.
4572 Create and initialize an induction structure for it. */
4574 struct induction *v
4575 = (struct induction *) xmalloc (sizeof (struct induction));
4577 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
4578 not_every_iteration, maybe_multiple);
4579 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
4581 else if (REGNO (dest_reg) < ivs->n_regs)
4582 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
4585 return p;
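/* Illustrative examples: each of

     i = i + 1;
     i = i - 4;               <- RTL canonicalizes this to i + (-4)
     i = invariant_reg + i;

   is recorded here as a biv update of `i' with mult_val ==
   const1_rtx, while a plain `i = 10;' in the innermost loop is
   recorded with mult_val == const0_rtx (see basic_induction_var
   below).  */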
4588 /* Record all givs calculated in the insn.
4589 A register is a giv if: it is only set once, it is a function of a
4590 biv and a constant (or invariant), and it is not a biv. */
4591 static rtx
4592 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
4593 struct loop *loop;
4594 rtx p;
4595 int not_every_iteration;
4596 int maybe_multiple;
4598 struct loop_regs *regs = LOOP_REGS (loop);
4600 rtx set;
4601 /* Look for a general induction variable in a register. */
4602 if (GET_CODE (p) == INSN
4603 && (set = single_set (p))
4604 && GET_CODE (SET_DEST (set)) == REG
4605 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
4607 rtx src_reg;
4608 rtx dest_reg;
4609 rtx add_val;
4610 rtx mult_val;
4611 rtx ext_val;
4612 int benefit;
4613 rtx regnote = 0;
4614 rtx last_consec_insn;
4616 dest_reg = SET_DEST (set);
4617 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
4618 return p;
4620 if (/* SET_SRC is a giv. */
4621 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
4622 &mult_val, &ext_val, 0, &benefit, VOIDmode)
4623 /* Equivalent expression is a giv. */
4624 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
4625 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
4626 &add_val, &mult_val, &ext_val, 0,
4627 &benefit, VOIDmode)))
4628 /* Don't try to handle any regs made by loop optimization.
4629 We have nothing on them in regno_first_uid, etc. */
4630 && REGNO (dest_reg) < max_reg_before_loop
4631 /* Don't recognize a BASIC_INDUCT_VAR here. */
4632 && dest_reg != src_reg
4633 /* This must be the only place where the register is set. */
4634 && (regs->array[REGNO (dest_reg)].n_times_set == 1
4635 /* or all sets must be consecutive and make a giv. */
4636 || (benefit = consec_sets_giv (loop, benefit, p,
4637 src_reg, dest_reg,
4638 &add_val, &mult_val, &ext_val,
4639 &last_consec_insn))))
4641 struct induction *v
4642 = (struct induction *) xmalloc (sizeof (struct induction));
4644 /* If this is a library call, increase benefit. */
4645 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
4646 benefit += libcall_benefit (p);
4648 /* Skip the consecutive insns, if there are any. */
4649 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
4650 p = last_consec_insn;
4652 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
4653 ext_val, benefit, DEST_REG, not_every_iteration,
4654 maybe_multiple, (rtx*)0);
4659 #ifndef DONT_REDUCE_ADDR
4660 /* Look for givs which are memory addresses. */
4661 /* This resulted in worse code on a VAX 8600. I wonder if it
4662 still does. */
4663 if (GET_CODE (p) == INSN)
4664 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
4665 maybe_multiple);
4666 #endif
4668 /* Update the status of whether giv can derive other givs. This can
4669 change when we pass a label or an insn that updates a biv. */
4670 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4671 || GET_CODE (p) == CODE_LABEL)
4672 update_giv_derive (loop, p);
4673 return p;
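/* Illustrative example: with biv `i', the single-set insn

     j = i * 4 + 10;

   makes `j' a DEST_REG giv with src_reg == i, mult_val == 4 and
   add_val == 10, provided `j' is set only here (or only in
   consecutive insns that together compute the giv, the
   consec_sets_giv case above).  */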
4676 /* Return 1 if X is a valid source for an initial value (or as value being
4677 compared against in an initial test).
4679 X must be either a register or constant and must not be clobbered between
4680 the current insn and the start of the loop.
4682 INSN is the insn containing X. */
4684 static int
4685 valid_initial_value_p (x, insn, call_seen, loop_start)
4686 rtx x;
4687 rtx insn;
4688 int call_seen;
4689 rtx loop_start;
4691 if (CONSTANT_P (x))
4692 return 1;
4694 /* Only consider pseudos we know about initialized in insns whose luids
4695 we know. */
4696 if (GET_CODE (x) != REG
4697 || REGNO (x) >= max_reg_before_loop)
4698 return 0;
4700 /* Don't use call-clobbered registers across a call which clobbers it. On
4701 some machines, don't use any hard registers at all. */
4702 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4703 && (SMALL_REGISTER_CLASSES
4704 || (call_used_regs[REGNO (x)] && call_seen)))
4705 return 0;
4707 /* Don't use registers that have been clobbered before the start of the
4708 loop. */
4709 if (reg_set_between_p (x, insn, loop_start))
4710 return 0;
4712 return 1;
4715 /* Scan X for memory refs and check each memory address
4716 as a possible giv. INSN is the insn whose pattern X comes from.
4717 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4718 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
4719 more than once in each loop iteration. */
4721 static void
4722 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
4723 const struct loop *loop;
4724 rtx x;
4725 rtx insn;
4726 int not_every_iteration, maybe_multiple;
4728 register int i, j;
4729 register enum rtx_code code;
4730 register const char *fmt;
4732 if (x == 0)
4733 return;
4735 code = GET_CODE (x);
4736 switch (code)
4738 case REG:
4739 case CONST_INT:
4740 case CONST:
4741 case CONST_DOUBLE:
4742 case SYMBOL_REF:
4743 case LABEL_REF:
4744 case PC:
4745 case CC0:
4746 case ADDR_VEC:
4747 case ADDR_DIFF_VEC:
4748 case USE:
4749 case CLOBBER:
4750 return;
4752 case MEM:
4754 rtx src_reg;
4755 rtx add_val;
4756 rtx mult_val;
4757 rtx ext_val;
4758 int benefit;
4760 /* This code used to disable creating GIVs with mult_val == 1 and
4761 add_val == 0. However, this leads to lost optimizations when
4762 it comes time to combine a set of related DEST_ADDR GIVs, since
4763 this one would not be seen. */
4765 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
4766 &mult_val, &ext_val, 1, &benefit,
4767 GET_MODE (x)))
4769 /* Found one; record it. */
4770 struct induction *v
4771 = (struct induction *) xmalloc (sizeof (struct induction));
4773 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
4774 add_val, ext_val, benefit, DEST_ADDR,
4775 not_every_iteration, maybe_multiple, &XEXP (x, 0));
4777 v->mem = x;
4780 return;
4782 default:
4783 break;
4786 /* Recursively scan the subexpressions for other mem refs. */
4788 fmt = GET_RTX_FORMAT (code);
4789 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4790 if (fmt[i] == 'e')
4791 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
4792 maybe_multiple);
4793 else if (fmt[i] == 'E')
4794 for (j = 0; j < XVECLEN (x, i); j++)
4795 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
4796 maybe_multiple);
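/* Illustrative example (assuming 4-byte elements): in

     x = x + a[i];

   the load's address `a + i*4' is recorded as a DEST_ADDR giv with
   mult_val == 4 and add_val == a; v->location points at the address
   inside the MEM so that the reduced register can later be
   substituted there by loop_givs_rescan.  */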
4799 /* Fill in the data about one biv update.
4800 V is the `struct induction' in which we record the biv. (It is
4801 allocated by the caller, with alloca.)
4802 INSN is the insn that sets it.
4803 DEST_REG is the biv's reg.
4805 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4806 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4807 being set to INC_VAL.
4809 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4810 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4811 can be executed more than once per iteration. If MAYBE_MULTIPLE
4812 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4813 executed exactly once per iteration. */
4815 static void
4816 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
4817 not_every_iteration, maybe_multiple)
4818 struct loop *loop;
4819 struct induction *v;
4820 rtx insn;
4821 rtx dest_reg;
4822 rtx inc_val;
4823 rtx mult_val;
4824 rtx *location;
4825 int not_every_iteration;
4826 int maybe_multiple;
4828 struct loop_ivs *ivs = LOOP_IVS (loop);
4829 struct iv_class *bl;
4831 v->insn = insn;
4832 v->src_reg = dest_reg;
4833 v->dest_reg = dest_reg;
4834 v->mult_val = mult_val;
4835 v->add_val = inc_val;
4836 v->ext_dependant = NULL_RTX;
4837 v->location = location;
4838 v->mode = GET_MODE (dest_reg);
4839 v->always_computable = ! not_every_iteration;
4840 v->always_executed = ! not_every_iteration;
4841 v->maybe_multiple = maybe_multiple;
4843 /* Add this to the reg's iv_class, creating a class
4844 if this is the first incrementation of the reg. */
4846 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
4847 if (bl == 0)
4849 /* Create and initialize new iv_class. */
4851 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
4853 bl->regno = REGNO (dest_reg);
4854 bl->biv = 0;
4855 bl->giv = 0;
4856 bl->biv_count = 0;
4857 bl->giv_count = 0;
4859 /* Set initial value to the reg itself. */
4860 bl->initial_value = dest_reg;
4861 bl->final_value = 0;
4862 /* We haven't seen the initializing insn yet */
4863 bl->init_insn = 0;
4864 bl->init_set = 0;
4865 bl->initial_test = 0;
4866 bl->incremented = 0;
4867 bl->eliminable = 0;
4868 bl->nonneg = 0;
4869 bl->reversed = 0;
4870 bl->total_benefit = 0;
4872 /* Add this class to ivs->list. */
4873 bl->next = ivs->list;
4874 ivs->list = bl;
4876 /* Put it in the array of biv register classes. */
4877 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
4880 /* Update IV_CLASS entry for this biv. */
4881 v->next_iv = bl->biv;
4882 bl->biv = v;
4883 bl->biv_count++;
4884 if (mult_val == const1_rtx)
4885 bl->incremented = 1;
4887 if (loop_dump_stream)
4888 loop_biv_dump (v, loop_dump_stream, 0);
4891 /* Fill in the data about one giv.
4892 V is the `struct induction' in which we record the giv. (It is
4893 allocated by the caller, with alloca.)
4894 INSN is the insn that sets it.
4895 BENEFIT estimates the savings from deleting this insn.
4896 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4897 into a register or is used as a memory address.
4899 SRC_REG is the biv reg which the giv is computed from.
4900 DEST_REG is the giv's reg (if the giv is stored in a reg).
4901 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4902 LOCATION points to the place where this giv's value appears in INSN. */
4904 static void
4905 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
4906 benefit, type, not_every_iteration, maybe_multiple, location)
4907 const struct loop *loop;
4908 struct induction *v;
4909 rtx insn;
4910 rtx src_reg;
4911 rtx dest_reg;
4912 rtx mult_val, add_val, ext_val;
4913 int benefit;
4914 enum g_types type;
4915 int not_every_iteration, maybe_multiple;
4916 rtx *location;
4918 struct loop_ivs *ivs = LOOP_IVS (loop);
4919 struct induction *b;
4920 struct iv_class *bl;
4921 rtx set = single_set (insn);
4922 rtx temp;
4924 /* Attempt to prove constantness of the values. */
4925 temp = simplify_rtx (add_val);
4926 if (temp)
4927 add_val = temp;
4929 v->insn = insn;
4930 v->src_reg = src_reg;
4931 v->giv_type = type;
4932 v->dest_reg = dest_reg;
4933 v->mult_val = mult_val;
4934 v->add_val = add_val;
4935 v->ext_dependant = ext_val;
4936 v->benefit = benefit;
4937 v->location = location;
4938 v->cant_derive = 0;
4939 v->combined_with = 0;
4940 v->maybe_multiple = maybe_multiple;
4941 v->maybe_dead = 0;
4942 v->derive_adjustment = 0;
4943 v->same = 0;
4944 v->ignore = 0;
4945 v->new_reg = 0;
4946 v->final_value = 0;
4947 v->same_insn = 0;
4948 v->auto_inc_opt = 0;
4949 v->unrolled = 0;
4950 v->shared = 0;
4952 /* The v->always_computable field is used in update_giv_derive, to
4953 determine whether a giv can be used to derive another giv. For a
4954 DEST_REG giv, INSN computes a new value for the giv, so its value
4955 isn't computable if INSN isn't executed every iteration.
4956 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4957 it does not compute a new value. Hence the value is always computable
4958 regardless of whether INSN is executed each iteration. */
4960 if (type == DEST_ADDR)
4961 v->always_computable = 1;
4962 else
4963 v->always_computable = ! not_every_iteration;
4965 v->always_executed = ! not_every_iteration;
4967 if (type == DEST_ADDR)
4969 v->mode = GET_MODE (*location);
4970 v->lifetime = 1;
4972 else /* type == DEST_REG */
4974 v->mode = GET_MODE (SET_DEST (set));
4976 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
4978 /* If the lifetime is zero, it means that this register is
4979 really a dead store. So mark this as a giv that can be
4980 ignored. This will not prevent the biv from being eliminated. */
4981 if (v->lifetime == 0)
4982 v->ignore = 1;
4984 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
4985 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
4988 /* Add the giv to the class of givs computed from one biv. */
4990 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
4991 if (bl)
4993 v->next_iv = bl->giv;
4994 bl->giv = v;
4995 /* Don't count DEST_ADDR. This is supposed to count the number of
4996 insns that calculate givs. */
4997 if (type == DEST_REG)
4998 bl->giv_count++;
4999 bl->total_benefit += benefit;
5001 else
5002 /* Fatal error, biv missing for this giv? */
5003 abort ();
5005 if (type == DEST_ADDR)
5006 v->replaceable = 1;
5007 else
5009 /* The giv can be replaced outright by the reduced register only if all
5010 of the following conditions are true:
5011 - the insn that sets the giv is always executed on any iteration
5012 on which the giv is used at all
5013 (there are two ways to deduce this:
5014 either the insn is executed on every iteration,
5015 or all uses follow that insn in the same basic block),
5016 - the giv is not used outside the loop
5017 - no assignments to the biv occur during the giv's lifetime. */
5019 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5020 /* Previous line always fails if INSN was moved by loop opt. */
5021 && REGNO_LAST_LUID (REGNO (dest_reg))
5022 < INSN_LUID (loop->end)
5023 && (! not_every_iteration
5024 || last_use_this_basic_block (dest_reg, insn)))
5026 /* Now check that there are no assignments to the biv within the
5027 giv's lifetime. This requires two separate checks. */
5029 /* Check each biv update, and fail if any are between the first
5030 and last use of the giv.
5032 If this loop contains an inner loop that was unrolled, then
5033 the insn modifying the biv may have been emitted by the loop
5034 unrolling code, and hence does not have a valid luid. Just
5035 mark the biv as not replaceable in this case. It is not very
5036 useful as a biv, because it is used in two different loops.
5037 It is very unlikely that we would be able to optimize the giv
5038 using this biv anyway. */
5040 v->replaceable = 1;
5041 for (b = bl->biv; b; b = b->next_iv)
5043 if (INSN_UID (b->insn) >= max_uid_for_loop
5044 || ((INSN_LUID (b->insn)
5045 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5046 && (INSN_LUID (b->insn)
5047 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5049 v->replaceable = 0;
5050 v->not_replaceable = 1;
5051 break;
5055 /* If there are any backwards branches that go from after the
5056 biv update to before it, then this giv is not replaceable. */
5057 if (v->replaceable)
5058 for (b = bl->biv; b; b = b->next_iv)
5059 if (back_branch_in_range_p (loop, b->insn))
5061 v->replaceable = 0;
5062 v->not_replaceable = 1;
5063 break;
5066 else
5068 /* May still be replaceable, we don't have enough info here to
5069 decide. */
5070 v->replaceable = 0;
5071 v->not_replaceable = 0;
5075 /* Record whether the add_val contains a const_int, for later use by
5076 combine_givs. */
5078 rtx tem = add_val;
5080 v->no_const_addval = 1;
5081 if (tem == const0_rtx)
5083 else if (CONSTANT_P (add_val))
5084 v->no_const_addval = 0;
5085 if (GET_CODE (tem) == PLUS)
5087 while (1)
5089 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5090 tem = XEXP (tem, 0);
5091 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5092 tem = XEXP (tem, 1);
5093 else
5094 break;
5096 if (CONSTANT_P (XEXP (tem, 1)))
5097 v->no_const_addval = 0;
5101 if (loop_dump_stream)
5102 loop_giv_dump (v, loop_dump_stream, 0);
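/* Illustrative example of the replaceability test above: in

     for (i = 0; i < n; i++)   <- biv update at the loop latch
       {
         j = i * 4;            <- giv defined here ...
         a[j] = 0;             <- ... and last used here
       }

   no biv update falls between the definition and last use of `j',
   and `j' is not live after the loop, so `j' is replaceable.  If the
   increment of `i' fell between the two insns instead, `j' would be
   marked not_replaceable.  */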
5105 /* All this does is determine whether a giv can be made replaceable because
5106 its final value can be calculated. This code cannot be part of record_giv
5107 above, because final_giv_value requires that the number of loop iterations
5108 be known, and that cannot be accurately calculated until after all givs
5109 have been identified. */
5111 static void
5112 check_final_value (loop, v)
5113 const struct loop *loop;
5114 struct induction *v;
5116 struct loop_ivs *ivs = LOOP_IVS (loop);
5117 struct iv_class *bl;
5118 rtx final_value = 0;
5120 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5122 /* DEST_ADDR givs will never reach here, because they are always marked
5123 replaceable above in record_giv. */
5125 /* The giv can be replaced outright by the reduced register only if all
5126 of the following conditions are true:
5127 - the insn that sets the giv is always executed on any iteration
5128 on which the giv is used at all
5129 (there are two ways to deduce this:
5130 either the insn is executed on every iteration,
5131 or all uses follow that insn in the same basic block),
5132 - its final value can be calculated (this condition is different
5133 than the one above in record_giv)
5134 - it's not used before it's set
5135 - no assignments to the biv occur during the giv's lifetime. */
5137 #if 0
5138 /* This is only called now when replaceable is known to be false. */
5139 /* Clear replaceable, so that it won't confuse final_giv_value. */
5140 v->replaceable = 0;
5141 #endif
5143 if ((final_value = final_giv_value (loop, v))
5144 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5146 int biv_increment_seen = 0, before_giv_insn = 0;
5147 rtx p = v->insn;
5148 rtx last_giv_use;
5150 v->replaceable = 1;
5152 /* When trying to determine whether or not a biv increment occurs
5153 during the lifetime of the giv, we can ignore uses of the variable
5154 outside the loop because final_value is true. Hence we cannot
5155 use regno_last_uid and regno_first_uid as above in record_giv. */
5157 /* Search the loop to determine whether any assignments to the
5158 biv occur during the giv's lifetime. Start with the insn
5159 that sets the giv, and search around the loop until we come
5160 back to that insn again.
5162 Also fail if there is a jump within the giv's lifetime that jumps
5163 to somewhere outside the lifetime but still within the loop. This
5164 catches spaghetti code where the execution order is not linear, and
5165 hence the above test fails. Here we assume that the giv lifetime
5166 does not extend from one iteration of the loop to the next, so as
5167 to make the test easier. Since the lifetime isn't known yet,
5168 this requires two loops. See also record_giv above. */
5170 last_giv_use = v->insn;
5172 while (1)
5174 p = NEXT_INSN (p);
5175 if (p == loop->end)
5177 before_giv_insn = 1;
5178 p = NEXT_INSN (loop->start);
5180 if (p == v->insn)
5181 break;
5183 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5184 || GET_CODE (p) == CALL_INSN)
5186 /* It is possible for the BIV increment to use the GIV if we
5187 have a cycle. Thus we must be sure to check each insn for
5188 both BIV and GIV uses, and we must check for BIV uses
5189 first. */
5191 if (! biv_increment_seen
5192 && reg_set_p (v->src_reg, PATTERN (p)))
5193 biv_increment_seen = 1;
5195 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5197 if (biv_increment_seen || before_giv_insn)
5199 v->replaceable = 0;
5200 v->not_replaceable = 1;
5201 break;
5203 last_giv_use = p;
5208 /* Now that the lifetime of the giv is known, check for branches
5209 from within the lifetime to outside the lifetime if it is still
5210 replaceable. */
5212 if (v->replaceable)
5214 p = v->insn;
5215 while (1)
5217 p = NEXT_INSN (p);
5218 if (p == loop->end)
5219 p = NEXT_INSN (loop->start);
5220 if (p == last_giv_use)
5221 break;
5223 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5224 && LABEL_NAME (JUMP_LABEL (p))
5225 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5226 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5227 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5228 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5230 v->replaceable = 0;
5231 v->not_replaceable = 1;
5233 if (loop_dump_stream)
5234 fprintf (loop_dump_stream,
5235 "Found branch outside giv lifetime.\n");
5237 break;
5242 /* If it is replaceable, then save the final value. */
5243 if (v->replaceable)
5244 v->final_value = final_value;
5247 if (loop_dump_stream && v->replaceable)
5248 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5249 INSN_UID (v->insn), REGNO (v->dest_reg));
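/* Illustrative example: if `j = i * 4' is live after a loop known to
   run exactly 10 times (i == 0 .. 9), record_giv could not make it
   replaceable, but its final value 9*4 == 36 is computable here, so
   it can be marked replaceable after all, with that value stored
   after the loop exit.  */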
5252 /* Update the status of whether a giv can derive other givs.
5254 We need to do something special if there is or may be an update to the biv
5255 between the time the giv is defined and the time it is used to derive
5256 another giv.
5258 In addition, a giv that is only conditionally set is not allowed to
5259 derive another giv once a label has been passed.
5261 The cases we look at are when a label or an update to a biv is passed. */
5263 static void
5264 update_giv_derive (loop, p)
5265 const struct loop *loop;
5266 rtx p;
5268 struct loop_ivs *ivs = LOOP_IVS (loop);
5269 struct iv_class *bl;
5270 struct induction *biv, *giv;
5271 rtx tem;
5272 int dummy;
5274 /* Search all IV classes, then all bivs, and finally all givs.
5276 There are three cases we are concerned with. First we have the situation
5277 of a giv that is only updated conditionally. In that case, it may not
5278 derive any givs after a label is passed.
5280 The second case is when a biv update occurs, or may occur, after the
5281 definition of a giv. For certain biv updates (see below) that are
5282 known to occur between the giv definition and use, we can adjust the
5283 giv definition. For others, or when the biv update is conditional,
5284 we must prevent the giv from deriving any other givs. There are two
5285 sub-cases within this case.
5287 If this is a label, we are concerned with any biv update that is done
5288 conditionally, since it may be done after the giv is defined followed by
5289 a branch here (actually, we need to pass both a jump and a label, but
5290 this extra tracking doesn't seem worth it).
5292 If this is a jump, we are concerned about any biv update that may be
5293 executed multiple times. We are actually only concerned about
5294 backward jumps, but it is probably not worth performing the test
5295 on the jump again here.
5297 If this is a biv update, we must adjust the giv status to show that a
5298 subsequent biv update was performed. If this adjustment cannot be done,
5299 the giv cannot derive further givs. */
5301 for (bl = ivs->list; bl; bl = bl->next)
5302 for (biv = bl->biv; biv; biv = biv->next_iv)
5303 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5304 || biv->insn == p)
5306 for (giv = bl->giv; giv; giv = giv->next_iv)
5308 /* If cant_derive is already true, there is no point in
5309 checking all of these conditions again. */
5310 if (giv->cant_derive)
5311 continue;
5313 /* If this giv is conditionally set and we have passed a label,
5314 it cannot derive anything. */
5315 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5316 giv->cant_derive = 1;
5318 /* Skip givs that have mult_val == 0, since
5319 they are really invariants. Also skip those that are
5320 replaceable, since we know their lifetime doesn't contain
5321 any biv update. */
5322 else if (giv->mult_val == const0_rtx || giv->replaceable)
5323 continue;
5325 /* The only way we can allow this giv to derive another
5326 is if this is a biv increment and we can form the product
5327 of biv->add_val and giv->mult_val. In this case, we will
5328 be able to compute a compensation. */
5329 else if (biv->insn == p)
5331 rtx ext_val_dummy;
5333 tem = 0;
5334 if (biv->mult_val == const1_rtx)
5335 tem = simplify_giv_expr (loop,
5336 gen_rtx_MULT (giv->mode,
5337 biv->add_val,
5338 giv->mult_val),
5339 &ext_val_dummy, &dummy);
5341 if (tem && giv->derive_adjustment)
5342 tem = simplify_giv_expr
5343 (loop,
5344 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
5345 &ext_val_dummy, &dummy);
5347 if (tem)
5348 giv->derive_adjustment = tem;
5349 else
5350 giv->cant_derive = 1;
5352 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5353 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
5354 giv->cant_derive = 1;
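/* Worked example of the compensation above: if the biv update
   `i = i + 2' (biv->add_val == 2) occurs between the definition of
   the giv `j = i * 3 + 1' (giv->mult_val == 3) and a later use of
   `j' to derive another giv, the derived value must be corrected by
   biv->add_val * giv->mult_val == 2 * 3 == 6, which is what is
   accumulated in giv->derive_adjustment.  */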
5359 /* Check whether an insn is an increment legitimate for a basic induction var.
5360 X is the source of insn P, or a part of it.
5361 MODE is the mode in which X should be interpreted.
5363 DEST_REG is the putative biv, also the destination of the insn.
5364 We accept patterns of these forms:
5365 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
5366 REG = INVARIANT + REG
5368 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5369 store the additive term into *INC_VAL, and store the place where
5370 we found the additive term into *LOCATION.
5372 If X is an assignment of an invariant into DEST_REG, we set
5373 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5375 We also want to detect a BIV when it corresponds to a variable
5376 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5377 of the variable may be a PLUS that adds a SUBREG of that variable to
5378 an invariant and then sign- or zero-extends the result of the PLUS
5379 into the variable.
5381 Most GIVs in such cases will be in the promoted mode, since that is
5382 probably the natural computation mode (and almost certainly the mode
5383 used for addresses) on the machine. So we view the pseudo-reg containing
5384 the variable as the BIV, as if it were simply incremented.
5386 Note that treating the entire pseudo as a BIV will result in making
5387 simple increments to any GIVs based on it. However, if the variable
5388 overflows in its declared mode but not its promoted mode, the result will
5389 be incorrect. This is acceptable if the variable is signed, since
5390 overflows in such cases are undefined, but not if it is unsigned, since
5391 those overflows are defined. So we only check for SIGN_EXTEND and
5392 not ZERO_EXTEND.
5394 If we cannot find a biv, we return 0. */
5396 static int
5397 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
5398 const struct loop *loop;
5399 register rtx x;
5400 enum machine_mode mode;
5401 rtx dest_reg;
5402 rtx p;
5403 rtx *inc_val;
5404 rtx *mult_val;
5405 rtx **location;
5407 register enum rtx_code code;
5408 rtx *argp, arg;
5409 rtx insn, set = 0;
5411 code = GET_CODE (x);
5412 *location = NULL;
5413 switch (code)
5415 case PLUS:
5416 if (rtx_equal_p (XEXP (x, 0), dest_reg)
5417 || (GET_CODE (XEXP (x, 0)) == SUBREG
5418 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5419 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
5421 argp = &XEXP (x, 1);
5423 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
5424 || (GET_CODE (XEXP (x, 1)) == SUBREG
5425 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5426 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
5428 argp = &XEXP (x, 0);
5430 else
5431 return 0;
5433 arg = *argp;
5434 if (loop_invariant_p (loop, arg) != 1)
5435 return 0;
5437 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
5438 *mult_val = const1_rtx;
5439 *location = argp;
5440 return 1;
5442 case SUBREG:
5443 /* If this is a SUBREG for a promoted variable, check the inner
5444 value. */
5445 if (SUBREG_PROMOTED_VAR_P (x))
5446 return basic_induction_var (loop, SUBREG_REG (x),
5447 GET_MODE (SUBREG_REG (x)),
5448 dest_reg, p, inc_val, mult_val, location);
5449 return 0;
5451 case REG:
5452 /* If this register is assigned in a previous insn, look at its
5453 source, but don't go outside the loop or past a label. */
5455 /* If this sets a register to itself, we would repeat any previous
5456 biv increment if we applied this strategy blindly. */
5457 if (rtx_equal_p (dest_reg, x))
5458 return 0;
5460 insn = p;
5461 while (1)
5463 rtx dest;
5466 insn = PREV_INSN (insn);
5468 while (insn && GET_CODE (insn) == NOTE
5469 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5471 if (!insn)
5472 break;
5473 set = single_set (insn);
5474 if (set == 0)
5475 break;
5476 dest = SET_DEST (set);
5477 if (dest == x
5478 || (GET_CODE (dest) == SUBREG
5479 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
5480 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
5481 && SUBREG_REG (dest) == x))
5482 return basic_induction_var (loop, SET_SRC (set),
5483 (GET_MODE (SET_SRC (set)) == VOIDmode
5484 ? GET_MODE (x)
5485 : GET_MODE (SET_SRC (set))),
5486 dest_reg, insn,
5487 inc_val, mult_val, location);
5489 while (GET_CODE (dest) == SIGN_EXTRACT
5490 || GET_CODE (dest) == ZERO_EXTRACT
5491 || GET_CODE (dest) == SUBREG
5492 || GET_CODE (dest) == STRICT_LOW_PART)
5493 dest = XEXP (dest, 0);
5494 if (dest == x)
5495 break;
5497 /* Fall through. */
5499      /* Can accept constant setting of biv only when inside innermost loop.
5500 Otherwise, a biv of an inner loop may be incorrectly recognized
5501 as a biv of the outer loop,
5502 causing code to be moved INTO the inner loop. */
5503 case MEM:
5504 if (loop_invariant_p (loop, x) != 1)
5505 return 0;
5506 case CONST_INT:
5507 case SYMBOL_REF:
5508 case CONST:
5509 /* convert_modes aborts if we try to convert to or from CCmode, so just
5510 exclude that case. It is very unlikely that a condition code value
5511         would be a useful iterator anyway.  */
5512 if (loop->level == 1
5513 && GET_MODE_CLASS (mode) != MODE_CC
5514 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
5516 /* Possible bug here? Perhaps we don't know the mode of X. */
5517 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
5518 *mult_val = const0_rtx;
5519 return 1;
5521 else
5522 return 0;
5524 case SIGN_EXTEND:
5525 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5526 dest_reg, p, inc_val, mult_val, location);
5528 case ASHIFTRT:
5529 /* Similar, since this can be a sign extension. */
5530 for (insn = PREV_INSN (p);
5531 (insn && GET_CODE (insn) == NOTE
5532 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5533 insn = PREV_INSN (insn))
5536 if (insn)
5537 set = single_set (insn);
5539 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
5540 && set && SET_DEST (set) == XEXP (x, 0)
5541 && GET_CODE (XEXP (x, 1)) == CONST_INT
5542 && INTVAL (XEXP (x, 1)) >= 0
5543 && GET_CODE (SET_SRC (set)) == ASHIFT
5544 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
5545 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
5546 GET_MODE (XEXP (x, 0)),
5547 dest_reg, insn, inc_val, mult_val,
5548 location);
5549 return 0;
5551 default:
5552 return 0;
5556 /* A general induction variable (giv) is any quantity that is a linear
5557 function of a basic induction variable,
5558 i.e. giv = biv * mult_val + add_val.
5559 The coefficients can be any loop invariant quantity.
5560 A giv need not be computed directly from the biv;
5561 it can be computed by way of other givs. */
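/* For example, in a loop walking an int array A with a counter I that
   is a biv incremented by 1, the address computation A + I*4 (assuming
   4-byte ints) is a giv with mult_val 4 and add_val A; both
   coefficients are loop invariant.  */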
5563 /* Determine whether X computes a giv.
5564 If it does, return a nonzero value
5565 which is the benefit from eliminating the computation of X;
5566 set *SRC_REG to the register of the biv that it is computed from;
5567 set *ADD_VAL and *MULT_VAL to the coefficients,
5568 such that the value of X is biv * mult + add; */
5570 static int
5571 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
5572 is_addr, pbenefit, addr_mode)
5573 const struct loop *loop;
5574 rtx x;
5575 rtx *src_reg;
5576 rtx *add_val;
5577 rtx *mult_val;
5578 rtx *ext_val;
5579 int is_addr;
5580 int *pbenefit;
5581 enum machine_mode addr_mode;
5583 struct loop_ivs *ivs = LOOP_IVS (loop);
5584 rtx orig_x = x;
5586 /* If this is an invariant, forget it, it isn't a giv. */
5587 if (loop_invariant_p (loop, x) == 1)
5588 return 0;
5590 *pbenefit = 0;
5591 *ext_val = NULL_RTX;
5592 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
5593 if (x == 0)
5594 return 0;
5596 switch (GET_CODE (x))
5598 case USE:
5599 case CONST_INT:
5600 /* Since this is now an invariant and wasn't before, it must be a giv
5601 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5602 with. */
5603 *src_reg = ivs->list->biv->dest_reg;
5604 *mult_val = const0_rtx;
5605 *add_val = x;
5606 break;
5608 case REG:
5609 /* This is equivalent to a BIV. */
5610 *src_reg = x;
5611 *mult_val = const1_rtx;
5612 *add_val = const0_rtx;
5613 break;
5615 case PLUS:
5616 /* Either (plus (biv) (invar)) or
5617 (plus (mult (biv) (invar_1)) (invar_2)). */
5618 if (GET_CODE (XEXP (x, 0)) == MULT)
5620 *src_reg = XEXP (XEXP (x, 0), 0);
5621 *mult_val = XEXP (XEXP (x, 0), 1);
5623 else
5625 *src_reg = XEXP (x, 0);
5626 *mult_val = const1_rtx;
5628 *add_val = XEXP (x, 1);
5629 break;
5631 case MULT:
5632 /* ADD_VAL is zero. */
5633 *src_reg = XEXP (x, 0);
5634 *mult_val = XEXP (x, 1);
5635 *add_val = const0_rtx;
5636 break;
5638 default:
5639 abort ();
5642 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5643      one unless they are CONST_INT).  */
5644 if (GET_CODE (*add_val) == USE)
5645 *add_val = XEXP (*add_val, 0);
5646 if (GET_CODE (*mult_val) == USE)
5647 *mult_val = XEXP (*mult_val, 0);
5649 if (is_addr)
5650 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
5651 else
5652 *pbenefit += rtx_cost (orig_x, SET);
5654 /* Always return true if this is a giv so it will be detected as such,
5655 even if the benefit is zero or negative. This allows elimination
5656 of bivs that might otherwise not be eliminated. */
5657 return 1;
5660 /* Given an expression, X, try to form it as a linear function of a biv.
5661 We will canonicalize it to be of the form
5662 (plus (mult (BIV) (invar_1))
5663 (invar_2))
5664 with possible degeneracies.
5666 The invariant expressions must each be of a form that can be used as a
5667    machine operand.  We surround them with a USE rtx (a hack, but localized
5668 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5669 routine; it is the caller's responsibility to strip them.
5671 If no such canonicalization is possible (i.e., two biv's are used or an
5672 expression that is neither invariant nor a biv or giv), this routine
5673 returns 0.
5675 For a non-zero return, the result will have a code of CONST_INT, USE,
5676 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5678 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
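/* A sketch of the canonicalization, with B a biv and X a symbol_ref:

     (plus (mult (reg B) (const_int 4)) (plus (reg B) (symbol_ref X)))

   becomes, after the lone REG is rewritten as (mult (reg B)
   (const_int 1)) and the two MULTs of the same biv are combined,

     (plus (mult (reg B) (const_int 5)) (use (symbol_ref X)))

   with the non-constant invariant wrapped in a USE as described.  */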
5680 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
5681 static rtx sge_plus_constant PARAMS ((rtx, rtx));
5683 static rtx
5684 simplify_giv_expr (loop, x, ext_val, benefit)
5685 const struct loop *loop;
5686 rtx x;
5687 rtx *ext_val;
5688 int *benefit;
5690 struct loop_ivs *ivs = LOOP_IVS (loop);
5691 struct loop_regs *regs = LOOP_REGS (loop);
5692 enum machine_mode mode = GET_MODE (x);
5693 rtx arg0, arg1;
5694 rtx tem;
5696 /* If this is not an integer mode, or if we cannot do arithmetic in this
5697 mode, this can't be a giv. */
5698 if (mode != VOIDmode
5699 && (GET_MODE_CLASS (mode) != MODE_INT
5700 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
5701 return NULL_RTX;
5703 switch (GET_CODE (x))
5705 case PLUS:
5706 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5707 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5708 if (arg0 == 0 || arg1 == 0)
5709 return NULL_RTX;
5711 /* Put constant last, CONST_INT last if both constant. */
5712 if ((GET_CODE (arg0) == USE
5713 || GET_CODE (arg0) == CONST_INT)
5714 && ! ((GET_CODE (arg0) == USE
5715 && GET_CODE (arg1) == USE)
5716 || GET_CODE (arg1) == CONST_INT))
5717 tem = arg0, arg0 = arg1, arg1 = tem;
5719 /* Handle addition of zero, then addition of an invariant. */
5720 if (arg1 == const0_rtx)
5721 return arg0;
5722 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5723 switch (GET_CODE (arg0))
5725 case CONST_INT:
5726 case USE:
5727 /* Adding two invariants must result in an invariant, so enclose
5728 addition operation inside a USE and return it. */
5729 if (GET_CODE (arg0) == USE)
5730 arg0 = XEXP (arg0, 0);
5731 if (GET_CODE (arg1) == USE)
5732 arg1 = XEXP (arg1, 0);
5734 if (GET_CODE (arg0) == CONST_INT)
5735 tem = arg0, arg0 = arg1, arg1 = tem;
5736 if (GET_CODE (arg1) == CONST_INT)
5737 tem = sge_plus_constant (arg0, arg1);
5738 else
5739 tem = sge_plus (mode, arg0, arg1);
5741 if (GET_CODE (tem) != CONST_INT)
5742 tem = gen_rtx_USE (mode, tem);
5743 return tem;
5745 case REG:
5746 case MULT:
5747 /* biv + invar or mult + invar. Return sum. */
5748 return gen_rtx_PLUS (mode, arg0, arg1);
5750 case PLUS:
5751 /* (a + invar_1) + invar_2. Associate. */
5752 return
5753 simplify_giv_expr (loop,
5754 gen_rtx_PLUS (mode,
5755 XEXP (arg0, 0),
5756 gen_rtx_PLUS (mode,
5757 XEXP (arg0, 1),
5758 arg1)),
5759 ext_val, benefit);
5761 default:
5762 abort ();
5765 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5766 MULT to reduce cases. */
5767 if (GET_CODE (arg0) == REG)
5768 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
5769 if (GET_CODE (arg1) == REG)
5770 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
5772 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5773 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5774 Recurse to associate the second PLUS. */
5775 if (GET_CODE (arg1) == MULT)
5776 tem = arg0, arg0 = arg1, arg1 = tem;
5778 if (GET_CODE (arg1) == PLUS)
5779 return
5780 simplify_giv_expr (loop,
5781 gen_rtx_PLUS (mode,
5782 gen_rtx_PLUS (mode, arg0,
5783 XEXP (arg1, 0)),
5784 XEXP (arg1, 1)),
5785 ext_val, benefit);
5787 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5788 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
5789 return NULL_RTX;
5791 if (!rtx_equal_p (arg0, arg1))
5792 return NULL_RTX;
5794 return simplify_giv_expr (loop,
5795 gen_rtx_MULT (mode,
5796 XEXP (arg0, 0),
5797 gen_rtx_PLUS (mode,
5798 XEXP (arg0, 1),
5799 XEXP (arg1, 1))),
5800 ext_val, benefit);
5802 case MINUS:
5803 /* Handle "a - b" as "a + b * (-1)". */
5804 return simplify_giv_expr (loop,
5805 gen_rtx_PLUS (mode,
5806 XEXP (x, 0),
5807 gen_rtx_MULT (mode,
5808 XEXP (x, 1),
5809 constm1_rtx)),
5810 ext_val, benefit);
5812 case MULT:
5813 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5814 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
5815 if (arg0 == 0 || arg1 == 0)
5816 return NULL_RTX;
5818 /* Put constant last, CONST_INT last if both constant. */
5819 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5820 && GET_CODE (arg1) != CONST_INT)
5821 tem = arg0, arg0 = arg1, arg1 = tem;
5823 /* If second argument is not now constant, not giv. */
5824 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
5825 return NULL_RTX;
5827 /* Handle multiply by 0 or 1. */
5828 if (arg1 == const0_rtx)
5829 return const0_rtx;
5831 else if (arg1 == const1_rtx)
5832 return arg0;
5834 switch (GET_CODE (arg0))
5836 case REG:
5837 /* biv * invar. Done. */
5838 return gen_rtx_MULT (mode, arg0, arg1);
5840 case CONST_INT:
5841 /* Product of two constants. */
5842 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
5844 case USE:
5845 /* invar * invar is a giv, but attempt to simplify it somehow. */
5846 if (GET_CODE (arg1) != CONST_INT)
5847 return NULL_RTX;
5849 arg0 = XEXP (arg0, 0);
5850 if (GET_CODE (arg0) == MULT)
5852 /* (invar_0 * invar_1) * invar_2. Associate. */
5853 return simplify_giv_expr (loop,
5854 gen_rtx_MULT (mode,
5855 XEXP (arg0, 0),
5856 gen_rtx_MULT (mode,
5857                                                 XEXP (arg0, 1),
5859 arg1)),
5860 ext_val, benefit);
5862      /* Propagate the MULT expressions to the innermost nodes.  */
5863 else if (GET_CODE (arg0) == PLUS)
5865 /* (invar_0 + invar_1) * invar_2. Distribute. */
5866 return simplify_giv_expr (loop,
5867 gen_rtx_PLUS (mode,
5868 gen_rtx_MULT (mode,
5869                                                 XEXP (arg0, 0),
5871 arg1),
5872 gen_rtx_MULT (mode,
5873                                                 XEXP (arg0, 1),
5875 arg1)),
5876 ext_val, benefit);
5878 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
5880 case MULT:
5881 /* (a * invar_1) * invar_2. Associate. */
5882 return simplify_giv_expr (loop,
5883 gen_rtx_MULT (mode,
5884 XEXP (arg0, 0),
5885 gen_rtx_MULT (mode,
5886 XEXP (arg0, 1),
5887 arg1)),
5888 ext_val, benefit);
5890 case PLUS:
5891 /* (a + invar_1) * invar_2. Distribute. */
5892 return simplify_giv_expr (loop,
5893 gen_rtx_PLUS (mode,
5894 gen_rtx_MULT (mode,
5895 XEXP (arg0, 0),
5896 arg1),
5897 gen_rtx_MULT (mode,
5898 XEXP (arg0, 1),
5899 arg1)),
5900 ext_val, benefit);
5902 default:
5903 abort ();
5906 case ASHIFT:
5907 /* Shift by constant is multiply by power of two. */
5908 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5909 return 0;
5911 return
5912 simplify_giv_expr (loop,
5913 gen_rtx_MULT (mode,
5914 XEXP (x, 0),
5915 GEN_INT ((HOST_WIDE_INT) 1
5916 << INTVAL (XEXP (x, 1)))),
5917 ext_val, benefit);
5919 case NEG:
5920 /* "-a" is "a * (-1)" */
5921 return simplify_giv_expr (loop,
5922 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
5923 ext_val, benefit);
5925 case NOT:
5926 /* "~a" is "-a - 1". Silly, but easy. */
5927 return simplify_giv_expr (loop,
5928 gen_rtx_MINUS (mode,
5929 gen_rtx_NEG (mode, XEXP (x, 0)),
5930 const1_rtx),
5931 ext_val, benefit);
5933 case USE:
5934 /* Already in proper form for invariant. */
5935 return x;
5937 case SIGN_EXTEND:
5938 case ZERO_EXTEND:
5939 case TRUNCATE:
5940 /* Conditionally recognize extensions of simple IVs. After we've
5941 computed loop traversal counts and verified the range of the
5942 source IV, we'll reevaluate this as a GIV. */
5943 if (*ext_val == NULL_RTX)
5945 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
5946 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
5948 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
5949 return arg0;
5952 goto do_default;
5954 case REG:
5955 /* If this is a new register, we can't deal with it. */
5956 if (REGNO (x) >= max_reg_before_loop)
5957 return 0;
5959 /* Check for biv or giv. */
5960 switch (REG_IV_TYPE (ivs, REGNO (x)))
5962 case BASIC_INDUCT:
5963 return x;
5964 case GENERAL_INDUCT:
5966 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
5968 /* Form expression from giv and add benefit. Ensure this giv
5969 can derive another and subtract any needed adjustment if so. */
5971 /* Increasing the benefit here is risky. The only case in which it
5972 is arguably correct is if this is the only use of V. In other
5973 cases, this will artificially inflate the benefit of the current
5974 giv, and lead to suboptimal code. Thus, it is disabled, since
5975 potentially not reducing an only marginally beneficial giv is
5976 less harmful than reducing many givs that are not really
5977 beneficial. */
5979 rtx single_use = regs->array[REGNO (x)].single_usage;
5980 if (single_use && single_use != const0_rtx)
5981 *benefit += v->benefit;
5984 if (v->cant_derive)
5985 return 0;
5987 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
5988 v->src_reg, v->mult_val),
5989 v->add_val);
5991 if (v->derive_adjustment)
5992 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
5993 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
5994 if (*ext_val)
5996 if (!v->ext_dependant)
5997 return arg0;
5999 else
6001 *ext_val = v->ext_dependant;
6002 return arg0;
6004 return 0;
6007 default:
6008 do_default:
6009 /* If it isn't an induction variable, and it is invariant, we
6010 may be able to simplify things further by looking through
6011 the bits we just moved outside the loop. */
6012 if (loop_invariant_p (loop, x) == 1)
6014 struct movable *m;
6015 struct loop_movables *movables = LOOP_MOVABLES (loop);
6017 for (m = movables->head; m; m = m->next)
6018 if (rtx_equal_p (x, m->set_dest))
6020 /* Ok, we found a match. Substitute and simplify. */
6022 /* If we match another movable, we must use that, as
6023 this one is going away. */
6024 if (m->match)
6025 return simplify_giv_expr (loop, m->match->set_dest,
6026 ext_val, benefit);
6028 /* If consec is non-zero, this is a member of a group of
6029 instructions that were moved together. We handle this
6030 case only to the point of seeking to the last insn and
6031 looking for a REG_EQUAL. Fail if we don't find one. */
6032 if (m->consec != 0)
6034 int i = m->consec;
6035 tem = m->insn;
6038 tem = NEXT_INSN (tem);
6040 while (--i > 0);
6042 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6043 if (tem)
6044 tem = XEXP (tem, 0);
6046 else
6048 tem = single_set (m->insn);
6049 if (tem)
6050 tem = SET_SRC (tem);
6053 if (tem)
6055 /* What we are most interested in is pointer
6056 arithmetic on invariants -- only take
6057 patterns we may be able to do something with. */
6058 if (GET_CODE (tem) == PLUS
6059 || GET_CODE (tem) == MULT
6060 || GET_CODE (tem) == ASHIFT
6061 || GET_CODE (tem) == CONST_INT
6062 || GET_CODE (tem) == SYMBOL_REF)
6064 tem = simplify_giv_expr (loop, tem, ext_val,
6065 benefit);
6066 if (tem)
6067 return tem;
6069 else if (GET_CODE (tem) == CONST
6070 && GET_CODE (XEXP (tem, 0)) == PLUS
6071 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6072 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6074 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6075 ext_val, benefit);
6076 if (tem)
6077 return tem;
6080 break;
6083 break;
6086 /* Fall through to general case. */
6087 default:
6088 /* If invariant, return as USE (unless CONST_INT).
6089 Otherwise, not giv. */
6090 if (GET_CODE (x) == USE)
6091 x = XEXP (x, 0);
6093 if (loop_invariant_p (loop, x) == 1)
6095 if (GET_CODE (x) == CONST_INT)
6096 return x;
6097 if (GET_CODE (x) == CONST
6098 && GET_CODE (XEXP (x, 0)) == PLUS
6099 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6100 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6101 x = XEXP (x, 0);
6102 return gen_rtx_USE (mode, x);
6104 else
6105 return 0;
6109 /* This routine folds invariants such that there is only ever one
6110 CONST_INT in the summation. It is only used by simplify_giv_expr. */
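/* For instance, given X = (plus (reg) (const_int 4)) and
   C = (const_int 3), we return (plus (reg) (const_int 7)), so a sum
   never carries more than one CONST_INT.  */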
6112 static rtx
6113 sge_plus_constant (x, c)
6114 rtx x, c;
6116 if (GET_CODE (x) == CONST_INT)
6117 return GEN_INT (INTVAL (x) + INTVAL (c));
6118 else if (GET_CODE (x) != PLUS)
6119 return gen_rtx_PLUS (GET_MODE (x), x, c);
6120 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6122 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6123 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6125 else if (GET_CODE (XEXP (x, 0)) == PLUS
6126 || GET_CODE (XEXP (x, 1)) != PLUS)
6128 return gen_rtx_PLUS (GET_MODE (x),
6129 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6131 else
6133 return gen_rtx_PLUS (GET_MODE (x),
6134 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6138 static rtx
6139 sge_plus (mode, x, y)
6140 enum machine_mode mode;
6141 rtx x, y;
6143 while (GET_CODE (y) == PLUS)
6145 rtx a = XEXP (y, 0);
6146 if (GET_CODE (a) == CONST_INT)
6147 x = sge_plus_constant (x, a);
6148 else
6149 x = gen_rtx_PLUS (mode, x, a);
6150 y = XEXP (y, 1);
6152 if (GET_CODE (y) == CONST_INT)
6153 x = sge_plus_constant (x, y);
6154 else
6155 x = gen_rtx_PLUS (mode, x, y);
6156 return x;
6159 /* Help detect a giv that is calculated by several consecutive insns;
6160 for example,
6161 giv = biv * M
6162 giv = giv + A
6163 The caller has already identified the first insn P as having a giv as dest;
6164 we check that all other insns that set the same register follow
6165 immediately after P, that they alter nothing else,
6166 and that the result of the last is still a giv.
6168 The value is 0 if the reg set in P is not really a giv.
6169 Otherwise, the value is the amount gained by eliminating
6170 all the consecutive insns that compute the value.
6172 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6173 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6175 The coefficients of the ultimate giv value are stored in
6176 *MULT_VAL and *ADD_VAL. */
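/* Concretely (hypothetical register numbers), for the pair

     (set (reg 105) (mult (reg 100) (const_int 4)))
     (set (reg 105) (plus (reg 105) (reg 101)))

   with reg 100 the biv and reg 101 loop invariant, the ultimate giv
   value is reg 100 * 4 + reg 101, so *MULT_VAL ends up (const_int 4)
   and *ADD_VAL (reg 101).  */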
6178 static int
6179 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6180 add_val, mult_val, ext_val, last_consec_insn)
6181 const struct loop *loop;
6182 int first_benefit;
6183 rtx p;
6184 rtx src_reg;
6185 rtx dest_reg;
6186 rtx *add_val;
6187 rtx *mult_val;
6188 rtx *ext_val;
6189 rtx *last_consec_insn;
6191 struct loop_ivs *ivs = LOOP_IVS (loop);
6192 struct loop_regs *regs = LOOP_REGS (loop);
6193 int count;
6194 enum rtx_code code;
6195 int benefit;
6196 rtx temp;
6197 rtx set;
6199 /* Indicate that this is a giv so that we can update the value produced in
6200 each insn of the multi-insn sequence.
6202 This induction structure will be used only by the call to
6203 general_induction_var below, so we can allocate it on our stack.
6204 If this is a giv, our caller will replace the induct var entry with
6205 a new induction structure. */
6206 struct induction *v;
6208 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6209 return 0;
6211 v = (struct induction *) alloca (sizeof (struct induction));
6212 v->src_reg = src_reg;
6213 v->mult_val = *mult_val;
6214 v->add_val = *add_val;
6215 v->benefit = first_benefit;
6216 v->cant_derive = 0;
6217 v->derive_adjustment = 0;
6218 v->ext_dependant = NULL_RTX;
6220 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6221 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6223 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6225 while (count > 0)
6227 p = NEXT_INSN (p);
6228 code = GET_CODE (p);
6230 /* If libcall, skip to end of call sequence. */
6231 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6232 p = XEXP (temp, 0);
6234 if (code == INSN
6235 && (set = single_set (p))
6236 && GET_CODE (SET_DEST (set)) == REG
6237 && SET_DEST (set) == dest_reg
6238 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6239 add_val, mult_val, ext_val, 0,
6240 &benefit, VOIDmode)
6241 /* Giv created by equivalent expression. */
6242 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6243 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6244 add_val, mult_val, ext_val, 0,
6245 &benefit, VOIDmode)))
6246 && src_reg == v->src_reg)
6248 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6249 benefit += libcall_benefit (p);
6251 count--;
6252 v->mult_val = *mult_val;
6253 v->add_val = *add_val;
6254 v->benefit += benefit;
6256 else if (code != NOTE)
6258 /* Allow insns that set something other than this giv to a
6259 constant. Such insns are needed on machines which cannot
6260 include long constants and should not disqualify a giv. */
6261 if (code == INSN
6262 && (set = single_set (p))
6263 && SET_DEST (set) != dest_reg
6264 && CONSTANT_P (SET_SRC (set)))
6265 continue;
6267 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6268 return 0;
6272 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
6273 *last_consec_insn = p;
6274 return v->benefit;
6277 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6278 represented by G1. If no such expression can be found, or it is clear that
6279 it cannot possibly be a valid address, 0 is returned.
6281 To perform the computation, we note that
6282 G1 = x * v + a and
6283 G2 = y * v + b
6284 where `v' is the biv.
6286    So G2 = (y/x) * G1 + (b - a*y/x).
6288 Note that MULT = y/x.
6290 Update: A and B are now allowed to be additive expressions such that
6291 B contains all variables in A. That is, computing B-A will not require
6292 subtracting variables. */
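/* Worked example: with G1 = 4*v + 4 and G2 = 8*v + 12, MULT = y/x = 2
   and G2 = 2*G1 + (12 - 4*2) = 2*G1 + 4.  */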
6294 static rtx
6295 express_from_1 (a, b, mult)
6296 rtx a, b, mult;
6298 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6300 if (mult == const0_rtx)
6301 return b;
6303 /* If MULT is not 1, we cannot handle A with non-constants, since we
6304 would then be required to subtract multiples of the registers in A.
6305 This is theoretically possible, and may even apply to some Fortran
6306 constructs, but it is a lot of work and we do not attempt it here. */
6308 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6309 return NULL_RTX;
6311 /* In general these structures are sorted top to bottom (down the PLUS
6312 chain), but not left to right across the PLUS. If B is a higher
6313 order giv than A, we can strip one level and recurse. If A is higher
6314 order, we'll eventually bail out, but won't know that until the end.
6315 If they are the same, we'll strip one level around this loop. */
6317 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6319 rtx ra, rb, oa, ob, tmp;
6321 ra = XEXP (a, 0), oa = XEXP (a, 1);
6322 if (GET_CODE (ra) == PLUS)
6323 tmp = ra, ra = oa, oa = tmp;
6325 rb = XEXP (b, 0), ob = XEXP (b, 1);
6326 if (GET_CODE (rb) == PLUS)
6327 tmp = rb, rb = ob, ob = tmp;
6329 if (rtx_equal_p (ra, rb))
6330 /* We matched: remove one reg completely. */
6331 a = oa, b = ob;
6332 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6333 /* An alternate match. */
6334 a = oa, b = rb;
6335 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6336 /* An alternate match. */
6337 a = ra, b = ob;
6338 else
6340 /* Indicates an extra register in B. Strip one level from B and
6341 recurse, hoping B was the higher order expression. */
6342 ob = express_from_1 (a, ob, mult);
6343 if (ob == NULL_RTX)
6344 return NULL_RTX;
6345 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6349 /* Here we are at the last level of A, go through the cases hoping to
6350 get rid of everything but a constant. */
6352 if (GET_CODE (a) == PLUS)
6354 rtx ra, oa;
6356 ra = XEXP (a, 0), oa = XEXP (a, 1);
6357 if (rtx_equal_p (oa, b))
6358 oa = ra;
6359 else if (!rtx_equal_p (ra, b))
6360 return NULL_RTX;
6362 if (GET_CODE (oa) != CONST_INT)
6363 return NULL_RTX;
6365 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6367 else if (GET_CODE (a) == CONST_INT)
6369 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6371 else if (CONSTANT_P (a))
6373 return simplify_gen_binary (MINUS, GET_MODE (b) != VOIDmode ? GET_MODE (b) : GET_MODE (a), const0_rtx, a);
6375 else if (GET_CODE (b) == PLUS)
6377 if (rtx_equal_p (a, XEXP (b, 0)))
6378 return XEXP (b, 1);
6379 else if (rtx_equal_p (a, XEXP (b, 1)))
6380 return XEXP (b, 0);
6381 else
6382 return NULL_RTX;
6384 else if (rtx_equal_p (a, b))
6385 return const0_rtx;
6387 return NULL_RTX;
6390 rtx
6391 express_from (g1, g2)
6392 struct induction *g1, *g2;
6394 rtx mult, add;
6396 /* The value that G1 will be multiplied by must be a constant integer. Also,
6397 the only chance we have of getting a valid address is if b*c/a (see above
6398 for notation) is also an integer. */
6399 if (GET_CODE (g1->mult_val) == CONST_INT
6400 && GET_CODE (g2->mult_val) == CONST_INT)
6402 if (g1->mult_val == const0_rtx
6403 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6404 return NULL_RTX;
6405 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6407 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6408 mult = const1_rtx;
6409 else
6411      /* ??? Find out if one is a multiple of the other?  */
6412 return NULL_RTX;
6415 add = express_from_1 (g1->add_val, g2->add_val, mult);
6416 if (add == NULL_RTX)
6418 /* Failed. If we've got a multiplication factor between G1 and G2,
6419 scale G1's addend and try again. */
6420 if (INTVAL (mult) > 1)
6422 rtx g1_add_val = g1->add_val;
6423 if (GET_CODE (g1_add_val) == MULT
6424 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
6426 HOST_WIDE_INT m;
6427 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
6428 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
6429 XEXP (g1_add_val, 0), GEN_INT (m));
6431 else
6433 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
6434 mult);
6437 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
6440 if (add == NULL_RTX)
6441 return NULL_RTX;
6443 /* Form simplified final result. */
6444 if (mult == const0_rtx)
6445 return add;
6446 else if (mult == const1_rtx)
6447 mult = g1->dest_reg;
6448 else
6449 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
6451 if (add == const0_rtx)
6452 return mult;
6453 else
6455 if (GET_CODE (add) == PLUS
6456 && CONSTANT_P (XEXP (add, 1)))
6458 rtx tem = XEXP (add, 1);
6459 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
6460 add = tem;
6463 return gen_rtx_PLUS (g2->mode, mult, add);
6467 /* Return an rtx, if any, that expresses giv G2 as a function of the register
6468 represented by G1. This indicates that G2 should be combined with G1 and
6469 that G2 can use (either directly or via an address expression) a register
6470 used to represent G1. */
6472 static rtx
6473 combine_givs_p (g1, g2)
6474 struct induction *g1, *g2;
6476 rtx comb, ret;
6478   /* With the introduction of ext dependant givs, we must take care with modes.
6479 G2 must not use a wider mode than G1. */
6480 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
6481 return NULL_RTX;
6483 ret = comb = express_from (g1, g2);
6484 if (comb == NULL_RTX)
6485 return NULL_RTX;
6486 if (g1->mode != g2->mode)
6487 ret = gen_lowpart (g2->mode, comb);
6489 /* If these givs are identical, they can be combined. We use the results
6490 of express_from because the addends are not in a canonical form, so
6491 rtx_equal_p is a weaker test. */
6492 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
6493 combination to be the other way round. */
6494 if (comb == g1->dest_reg
6495 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
6497 return ret;
6500 /* If G2 can be expressed as a function of G1 and that function is valid
6501 as an address and no more expensive than using a register for G2,
6502 the expression of G2 in terms of G1 can be used. */
6503 if (ret != NULL_RTX
6504 && g2->giv_type == DEST_ADDR
6505 && memory_address_p (GET_MODE (g2->mem), ret)
6506      /* ??? Loses, especially with -fforce-addr, where *g2->location
6507 will always be a register, and so anything more complicated
6508 gets discarded. */
6509 #if 0
6510 #ifdef ADDRESS_COST
6511 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6512 #else
6513 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6514 #endif
6515 #endif
6518 return ret;
6521 return NULL_RTX;
6524 /* Check each extension dependant giv in this class to see if its
6525 root biv is safe from wrapping in the interior mode, which would
6526 make the giv illegal. */
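/* Sketch: a QImode biv stepping from 0 to 200 by 1 may be
   zero-extended safely (no value exceeds 255, so no unsigned wrap)
   but not sign-extended, since values above 127 wrap in the signed
   interpretation; the same biv stopping at 100 would be safe for
   both.  */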
6528 static void
6529 check_ext_dependant_givs (bl, loop_info)
6530 struct iv_class *bl;
6531 struct loop_info *loop_info;
6533 int ze_ok = 0, se_ok = 0, info_ok = 0;
6534 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
6535 HOST_WIDE_INT start_val;
6536 unsigned HOST_WIDE_INT u_end_val = 0;
6537 unsigned HOST_WIDE_INT u_start_val = 0;
6538 rtx incr = pc_rtx;
6539 struct induction *v;
6541 /* Make sure the iteration data is available. We must have
6542 constants in order to be certain of no overflow. */
6543 /* ??? An unknown iteration count with an increment of +-1
6544      combined with friendly exit tests against an invariant
6545      value is also amenable to optimization.  Not implemented.  */
6546 if (loop_info->n_iterations > 0
6547 && bl->initial_value
6548 && GET_CODE (bl->initial_value) == CONST_INT
6549 && (incr = biv_total_increment (bl))
6550 && GET_CODE (incr) == CONST_INT
6551 /* Make sure the host can represent the arithmetic. */
6552 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
6554 unsigned HOST_WIDE_INT abs_incr, total_incr;
6555 HOST_WIDE_INT s_end_val;
6556 int neg_incr;
6558 info_ok = 1;
6559 start_val = INTVAL (bl->initial_value);
6560 u_start_val = start_val;
6562 neg_incr = 0, abs_incr = INTVAL (incr);
6563 if (INTVAL (incr) < 0)
6564 neg_incr = 1, abs_incr = -abs_incr;
6565 total_incr = abs_incr * loop_info->n_iterations;
6567      /* Check for host arithmetic overflow.  */
6568 if (total_incr / loop_info->n_iterations == abs_incr)
6570 unsigned HOST_WIDE_INT u_max;
6571 HOST_WIDE_INT s_max;
6573 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
6574 s_end_val = u_end_val;
6575 u_max = GET_MODE_MASK (biv_mode);
6576 s_max = u_max >> 1;
6578 /* Check zero extension of biv ok. */
6579 if (start_val >= 0
6580          /* Check for host arithmetic overflow.  */
6581 && (neg_incr
6582 ? u_end_val < u_start_val
6583 : u_end_val > u_start_val)
6584 /* Check for target arithmetic overflow. */
6585 && (neg_incr
6586 ? 1 /* taken care of with host overflow */
6587 : u_end_val <= u_max))
6589 ze_ok = 1;
6592 /* Check sign extension of biv ok. */
6593 /* ??? While it is true that overflow with signed and pointer
6594 arithmetic is undefined, I fear too many programmers don't
6595 keep this fact in mind -- myself included on occasion.
6596         So leave the signed overflow optimizations alone.  */
6597 if (start_val >= -s_max - 1
6598          /* Check for host arithmetic overflow.  */
6599 && (neg_incr
6600 ? s_end_val < start_val
6601 : s_end_val > start_val)
6602 /* Check for target arithmetic overflow. */
6603 && (neg_incr
6604 ? s_end_val >= -s_max - 1
6605 : s_end_val <= s_max))
6607 se_ok = 1;
6612 /* Invalidate givs that fail the tests. */
6613 for (v = bl->giv; v; v = v->next_iv)
6614 if (v->ext_dependant)
6616 enum rtx_code code = GET_CODE (v->ext_dependant);
6617 int ok = 0;
6619 switch (code)
6621 case SIGN_EXTEND:
6622 ok = se_ok;
6623 break;
6624 case ZERO_EXTEND:
6625 ok = ze_ok;
6626 break;
6628 case TRUNCATE:
6629 /* We don't know whether this value is being used as either
6630 signed or unsigned, so to safely truncate we must satisfy
6631 both. The initial check here verifies the BIV itself;
6632 once that is successful we may check its range wrt the
6633 derived GIV. */
6634 if (se_ok && ze_ok)
6636 enum machine_mode outer_mode = GET_MODE (v->ext_dependant);
6637 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
6639 /* We know from the above that both endpoints are nonnegative,
6640 and that there is no wrapping. Verify that both endpoints
6641 are within the (signed) range of the outer mode. */
6642 if (u_start_val <= max && u_end_val <= max)
6643 ok = 1;
6645 break;
6647 default:
6648 abort ();
6651 if (ok)
6653 if (loop_dump_stream)
6655 fprintf (loop_dump_stream,
6656 "Verified ext dependant giv at %d of reg %d\n",
6657 INSN_UID (v->insn), bl->regno);
6660 else
6662 if (loop_dump_stream)
6664 const char *why;
6666 if (info_ok)
6667 why = "biv iteration values overflowed";
6668 else
6670 if (incr == pc_rtx)
6671 incr = biv_total_increment (bl);
6672 if (incr == const1_rtx)
6673 why = "biv iteration info incomplete; incr by 1";
6674 else
6675 why = "biv iteration info incomplete";
6678 fprintf (loop_dump_stream,
6679 "Failed ext dependant giv at %d, %s\n",
6680 INSN_UID (v->insn), why);
6682 v->ignore = 1;
6683 bl->all_reduced = 0;
6688 /* Generate a version of VALUE in a mode appropriate for initializing V. */
6690 rtx
6691 extend_value_for_giv (v, value)
6692 struct induction *v;
6693 rtx value;
6695 rtx ext_dep = v->ext_dependant;
6697 if (! ext_dep)
6698 return value;
6700 /* Recall that check_ext_dependant_givs verified that the known bounds
6701 of a biv did not overflow or wrap with respect to the extension for
6702 the giv. Therefore, constants need no additional adjustment. */
6703 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
6704 return value;
6706 /* Otherwise, we must adjust the value to compensate for the
6707 differing modes of the biv and the giv. */
6708 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
6711 struct combine_givs_stats
6713 int giv_number;
6714 int total_benefit;
6717 static int
6718 cmp_combine_givs_stats (xp, yp)
6719 const PTR xp;
6720 const PTR yp;
6722 const struct combine_givs_stats * const x =
6723 (const struct combine_givs_stats *) xp;
6724 const struct combine_givs_stats * const y =
6725 (const struct combine_givs_stats *) yp;
6726 int d;
6727 d = y->total_benefit - x->total_benefit;
6728 /* Stabilize the sort. */
6729 if (!d)
6730 d = x->giv_number - y->giv_number;
6731 return d;
6734 /* Check all pairs of givs for iv_class BL and see if any can be combined with
6735 any other. If so, point SAME to the giv combined with and set NEW_REG to
6736 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6737 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
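/* Sketch: if combine_givs_p shows that G2 == 2*G1 + 4, then
   G2->new_reg is set to that expression in terms of G1's DEST_REG,
   G2->same points at G1, and G1 absorbs G2's benefit so that the
   sort below favors reducing G1.  */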
6739 static void
6740 combine_givs (regs, bl)
6741 struct loop_regs *regs;
6742 struct iv_class *bl;
6744 /* Additional benefit to add for being combined multiple times. */
6745 const int extra_benefit = 3;
6747 struct induction *g1, *g2, **giv_array;
6748 int i, j, k, giv_count;
6749 struct combine_givs_stats *stats;
6750 rtx *can_combine;
6752 /* Count givs, because bl->giv_count is incorrect here. */
6753 giv_count = 0;
6754 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6755 if (!g1->ignore)
6756 giv_count++;
6758 giv_array
6759 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6760 i = 0;
6761 for (g1 = bl->giv; g1; g1 = g1->next_iv)
6762 if (!g1->ignore)
6763 giv_array[i++] = g1;
6765 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
6766 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
6768 for (i = 0; i < giv_count; i++)
6770 int this_benefit;
6771 rtx single_use;
6773 g1 = giv_array[i];
6774 stats[i].giv_number = i;
6776 /* If a DEST_REG GIV is used only once, do not allow it to combine
6777 with anything, for in doing so we will gain nothing that cannot
6778 be had by simply letting the GIV with which we would have combined
6779         be reduced on its own.  The lossage shows up in particular with
6780 DEST_ADDR targets on hosts with reg+reg addressing, though it can
6781 be seen elsewhere as well. */
6782 if (g1->giv_type == DEST_REG
6783 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
6784 && single_use != const0_rtx)
6785 continue;
6787 this_benefit = g1->benefit;
6788 /* Add an additional weight for zero addends. */
6789 if (g1->no_const_addval)
6790 this_benefit += 1;
6792 for (j = 0; j < giv_count; j++)
6794 rtx this_combine;
6796 g2 = giv_array[j];
6797 if (g1 != g2
6798 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6800 can_combine[i * giv_count + j] = this_combine;
6801 this_benefit += g2->benefit + extra_benefit;
6804 stats[i].total_benefit = this_benefit;
6807 /* Iterate, combining until we can't. */
6808 restart:
6809 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
6811 if (loop_dump_stream)
6813 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6814 for (k = 0; k < giv_count; k++)
6816 g1 = giv_array[stats[k].giv_number];
6817 if (!g1->combined_with && !g1->same)
6818 fprintf (loop_dump_stream, " {%d, %d}",
6819 INSN_UID (giv_array[stats[k].giv_number]->insn),
6820 stats[k].total_benefit);
6822 putc ('\n', loop_dump_stream);
6825 for (k = 0; k < giv_count; k++)
6827 int g1_add_benefit = 0;
6829 i = stats[k].giv_number;
6830 g1 = giv_array[i];
6832 /* If it has already been combined, skip. */
6833 if (g1->combined_with || g1->same)
6834 continue;
6836 for (j = 0; j < giv_count; j++)
6838 g2 = giv_array[j];
6839 if (g1 != g2 && can_combine[i * giv_count + j]
6840 /* If it has already been combined, skip. */
6841 && ! g2->same && ! g2->combined_with)
6843 int l;
6845 g2->new_reg = can_combine[i * giv_count + j];
6846 g2->same = g1;
6847 g1->combined_with++;
6848 g1->lifetime += g2->lifetime;
6850 g1_add_benefit += g2->benefit;
6852 /* ??? The new final_[bg]iv_value code does a much better job
6853 of finding replaceable giv's, and hence this code may no
6854 longer be necessary. */
6855 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6856 g1_add_benefit -= copy_cost;
6858 /* To help optimize the next set of combinations, remove
6859 this giv from the benefits of other potential mates. */
6860 for (l = 0; l < giv_count; ++l)
6862 int m = stats[l].giv_number;
6863 if (can_combine[m * giv_count + j])
6864 stats[l].total_benefit -= g2->benefit + extra_benefit;
6867 if (loop_dump_stream)
6868 fprintf (loop_dump_stream,
6869 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
6870 INSN_UID (g2->insn), INSN_UID (g1->insn),
6871 g1->benefit, g1_add_benefit, g1->lifetime);
6875 /* To help optimize the next set of combinations, remove
6876 this giv from the benefits of other potential mates. */
6877 if (g1->combined_with)
6879 for (j = 0; j < giv_count; ++j)
6881 int m = stats[j].giv_number;
6882 if (can_combine[m * giv_count + i])
6883 stats[j].total_benefit -= g1->benefit + extra_benefit;
6886 g1->benefit += g1_add_benefit;
6888 /* We've finished with this giv, and everything it touched.
6889 Restart the combination so that proper weights for the
6890 rest of the givs are properly taken into account. */
6891 /* ??? Ideally we would compact the arrays at this point, so
6892 as to not cover old ground. But sanely compacting
6893 can_combine is tricky. */
6894 goto restart;
6898 /* Clean up. */
6899 free (stats);
6900 free (can_combine);
6903 /* Generate sequence for REG = B * M + A. */
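/* With M == (const_int 4), for example, the expansion will typically
   come out as a shift and an add -- REG = B << 2; REG = REG + A --
   though the exact sequence depends on the target's costs.  */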
6905 static rtx
6906 gen_add_mult (b, m, a, reg)
6907 rtx b; /* initial value of basic induction variable */
6908 rtx m; /* multiplicative constant */
6909 rtx a; /* additive constant */
6910 rtx reg; /* destination register */
6912 rtx seq;
6913 rtx result;
6915 start_sequence ();
6916 /* Use unsigned arithmetic. */
6917 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
6918 if (reg != result)
6919 emit_move_insn (reg, result);
6920 seq = gen_sequence ();
6921 end_sequence ();
6923 return seq;
6927 /* Update registers created in insn sequence SEQ. */
6929 static void
6930 loop_regs_update (loop, seq)
6931 const struct loop *loop ATTRIBUTE_UNUSED;
6932 rtx seq;
6934 /* Update register info for alias analysis. */
6936 if (GET_CODE (seq) == SEQUENCE)
6938 int i;
6939 for (i = 0; i < XVECLEN (seq, 0); ++i)
6941 rtx set = single_set (XVECEXP (seq, 0, i));
6942 if (set && GET_CODE (SET_DEST (set)) == REG)
6943 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6946 else
6948 rtx set = single_set (seq);
6949 if (set && GET_CODE (SET_DEST (set)) == REG)
6950 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6955 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A.  */
6957 void
6958 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
6959 const struct loop *loop;
6960 rtx b; /* initial value of basic induction variable */
6961 rtx m; /* multiplicative constant */
6962 rtx a; /* additive constant */
6963 rtx reg; /* destination register */
6964 basic_block before_bb;
6965 rtx before_insn;
6967 rtx seq;
6969 if (! before_insn)
6971 loop_iv_add_mult_hoist (loop, b, m, a, reg);
6972 return;
6975 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
6976 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
6978 /* Increase the lifetime of any invariants moved further in code. */
6979 update_reg_last_use (a, before_insn);
6980 update_reg_last_use (b, before_insn);
6981 update_reg_last_use (m, before_insn);
6983 loop_insn_emit_before (loop, before_bb, before_insn, seq);
6985 /* It is possible that the expansion created lots of new registers.
6986 Iterate over the sequence we just created and record them all. */
6987 loop_regs_update (loop, seq);
6991 /* Emit insns at the loop's sink (by the loop end) to set REG = B * M + A.  */
6993 void
6994 loop_iv_add_mult_sink (loop, b, m, a, reg)
6995 const struct loop *loop;
6996 rtx b; /* initial value of basic induction variable */
6997 rtx m; /* multiplicative constant */
6998 rtx a; /* additive constant */
6999 rtx reg; /* destination register */
7001 rtx seq;
7003 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7004 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7006 /* Increase the lifetime of any invariants moved further in code.
7007      ??? Is this really necessary?  */
7008 update_reg_last_use (a, loop->sink);
7009 update_reg_last_use (b, loop->sink);
7010 update_reg_last_use (m, loop->sink);
7012 loop_insn_sink (loop, seq);
7014 /* It is possible that the expansion created lots of new registers.
7015 Iterate over the sequence we just created and record them all. */
7016 loop_regs_update (loop, seq);
7020 /* Emit insns in the loop pre-header to set REG = B * M + A.  */
7022 void
7023 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7024 const struct loop *loop;
7025 rtx b; /* initial value of basic induction variable */
7026 rtx m; /* multiplicative constant */
7027 rtx a; /* additive constant */
7028 rtx reg; /* destination register */
7030 rtx seq;
7032 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7033 seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg);
7035 loop_insn_hoist (loop, seq);
7037 /* It is possible that the expansion created lots of new registers.
7038 Iterate over the sequence we just created and record them all. */
7039 loop_regs_update (loop, seq);
7044 /* Similar to gen_add_mult, but compute cost rather than generating
7045 sequence. */
7047 static int
7048 iv_add_mult_cost (b, m, a, reg)
7049 rtx b; /* initial value of basic induction variable */
7050 rtx m; /* multiplicative constant */
7051 rtx a; /* additive constant */
7052 rtx reg; /* destination register */
7054 int cost = 0;
7055 rtx last, result;
7057 start_sequence ();
7058 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7059 if (reg != result)
7060 emit_move_insn (reg, result);
7061 last = get_last_insn ();
7062 while (last)
7064 rtx t = single_set (last);
7065 if (t)
7066 cost += rtx_cost (SET_SRC (t), SET);
7067 last = PREV_INSN (last);
7069 end_sequence ();
7070 return cost;
7073 /* Test whether A * B can be computed without
7074 an actual multiply insn. Value is 1 if so. */
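/* For instance, a multiply by 5 usually expands to a short
   shift-and-add sequence (x + (x << 2)), so we return 1; a constant
   whose expansion needs a real mult insn, a libcall, or more than
   three insns makes us return 0.  */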
7076 static int
7077 product_cheap_p (a, b)
7078 rtx a;
7079 rtx b;
7081 int i;
7082 rtx tmp;
7083 int win = 1;
7085 /* If only one is constant, make it B. */
7086 if (GET_CODE (a) == CONST_INT)
7087 tmp = a, a = b, b = tmp;
7089 /* If first constant, both constant, so don't need multiply. */
7090 if (GET_CODE (a) == CONST_INT)
7091 return 1;
7093 /* If second not constant, neither is constant, so would need multiply. */
7094 if (GET_CODE (b) != CONST_INT)
7095 return 0;
7097 /* One operand is constant, so might not need multiply insn. Generate the
7098      code for the multiply and see if a call, a multiply, or a long sequence
7099 of insns is generated. */
7101 start_sequence ();
7102 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7103 tmp = gen_sequence ();
7104 end_sequence ();
7106 if (GET_CODE (tmp) == SEQUENCE)
7108 if (XVEC (tmp, 0) == 0)
7109 win = 1;
7110 else if (XVECLEN (tmp, 0) > 3)
7111 win = 0;
7112 else
7113 for (i = 0; i < XVECLEN (tmp, 0); i++)
7115 rtx insn = XVECEXP (tmp, 0, i);
7117 if (GET_CODE (insn) != INSN
7118 || (GET_CODE (PATTERN (insn)) == SET
7119 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
7120 || (GET_CODE (PATTERN (insn)) == PARALLEL
7121 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
7122 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
7124 win = 0;
7125 break;
7129 else if (GET_CODE (tmp) == SET
7130 && GET_CODE (SET_SRC (tmp)) == MULT)
7131 win = 0;
7132 else if (GET_CODE (tmp) == PARALLEL
7133 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7134 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7135 win = 0;
7137 return win;
7140 /* Check to see if loop can be terminated by a "decrement and branch until
7141 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7142 Also try reversing an increment loop to a decrement loop
7143 to see if the optimization can be performed.
7144 Value is nonzero if optimization was performed. */
7146 /* This is useful even if the architecture doesn't have such an insn,
7147    because it might change a loop which increments from 0 to n to a loop
7148 which decrements from n to 0. A loop that decrements to zero is usually
7149 faster than one that increments from zero. */
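/* For example, a loop `for (i = 0; i < n; i++)' whose counter has no
   use except counting can become `for (i = n; i > 0; i--)', replacing
   the compare against N with a cheaper test against zero.  */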
7151 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7152 such as approx_final_value, biv_total_increment, loop_iterations, and
7153 final_[bg]iv_value. */
7155 static int
7156 check_dbra_loop (loop, insn_count)
7157 struct loop *loop;
7158 int insn_count;
7160 struct loop_info *loop_info = LOOP_INFO (loop);
7161 struct loop_regs *regs = LOOP_REGS (loop);
7162 struct loop_ivs *ivs = LOOP_IVS (loop);
7163 struct iv_class *bl;
7164 rtx reg;
7165 rtx jump_label;
7166 rtx final_value;
7167 rtx start_value;
7168 rtx new_add_val;
7169 rtx comparison;
7170 rtx before_comparison;
7171 rtx p;
7172 rtx jump;
7173 rtx first_compare;
7174 int compare_and_branch;
7175 rtx loop_start = loop->start;
7176 rtx loop_end = loop->end;
7178 /* If last insn is a conditional branch, and the insn before tests a
7179 register value, try to optimize it. Otherwise, we can't do anything. */
7181 jump = PREV_INSN (loop_end);
7182 comparison = get_condition_for_loop (loop, jump);
7183 if (comparison == 0)
7184 return 0;
7185 if (!onlyjump_p (jump))
7186 return 0;
7188 /* Try to compute whether the compare/branch at the loop end is one or
7189 two instructions. */
7190 get_condition (jump, &first_compare);
7191 if (first_compare == jump)
7192 compare_and_branch = 1;
7193 else if (first_compare == prev_nonnote_insn (jump))
7194 compare_and_branch = 2;
7195 else
7196 return 0;
7199 /* If more than one condition is present to control the loop, then
7200 do not proceed, as this function does not know how to rewrite
7201 loop tests with more than one condition.
7203 Look backwards from the first insn in the last comparison
7204 sequence and see if we've got another comparison sequence. */
7206 rtx jump1;
7207 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
7208 if (GET_CODE (jump1) == JUMP_INSN)
7209 return 0;
7212 /* Check all of the bivs to see if the compare uses one of them.
7213 Skip biv's set more than once because we can't guarantee that
7214 it will be zero on the last iteration. Also skip if the biv is
7215 used between its update and the test insn. */
7217 for (bl = ivs->list; bl; bl = bl->next)
7219 if (bl->biv_count == 1
7220 && ! bl->biv->maybe_multiple
7221 && bl->biv->dest_reg == XEXP (comparison, 0)
7222 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7223 first_compare))
7224 break;
7227 if (! bl)
7228 return 0;
7230 /* Look for the case where the basic induction variable is always
7231 nonnegative, and equals zero on the last iteration.
7232 In this case, add a reg_note REG_NONNEG, which allows the
7233 m68k DBRA instruction to be used. */
7235 if (((GET_CODE (comparison) == GT
7236 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7237 && INTVAL (XEXP (comparison, 1)) == -1)
7238 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7239 && GET_CODE (bl->biv->add_val) == CONST_INT
7240 && INTVAL (bl->biv->add_val) < 0)
7242 /* Initial value must be greater than 0,
7243         and init_val % -dec_value == 0 to ensure that it equals zero on
7244         the last iteration.  */
7246 if (GET_CODE (bl->initial_value) == CONST_INT
7247 && INTVAL (bl->initial_value) > 0
7248 && (INTVAL (bl->initial_value)
7249 % (-INTVAL (bl->biv->add_val))) == 0)
7251 /* register always nonnegative, add REG_NOTE to branch */
7252 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7253 REG_NOTES (jump)
7254 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7255 REG_NOTES (jump));
7256 bl->nonneg = 1;
7258 return 1;
7261 /* If the decrement is 1 and the value was tested as >= 0 before
7262 the loop, then we can safely optimize. */
7263 for (p = loop_start; p; p = PREV_INSN (p))
7265 if (GET_CODE (p) == CODE_LABEL)
7266 break;
7267 if (GET_CODE (p) != JUMP_INSN)
7268 continue;
7270 before_comparison = get_condition_for_loop (loop, p);
7271 if (before_comparison
7272 && XEXP (before_comparison, 0) == bl->biv->dest_reg
7273 && GET_CODE (before_comparison) == LT
7274 && XEXP (before_comparison, 1) == const0_rtx
7275 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
7276 && INTVAL (bl->biv->add_val) == -1)
7278 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
7279 REG_NOTES (jump)
7280 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
7281 REG_NOTES (jump));
7282 bl->nonneg = 1;
7284 return 1;
7288 else if (GET_CODE (bl->biv->add_val) == CONST_INT
7289 && INTVAL (bl->biv->add_val) > 0)
7291 /* Try to change inc to dec, so can apply above optimization. */
7292 /* Can do this if:
7293 all registers modified are induction variables or invariant,
7294 all memory references have non-overlapping addresses
7295 (obviously true if only one write)
7296 allow 2 insns for the compare/jump at the end of the loop. */
7297 /* Also, we must avoid any instructions which use both the reversed
7298 biv and another biv. Such instructions will fail if the loop is
7299 reversed. We meet this condition by requiring that either
7300 no_use_except_counting is true, or else that there is only
7301 one biv. */
7302 int num_nonfixed_reads = 0;
7303 /* 1 if the iteration var is used only to count iterations. */
7304 int no_use_except_counting = 0;
7305 /* 1 if the loop has no memory store, or it has a single memory store
7306 which is reversible. */
7307 int reversible_mem_store = 1;
7309 if (bl->giv_count == 0 && ! loop->exit_count)
7311 rtx bivreg = regno_reg_rtx[bl->regno];
7312 struct iv_class *blt;
7314 /* If there are no givs for this biv, and the only exit is the
7315 fall through at the end of the loop, then
7316 see if perhaps there are no uses except to count. */
7317 no_use_except_counting = 1;
7318 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7319 if (INSN_P (p))
7321 rtx set = single_set (p);
7323 if (set && GET_CODE (SET_DEST (set)) == REG
7324 && REGNO (SET_DEST (set)) == bl->regno)
7325 /* An insn that sets the biv is okay. */
7327 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
7328 || p == prev_nonnote_insn (loop_end))
7329 && reg_mentioned_p (bivreg, PATTERN (p)))
7331 /* If either of these insns uses the biv and sets a pseudo
7332 that has more than one usage, then the biv has uses
7333 other than counting since it's used to derive a value
7334 that is used more than one time. */
7335 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
7336 regs);
7337 if (regs->multiple_uses)
7339 no_use_except_counting = 0;
7340 break;
7343 else if (reg_mentioned_p (bivreg, PATTERN (p)))
7345 no_use_except_counting = 0;
7346 break;
7350 /* A biv has uses besides counting if it is used to set another biv. */
7351 for (blt = ivs->list; blt; blt = blt->next)
7352 if (blt->init_set && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
7354 no_use_except_counting = 0;
7355 break;
7359 if (no_use_except_counting)
7360 /* No need to worry about MEMs. */
7362 else if (loop_info->num_mem_sets <= 1)
7364 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7365 if (INSN_P (p))
7366 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
7368 /* If the loop has a single store, and the destination address is
7369 invariant, then we can't reverse the loop, because this address
7370 might then have the wrong value at loop exit.
7371 This would work if the source was invariant also, however, in that
7372 case, the insn should have been moved out of the loop. */
7374 if (loop_info->num_mem_sets == 1)
7376 struct induction *v;
7378 /* If we could prove that each of the memory locations
7379 written to was different, then we could reverse the
7380 store -- but we don't presently have any way of
7381 knowing that. */
7382 reversible_mem_store = 0;
7384 /* If the store depends on a register that is set after the
7385 store, it depends on the initial value, and is thus not
7386 reversible. */
7387 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
7389 if (v->giv_type == DEST_REG
7390 && reg_mentioned_p (v->dest_reg,
7391 PATTERN (loop_info->first_loop_store_insn))
7392 && loop_insn_first_p (loop_info->first_loop_store_insn,
7393 v->insn))
7394 reversible_mem_store = 0;
7398 else
7399 return 0;
7401 /* This code only acts for innermost loops. Also it simplifies
7402 the memory address check by only reversing loops with
7403 zero or one memory access.
7404 Two memory accesses could involve parts of the same array,
7405 and that can't be reversed.
7406         If the biv is used only for counting, then we don't need to worry
7407 about all these things. */
7409 if ((num_nonfixed_reads <= 1
7410 && ! loop_info->has_nonconst_call
7411 && ! loop_info->has_volatile
7412 && reversible_mem_store
7413 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
7414 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
7415 && (bl == ivs->list && bl->next == 0))
7416 || no_use_except_counting)
7418 rtx tem;
7420 /* Loop can be reversed. */
7421 if (loop_dump_stream)
7422 fprintf (loop_dump_stream, "Can reverse loop\n");
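/* For illustration only (a source-level analogue; the transformation
   itself is performed on the rtl below): reversal replaces a
   count-up loop whose bound must be tested explicitly,

       before:  for (i = 0; i < n; i++)  body ();
       after:   for (i = n; i > 0; i--)  body ();

   so that the exit test becomes a comparison against zero, which is
   typically cheaper and may map onto a decrement-and-branch
   instruction where the target has one.  */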
7424 /* Now check other conditions:
7426 The increment must be a constant, as must the initial value,
7427 and the comparison code must be LT.
7429 This test can probably be improved since +/- 1 in the constant
7430 can be obtained by changing LT to LE and vice versa; this is
7431 confusing. */
7433 if (comparison
7434 /* For constants, LE gets turned into LT.  */
7435 && (GET_CODE (comparison) == LT
7436 || (GET_CODE (comparison) == LE
7437 && no_use_except_counting)))
7439 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
7440 rtx initial_value, comparison_value;
7441 int nonneg = 0;
7442 enum rtx_code cmp_code;
7443 int comparison_const_width;
7444 unsigned HOST_WIDE_INT comparison_sign_mask;
7446 add_val = INTVAL (bl->biv->add_val);
7447 comparison_value = XEXP (comparison, 1);
7448 if (GET_MODE (comparison_value) == VOIDmode)
7449 comparison_const_width
7450 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
7451 else
7452 comparison_const_width
7453 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
7454 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
7455 comparison_const_width = HOST_BITS_PER_WIDE_INT;
7456 comparison_sign_mask
7457 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
7459 /* If the comparison value is not a loop invariant, then we
7460 cannot reverse this loop.
7462 ??? If the insns which initialize the comparison value as
7463 a whole compute an invariant result, then we could move
7464 them out of the loop and proceed with loop reversal. */
7465 if (! loop_invariant_p (loop, comparison_value))
7466 return 0;
7468 if (GET_CODE (comparison_value) == CONST_INT)
7469 comparison_val = INTVAL (comparison_value);
7470 initial_value = bl->initial_value;
7472 /* Normalize the initial value if it is an integer and
7473 has no other use except as a counter. This will allow
7474 a few more loops to be reversed. */
7475 if (no_use_except_counting
7476 && GET_CODE (comparison_value) == CONST_INT
7477 && GET_CODE (initial_value) == CONST_INT)
7479 comparison_val = comparison_val - INTVAL (bl->initial_value);
7480 /* The code below requires comparison_val to be a multiple
7481 of add_val in order to do the loop reversal, so
7482 round up comparison_val to a multiple of add_val.
7483 Since comparison_value is constant, we know that the
7484 current comparison code is LT. */
7485 comparison_val = comparison_val + add_val - 1;
7486 comparison_val
7487 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
7488 /* We postpone overflow checks for COMPARISON_VAL here;
7489 even if there is an overflow, we might still be able to
7490 reverse the loop, if converting the loop exit test to
7491 NE is possible. */
7492 initial_value = const0_rtx;
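/* Worked example (illustrative, not from the sources): with an
   initial value of 4, add_val 3 and exit test (LT i 10), we get
   comparison_val = 10 - 4 = 6; rounding up to a multiple of
   add_val leaves it at 6.  The normalized loop then runs i = 0, 3
   against the bound 6 -- the same two iterations the original
   performed with i = 4, 7 against the bound 10.  */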
7495 /* First check if we can do a vanilla loop reversal. */
7496 if (initial_value == const0_rtx
7497 /* If we have a decrement_and_branch_on_count,
7498 prefer the NE test, since this will allow that
7499 instruction to be generated. Note that we must
7500 use a vanilla loop reversal if the biv is used to
7501 calculate a giv or has a non-counting use. */
7502 #if ! defined (HAVE_decrement_and_branch_until_zero) \
7503 && defined (HAVE_decrement_and_branch_on_count)
7504 && (! (add_val == 1 && loop->vtop
7505 && (bl->biv_count == 0
7506 || no_use_except_counting)))
7507 #endif
7508 && GET_CODE (comparison_value) == CONST_INT
7509 /* Now do postponed overflow checks on COMPARISON_VAL. */
7510 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
7511 & comparison_sign_mask))
7513 /* Register will always be nonnegative, with value
7514 0 on last iteration.  */
7515 add_adjust = add_val;
7516 nonneg = 1;
7517 cmp_code = GE;
7519 else if (add_val == 1 && loop->vtop
7520 && (bl->biv_count == 0
7521 || no_use_except_counting))
7523 add_adjust = 0;
7524 cmp_code = NE;
7526 else
7527 return 0;
7529 if (GET_CODE (comparison) == LE)
7530 add_adjust -= add_val;
7532 /* If the initial value is not zero, or if the comparison
7533 value is not an exact multiple of the increment, then we
7534 cannot reverse this loop. */
7535 if (initial_value == const0_rtx
7536 && GET_CODE (comparison_value) == CONST_INT)
7538 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
7539 return 0;
7541 else
7543 if (! no_use_except_counting || add_val != 1)
7544 return 0;
7547 final_value = comparison_value;
7549 /* Reset these in case we normalized the initial value
7550 and comparison value above. */
7551 if (GET_CODE (comparison_value) == CONST_INT
7552 && GET_CODE (initial_value) == CONST_INT)
7554 comparison_value = GEN_INT (comparison_val);
7555 final_value
7556 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
7558 bl->initial_value = initial_value;
7560 /* Save some info needed to produce the new insns. */
7561 reg = bl->biv->dest_reg;
7562 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
7563 if (jump_label == pc_rtx)
7564 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
7565 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
7567 /* Set start_value; if this is not a CONST_INT, we need
7568 to generate a SUB.
7569 Initialize biv to start_value before loop start.
7570 The old initializing insn will be deleted as a
7571 dead store by flow.c. */
7572 if (initial_value == const0_rtx
7573 && GET_CODE (comparison_value) == CONST_INT)
7575 start_value = GEN_INT (comparison_val - add_adjust);
7576 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
7578 else if (GET_CODE (initial_value) == CONST_INT)
7580 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
7581 enum machine_mode mode = GET_MODE (reg);
7582 enum insn_code icode
7583 = add_optab->handlers[(int) mode].insn_code;
7585 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
7586 || ! ((*insn_data[icode].operand[1].predicate)
7587 (comparison_value, mode))
7588 || ! ((*insn_data[icode].operand[2].predicate)
7589 (offset, mode)))
7590 return 0;
7591 start_value
7592 = gen_rtx_PLUS (mode, comparison_value, offset);
7593 loop_insn_hoist (loop, (GEN_FCN (icode)
7594 (reg, comparison_value, offset)));
7595 if (GET_CODE (comparison) == LE)
7596 final_value = gen_rtx_PLUS (mode, comparison_value,
7597 GEN_INT (add_val));
7599 else if (! add_adjust)
7601 enum machine_mode mode = GET_MODE (reg);
7602 enum insn_code icode
7603 = sub_optab->handlers[(int) mode].insn_code;
7604 if (! (*insn_data[icode].operand[0].predicate) (reg, mode)
7605 || ! ((*insn_data[icode].operand[1].predicate)
7606 (comparison_value, mode))
7607 || ! ((*insn_data[icode].operand[2].predicate)
7608 (initial_value, mode)))
7609 return 0;
7610 start_value
7611 = gen_rtx_MINUS (mode, comparison_value, initial_value);
7612 loop_insn_hoist (loop, (GEN_FCN (icode)
7613 (reg, comparison_value,
7614 initial_value)));
7616 else
7617 /* We could handle the other cases too, but it'll be
7618 better to have a testcase first. */
7619 return 0;
7621 /* We may not have a single insn which can increment a reg, so
7622 create a sequence to hold all the insns from expand_inc. */
7623 start_sequence ();
7624 expand_inc (reg, new_add_val);
7625 tem = gen_sequence ();
7626 end_sequence ();
7628 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
7629 delete_insn (bl->biv->insn);
7631 /* Update biv info to reflect its new status. */
7632 bl->biv->insn = p;
7633 bl->initial_value = start_value;
7634 bl->biv->add_val = new_add_val;
7636 /* Update loop info. */
7637 loop_info->initial_value = reg;
7638 loop_info->initial_equiv_value = reg;
7639 loop_info->final_value = const0_rtx;
7640 loop_info->final_equiv_value = const0_rtx;
7641 loop_info->comparison_value = const0_rtx;
7642 loop_info->comparison_code = cmp_code;
7643 loop_info->increment = new_add_val;
7645 /* Increment LABEL_NUSES so that delete_insn will
7646 not delete the label. */
7647 LABEL_NUSES (XEXP (jump_label, 0))++;
7649 /* Emit an insn after the end of the loop to set the biv's
7650 proper exit value if it is used anywhere outside the loop. */
7651 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
7652 || ! bl->init_insn
7653 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
7654 loop_insn_sink (loop, gen_move_insn (reg, final_value));
7656 /* Delete compare/branch at end of loop. */
7657 delete_insn (PREV_INSN (loop_end));
7658 if (compare_and_branch == 2)
7659 delete_insn (first_compare);
7661 /* Add new compare/branch insn at end of loop. */
7662 start_sequence ();
7663 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
7664 GET_MODE (reg), 0, 0,
7665 XEXP (jump_label, 0));
7666 tem = gen_sequence ();
7667 end_sequence ();
7668 emit_jump_insn_before (tem, loop_end);
7670 for (tem = PREV_INSN (loop_end);
7671 tem && GET_CODE (tem) != JUMP_INSN;
7672 tem = PREV_INSN (tem))
7675 if (tem)
7676 JUMP_LABEL (tem) = XEXP (jump_label, 0);
7678 if (nonneg)
7680 if (tem)
7682 /* Increment of LABEL_NUSES done above. */
7683 /* Register is now always nonnegative,
7684 so add REG_NONNEG note to the branch. */
7685 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
7686 REG_NOTES (tem));
7688 bl->nonneg = 1;
7691 /* No insn may reference both the reversed and another biv or it
7692 will fail (see comment near the top of the loop reversal
7693 code).
7694 Earlier on, we have verified that the biv has no use except
7695 counting, or it is the only biv in this function.
7696 However, the code that computes no_use_except_counting does
7697 not verify reg notes. It's possible to have an insn that
7698 references another biv, and has a REG_EQUAL note with an
7699 expression based on the reversed biv. To avoid this case,
7700 remove all REG_EQUAL notes based on the reversed biv
7701 here. */
7702 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
7703 if (INSN_P (p))
7705 rtx *pnote;
7706 rtx set = single_set (p);
7707 /* If this is a set of a GIV based on the reversed biv, any
7708 REG_EQUAL notes should still be correct. */
7709 if (! set
7710 || GET_CODE (SET_DEST (set)) != REG
7711 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
7712 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
7713 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
7714 for (pnote = &REG_NOTES (p); *pnote;)
7716 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
7717 && reg_mentioned_p (regno_reg_rtx[bl->regno],
7718 XEXP (*pnote, 0)))
7719 *pnote = XEXP (*pnote, 1);
7720 else
7721 pnote = &XEXP (*pnote, 1);
7725 /* Mark that this biv has been reversed. Each giv which depends
7726 on this biv, and which is also live past the end of the loop,
7727 will have to be fixed up. */
7729 bl->reversed = 1;
7731 if (loop_dump_stream)
7733 fprintf (loop_dump_stream, "Reversed loop");
7734 if (bl->nonneg)
7735 fprintf (loop_dump_stream, " and added reg_nonneg\n");
7736 else
7737 fprintf (loop_dump_stream, "\n");
7740 return 1;
7745 return 0;
7748 /* Verify whether the biv BL appears to be eliminable,
7749 based on the insns in the loop that refer to it.
7751 If ELIMINATE_P is non-zero, actually do the elimination.
7753 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
7754 determine whether invariant insns should be placed inside or at the
7755 start of the loop. */
7757 static int
7758 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
7759 const struct loop *loop;
7760 struct iv_class *bl;
7761 int eliminate_p;
7762 int threshold, insn_count;
7764 struct loop_ivs *ivs = LOOP_IVS (loop);
7765 rtx reg = bl->biv->dest_reg;
7766 rtx p;
7768 /* Scan all insns in the loop, stopping if we find one that uses the
7769 biv in a way that we cannot eliminate. */
7771 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
7773 enum rtx_code code = GET_CODE (p);
7774 basic_block where_bb = 0;
7775 rtx where_insn = threshold >= insn_count ? 0 : p;
7777 /* If this is a libcall that sets a giv, skip ahead to its end. */
7778 if (GET_RTX_CLASS (code) == 'i')
7780 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
7782 if (note)
7784 rtx last = XEXP (note, 0);
7785 rtx set = single_set (last);
7787 if (set && GET_CODE (SET_DEST (set)) == REG)
7789 unsigned int regno = REGNO (SET_DEST (set));
7791 if (regno < ivs->n_regs
7792 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
7793 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
7794 p = last;
7798 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
7799 && reg_mentioned_p (reg, PATTERN (p))
7800 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
7801 eliminate_p, where_bb, where_insn))
7803 if (loop_dump_stream)
7804 fprintf (loop_dump_stream,
7805 "Cannot eliminate biv %d: biv used in insn %d.\n",
7806 bl->regno, INSN_UID (p));
7807 break;
7811 if (p == loop->end)
7813 if (loop_dump_stream)
7814 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
7815 bl->regno, eliminate_p ? "was" : "can be");
7816 return 1;
7819 return 0;
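/* A sketch of the intended calling convention (hedged; see the
   callers elsewhere in this file for the authoritative usage):

       if (maybe_eliminate_biv (loop, bl, 0, threshold, insn_count))
         maybe_eliminate_biv (loop, bl, 1, threshold, insn_count);

   i.e. a first pass with ELIMINATE_P == 0 verifies that every use
   of the biv can be rewritten before a second pass modifies any
   insn.  */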
7822 /* INSN and REFERENCE are instructions in the same insn chain.
7823 Return non-zero if INSN is first. */
7826 loop_insn_first_p (insn, reference)
7827 rtx insn, reference;
7829 rtx p, q;
7831 for (p = insn, q = reference;;)
7833 /* Start with test for not first so that INSN == REFERENCE yields not
7834 first. */
7835 if (q == insn || ! p)
7836 return 0;
7837 if (p == reference || ! q)
7838 return 1;
7840 /* Either P or Q might be a NOTE. Notes have the same LUID as the
7841 previous insn, hence the <= comparison below does not work if
7842 P is a note. */
7843 if (INSN_UID (p) < max_uid_for_loop
7844 && INSN_UID (q) < max_uid_for_loop
7845 && GET_CODE (p) != NOTE)
7846 return INSN_LUID (p) <= INSN_LUID (q);
7848 if (INSN_UID (p) >= max_uid_for_loop
7849 || GET_CODE (p) == NOTE)
7850 p = NEXT_INSN (p);
7851 if (INSN_UID (q) >= max_uid_for_loop)
7852 q = NEXT_INSN (q);
7856 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
7857 the offset that we have to take into account due to auto-increment /
7858 giv derivation is zero. */
7859 static int
7860 biv_elimination_giv_has_0_offset (biv, giv, insn)
7861 struct induction *biv, *giv;
7862 rtx insn;
7864 /* If the giv V had the auto-inc address optimization applied
7865 to it, and INSN occurs between the giv insn and the biv
7866 insn, then we'd have to adjust the value used here.
7867 This is rare, so we don't bother to make this possible. */
7868 if (giv->auto_inc_opt
7869 && ((loop_insn_first_p (giv->insn, insn)
7870 && loop_insn_first_p (insn, biv->insn))
7871 || (loop_insn_first_p (biv->insn, insn)
7872 && loop_insn_first_p (insn, giv->insn))))
7873 return 0;
7875 return 1;
7878 /* If BL appears in X (part of the pattern of INSN), see if we can
7879 eliminate its use. If so, return 1. If not, return 0.
7881 If BIV does not appear in X, return 1.
7883 If ELIMINATE_P is non-zero, actually do the elimination.
7884 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
7885 Depending on how many items have been moved out of the loop, it
7886 will either be before INSN (when WHERE_INSN is non-zero) or at the
7887 start of the loop (when WHERE_INSN is zero). */
7889 static int
7890 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
7891 const struct loop *loop;
7892 rtx x, insn;
7893 struct iv_class *bl;
7894 int eliminate_p;
7895 basic_block where_bb;
7896 rtx where_insn;
7898 enum rtx_code code = GET_CODE (x);
7899 rtx reg = bl->biv->dest_reg;
7900 enum machine_mode mode = GET_MODE (reg);
7901 struct induction *v;
7902 rtx arg, tem;
7903 #ifdef HAVE_cc0
7904 rtx new;
7905 #endif
7906 int arg_operand;
7907 const char *fmt;
7908 int i, j;
7910 switch (code)
7912 case REG:
7913 /* If we haven't already been able to do something with this BIV,
7914 we can't eliminate it. */
7915 if (x == reg)
7916 return 0;
7917 return 1;
7919 case SET:
7920 /* If this sets the BIV, it is not a problem. */
7921 if (SET_DEST (x) == reg)
7922 return 1;
7924 /* If this is an insn that defines a giv, it is also ok because
7925 it will go away when the giv is reduced. */
7926 for (v = bl->giv; v; v = v->next_iv)
7927 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
7928 return 1;
7930 #ifdef HAVE_cc0
7931 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
7933 /* Can replace with any giv that was reduced and
7934 that has (MULT_VAL != 0) and (ADD_VAL == 0).
7935 Require a constant for MULT_VAL, so we know it's nonzero.
7936 ??? We disable this optimization to avoid potential
7937 overflows. */
7939 for (v = bl->giv; v; v = v->next_iv)
7940 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
7941 && v->add_val == const0_rtx
7942 && ! v->ignore && ! v->maybe_dead && v->always_computable
7943 && v->mode == mode
7944 && 0)
7946 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7947 continue;
7949 if (! eliminate_p)
7950 return 1;
7952 /* If the giv has the opposite direction of change,
7953 then reverse the comparison. */
7954 if (INTVAL (v->mult_val) < 0)
7955 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
7956 const0_rtx, v->new_reg);
7957 else
7958 new = v->new_reg;
7960 /* We can probably test that giv's reduced reg. */
7961 if (validate_change (insn, &SET_SRC (x), new, 0))
7962 return 1;
7965 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7966 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7967 Require a constant for MULT_VAL, so we know it's nonzero.
7968 ??? Do this only if ADD_VAL is a pointer to avoid a potential
7969 overflow problem. */
7971 for (v = bl->giv; v; v = v->next_iv)
7972 if (GET_CODE (v->mult_val) == CONST_INT
7973 && v->mult_val != const0_rtx
7974 && ! v->ignore && ! v->maybe_dead && v->always_computable
7975 && v->mode == mode
7976 && (GET_CODE (v->add_val) == SYMBOL_REF
7977 || GET_CODE (v->add_val) == LABEL_REF
7978 || GET_CODE (v->add_val) == CONST
7979 || (GET_CODE (v->add_val) == REG
7980 && REG_POINTER (v->add_val))))
7982 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
7983 continue;
7985 if (! eliminate_p)
7986 return 1;
7988 /* If the giv has the opposite direction of change,
7989 then reverse the comparison. */
7990 if (INTVAL (v->mult_val) < 0)
7991 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7992 v->new_reg);
7993 else
7994 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7995 copy_rtx (v->add_val));
7997 /* Replace biv with the giv's reduced register. */
7998 update_reg_last_use (v->add_val, insn);
7999 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8000 return 1;
8002 /* Insn doesn't support that constant or invariant. Copy it
8003 into a register (it will be a loop invariant).  */
8004 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8006 loop_insn_emit_before (loop, 0, where_insn,
8007 gen_move_insn (tem,
8008 copy_rtx (v->add_val)));
8010 /* Substitute the new register for its invariant value in
8011 the compare expression. */
8012 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8013 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8014 return 1;
8017 #endif
8018 break;
8020 case COMPARE:
8021 case EQ: case NE:
8022 case GT: case GE: case GTU: case GEU:
8023 case LT: case LE: case LTU: case LEU:
8024 /* See if either argument is the biv. */
8025 if (XEXP (x, 0) == reg)
8026 arg = XEXP (x, 1), arg_operand = 1;
8027 else if (XEXP (x, 1) == reg)
8028 arg = XEXP (x, 0), arg_operand = 0;
8029 else
8030 break;
8032 if (CONSTANT_P (arg))
8034 /* First try to replace with any giv that has constant positive
8035 mult_val and constant add_val. We might be able to support
8036 negative mult_val, but it seems complex to do it in general. */
8038 for (v = bl->giv; v; v = v->next_iv)
8039 if (GET_CODE (v->mult_val) == CONST_INT
8040 && INTVAL (v->mult_val) > 0
8041 && (GET_CODE (v->add_val) == SYMBOL_REF
8042 || GET_CODE (v->add_val) == LABEL_REF
8043 || GET_CODE (v->add_val) == CONST
8044 || (GET_CODE (v->add_val) == REG
8045 && REG_POINTER (v->add_val)))
8046 && ! v->ignore && ! v->maybe_dead && v->always_computable
8047 && v->mode == mode)
8049 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8050 continue;
8052 if (! eliminate_p)
8053 return 1;
8055 /* Replace biv with the giv's reduced reg. */
8056 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8058 /* If all constants are actually constant integers and
8059 the derived constant can be directly placed in the COMPARE,
8060 do so. */
8061 if (GET_CODE (arg) == CONST_INT
8062 && GET_CODE (v->mult_val) == CONST_INT
8063 && GET_CODE (v->add_val) == CONST_INT)
8065 validate_change (insn, &XEXP (x, arg_operand),
8066 GEN_INT (INTVAL (arg)
8067 * INTVAL (v->mult_val)
8068 + INTVAL (v->add_val)), 1);
8070 else
8072 /* Otherwise, load it into a register. */
8073 tem = gen_reg_rtx (mode);
8074 loop_iv_add_mult_emit_before (loop, arg,
8075 v->mult_val, v->add_val,
8076 tem, where_bb, where_insn);
8077 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8079 if (apply_change_group ())
8080 return 1;
8083 /* Look for giv with positive constant mult_val and nonconst add_val.
8084 Insert insns to calculate new compare value.
8085 ??? Turn this off due to possible overflow. */
8087 for (v = bl->giv; v; v = v->next_iv)
8088 if (GET_CODE (v->mult_val) == CONST_INT
8089 && INTVAL (v->mult_val) > 0
8090 && ! v->ignore && ! v->maybe_dead && v->always_computable
8091 && v->mode == mode
8092 && 0)
8094 rtx tem;
8096 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8097 continue;
8099 if (! eliminate_p)
8100 return 1;
8102 tem = gen_reg_rtx (mode);
8104 /* Replace biv with giv's reduced register. */
8105 validate_change (insn, &XEXP (x, 1 - arg_operand),
8106 v->new_reg, 1);
8108 /* Compute value to compare against. */
8109 loop_iv_add_mult_emit_before (loop, arg,
8110 v->mult_val, v->add_val,
8111 tem, where_bb, where_insn);
8112 /* Use it in this insn. */
8113 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8114 if (apply_change_group ())
8115 return 1;
8118 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8120 if (loop_invariant_p (loop, arg) == 1)
8122 /* Look for giv with constant positive mult_val and nonconst
8123 add_val. Insert insns to compute new compare value.
8124 ??? Turn this off due to possible overflow. */
8126 for (v = bl->giv; v; v = v->next_iv)
8127 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8128 && ! v->ignore && ! v->maybe_dead && v->always_computable
8129 && v->mode == mode
8130 && 0)
8132 rtx tem;
8134 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8135 continue;
8137 if (! eliminate_p)
8138 return 1;
8140 tem = gen_reg_rtx (mode);
8142 /* Replace biv with giv's reduced register. */
8143 validate_change (insn, &XEXP (x, 1 - arg_operand),
8144 v->new_reg, 1);
8146 /* Compute value to compare against. */
8147 loop_iv_add_mult_emit_before (loop, arg,
8148 v->mult_val, v->add_val,
8149 tem, where_bb, where_insn);
8150 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8151 if (apply_change_group ())
8152 return 1;
8156 /* This code has problems.  Basically, while checking whether
8157 we will eliminate BL, you can't know whether a particular giv
8158 of ARG will be reduced.  If it isn't going to be reduced,
8159 we can't eliminate BL.  We can try forcing it to be reduced,
8160 but that can generate poor code.
8162 The problem is that the benefit of reducing TV, below, should
8163 be increased if BL can actually be eliminated, but this means
8164 we might have to do a topological sort of the order in which
8165 we try to process bivs.  It doesn't seem worthwhile to do
8166 this sort of thing now. */
8168 #if 0
8169 /* Otherwise the reg compared with had better be a biv. */
8170 if (GET_CODE (arg) != REG
8171 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8172 return 0;
8174 /* Look for a pair of givs, one for each biv,
8175 with identical coefficients. */
8176 for (v = bl->giv; v; v = v->next_iv)
8178 struct induction *tv;
8180 if (v->ignore || v->maybe_dead || v->mode != mode)
8181 continue;
8183 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8184 tv = tv->next_iv)
8185 if (! tv->ignore && ! tv->maybe_dead
8186 && rtx_equal_p (tv->mult_val, v->mult_val)
8187 && rtx_equal_p (tv->add_val, v->add_val)
8188 && tv->mode == mode)
8190 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8191 continue;
8193 if (! eliminate_p)
8194 return 1;
8196 /* Replace biv with its giv's reduced reg. */
8197 XEXP (x, 1 - arg_operand) = v->new_reg;
8198 /* Replace other operand with the other giv's
8199 reduced reg. */
8200 XEXP (x, arg_operand) = tv->new_reg;
8201 return 1;
8204 #endif
8207 /* If we get here, the biv can't be eliminated. */
8208 return 0;
8210 case MEM:
8211 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8212 biv is used in it, since it will be replaced. */
8213 for (v = bl->giv; v; v = v->next_iv)
8214 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8215 return 1;
8216 break;
8218 default:
8219 break;
8222 /* See if any subexpression fails elimination. */
8223 fmt = GET_RTX_FORMAT (code);
8224 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8226 switch (fmt[i])
8228 case 'e':
8229 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8230 eliminate_p, where_bb, where_insn))
8231 return 0;
8232 break;
8234 case 'E':
8235 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8236 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8237 eliminate_p, where_bb, where_insn))
8238 return 0;
8239 break;
8243 return 1;
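/* Illustrative example of the constant-argument COMPARE case above
   (not taken from a testcase): with biv I and a reduced giv whose
   value is SYM + 4*I (mult_val 4, add_val the symbolic constant
   SYM), an exit test (LT I 100) can be rewritten as
   (LT GIV_REG TEM), where TEM = 100*4 + SYM is computed once before
   the loop; the biv then no longer appears in the test.  */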
8246 /* Return nonzero if the last use of REG
8247 is in an insn following INSN in the same basic block. */
8249 static int
8250 last_use_this_basic_block (reg, insn)
8251 rtx reg;
8252 rtx insn;
8254 rtx n;
8255 for (n = insn;
8256 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
8257 n = NEXT_INSN (n))
8259 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
8260 return 1;
8262 return 0;
8265 /* Called via `note_stores' to record the initial value of a biv. Here we
8266 just record the location of the set and process it later. */
8268 static void
8269 record_initial (dest, set, data)
8270 rtx dest;
8271 rtx set;
8272 void *data;
8274 struct loop_ivs *ivs = (struct loop_ivs *) data;
8275 struct iv_class *bl;
8277 if (GET_CODE (dest) != REG
8278 || REGNO (dest) >= ivs->n_regs
8279 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
8280 return;
8282 bl = REG_IV_CLASS (ivs, REGNO (dest));
8284 /* If this is the first set found, record it. */
8285 if (bl->init_insn == 0)
8287 bl->init_insn = note_insn;
8288 bl->init_set = set;
8292 /* If any of the registers in X are "old" and currently have a last use earlier
8293 than INSN, update them to have a last use of INSN. Their actual last use
8294 will be the previous insn but it will not have a valid uid_luid so we can't
8295 use it. X must be a source expression only. */
8297 static void
8298 update_reg_last_use (x, insn)
8299 rtx x;
8300 rtx insn;
8302 /* Check for the case where INSN does not have a valid luid. In this case,
8303 there is no need to modify the regno_last_uid, as this can only happen
8304 when code is inserted after the loop_end to set a pseudo's final value,
8305 and hence this insn will never be the last use of x.
8306 ??? This comment is not correct. See for example loop_givs_reduce.
8307 This may insert an insn before another new insn. */
8308 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
8309 && INSN_UID (insn) < max_uid_for_loop
8310 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
8312 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
8314 else
8316 register int i, j;
8317 register const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
8318 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8320 if (fmt[i] == 'e')
8321 update_reg_last_use (XEXP (x, i), insn);
8322 else if (fmt[i] == 'E')
8323 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8324 update_reg_last_use (XVECEXP (x, i, j), insn);
8329 /* Given an insn INSN and condition COND, return the condition in a
8330 canonical form to simplify testing by callers. Specifically:
8332 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
8333 (2) Both operands will be machine operands; (cc0) will have been replaced.
8334 (3) If an operand is a constant, it will be the second operand.
8335 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
8336 for GE, GEU, and LEU.
8338 If the condition cannot be understood, or is an inequality floating-point
8339 comparison which needs to be reversed, 0 will be returned.
8341 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
8343 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8344 insn used in locating the condition was found. If a replacement test
8345 of the condition is desired, it should be placed in front of that
8346 insn and we will be sure that the inputs are still valid.
8348 If WANT_REG is non-zero, we wish the condition to be relative to that
8349 register, if possible. Therefore, do not canonicalize the condition
8350 further. */
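/* Examples of the canonicalization (illustrative):
   (LE (reg X) (const_int 4)) becomes (LT (reg X) (const_int 5))
   by rule (4), and (GT (const_int 0) (reg X)) is first swapped by
   rule (3) into (LT (reg X) (const_int 0)).  */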
8353 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
8354 rtx insn;
8355 rtx cond;
8356 int reverse;
8357 rtx *earliest;
8358 rtx want_reg;
8360 enum rtx_code code;
8361 rtx prev = insn;
8362 rtx set;
8363 rtx tem;
8364 rtx op0, op1;
8365 int reverse_code = 0;
8366 enum machine_mode mode;
8368 code = GET_CODE (cond);
8369 mode = GET_MODE (cond);
8370 op0 = XEXP (cond, 0);
8371 op1 = XEXP (cond, 1);
8373 if (reverse)
8374 code = reversed_comparison_code (cond, insn);
8375 if (code == UNKNOWN)
8376 return 0;
8378 if (earliest)
8379 *earliest = insn;
8381 /* If we are comparing a register with zero, see if the register is set
8382 in the previous insn to a COMPARE or a comparison operation. Perform
8383 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
8384 in cse.c */
8386 while (GET_RTX_CLASS (code) == '<'
8387 && op1 == CONST0_RTX (GET_MODE (op0))
8388 && op0 != want_reg)
8390 /* Set non-zero when we find something of interest. */
8391 rtx x = 0;
8393 #ifdef HAVE_cc0
8394 /* If comparison with cc0, import actual comparison from compare
8395 insn. */
8396 if (op0 == cc0_rtx)
8398 if ((prev = prev_nonnote_insn (prev)) == 0
8399 || GET_CODE (prev) != INSN
8400 || (set = single_set (prev)) == 0
8401 || SET_DEST (set) != cc0_rtx)
8402 return 0;
8404 op0 = SET_SRC (set);
8405 op1 = CONST0_RTX (GET_MODE (op0));
8406 if (earliest)
8407 *earliest = prev;
8409 #endif
8411 /* If this is a COMPARE, pick up the two things being compared. */
8412 if (GET_CODE (op0) == COMPARE)
8414 op1 = XEXP (op0, 1);
8415 op0 = XEXP (op0, 0);
8416 continue;
8418 else if (GET_CODE (op0) != REG)
8419 break;
8421 /* Go back to the previous insn. Stop if it is not an INSN. We also
8422 stop if it isn't a single set or if it has a REG_INC note because
8423 we don't want to bother dealing with it. */
8425 if ((prev = prev_nonnote_insn (prev)) == 0
8426 || GET_CODE (prev) != INSN
8427 || FIND_REG_INC_NOTE (prev, 0))
8428 break;
8430 set = set_of (op0, prev);
8432 if (set
8433 && (GET_CODE (set) != SET
8434 || !rtx_equal_p (SET_DEST (set), op0)))
8435 break;
8437 /* If this is setting OP0, get what it sets it to if it looks
8438 relevant. */
8439 if (set)
8441 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
8443 /* ??? We may not combine comparisons done in a CCmode with
8444 comparisons not done in a CCmode. This is to aid targets
8445 like Alpha that have an IEEE compliant EQ instruction, and
8446 a non-IEEE compliant BEQ instruction. The use of CCmode is
8447 actually artificial, simply to prevent the combination, but
8448 should not affect other platforms.
8450 However, we must allow VOIDmode comparisons to match either
8451 CCmode or non-CCmode comparison, because some ports have
8452 modeless comparisons inside branch patterns.
8454 ??? This mode check should perhaps look more like the mode check
8455 in simplify_comparison in combine. */
8457 if ((GET_CODE (SET_SRC (set)) == COMPARE
8458 || (((code == NE
8459 || (code == LT
8460 && GET_MODE_CLASS (inner_mode) == MODE_INT
8461 && (GET_MODE_BITSIZE (inner_mode)
8462 <= HOST_BITS_PER_WIDE_INT)
8463 && (STORE_FLAG_VALUE
8464 & ((HOST_WIDE_INT) 1
8465 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8466 #ifdef FLOAT_STORE_FLAG_VALUE
8467 || (code == LT
8468 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8469 && (REAL_VALUE_NEGATIVE
8470 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8471 #endif
8473 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
8474 && (((GET_MODE_CLASS (mode) == MODE_CC)
8475 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8476 || mode == VOIDmode || inner_mode == VOIDmode))
8477 x = SET_SRC (set);
8478 else if (((code == EQ
8479 || (code == GE
8480 && (GET_MODE_BITSIZE (inner_mode)
8481 <= HOST_BITS_PER_WIDE_INT)
8482 && GET_MODE_CLASS (inner_mode) == MODE_INT
8483 && (STORE_FLAG_VALUE
8484 & ((HOST_WIDE_INT) 1
8485 << (GET_MODE_BITSIZE (inner_mode) - 1))))
8486 #ifdef FLOAT_STORE_FLAG_VALUE
8487 || (code == GE
8488 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
8489 && (REAL_VALUE_NEGATIVE
8490 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
8491 #endif
8493 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
8494 && (((GET_MODE_CLASS (mode) == MODE_CC)
8495 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
8496 || mode == VOIDmode || inner_mode == VOIDmode))
8499 reverse_code = 1;
8500 x = SET_SRC (set);
8502 else
8503 break;
8506 else if (reg_set_p (op0, prev))
8507 /* If this sets OP0, but not directly, we have to give up. */
8508 break;
8510 if (x)
8512 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
8513 code = GET_CODE (x);
8514 if (reverse_code)
8516 code = reversed_comparison_code (x, prev);
8517 if (code == UNKNOWN)
8518 return 0;
8519 reverse_code = 0;
8522 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
8523 if (earliest)
8524 *earliest = prev;
8528 /* If constant is first, put it last. */
8529 if (CONSTANT_P (op0))
8530 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
8532 /* If OP0 is the result of a comparison, we weren't able to find what
8533 was really being compared, so fail. */
8534 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
8535 return 0;
8537 /* Canonicalize any ordered comparison with integers involving equality
8538 if we can do computations in the relevant mode and we do not
8539 overflow. */
8541 if (GET_CODE (op1) == CONST_INT
8542 && GET_MODE (op0) != VOIDmode
8543 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
8545 HOST_WIDE_INT const_val = INTVAL (op1);
8546 unsigned HOST_WIDE_INT uconst_val = const_val;
8547 unsigned HOST_WIDE_INT max_val
8548 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
8550 switch (code)
8552 case LE:
8553 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
8554 code = LT, op1 = GEN_INT (const_val + 1);
8555 break;
8557 /* When cross-compiling, const_val might be sign-extended from
8558 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
8559 case GE:
8560 if ((HOST_WIDE_INT) (const_val & max_val)
8561 != (((HOST_WIDE_INT) 1
8562 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
8563 code = GT, op1 = GEN_INT (const_val - 1);
8564 break;
8566 case LEU:
8567 if (uconst_val < max_val)
8568 code = LTU, op1 = GEN_INT (uconst_val + 1);
8569 break;
8571 case GEU:
8572 if (uconst_val != 0)
8573 code = GTU, op1 = GEN_INT (uconst_val - 1);
8574 break;
8576 default:
8577 break;
8581 #ifdef HAVE_cc0
8582 /* Never return CC0; return zero instead. */
8583 if (op0 == cc0_rtx)
8584 return 0;
8585 #endif
8587 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
8590 /* Given a jump insn JUMP, return the condition that will cause it to branch
8591 to its JUMP_LABEL. If the condition cannot be understood, or is an
8592 inequality floating-point comparison which needs to be reversed, 0 will
8593 be returned.
8595 If EARLIEST is non-zero, it is a pointer to a place where the earliest
8596 insn used in locating the condition was found. If a replacement test
8597 of the condition is desired, it should be placed in front of that
8598 insn and we will be sure that the inputs are still valid. */
8601 get_condition (jump, earliest)
8602 rtx jump;
8603 rtx *earliest;
8605 rtx cond;
8606 int reverse;
8607 rtx set;
8609 /* If this is not a standard conditional jump, we can't parse it. */
8610 if (GET_CODE (jump) != JUMP_INSN
8611 || ! any_condjump_p (jump))
8612 return 0;
8613 set = pc_set (jump);
8615 cond = XEXP (SET_SRC (set), 0);
8617 /* If this branches to JUMP_LABEL when the condition is false, reverse
8618 the condition. */
8619 reverse
8620 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
8621 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
8623 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
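/* For example (illustrative): for a conditional branch whose pattern
   is

       (set (pc) (if_then_else (lt (reg 60) (const_int 10))
                               (label_ref 23) (pc)))

   get_condition returns the (possibly canonicalized) condition
   (lt (reg 60) (const_int 10)); had the label_ref been in the else
   arm instead, the reversed condition (ge ...) would be
   canonicalized and returned.  */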
8626 /* Similar to above routine, except that we also put an invariant last
8627 unless both operands are invariants. */
8630 get_condition_for_loop (loop, x)
8631 const struct loop *loop;
8632 rtx x;
8634 rtx comparison = get_condition (x, (rtx*)0);
8636 if (comparison == 0
8637 || ! loop_invariant_p (loop, XEXP (comparison, 0))
8638 || loop_invariant_p (loop, XEXP (comparison, 1)))
8639 return comparison;
8641 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
8642 XEXP (comparison, 1), XEXP (comparison, 0));
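/* e.g. (illustrative): if the jump tests (GT (reg N) (reg I)) where
   N is loop invariant and I is not, the result is the equivalent
   condition (LT (reg I) (reg N)), so callers can rely on the
   invariant operand being last.  */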
8645 /* Scan the function and determine whether it has indirect (computed) jumps.
8647 This is taken mostly from flow.c; similar code exists elsewhere
8648 in the compiler. It may be useful to put this into rtlanal.c. */
8649 static int
8650 indirect_jump_in_function_p (start)
8651 rtx start;
8653 rtx insn;
8655 for (insn = start; insn; insn = NEXT_INSN (insn))
8656 if (computed_jump_p (insn))
8657 return 1;
8659 return 0;
8662 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
8663 documentation for LOOP_MEMS for the definition of `appropriate'.
8664 This function is called from prescan_loop via for_each_rtx. */
8666 static int
8667 insert_loop_mem (mem, data)
8668 rtx *mem;
8669 void *data;
8671 struct loop_info *loop_info = data;
8672 int i;
8673 rtx m = *mem;
8675 if (m == NULL_RTX)
8676 return 0;
8678 switch (GET_CODE (m))
8680 case MEM:
8681 break;
8683 case CLOBBER:
8684 /* We're not interested in MEMs that are only clobbered. */
8685 return -1;
8687 case CONST_DOUBLE:
8688 /* We're not interested in the MEM associated with a
8689 CONST_DOUBLE, so there's no need to traverse into this. */
8690 return -1;
8692 case EXPR_LIST:
8693 /* We're not interested in any MEMs that only appear in notes. */
8694 return -1;
8696 default:
8697 /* This is not a MEM. */
8698 return 0;
8701 /* See if we've already seen this MEM. */
8702 for (i = 0; i < loop_info->mems_idx; ++i)
8703 if (rtx_equal_p (m, loop_info->mems[i].mem))
8705 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
8706 /* The modes of the two memory accesses are different. If
8707 this happens, something tricky is going on, and we just
8708 don't optimize accesses to this MEM. */
8709 loop_info->mems[i].optimize = 0;
8711 return 0;
8714 /* Resize the array, if necessary. */
8715 if (loop_info->mems_idx == loop_info->mems_allocated)
8717 if (loop_info->mems_allocated != 0)
8718 loop_info->mems_allocated *= 2;
8719 else
8720 loop_info->mems_allocated = 32;
8722 loop_info->mems = (loop_mem_info *)
8723 xrealloc (loop_info->mems,
8724 loop_info->mems_allocated * sizeof (loop_mem_info));
8727 /* Actually insert the MEM. */
8728 loop_info->mems[loop_info->mems_idx].mem = m;
8729 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
8730 because we can't put it in a register. We still store it in the
8731 table, though, so that if we see the same address later, but in a
8732 non-BLK mode, we'll not think we can optimize it at that point. */
8733 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
8734 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
8735 ++loop_info->mems_idx;
8737 return 0;
8741 /* Allocate REGS->ARRAY or reallocate it if it is too small.
8743 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
8744 register that is modified by an insn in the loop. If the
8745 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
8746 more, stop incrementing it, to avoid overflow.
8748 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
8749 register I is used, if it is only used once. Otherwise, it is set
8750 to 0 (for no uses) or const0_rtx for more than one use. This
8751 parameter may be zero, in which case this processing is not done.
8753 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
8754 optimize register I. */
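/* In summary (restating the above), after the scan each element
   carries a small three-way encoding:

       regs->array[i].single_usage == 0           no use seen
       regs->array[i].single_usage == insn        exactly one use, in insn
       regs->array[i].single_usage == const0_rtx  more than one use

   while regs->array[i].set_in_loop counts sets, saturating at 127.  */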
8756 static void
8757 loop_regs_scan (loop, extra_size)
8758 const struct loop *loop;
8759 int extra_size;
8761 struct loop_regs *regs = LOOP_REGS (loop);
8762 int old_nregs;
8763 /* last_set[n] is nonzero iff reg n has been set in the current
8764 basic block. In that case, it is the insn that last set reg n. */
8765 rtx *last_set;
8766 rtx insn;
8767 int i;
8769 old_nregs = regs->num;
8770 regs->num = max_reg_num ();
8772 /* Grow the regs array if not allocated or too small. */
8773 if (regs->num >= regs->size)
8775 regs->size = regs->num + extra_size;
8777 regs->array = (struct loop_reg *)
8778 xrealloc (regs->array, regs->size * sizeof (*regs->array));
8780 /* Zero the new elements. */
8781 memset (regs->array + old_nregs, 0,
8782 (regs->size - old_nregs) * sizeof (*regs->array));
8785 /* Clear previously scanned fields but do not clear n_times_set. */
8786 for (i = 0; i < old_nregs; i++)
8788 regs->array[i].set_in_loop = 0;
8789 regs->array[i].may_not_optimize = 0;
8790 regs->array[i].single_usage = NULL_RTX;
8793 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
8795 /* Scan the loop, recording register usage. */
8796 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8797 insn = NEXT_INSN (insn))
8799 if (INSN_P (insn))
8801 /* Record registers that have exactly one use. */
8802 find_single_use_in_loop (regs, insn, PATTERN (insn));
8804 /* Include uses in REG_EQUAL notes. */
8805 if (REG_NOTES (insn))
8806 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
8808 if (GET_CODE (PATTERN (insn)) == SET
8809 || GET_CODE (PATTERN (insn)) == CLOBBER)
8810 count_one_set (regs, insn, PATTERN (insn), last_set);
8811 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8813 register int i;
8814 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8815 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
8816 last_set);
8820 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
8821 memset (last_set, 0, regs->num * sizeof (rtx));
8824 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8826 regs->array[i].may_not_optimize = 1;
8827 regs->array[i].set_in_loop = 1;
8830 #ifdef AVOID_CCMODE_COPIES
8831 /* Don't try to move insns which set CC registers if we should not
8832 create CCmode register copies. */
8833 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
8834 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
8835 regs->array[i].may_not_optimize = 1;
8836 #endif
8838 /* Set regs->array[I].n_times_set for the new registers. */
8839 for (i = old_nregs; i < regs->num; i++)
8840 regs->array[i].n_times_set = regs->array[i].set_in_loop;
8842 free (last_set);
8845 /* Returns the number of real INSNs in the LOOP. */
8847 static int
8848 count_insns_in_loop (loop)
8849 const struct loop *loop;
8851 int count = 0;
8852 rtx insn;
8854 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
8855 insn = NEXT_INSN (insn))
8856 if (INSN_P (insn))
8857 ++count;
8859 return count;
8862 /* Move MEMs into registers for the duration of the loop. */
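/* For illustration (a source-level analogue; the pass itself works
   on rtl): assuming *P has a loop-invariant address and is not
   aliased by any store in the loop,

       before:  while (c)  sum += *p;
       after:   r = *p;  while (c)  sum += r;

   and if the MEM were also written inside the loop, a store
   `*p = r' would additionally be emitted just past the loop end.  */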
8864 static void
8865 load_mems (loop)
8866 const struct loop *loop;
8868 struct loop_info *loop_info = LOOP_INFO (loop);
8869 struct loop_regs *regs = LOOP_REGS (loop);
8870 int maybe_never = 0;
8871 int i;
8872 rtx p, prev_ebb_head;
8873 rtx label = NULL_RTX;
8874 rtx end_label;
8875 /* Nonzero if the next instruction may never be executed. */
8876 int next_maybe_never = 0;
8877 unsigned int last_max_reg = max_reg_num ();
8879 if (loop_info->mems_idx == 0)
8880 return;
8882 /* We cannot use next_label here because it skips over normal insns. */
8883 end_label = next_nonnote_insn (loop->end);
8884 if (end_label && GET_CODE (end_label) != CODE_LABEL)
8885 end_label = NULL_RTX;
8887 /* Check to see if it's possible that some instructions in the loop are
8888 never executed. Also check if there is a goto out of the loop other
8889 than right after the end of the loop. */
8890 for (p = next_insn_in_loop (loop, loop->scan_start);
8891 p != NULL_RTX;
8892 p = next_insn_in_loop (loop, p))
8894 if (GET_CODE (p) == CODE_LABEL)
8895 maybe_never = 1;
8896 else if (GET_CODE (p) == JUMP_INSN
8897 /* If we enter the loop in the middle, and scan
8898 around to the beginning, don't set maybe_never
8899 for that. This must be an unconditional jump,
8900 otherwise the code at the top of the loop might
8901 never be executed. Unconditional jumps are
8902 followed by a barrier and then the loop end. */
8903 && ! (GET_CODE (p) == JUMP_INSN
8904 && JUMP_LABEL (p) == loop->top
8905 && NEXT_INSN (NEXT_INSN (p)) == loop->end
8906 && any_uncondjump_p (p)))
8908 /* If this is a jump outside of the loop but not right
8909 after the end of the loop, we would have to emit new fixup
8910 sequences for each such label. */
8911 if (/* If we can't tell where control might go when this
8912 JUMP_INSN is executed, we must be conservative. */
8913 !JUMP_LABEL (p)
8914 || (JUMP_LABEL (p) != end_label
8915 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
8916 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
8917 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
8918 return;
8920 if (!any_condjump_p (p))
8921 /* Something complicated. */
8922 maybe_never = 1;
8923 else
8924 /* If there are any more instructions in the loop, they
8925 might not be reached. */
8926 next_maybe_never = 1;
8928 else if (next_maybe_never)
8929 maybe_never = 1;
8932 /* Find start of the extended basic block that enters the loop. */
8933 for (p = loop->start;
8934 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
8935 p = PREV_INSN (p))
8937 prev_ebb_head = p;
8939 cselib_init ();
8941 /* Build table of mems that get set to constant values before the
8942 loop. */
8943 for (; p != loop->start; p = NEXT_INSN (p))
8944 cselib_process_insn (p);
8946 /* Actually move the MEMs. */
8947 for (i = 0; i < loop_info->mems_idx; ++i)
8949 regset_head load_copies;
8950 regset_head store_copies;
8951 int written = 0;
8952 rtx reg;
8953 rtx mem = loop_info->mems[i].mem;
8954 rtx mem_list_entry;
8956 if (MEM_VOLATILE_P (mem)
8957 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
8958 /* There's no telling whether or not MEM is modified. */
8959 loop_info->mems[i].optimize = 0;
8961 /* Go through the MEMs written to in the loop to see if this
8962 one is aliased by one of them. */
8963 mem_list_entry = loop_info->store_mems;
8964 while (mem_list_entry)
8966 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
8967 written = 1;
8968 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
8969 mem, rtx_varies_p))
8971 /* MEM is indeed aliased by this store. */
8972 loop_info->mems[i].optimize = 0;
8973 break;
8975 mem_list_entry = XEXP (mem_list_entry, 1);
8978 if (flag_float_store && written
8979 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
8980 loop_info->mems[i].optimize = 0;
8982 /* If this MEM is written to, we must be sure that there
8983 are no reads from another MEM that aliases this one. */
8984 if (loop_info->mems[i].optimize && written)
8986 int j;
8988 for (j = 0; j < loop_info->mems_idx; ++j)
8990 if (j == i)
8991 continue;
8992 else if (true_dependence (mem,
8993 VOIDmode,
8994 loop_info->mems[j].mem,
8995 rtx_varies_p))
8997 /* It's not safe to hoist loop_info->mems[i] out of
8998 the loop because writes to it might not be
8999 seen by reads from loop_info->mems[j]. */
9000 loop_info->mems[i].optimize = 0;
9001 break;
9006 if (maybe_never && may_trap_p (mem))
9007 /* We can't access the MEM outside the loop; it might
9008 cause a trap that wouldn't have happened otherwise. */
9009 loop_info->mems[i].optimize = 0;
9011 if (!loop_info->mems[i].optimize)
9012 /* We thought we were going to lift this MEM out of the
9013 loop, but later discovered that we could not. */
9014 continue;
9016 INIT_REG_SET (&load_copies);
9017 INIT_REG_SET (&store_copies);
9019 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9020 order to keep scan_loop from moving stores to this MEM
9021 out of the loop just because this REG is neither a
9022 user-variable nor used in the loop test. */
9023 reg = gen_reg_rtx (GET_MODE (mem));
9024 REG_USERVAR_P (reg) = 1;
9025 loop_info->mems[i].reg = reg;
9027 /* Now, replace all references to the MEM with the
9028 corresponding pseudos. */
9029 maybe_never = 0;
9030 for (p = next_insn_in_loop (loop, loop->scan_start);
9031 p != NULL_RTX;
9032 p = next_insn_in_loop (loop, p))
9034 if (INSN_P (p))
9036 rtx set;
9038 set = single_set (p);
9040 /* See if this copies the mem into a register that isn't
9041 modified afterwards. We'll try to do copy propagation
9042 a little further on. */
9043 if (set
9044 /* @@@ This test is _way_ too conservative. */
9045 && ! maybe_never
9046 && GET_CODE (SET_DEST (set)) == REG
9047 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9048 && REGNO (SET_DEST (set)) < last_max_reg
9049 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9050 && rtx_equal_p (SET_SRC (set), mem))
9051 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9053 /* See if this copies the mem from a register that isn't
9054 modified afterwards. We'll try to remove the
9055 redundant copy later on by doing a little register
9056 renaming and copy propagation. This will help
9057 to untangle things for the BIV detection code. */
9058 if (set
9059 && ! maybe_never
9060 && GET_CODE (SET_SRC (set)) == REG
9061 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9062 && REGNO (SET_SRC (set)) < last_max_reg
9063 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9064 && rtx_equal_p (SET_DEST (set), mem))
9065 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9067 /* Replace the memory reference with the shadow register. */
9068 replace_loop_mems (p, loop_info->mems[i].mem,
9069 loop_info->mems[i].reg);
9072 if (GET_CODE (p) == CODE_LABEL
9073 || GET_CODE (p) == JUMP_INSN)
9074 maybe_never = 1;
9077 if (! apply_change_group ())
9078 /* We couldn't replace all occurrences of the MEM. */
9079 loop_info->mems[i].optimize = 0;
9080 else
9082 /* Load the memory immediately before LOOP->START, which is
9083 the NOTE_LOOP_BEG. */
9084 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9085 rtx set;
9086 rtx best = mem;
9087 int j;
9088 struct elt_loc_list *const_equiv = 0;
9090 if (e)
9092 struct elt_loc_list *equiv;
9093 struct elt_loc_list *best_equiv = 0;
9094 for (equiv = e->locs; equiv; equiv = equiv->next)
9096 if (CONSTANT_P (equiv->loc))
9097 const_equiv = equiv;
9098 else if (GET_CODE (equiv->loc) == REG
9099 /* Extending hard register lifetimes causes crashes
9100 on SRC targets. Doing so on non-SRC targets is
9101 probably also not a good idea, since we most
9102 likely have a pseudo-register equivalence as
9103 well. */
9104 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9105 best_equiv = equiv;
9107 /* Use the constant equivalence if that is cheap enough. */
9108 if (! best_equiv)
9109 best_equiv = const_equiv;
9110 else if (const_equiv
9111 && (rtx_cost (const_equiv->loc, SET)
9112 <= rtx_cost (best_equiv->loc, SET)))
9114 best_equiv = const_equiv;
9115 const_equiv = 0;
9118 /* If best_equiv is nonzero, we know that MEM is set to a
9119 constant or register before the loop. We will use this
9120 knowledge to initialize the shadow register with that
9121 constant or reg rather than by loading from MEM. */
9122 if (best_equiv)
9123 best = copy_rtx (best_equiv->loc);
9126 set = gen_move_insn (reg, best);
9127 set = loop_insn_hoist (loop, set);
9128 if (REG_P (best))
9130 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9131 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9133 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9134 break;
9138 if (const_equiv)
9139 REG_NOTES (set) = gen_rtx_EXPR_LIST (REG_EQUAL,
9140 copy_rtx (const_equiv->loc),
9141 REG_NOTES (set));
9143 if (written)
9145 if (label == NULL_RTX)
9147 label = gen_label_rtx ();
9148 emit_label_after (label, loop->end);
9151 /* Store the memory immediately after END, which is
9152 the NOTE_LOOP_END. */
9153 set = gen_move_insn (copy_rtx (mem), reg);
9154 loop_insn_emit_after (loop, 0, label, set);
9157 if (loop_dump_stream)
9159 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9160 REGNO (reg), (written ? "r/w" : "r/o"));
9161 print_rtl (loop_dump_stream, mem);
9162 fputc ('\n', loop_dump_stream);
9165 /* Attempt a bit of copy propagation. This helps untangle the
9166 data flow, and enables {basic,general}_induction_var to find
9167 more bivs/givs. */
9168 EXECUTE_IF_SET_IN_REG_SET
9169 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9171 try_copy_prop (loop, reg, j);
9173 CLEAR_REG_SET (&load_copies);
9175 EXECUTE_IF_SET_IN_REG_SET
9176 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9178 try_swap_copy_prop (loop, reg, j);
9180 CLEAR_REG_SET (&store_copies);
9184 if (label != NULL_RTX && end_label != NULL_RTX)
9186 /* Now, we need to replace all references to the previous exit
9187 label with the new one. */
9188 rtx_pair rr;
9189 rr.r1 = end_label;
9190 rr.r2 = label;
9192 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9194 for_each_rtx (&p, replace_label, &rr);
9196 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
9197 field. This is not handled by for_each_rtx because it doesn't
9198 handle unprinted ('0') fields. We need to update JUMP_LABEL
9199 because the immediately following unroll pass will use it.
9200 replace_label would not work anyway, because that only handles
9201 LABEL_REFs. */
9202 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
9203 JUMP_LABEL (p) = label;
9207 cselib_finish ();
9210 /* For communication between note_reg_stored and its caller. */
9211 struct note_reg_stored_arg
9213 int set_seen;
9214 rtx reg;
9217 /* Called via note_stores, record in SET_SEEN whether X, which is written,
9218 is equal to ARG. */
9219 static void
9220 note_reg_stored (x, setter, arg)
9221 rtx x, setter ATTRIBUTE_UNUSED;
9222 void *arg;
9224 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9225 if (t->reg == x)
9226 t->set_seen = 1;
9229 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9230 There must be exactly one insn that sets this pseudo; it will be
9231 deleted if all replacements succeed and we can prove that the register
9232 is not used after the loop. */
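/* e.g. (illustrative): after load_mems substitutes shadow register R
   for a MEM, the loop may still contain

       (set (reg REGNO) (reg R))  ... later uses of (reg REGNO) ...

   Replacing each later use of REGNO with R makes the copy dead, so
   the initializing insn (or, via its REG_RETVAL note, the whole
   libcall sequence) can then be deleted.  */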
9234 static void
9235 try_copy_prop (loop, replacement, regno)
9236 const struct loop *loop;
9237 rtx replacement;
9238 unsigned int regno;
9240 /* This is the reg that we are copying from. */
9241 rtx reg_rtx = regno_reg_rtx[regno];
9242 rtx init_insn = 0;
9243 rtx insn;
9244 /* These help keep track of whether we replaced all uses of the reg. */
9245 int replaced_last = 0;
9246 int store_is_first = 0;
9248 for (insn = next_insn_in_loop (loop, loop->scan_start);
9249 insn != NULL_RTX;
9250 insn = next_insn_in_loop (loop, insn))
9252 rtx set;
9254 /* Only substitute within one extended basic block from the initializing
9255 insn. */
9256 if (GET_CODE (insn) == CODE_LABEL && init_insn)
9257 break;
9259 if (! INSN_P (insn))
9260 continue;
9262 /* Is this the initializing insn? */
9263 set = single_set (insn);
9264 if (set
9265 && GET_CODE (SET_DEST (set)) == REG
9266 && REGNO (SET_DEST (set)) == regno)
9268 if (init_insn)
9269 abort ();
9271 init_insn = insn;
9272 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
9273 store_is_first = 1;
9276 /* Only substitute after seeing the initializing insn. */
9277 if (init_insn && insn != init_insn)
9279 struct note_reg_stored_arg arg;
9281 replace_loop_regs (insn, reg_rtx, replacement);
9282 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
9283 replaced_last = 1;
9285 /* Stop replacing when REPLACEMENT is modified. */
9286 arg.reg = replacement;
9287 arg.set_seen = 0;
9288 note_stores (PATTERN (insn), note_reg_stored, &arg);
9289 if (arg.set_seen)
9290 break;
9293 if (! init_insn)
9294 abort ();
9295 if (apply_change_group ())
9297 if (loop_dump_stream)
9298 fprintf (loop_dump_stream, " Replaced reg %d", regno);
9299 if (store_is_first && replaced_last)
9301 rtx first;
9302 rtx retval_note;
9304 /* Assume we're just deleting INIT_INSN. */
9305 first = init_insn;
9306 /* Look for REG_RETVAL note. If we're deleting the end of
9307 the libcall sequence, the whole sequence can go. */
9308 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
9309 /* If we found a REG_RETVAL note, find the first instruction
9310 in the sequence. */
9311 if (retval_note)
9312 first = XEXP (retval_note, 0);
9314 /* Delete the instructions. */
9315 loop_delete_insns (first, init_insn);
9317 if (loop_dump_stream)
9318 fprintf (loop_dump_stream, ".\n");
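/* Worked example (pseudo register numbers invented for illustration):
   calling try_copy_prop (loop, regno_reg_rtx[61], 60) on a body like

       (set (reg 60) (reg 61))                        ; init_insn
       (set (reg 63) (plus (reg 60) (const_int 4)))   ; only use

   rewrites the use into (plus (reg 61) (const_int 4)); since that was
   both the first and last use of reg 60, init_insn (or the enclosing
   libcall sequence) is deleted as well.  */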
9322 /* Replace all the instructions from FIRST up to and including LAST
9323 with NOTE_INSN_DELETED notes. */
9325 static void
9326 loop_delete_insns (first, last)
9327 rtx first;
9328 rtx last;
9330 while (1)
9332 PUT_CODE (first, NOTE);
9333 NOTE_LINE_NUMBER (first) = NOTE_INSN_DELETED;
9334 if (loop_dump_stream)
9335 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
9336 INSN_UID (first));
9338 /* If this was the LAST instruction we're supposed to delete,
9339 we're done. */
9340 if (first == last)
9341 break;
9343 first = NEXT_INSN (first);
9347 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
9348 loop LOOP if the order of the sets of these registers can be
9349 swapped. There must be exactly one insn within the loop that sets
9350 this pseudo, followed immediately by a move insn that copies
9351 REGNO to REPLACEMENT. */
9352 static void
9353 try_swap_copy_prop (loop, replacement, regno)
9354 const struct loop *loop;
9355 rtx replacement;
9356 unsigned int regno;
9358 rtx insn;
9359 rtx set = NULL_RTX;
9360 unsigned int new_regno;
9362 new_regno = REGNO (replacement);
9364 for (insn = next_insn_in_loop (loop, loop->scan_start);
9365 insn != NULL_RTX;
9366 insn = next_insn_in_loop (loop, insn))
9368 /* Search for the insn that copies REGNO to NEW_REGNO. */
9369 if (INSN_P (insn)
9370 && (set = single_set (insn))
9371 && GET_CODE (SET_DEST (set)) == REG
9372 && REGNO (SET_DEST (set)) == new_regno
9373 && GET_CODE (SET_SRC (set)) == REG
9374 && REGNO (SET_SRC (set)) == regno)
9375 break;
9378 if (insn != NULL_RTX)
9380 rtx prev_insn;
9381 rtx prev_set;
9383 /* Some DEF-USE info would come in handy here to make this
9384 function more general. For now, just check the previous insn
9385 which is the most likely candidate for setting REGNO. */
9387 prev_insn = PREV_INSN (insn);
9389 if (INSN_P (prev_insn)
9390 && (prev_set = single_set (prev_insn))
9391 && GET_CODE (SET_DEST (prev_set)) == REG
9392 && REGNO (SET_DEST (prev_set)) == regno)
9394 /* We have:
9395 (set (reg regno) (expr))
9396 (set (reg new_regno) (reg regno))
9398 so try converting this to:
9399 (set (reg new_regno) (expr))
9400 (set (reg regno) (reg new_regno))
9402 The former construct is often generated when a global
9403 variable used for an induction variable is shadowed by a
9404 register (NEW_REGNO). The latter construct improves the
9405 chances of GIV replacement and BIV elimination. */
9407 validate_change (prev_insn, &SET_DEST (prev_set),
9408 replacement, 1);
9409 validate_change (insn, &SET_DEST (set),
9410 SET_SRC (set), 1);
9411 validate_change (insn, &SET_SRC (set),
9412 replacement, 1);
9414 if (apply_change_group ())
9416 if (loop_dump_stream)
9417 fprintf (loop_dump_stream,
9418 " Swapped set of reg %d at %d with reg %d at %d.\n",
9419 regno, INSN_UID (insn),
9420 new_regno, INSN_UID (prev_insn));
9422 /* Update first use of REGNO. */
9423 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
9424 REGNO_FIRST_UID (regno) = INSN_UID (insn);
9426 /* Now perform copy propagation, in the hope of removing
9427 all remaining uses of REGNO within the loop. */
9428 try_copy_prop (loop, replacement, regno);
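/* Concrete instance (register numbers invented): with REGNO = 60 and
   REPLACEMENT = (reg 61), the pair

       (set (reg 60) (plus (reg 60) (const_int 1)))
       (set (reg 61) (reg 60))

   becomes

       (set (reg 61) (plus (reg 60) (const_int 1)))
       (set (reg 60) (reg 61))

   after which the try_copy_prop call above can usually remove the
   remaining uses of reg 60 within the loop.  */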
9434 /* Replace MEM with its associated pseudo register. This function is
9435 called from load_mems via for_each_rtx. DATA is actually a pointer
9436 to a structure describing the instruction currently being scanned
9437 and the MEM we are currently replacing. */
9439 static int
9440 replace_loop_mem (mem, data)
9441 rtx *mem;
9442 void *data;
9444 loop_replace_args *args = (loop_replace_args *) data;
9445 rtx m = *mem;
9447 if (m == NULL_RTX)
9448 return 0;
9450 switch (GET_CODE (m))
9452 case MEM:
9453 break;
9455 case CONST_DOUBLE:
9456 /* We're not interested in the MEM associated with a
9457 CONST_DOUBLE, so there's no need to traverse into one. */
9458 return -1;
9460 default:
9461 /* This is not a MEM. */
9462 return 0;
9465 if (!rtx_equal_p (args->match, m))
9466 /* This is not the MEM we are currently replacing. */
9467 return 0;
9469 /* Actually replace the MEM. */
9470 validate_change (args->insn, mem, args->replacement, 1);
9472 return 0;
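/* Background note on the for_each_rtx callback protocol (added for
   clarity; see rtlanal.c): returning 0 continues the traversal,
   returning -1 skips the sub-rtxes of the current expression, and
   any other nonzero value stops the walk and is returned to the
   caller.  */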
9475 static void
9476 replace_loop_mems (insn, mem, reg)
9477 rtx insn;
9478 rtx mem;
9479 rtx reg;
9481 loop_replace_args args;
9483 args.insn = insn;
9484 args.match = mem;
9485 args.replacement = reg;
9487 for_each_rtx (&insn, replace_loop_mem, &args);
9490 /* Replace one register with another. Called through for_each_rtx; PX points
9491 to the rtx being scanned. DATA is actually a pointer to
9492 a structure of arguments. */
9494 static int
9495 replace_loop_reg (px, data)
9496 rtx *px;
9497 void *data;
9499 rtx x = *px;
9500 loop_replace_args *args = (loop_replace_args *) data;
9502 if (x == NULL_RTX)
9503 return 0;
9505 if (x == args->match)
9506 validate_change (args->insn, px, args->replacement, 1);
9508 return 0;
9511 static void
9512 replace_loop_regs (insn, reg, replacement)
9513 rtx insn;
9514 rtx reg;
9515 rtx replacement;
9517 loop_replace_args args;
9519 args.insn = insn;
9520 args.match = reg;
9521 args.replacement = replacement;
9523 for_each_rtx (&insn, replace_loop_reg, &args);
9526 /* Replace occurrences of the old exit label for the loop with the new
9527 one. DATA is an rtx_pair containing the old and new labels,
9528 respectively. */
9530 static int
9531 replace_label (x, data)
9532 rtx *x;
9533 void *data;
9535 rtx l = *x;
9536 rtx old_label = ((rtx_pair *) data)->r1;
9537 rtx new_label = ((rtx_pair *) data)->r2;
9539 if (l == NULL_RTX)
9540 return 0;
9542 if (GET_CODE (l) != LABEL_REF)
9543 return 0;
9545 if (XEXP (l, 0) != old_label)
9546 return 0;
9548 XEXP (l, 0) = new_label;
9549 ++LABEL_NUSES (new_label);
9550 --LABEL_NUSES (old_label);
9552 return 0;
9555 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
9556 (ignored in the interim). */
9558 static rtx
9559 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
9560 const struct loop *loop ATTRIBUTE_UNUSED;
9561 basic_block where_bb ATTRIBUTE_UNUSED;
9562 rtx where_insn;
9563 rtx pattern;
9565 return emit_insn_after (pattern, where_insn);
9569 /* If WHERE_INSN is non-zero, emit insn for PATTERN before WHERE_INSN
9570 in basic block WHERE_BB (ignored in the interim) within the loop;
9571 otherwise, hoist PATTERN into the loop pre-header. */
9573 rtx
9574 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
9575 const struct loop *loop;
9576 basic_block where_bb ATTRIBUTE_UNUSED;
9577 rtx where_insn;
9578 rtx pattern;
9580 if (! where_insn)
9581 return loop_insn_hoist (loop, pattern);
9582 return emit_insn_before (pattern, where_insn);
9586 /* Emit call insn for PATTERN before WHERE_INSN in basic block
9587 WHERE_BB (ignored in the interim) within the loop. */
9589 static rtx
9590 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
9591 const struct loop *loop ATTRIBUTE_UNUSED;
9592 basic_block where_bb ATTRIBUTE_UNUSED;
9593 rtx where_insn;
9594 rtx pattern;
9596 return emit_call_insn_before (pattern, where_insn);
9600 /* Hoist insn for PATTERN into the loop pre-header. */
9602 rtx
9603 loop_insn_hoist (loop, pattern)
9604 const struct loop *loop;
9605 rtx pattern;
9607 return loop_insn_emit_before (loop, 0, loop->start, pattern);
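/* Usage sketch (hypothetical call, not taken from this file): to
   initialize a register once in the pre-header one might write

       loop_insn_hoist (loop, gen_move_insn (reg, const0_rtx));

   which emits the move immediately before loop->start.  */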
9611 /* Hoist call insn for PATTERN into the loop pre-header. */
9613 static rtx
9614 loop_call_insn_hoist (loop, pattern)
9615 const struct loop *loop;
9616 rtx pattern;
9618 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
9622 /* Sink insn for PATTERN after the loop end. */
9624 rtx
9625 loop_insn_sink (loop, pattern)
9626 const struct loop *loop;
9627 rtx pattern;
9629 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
9633 /* If the loop has multiple exits, emit insn for PATTERN before the
9634 loop to ensure that it will always be executed no matter how the
9635 loop exits. Otherwise, emit the insn for PATTERN after the loop,
9636 since this is slightly more efficient. */
9638 static rtx
9639 loop_insn_sink_or_swim (loop, pattern)
9640 const struct loop *loop;
9641 rtx pattern;
9643 if (loop->exit_count)
9644 return loop_insn_hoist (loop, pattern);
9645 else
9646 return loop_insn_sink (loop, pattern);
9649 static void
9650 loop_ivs_dump (loop, file, verbose)
9651 const struct loop *loop;
9652 FILE *file;
9653 int verbose;
9655 struct iv_class *bl;
9656 int iv_num = 0;
9658 if (! loop || ! file)
9659 return;
9661 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
9662 iv_num++;
9664 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
9666 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
9668 loop_iv_class_dump (bl, file, verbose);
9669 fputc ('\n', file);
9674 static void
9675 loop_iv_class_dump (bl, file, verbose)
9676 const struct iv_class *bl;
9677 FILE *file;
9678 int verbose ATTRIBUTE_UNUSED;
9680 struct induction *v;
9681 rtx incr;
9682 int i;
9684 if (! bl || ! file)
9685 return;
9687 fprintf (file, "IV class for reg %d, benefit %d\n",
9688 bl->regno, bl->total_benefit);
9690 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
9691 if (bl->initial_value)
9693 fprintf (file, ", init val: ");
9694 print_simple_rtl (file, bl->initial_value);
9696 if (bl->initial_test)
9698 fprintf (file, ", init test: ");
9699 print_simple_rtl (file, bl->initial_test);
9701 fputc ('\n', file);
9703 if (bl->final_value)
9705 fprintf (file, " Final val: ");
9706 print_simple_rtl (file, bl->final_value);
9707 fputc ('\n', file);
9710 if ((incr = biv_total_increment (bl)))
9712 fprintf (file, " Total increment: ");
9713 print_simple_rtl (file, incr);
9714 fputc ('\n', file);
9717 /* List the increments. */
9718 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
9720 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
9721 print_simple_rtl (file, v->add_val);
9722 fputc ('\n', file);
9725 /* List the givs. */
9726 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
9728 fprintf (file, " Giv%d: insn %d, benefit %d, ",
9729 i, INSN_UID (v->insn), v->benefit);
9730 if (v->giv_type == DEST_ADDR)
9731 print_simple_rtl (file, v->mem);
9732 else
9733 print_simple_rtl (file, single_set (v->insn));
9734 fputc ('\n', file);
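/* Hypothetical sample of this dump's output (all numbers invented):

       IV class for reg 60, benefit 8
        Init insn 12, init val: (const_int 0)
        Total increment: (const_int 4)
        Inc0: insn 34, incr: (const_int 4)
        Giv0: insn 36, benefit 4, (mem:SI (reg 60))  */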
9739 static void
9740 loop_biv_dump (v, file, verbose)
9741 const struct induction *v;
9742 FILE *file;
9743 int verbose;
9745 if (! v || ! file)
9746 return;
9748 fprintf (file,
9749 "Biv %d: insn %d",
9750 REGNO (v->dest_reg), INSN_UID (v->insn));
9751 fprintf (file, " const ");
9752 print_simple_rtl (file, v->add_val);
9754 if (verbose && v->final_value)
9756 fputc ('\n', file);
9757 fprintf (file, " final ");
9758 print_simple_rtl (file, v->final_value);
9761 fputc ('\n', file);
9765 static void
9766 loop_giv_dump (v, file, verbose)
9767 const struct induction *v;
9768 FILE *file;
9769 int verbose;
9771 if (! v || ! file)
9772 return;
9774 if (v->giv_type == DEST_REG)
9775 fprintf (file, "Giv %d: insn %d",
9776 REGNO (v->dest_reg), INSN_UID (v->insn));
9777 else
9778 fprintf (file, "Dest address: insn %d",
9779 INSN_UID (v->insn));
9781 fprintf (file, " src reg %d benefit %d",
9782 REGNO (v->src_reg), v->benefit);
9783 fprintf (file, " lifetime %d",
9784 v->lifetime);
9786 if (v->replaceable)
9787 fprintf (file, " replaceable");
9789 if (v->no_const_addval)
9790 fprintf (file, " ncav");
9792 if (v->ext_dependant)
9794 switch (GET_CODE (v->ext_dependant))
9796 case SIGN_EXTEND:
9797 fprintf (file, " ext se");
9798 break;
9799 case ZERO_EXTEND:
9800 fprintf (file, " ext ze");
9801 break;
9802 case TRUNCATE:
9803 fprintf (file, " ext tr");
9804 break;
9805 default:
9806 abort ();
9810 fputc ('\n', file);
9811 fprintf (file, " mult ");
9812 print_simple_rtl (file, v->mult_val);
9814 fputc ('\n', file);
9815 fprintf (file, " add ");
9816 print_simple_rtl (file, v->add_val);
9818 if (verbose && v->final_value)
9820 fputc ('\n', file);
9821 fprintf (file, " final ");
9822 print_simple_rtl (file, v->final_value);
9825 fputc ('\n', file);
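/* Hypothetical sample output (numbers invented; exact spacing follows
   the fprintf formats above):

       Giv 63: insn 36 src reg 60 benefit 4 lifetime 2 replaceable
        mult (const_int 1)
        add (const_int 8)  */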
9829 void
9830 debug_ivs (loop)
9831 const struct loop *loop;
9833 loop_ivs_dump (loop, stderr, 1);
9837 void
9838 debug_iv_class (bl)
9839 const struct iv_class *bl;
9841 loop_iv_class_dump (bl, stderr, 1);
9845 void
9846 debug_biv (v)
9847 const struct induction *v;
9849 loop_biv_dump (v, stderr, 1);
9853 void
9854 debug_giv (v)
9855 const struct induction *v;
9857 loop_giv_dump (v, stderr, 1);
9861 #define LOOP_BLOCK_NUM_1(INSN) \
9862 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
9864 /* The notes do not have an assigned block, so look at the next insn. */
9865 #define LOOP_BLOCK_NUM(INSN) \
9866 ((INSN) ? (GET_CODE (INSN) == NOTE \
9867 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
9868 : LOOP_BLOCK_NUM_1 (INSN)) \
9869 : -1)
9871 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
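/* For example, applied to a NOTE_INSN_LOOP_BEG note, LOOP_BLOCK_NUM
   yields the block number of the next real insn while LOOP_INSN_UID
   yields the note's own uid; both yield -1 for a null INSN.  */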
9873 static void
9874 loop_dump_aux (loop, file, verbose)
9875 const struct loop *loop;
9876 FILE *file;
9877 int verbose ATTRIBUTE_UNUSED;
9879 rtx label;
9881 if (! loop || ! file)
9882 return;
9884 /* Print diagnostics to compare our concept of a loop with
9885 what the loop notes say. */
9886 if (! PREV_INSN (loop->first->head)
9887 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
9888 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
9889 != NOTE_INSN_LOOP_BEG)
9890 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
9891 LOOP_INSN_UID (PREV_INSN (loop->first->head)));
9892 if (! NEXT_INSN (loop->last->end)
9893 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
9894 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
9895 != NOTE_INSN_LOOP_END)
9896 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
9897 LOOP_INSN_UID (NEXT_INSN (loop->last->end)));
9899 if (loop->start)
9901 fprintf (file,
9902 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
9903 LOOP_BLOCK_NUM (loop->start),
9904 LOOP_INSN_UID (loop->start),
9905 LOOP_BLOCK_NUM (loop->cont),
9906 LOOP_INSN_UID (loop->cont),
9907 LOOP_BLOCK_NUM (loop->cont),
9908 LOOP_INSN_UID (loop->cont),
9909 LOOP_BLOCK_NUM (loop->vtop),
9910 LOOP_INSN_UID (loop->vtop),
9911 LOOP_BLOCK_NUM (loop->end),
9912 LOOP_INSN_UID (loop->end));
9913 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
9914 LOOP_BLOCK_NUM (loop->top),
9915 LOOP_INSN_UID (loop->top),
9916 LOOP_BLOCK_NUM (loop->scan_start),
9917 LOOP_INSN_UID (loop->scan_start));
9918 fprintf (file, ";; exit_count %d", loop->exit_count);
9919 if (loop->exit_count)
9921 fputs (", labels:", file);
9922 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
9924 fprintf (file, " %d ",
9925 LOOP_INSN_UID (XEXP (label, 0)));
9928 fputs ("\n", file);
9930 /* This can happen when a marked loop appears as two nested loops,
9931 say from while (a || b) {}. The inner loop won't match
9932 the loop markers but the outer one will. */
9933 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
9934 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
9938 /* Call this function from the debugger to dump LOOP. */
9940 void
9941 debug_loop (loop)
9942 const struct loop *loop;
9944 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
9947 /* Call this function from the debugger to dump LOOPS. */
9949 void
9950 debug_loops (loops)
9951 const struct loops *loops;
9953 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
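/* Usage sketch: from the debugger, e.g.

       (gdb) call debug_loop (loop)
       (gdb) call debug_loops (loops)

   where `loop' and `loops' are whatever pointers are in scope.  */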